Example #1
from numpy.random import RandomState
from shapely.geometry import Point


def generate_random_points_in_polygon(poly, num_points, seed=None):
	"""
	Create a list of randomly generated points within a polygon.

	Parameters
	----------
	poly : Polygon
	num_points : int
		The number of random points to create within the polygon
	seed : int, optional
		A random seed

	Returns
	-------
	list of Point
	"""
	min_x, min_y, max_x, max_y = poly.bounds
	points = []
	i = 0
	while len(points) < num_points:
		# Re-seed with an offset per draw so a fixed seed (including 0)
		# yields a different candidate point on every iteration.
		s = RandomState(seed + i) if seed is not None else RandomState()
		random_point = Point([s.uniform(min_x, max_x), s.uniform(min_y, max_y)])
		if random_point.within(poly):
			points.append(random_point)
		i += 1
	return points
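Example #2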
def randomRotations(D, rng=None):
    from math import pi, sin, cos, sqrt
    from numpy.random import RandomState
    from itertools import product as cartesian, repeat

    if not isinstance(rng, RandomState):
        rng = RandomState(rng)

    if D == 2:
        while True:
            yield (rng.uniform(-pi, pi),)
    elif D == 3:
        # Ken Shoemake
        # Graphics Gems III, pp 124-132
        from .quaternion import Quaternion
        while True:
            X = rng.uniform(0, 1)
            theta = rng.uniform(0, 2*pi), rng.uniform(0, 2*pi)
            R = (sqrt(1-X), sqrt(X))
            yield Quaternion(sin(theta[0]) * R[0], cos(theta[0]) * R[0],
                             sin(theta[1]) * R[1], cos(theta[1]) * R[1]).axis_angle
    else:
        raise NotImplementedError("Only defined for D in [2..3], not {}"
                                  .format(D))
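For reference, a minimal sketch of drawing from the infinite generator above, shown for the 2-D case only since the 3-D branch depends on the package's own Quaternion class:

from itertools import islice

# take five uniformly distributed 2-D rotation angles; rng=42 seeds a RandomState
angles = list(islice(randomRotations(2, rng=42), 5))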
Example #3
class TestHandModel(unittest.TestCase):
    """Test kinematics module."""
    def setUp(self):
        self.random = RandomState(7)

    def test_constructor(self):
        model_type_list = [(HandModel20, 20), (HandModel45, 45)]

        for model_type, dofs_number in model_type_list:
            hand_model = model_type()
            self.assertEqual(hand_model.dofs_number, dofs_number)
            self.assertEqual(len(hand_model.dofs_limits[0]), dofs_number)
            self.assertEqual(len(hand_model.dofs_limits[1]), dofs_number)
            self.assertEqual(len(hand_model.joints), len(hand_model.origins()))

    def test_conversions(self):
        """Test mano pose <-> joint angles conversion."""
        model_type_list = [HandModel20, HandModel45]

        for model_type, left_hand in itertools.product(model_type_list,
                                                       (False, True)):
            with self.subTest(f"{model_type.__name__}(left_hand={left_hand})"):
                hand_model = model_type(left_hand)

                ang1 = self.random.uniform(*hand_model.dofs_limits)
                mat1 = quat2mat(self.random.uniform(-1, 1, size=(4, )))
                pose1 = hand_model.angles_to_mano(ang1, mat1)
                ang2, mat2 = hand_model.mano_to_angles(pose1)
                np.testing.assert_almost_equal(mat1, mat2)
                np.testing.assert_almost_equal(ang1, ang2)

                pose2 = hand_model.angles_to_mano(ang2, mat2)
                np.testing.assert_almost_equal(pose1, pose2)
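Example #4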
def test_coupled_oscillators(num_experiments):
    from dyadic_interaction.dynamical_systems import spring_mass_system
    transfer_entropy = []
    norm_entropy = []
    rs = RandomState(0)
    for _ in range(num_experiments):
        spring_data = spring_mass_system(masses=rs.uniform(1.0, 10.0, 2),
                                         constants=rs.uniform(1.0, 50.0, 2),
                                         lengths=rs.uniform(0.1, 5.0, 2))
        pos = np.column_stack((spring_data[:, 0], spring_data[:, 2]))
        # transfer_entropy, local_te = get_transfer_entropy(pos, local=True)
        norm_entropy.append(
            get_shannon_entropy_2d(pos, min_v=pos.min(), max_v=pos.max()))
        transfer_entropy.append(
            get_transfer_entropy(pos, min_v=pos.min(), max_v=pos.max()))

        print("Transfer Entropy of spring positions: {}".format(
            transfer_entropy))
        print("Shannon Entropy of spring positions: {}".format(norm_entropy))
        # plt.plot(pos)
        # plt.show()
        # vel = np.column_stack((spring_data[:, 1], spring_data[:, 3]))
        # transfer_entropy = get_transfer_entropy(vel, log=True)
        # norm_entropy = get_shannon_entropy_2d(vel)
        # print("Transfer Entropy of spring velocities: {}".format(transfer_entropy))
        # print("Shannon Entropy of spring velocities: {}".format(norm_entropy))
        # plt.plot(vel)
        # plt.show()
        # plt.plot(local_te[0])
        # plt.plot(local_te[1])
        # plt.show()
    return norm_entropy, transfer_entropy
Example #6
class WeightGenerationRegime:
    def __init__(self, nStates, nBivariateFeat, prng=None, seed=None, stationaryWeights=None, bivariateWeights=None):
        self.nStates = nStates
        self.nBivariateFeat = nBivariateFeat
        if prng is not None:
            self.prng = prng
            if seed is not None:
                warnings.warn("both prng and seed are provided but we use the provided prng", DeprecationWarning)
        elif seed is not None:
            self.prng = RandomState(seed)
        else:
            # fall back to a fixed default seed so runs are reproducible
            self.prng = RandomState(1234567890)

        self.stationaryWeights = stationaryWeights
        self.bivariateWeights = bivariateWeights


    def generateStationaryWeightsFromNormal(self):
        self.stationaryWeights = self.prng.normal(0, 1, self.nStates)
        return self.stationaryWeights

    def generateBivariateWeightsFromNormal(self):
        self.bivariateWeights = self.prng.normal(0, 1, self.nBivariateFeat)
        return self.bivariateWeights

    def generateStationaryWeightsFromUniform(self):
        self.stationaryWeights = self.prng.uniform(0, 1, self.nStates)
        return self.stationaryWeights

    def generateBivariateWeightsFromUniform(self):
        self.bivariateWeights = self.prng.uniform(0, 1, self.nBivariateFeat)
        return self.bivariateWeights
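A minimal usage sketch for the class above, assuming `RandomState` and `warnings` are imported as in the snippet; the state and feature counts are arbitrary illustrative values:

regime = WeightGenerationRegime(nStates=4, nBivariateFeat=6, seed=42)
stationary = regime.generateStationaryWeightsFromNormal()  # array of 4 weights
bivariate = regime.generateBivariateWeightsFromUniform()   # array of 6 weights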
Example #7
    def __call__(self, shape, dtype=None):

        if self.nb_filters is not None:
            kernel_shape = tuple(self.kernel_size) + (int(self.input_dim), self.nb_filters)
        else:
            kernel_shape = (int(self.input_dim), self.kernel_size[-1])

        # compute fans from the actual kernel shape so this also works
        # when nb_filters is None
        fan_in, fan_out = initializers._compute_fans(kernel_shape)

        if self.criterion == 'glorot':
            s = 1. / (fan_in + fan_out)
        elif self.criterion == 'he':
            s = 1. / fan_in
        else:
            raise ValueError('Invalid criterion: ' + self.criterion)
        rng = RandomState(1337)

        modulus = rng.uniform(low=-np.sqrt(s)*np.sqrt(3), high=np.sqrt(s)*np.sqrt(3), size=kernel_shape)
        
        phase = rng.uniform(low=-np.pi/2, high=np.pi/2, size=kernel_shape)

        wm = modulus
        wp = phase
        weight = np.concatenate([wp, wm], axis=-1)

        return weight
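Example #8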
def quaternion_init(in_features, out_features, rng, criterion='glorot'):

    if criterion == 'glorot':
        s = 1. / np.sqrt(2 * (in_features + out_features))
    elif criterion == 'he':
        s = 1. / np.sqrt(2 * in_features)
    else:
        raise ValueError('Invalid criterion: ' + criterion)
    # Use the rng supplied by the caller instead of re-seeding here.

    # Generating random purely imaginary quaternions:
    kernel_shape = (in_features, out_features)
    number_of_weights = np.prod(kernel_shape)
    v_i = rng.uniform(0.0, 1.0, number_of_weights)
    v_j = rng.uniform(0.0, 1.0, number_of_weights)
    v_k = rng.uniform(0.0, 1.0, number_of_weights)
    # Make these purely imaginary quaternions unitary (vectorized)
    norm = np.sqrt(v_i**2 + v_j**2 + v_k**2) + 0.0001
    v_i /= norm
    v_j /= norm
    v_k /= norm
    v_i = v_i.reshape(kernel_shape)
    v_j = v_j.reshape(kernel_shape)
    v_k = v_k.reshape(kernel_shape)

    modulus = rng.uniform(low=-s, high=s, size=kernel_shape)
    phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)

    weight_r = modulus * np.cos(phase)
    weight_i = modulus * v_i * np.sin(phase)
    weight_j = modulus * v_j * np.sin(phase)
    weight_k = modulus * v_k * np.sin(phase)

    return (weight_r, weight_i, weight_j, weight_k)
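A quick sketch of calling the initializer above; the layer sizes are placeholders:

from numpy.random import RandomState

w_r, w_i, w_j, w_k = quaternion_init(64, 32, rng=RandomState(0))
# each of the four returned components has shape (64, 32)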
Example #9
def quaternion_init(in_features,
                    out_features,
                    rng,
                    kernel_size=None,
                    criterion='glorot'):

    if kernel_size is not None:
        receptive_field = np.prod(kernel_size)
        fan_in = in_features * receptive_field
        fan_out = out_features * receptive_field
    else:
        fan_in = in_features
        fan_out = out_features

    if criterion == 'glorot':
        s = 1. / np.sqrt(2 * (fan_in + fan_out))
    elif criterion == 'he':
        s = 1. / np.sqrt(2 * fan_in)
    else:
        raise ValueError('Invalid criterion: ' + criterion)
    # use the rng provided by the caller rather than re-seeding here

    # Generating randoms and purely imaginary quaternions :
    if kernel_size is None:
        kernel_shape = (in_features, out_features)
    else:
        if type(kernel_size) is int:
            kernel_shape = (out_features, in_features) + tuple((kernel_size, ))
        else:
            kernel_shape = (out_features, in_features) + (*kernel_size, )

    number_of_weights = np.prod(kernel_shape)
    v_i = rng.uniform(0.0, 1.0, number_of_weights)
    v_j = rng.uniform(0.0, 1.0, number_of_weights)
    v_k = rng.uniform(0.0, 1.0, number_of_weights)

    # Normalize the purely imaginary quaternions to unit length (vectorized)
    norm = np.sqrt(v_i**2 + v_j**2 + v_k**2) + 0.0001
    v_i /= norm
    v_j /= norm
    v_k /= norm
    v_i = v_i.reshape(kernel_shape)
    v_j = v_j.reshape(kernel_shape)
    v_k = v_k.reshape(kernel_shape)

    modulus = rng.uniform(low=-s, high=s, size=kernel_shape)
    phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)

    weight_r = modulus * np.cos(phase)
    weight_i = modulus * v_i * np.sin(phase)
    weight_j = modulus * v_j * np.sin(phase)
    weight_k = modulus * v_k * np.sin(phase)

    return (weight_r, weight_i, weight_j, weight_k)
Example #10
class FitRNNv2(RNNv2, FitModel):
    def run(self):
        self.random = RandomState(self.random_seed)

        # Split the orders into training and validation sets and write them to separate files
        orders_path = self.requires()['orders'].output().path
        training_fd = tempfile.NamedTemporaryFile(mode='w+', delete=True)
        validation_fd = tempfile.NamedTemporaryFile(mode='w+', delete=True)
        with open(orders_path) as input_fd:
            for line in input_fd:
                if (self.global_orders_ratio >= 1 or
                        self.random.uniform() <= self.global_orders_ratio):
                    if self.random.uniform() <= self.validation_orders_ratio:
                        validation_fd.write(line)
                    else:
                        training_fd.write(line)
        validation_fd.flush()
        training_fd.flush()

        _, validation_inputs, validation_predictions = self._load_data(
            validation_fd.name)
        training_generator, training_steps_per_epoch = \
            self._create_data_generator(training_fd.name, self.users_per_batch, max_prior_orders=self.max_prior_orders)

        model = self._build_model()
        model.summary()

        callbacks = [
            EarlyStopping(monitor='val_loss', patience=10),
            ModelCheckpoint(os.path.abspath(self.output().path),
                            verbose=1,
                            save_weights_only=True,
                            save_best_only=True),
        ]
        if self.mode == 'evaluation':
            log_dir = os.path.join(OUTPUT_DIR, 'tensorboard', self.mode,
                                   self.model_name)
            callbacks.append(
                TensorBoard(log_dir=log_dir,
                            histogram_freq=5,
                            write_graph=False))

        model.fit_generator(training_generator,
                            training_steps_per_epoch,
                            validation_data=(validation_inputs,
                                             validation_predictions),
                            epochs=self.epochs,
                            verbose=2,
                            callbacks=callbacks)

        validation_fd.close()
        training_fd.close()
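Example #11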
    def __init__(self, knapsack_capacity, total_posible_elements, seed, max_cost, max_value, elements=None):
        self.knapsack_capacity = knapsack_capacity
        if elements is not None and isinstance(elements, list):
            self.elements = elements
            self.total_posible_elements = len(elements)
        else:
            self.total_posible_elements = total_posible_elements
            rnd = RandomState(seed)
            # list of (value, cost) tuples
            self.elements = []
            # fill elements with random values and costs
            for _ in range(total_posible_elements):
                self.elements.append((rnd.uniform(0, max_value), rnd.uniform(0, max_cost)))
Example #12
class ParticleBetaModelSource(object):
    def __init__(self):

        self.prng = RandomState(24)
        self.kT = kT
        self.Z = Z

        num_particles = 1000000

        rr = np.linspace(0.0, R, 10000)
        # This formula assumes beta = 2/3
        M_r = 4 * np.pi * rho_c * r_c * r_c * (rr - r_c * np.arctan(rr / r_c))
        M_r *= cm_per_mpc**3

        pmass = M_r[-1] * np.ones(num_particles) / num_particles
        M_r /= M_r[-1]
        u = self.prng.uniform(size=num_particles)

        radius = np.interp(u, M_r, rr, left=0.0, right=1.0)
        dens = rho_c * (1. + (radius / r_c)**2)**(-1.5 * beta)
        radius /= (2. * R)
        theta = np.arccos(
            self.prng.uniform(low=-1., high=1., size=num_particles))
        phi = 2. * np.pi * self.prng.uniform(size=num_particles)

        temp = self.kT * K_per_keV * np.ones(num_particles)
        velz = self.prng.normal(loc=v_shift, scale=v_width, size=num_particles)

        bbox = np.array([[-0.5, 0.5], [-0.5, 0.5], [-0.5, 0.5]])

        data = {}
        data["io", "density"] = (dens, "g/cm**3")
        data["io", "temperature"] = (temp, "K")
        data["io",
             "particle_position_x"] = (radius * np.sin(theta) * np.cos(phi),
                                       "code_length")
        data["io",
             "particle_position_y"] = (radius * np.sin(theta) * np.sin(phi),
                                       "code_length")
        data["io",
             "particle_position_z"] = (radius * np.cos(theta), "code_length")
        data["io", "particle_velocity_x"] = (np.zeros(num_particles), "cm/s")
        data["io", "particle_velocity_y"] = (np.zeros(num_particles), "cm/s")
        data["io", "particle_velocity_z"] = (velz, "cm/s")
        data["io", "particle_mass"] = (pmass, "g")
        data["io",
             "smoothing_length"] = (0.02 / (2. * R) * np.ones(num_particles),
                                    "code_length")

        self.ds = load_particles(data, length_unit=(2 * R, "Mpc"), bbox=bbox)
Example #13
    def solve(self, optimization_function):
        rand = RandomState(self.seed)

        X = optimization_function.X
        n_dim = X.shape[1] - 1

        candidates = {}

        no_improvements = 0
        best_value = np.inf

        f = 1
        while no_improvements < 50:
            if len(candidates) == 0:
                # first run
                for i in range(self.n_scan):
                    candidate = rand.uniform(0, 1, n_dim)
                    value = optimization_function.compute(candidate)
                    candidates[value] = candidate
            else:
                # evolution
                vectors = list(candidates.values())
                ranges = [
                    np.max([v[i] for v in vectors]) -
                    np.min([v[i] for v in vectors]) for i in range(n_dim)
                ]

                values_sorted = sorted(candidates.keys())
                best_value = values_sorted[0]
                for i in range(self.n_keep):
                    i_candidate = i * len(values_sorted) // self.n_keep
                    candidate = candidates[values_sorted[i_candidate]]
                    # perturbation = ranges * rand.uniform(-1, 1, len(ranges))
                    perturbation = f * rand.uniform(-1, 1, len(ranges))
                    new_candidate = candidate + perturbation
                    new_candidate = np.clip(new_candidate, 0, 1)
                    value = optimization_function.compute(new_candidate)
                    candidates[value] = new_candidate

                f *= self.spread_factor

            # only keep the best candidates
            values_sorted = sorted(candidates.keys())
            values_sorted = values_sorted[:self.n_keep]
            if values_sorted[0] < best_value:
                no_improvements = 0
            else:
                no_improvements += 1

            candidates = {v: candidates[v] for v in values_sorted}
Example #14
def gen_random_points_poly(poly, num_points, seed=None):
    """
    Return a list of N randomly generated points within a polygon.
    """
    min_x, min_y, max_x, max_y = poly.bounds
    points = []
    i = 0
    while len(points) < num_points:
        # offset the seed each iteration so a fixed seed cannot loop forever
        s = RandomState(seed + i) if seed is not None else RandomState()
        random_point = Point([s.uniform(min_x, max_x), s.uniform(min_y, max_y)])
        if random_point.within(poly):
            points.append(random_point)
        i += 1
    return points
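A minimal sketch of calling the rejection sampler above, assuming shapely's Point/Polygon and numpy's RandomState are imported as in the surrounding snippets:

from shapely.geometry import Polygon

triangle = Polygon([(0, 0), (1, 0), (0.5, 1)])
pts = gen_random_points_poly(triangle, num_points=10, seed=7)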
Example #15
def sample_group_counts(
    random_state: RandomState, total: int, lam_low: float = 1.0, lam_high: float = 8.0
) -> List[int]:
    """
    Sample a list of integers which sum up to `total`.
    The probability of sampling an integer follows exponential decay, k ~ np.exp(-k * lam),
    where lam is a hyperparam sampled from a range [lam_low, lam_high).

    :param random_state: numpy random state
    :param total: the expected sum of sampled numbers.
    :param lam_low: lower bound for lambda in exponential decay.
    :param lam_high: higher bound for lambda in exponential decay.
    :return:
    """
    current_max = total
    counts = []
    while current_max > 0:
        candidates = range(1, current_max + 1)
        lam = random_state.uniform(lam_low, lam_high)
        probs = np.array([np.exp(-i * lam) for i in candidates])
        probs /= sum(probs)
        selected = random_state.choice(candidates, p=probs)
        counts.append(selected)
        current_max -= selected

    assert sum(counts) == total
    return counts
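For instance, a sketch of a call (the exact partition depends on the random state):

from numpy.random import RandomState

counts = sample_group_counts(RandomState(0), total=10)
assert sum(counts) == 10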
Example #16
def transpose_characters(token, index_to_char, n=1, char_pool=None, seed=17):
    if isinstance(seed, RandomState):
        rng = seed
    else:
        rng = RandomState(seed)

    chars = set(token)
    if len(chars) == 1:
        return token

    new_token = token
    for i in six.moves.range(n):
        idx = max(1, rng.randint(len(new_token)))
        neighbor = 0
        if idx == 0:
            neighbor = 1
        elif idx == len(new_token) - 1:
            neighbor = len(new_token) - 2
        else:
            if rng.uniform() > 0.5:
                neighbor = idx + 1
            else:
                neighbor = idx - 1
        left = min(idx, neighbor) 
        right = max(idx, neighbor)
        new_token = six.text_type(new_token[0:left] + new_token[right] + new_token[left] + new_token[right+1:])
    return new_token
Example #17
    def get_weight(self, date_time, how, error_magnitude=0., error_type=None):
        if self.tv_type == 'day:hour':
            if how == 'linear':
                day = date_time.date()
                m = (self.data[day][date_time.hour + 1] -
                     self.data[day][date_time.hour])
                w = self.data[day][date_time.hour] + m * (date_time.minute /
                                                          60)
            elif how == 'last':
                w = self.data[date_time.date()][date_time.hour]
            elif how == 'next':
                w = self.data[date_time.date()][date_time.hour + 1]
            else:
                raise AttributeError("Invalid input for argument 'how'.")
        elif self.tv_type == 'day:schedule':
            w = min([
                t - date_time.time() for t in self.data[date_time.isoweekday()]
                if t > date_time.time()
            ])
        elif self.tv_type is None:
            w = self.data
        else:
            raise TypeError("Time variance type corrupted.")

        # error_magnitude is expected to be a (loc/low, scale/high) pair
        # whenever error_type is given
        if error_type is None:
            pass
        elif error_type.lower() in ['normal', 'gaussian']:
            w += RandomState().normal(loc=error_magnitude[0],
                                      scale=error_magnitude[1])
        elif error_type.lower() == 'uniform':
            w += RandomState().uniform(low=error_magnitude[0],
                                       high=error_magnitude[1])

        return w
Example #18
def transform(X_input):
    """Calculate the random fourier features for an Gaussian kernel

    Keyword arguments:
    X_input - the input matrix X_input which will be transformed
    """

    # Parameters
    m = 3000
    gamma = 60

    # Alias the input (it is not mutated below, so no copy is needed)
    X = X_input

    # Get the dimensions
    d = 0
    if len(X.shape)<=1:
        d = len(X)
    else:
        d = X.shape[1]

    # Draw iid m samples omega from p and b from [0,2pi]
    random_state = RandomState(124)
    omega = np.sqrt(2.0 * gamma) * random_state.normal(size=(d, m))
    b = random_state.uniform(0, 2 * np.pi, size=m)

    # Transform the input
    projection = np.dot(X, omega) + b
    Z = np.sqrt(2.0/m) * np.cos(projection)

    return Z
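As a quick check of what the transform above gives you: with omega drawn from N(0, 2*gamma*I) and the cosine map, inner products of transformed rows approximate the Gaussian kernel exp(-gamma * ||x - y||^2). A short sketch (m and gamma are hard-coded inside transform):

import numpy as np
from numpy.random import RandomState

X = RandomState(0).normal(size=(5, 10))
Z = transform(X)          # shape (5, 3000)
K_approx = Z.dot(Z.T)     # approximates the 5x5 Gaussian kernel matrix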
Example #19
def test_mutual_information(seed):
    num_data_points = 500

    rs = RandomState(seed)
    mi_random_uniform = compute_mutual_information(
        rs.uniform(-1, 1, size=(2, num_data_points)))

    rs = RandomState(seed)
    mi_random_gaussian = compute_mutual_information(
        rs.normal(size=(2, num_data_points)))

    rs = RandomState(seed)
    mi_constant = compute_mutual_information(
        np.ones((2, num_data_points)) +
        rs.normal(0, 1e-15, size=(2, num_data_points)))

    rs = RandomState(seed)
    mi_correlated = compute_mutual_information(
        generate_correlated_data(num_data_points, cov=0.9, rs=rs))
    print('MI random uniform:', mi_random_uniform)
    print('MI random gaussian:', mi_random_gaussian)
    print('MI constant:', mi_constant)
    print('MI correlated:', mi_correlated)
Example #20
    def __call__(self, shape, dtype = None):
        if self.flattened:
            # Dense
            num_rows = np.prod(shape)
            num_cols = 1
            fan_in = np.prod(shape[:-1])
        else:
            # Conv
            num_rows = np.prod(shape[-2:])
            num_cols = np.prod(shape[:-2])
            fan_in = shape[-2]
        fan_out = shape[-1]

        flat_shape = (num_rows, num_cols)
        rng = RandomState(self.seed)
        x = rng.uniform(size = flat_shape)
        u, _, v = np.linalg.svd(x)
        orthogonal_x = np.dot(u, np.dot(np.eye(*flat_shape), v.T))
        independent_filters = np.reshape(orthogonal_x, shape)

        if self.criterion == 'glorot':
            desired_var = 2. / (fan_in + fan_out)
        elif self.criterion == 'he':
            desired_var = 2. / fan_in
        else:
            raise ValueError('Invalid criterion: ' + self.criterion)

        multip_constant = np.sqrt(desired_var / np.var(independent_filters))
        weight = multip_constant * independent_filters
        return weight
Example #21
class RandomGenerator(object):
    def __init__(self, seed=None):
        self._random = RandomState(seed=seed)

    def seed(self, seed):
        self._random.seed(seed)

    def random(self):
        return self._random.rand()

    def randint(self, a, b=None):
        if b is None:
            b = a
            a = 0
        r = self._random.randint(a, high=b, size=1)
        return r[0]

    def sample(self, population, k):
        if k == 0:
            return []
        return list(self._random.choice(population, size=k, replace=False))

    def __getattr__(self, attr):
        return getattr(self._random, attr)

    def __getstate__(self):
        return {'_random': self._random}

    def __setstate__(self, d):
        self._random = d['_random']

    def uniform(self, low=0.0, high=1.0, size=None):
        return self._random.uniform(low, high, size)
Example #22
class FitRNNv1(RNNv1, FitModel):

    def _build_model(self):
        product = Input(shape=(1, ))
        product_embedded = Embedding(input_dim=self.num_products + 1, output_dim=self.embedding_dim, input_length=1)(product)
        product_embedded = Reshape(target_shape=(self.embedding_dim, ))(product_embedded)
        product_embedding = Model(inputs=product, outputs=product_embedded, name='product_embedding')

        user_input = Input(shape=(self.max_products_per_user, 1), name='user_input')
        user_embedded = Masking(name='masking')(user_input)
        user_embedded = TimeDistributed(product_embedding, name='user_embedded')(user_embedded)
        user_embedded = Bidirectional(LSTM(self.embedding_dim), merge_mode='concat', name='lstm')(user_embedded)
        user_embedded = Dense(self.embedding_dim, activation='relu', name='hidden')(user_embedded)

        product_input = Input(shape=(1, ), name='product_input')
        product_embedded = product_embedding(product_input)

        score = dot([user_embedded, product_embedded], axes=[-1, -1], name='score')

        model = Model(inputs=[product_input, user_input], outputs=score)

        model.compile(loss=hinge_loss, optimizer='adam')

        return model

    def run(self):
        self.random = RandomState(self.random_seed)

        orders_path = self.requires()['orders'].output().path
        with tempfile.NamedTemporaryFile(mode='w+', delete=True) as training_fd:
            with tempfile.NamedTemporaryFile(mode='w+', delete=True) as validation_fd:
                # Split the orders file into training and validation
                with open(orders_path) as input_fd:
                    for line in input_fd:
                        if self.random.uniform() <= 0.1:
                            validation_fd.write(line)
                        else:
                            training_fd.write(line)
                validation_fd.flush()
                training_fd.flush()
                # Fit the model
                training_generator, training_steps_per_epoch = \
                    self.create_generator(training_fd.name, users_per_batch=self.users_per_batch,
                                          shuffle=True, include_prior_orders=True)
                validation_generator, validation_steps_per_epoch = \
                    self.create_generator(validation_fd.name, users_per_batch=self.users_per_batch,
                                          shuffle=False, include_prior_orders=False)

                model = self._build_model()
                model.summary()

                callbacks = [
                    EarlyStopping(monitor='val_loss', patience=10),
                    ModelCheckpoint(self.output().path, verbose=1, save_best_only=True),
                    TensorBoard(log_dir=os.path.join(OUTPUT_DIR, 'tensorboard', self.model_name), write_graph=False),
                ]
                model.fit_generator(training_generator, training_steps_per_epoch,
                                    validation_data=validation_generator,
                                    validation_steps=validation_steps_per_epoch,
                                    epochs=1000, verbose=1, callbacks=callbacks)
Example #23
def edge_grab(
    two_qubit_gate_prob: float,
    connectivity_graph: nx.Graph,
    random_state: random.RandomState,
) -> nx.Graph:
    """Returns a set of edges for which two qubit gates
    are to be applied given a two qubit gate density
    and the connectivity graph that must be satisfied.

    Args:
        two_qubit_gate_prob: Probability of an edge being chosen
            from the set of candidate edges.
        connectivity_graph: The connectivity graph for the backend
            on which the circuit will be run.
        random_state: Random state to select edges (uniformly at random).
    """
    connectivity_graph = connectivity_graph.copy()
    candidate_edges = nx.Graph()

    final_edges = nx.Graph()
    final_edges.add_nodes_from(connectivity_graph)

    while connectivity_graph.edges:
        num = random_state.randint(connectivity_graph.size())
        edges = list(connectivity_graph.edges)
        curr_edge = edges[num]
        candidate_edges.add_edge(*curr_edge)
        connectivity_graph.remove_nodes_from(curr_edge)

    for edge in candidate_edges.edges:
        if random_state.uniform(0.0, 1.0) < two_qubit_gate_prob:
            final_edges.add_edge(*edge)
    return final_edges
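Example #24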
def generate_rand_obs(nExp, nObs, domainBounds, sigvar=None, rng=None):
    # create random sets of observations for each experiment
    # (npa, empty and xrange come from the source module: numpy.array,
    # numpy.empty and the py2 range)
    if sigvar is None: sigvar = 1.
    if rng is None: rng = RandomState()

    domainBounds = npa(domainBounds)
    dimX = domainBounds.shape[0]
    minX = domainBounds[:, 0]
    maxX = domainBounds[:, 1]
    rangeX = maxX - minX
    xObs = rng.uniform(size=(nExp, nObs, dimX))
    xObs *= rangeX
    xObs += minX
    yObs = empty(shape=(nExp, nObs))
    for iexp in xrange(nExp):
        good = False
        while not good:
            yObs0 = rng.normal(size=(nObs))
            if yObs0.max() > 0: good = True
        yObs[iexp, :] = yObs0
    # yObs = rng.normal(size=(nExp, nObs, 1))
    yObs *= sigvar

    return {'x': xObs,
            'y': yObs}
Example #25
def update_metropolis(field: np.ndarray, states: States, free_energy: float, interaction: Interaction,
                      interaction_coefficient: float, magnetization_coefficient: float, temperature: float,
                      random_state: RandomState) -> (np.ndarray, float):
    assert states
    assert field.shape[0] == field.shape[1]

    size = field.shape[0]
    min_x = 0 if FIX_LEFT is None else 1
    max_x = size if FIX_RIGHT is None else size - 1
    min_y = 0 if FIX_BOTTOM is None else 1
    max_y = size if FIX_TOP is None else size - 1
    random_x = random_state.randint(min_x, max_x)  # dim
    random_y = random_state.randint(min_y, max_y)

    new_spin = field[random_x, random_y]
    # spin flip always needs to lead to a change of spin
    while new_spin == field[random_x, random_y]:
        new_spin = random_state.choice(states)

    energy_delta, field_updated = calculate_energy_difference(field, random_x, random_y, new_spin, interaction,
                                                              interaction_coefficient, magnetization_coefficient)
    random_number = random_state.uniform()
    acceptance_probability = np.exp(-1. / temperature * energy_delta)
    print_if_verbose(f'Energy delta: {energy_delta}, random number: {random_number}, '
                     f'acceptance_probability: {acceptance_probability}')
    if energy_delta <= 0 or random_number < acceptance_probability:
        # free_energy_updated = free_energy - energy_delta
        print_if_verbose('Change accepted')
        return field_updated, free_energy - energy_delta
    else:
        print_if_verbose('Not accepted')
        return field, free_energy
Example #26
    def _run(self, op):
        rng = RandomState(self.random_seed)
        with self.output().open('w') as output_fd:
            with self.input().open('r') as input_fd:
                for line in input_fd:
                    if op(rng.uniform(), self.test_size):
                        output_fd.write(line)
Example #27
def rws_test():
    size = 10000
    selection = 1000
    random_state = RandomState()
    probs = random_state.uniform(size=size)
    probs /= sum(probs)

    random_state.seed(5)

    def standard_method():
        t.tic()
        result = []
        cum_probs = np.cumsum(probs)
        for _ in range(selection):
            r = random_state.random()
            for i in range(size):
                if r <= cum_probs[i]:
                    result.append(i)
                    break
        return result

    def numpy_method():
        return random_state.choice(size, size=selection, replace=True, p=probs)

    t = TicToc()
    t.tic()
    result_standard_method = standard_method()
    elp_std = t.tocvalue(restart=True)
    result_numpy_method = numpy_method()
    elp_np = t.tocvalue()
    print('standard: {}'.format(elp_std))
    print('numpy: {}'.format(elp_np))
    print(result_numpy_method)
    print(result_standard_method)
Example #28
class RandomGenerator(object):
    def __init__(self, seed=None):
        self._random = RandomState(seed=seed)

    def seed(self, seed):
        self._random = RandomState(seed=seed)

    def random(self):
        return self._random.rand()

    def randint(self, a, b=None):
        if b is None:
            b = a
            a = 0
        r = self._random.randint(a, high=b, size=1)
        return r[0]

    def sample(self, population, k):
        if k == 0:
            return []
        return list(self._random.choice(population, size=k, replace=False))

    def __getattr__(self, attr):
        return getattr(self._random, attr)

    def __getstate__(self):
        return {'_random': self._random}

    def __setstate__(self, d):
        self._random = d['_random']

    def uniform(self, low=0.0, high=1.0, size=None):
        return self._random.uniform(low, high, size)
Example #29
    def __call__(self, shape, dtype=None):

        if self.nb_filters is not None:
            kernel_shape = shape
            # kernel_shape = tuple(self.kernel_size) + (int(self.input_dim),
            #                      self.nb_filters)
        else:
            kernel_shape = (int(self.input_dim), self.kernel_size[-1])

        fan_in, fan_out = _compute_fans(
            # tuple(self.kernel_size) + (self.input_dim, self.nb_filters)
            kernel_shape
        )

        # fix for ValueError: The initial value's shape (...) is not compatible with the explicitly supplied `shape` argument
        reim_shape = list(kernel_shape)
        reim_shape[-1] //= 2
        reim_shape = tuple(reim_shape)

        if self.criterion == 'glorot':
            s = 1. / (fan_in + fan_out)
        elif self.criterion == 'he':
            s = 1. / fan_in
        else:
            raise ValueError('Invalid criterion: ' + self.criterion)
        rng = RandomState(self.seed)
        modulus = rng.rayleigh(scale=s, size=reim_shape)
        phase = rng.uniform(low=-np.pi, high=np.pi, size=reim_shape)
        weight_real = modulus * np.cos(phase)
        weight_imag = modulus * np.sin(phase)
        weight = np.concatenate([weight_real, weight_imag], axis=-1)

        return weight
Example #30
    def __call__(self, shape, dtype=None):

        if self.nb_filters is not None:
            kernel_shape = tuple(self.kernel_size) + (int(self.input_dim), self.nb_filters)
        else:
            kernel_shape = (int(self.input_dim), self.kernel_size[-1])

        # compute fans from the actual kernel shape so this also works
        # when nb_filters is None
        fan_in, fan_out = initializers._compute_fans(kernel_shape)

        if self.criterion == 'glorot':
            s = 1. / (fan_in + fan_out)
        elif self.criterion == 'he':
            s = 1. / fan_in
        else:
            raise ValueError('Invalid criterion: ' + self.criterion)
        rng = RandomState(self.seed)
        modulus = rng.rayleigh(scale=s, size=kernel_shape)
        phase = rng.uniform(low=-np.pi, high=np.pi, size=kernel_shape)
        weight_real = modulus * np.cos(phase)
        weight_imag = modulus * np.sin(phase)
        weight = np.concatenate([weight_real, weight_imag], axis=-1)

        return weight
Example #31
def test_minp_one_pvalue():
    prng = RandomState(55)
    pvalues = np.array([1])
    distr = prng.uniform(low=0, high=10, size=20).reshape(20, 1)
    npc(pvalues, distr, "fisher", "greater")

# TODO: more fwer_minp tests
Example #32
    def convolution(self):
        # initialize the conv kernels (real and imaginary parts)
        self.kernels_real = []
        self.kernels_imag = []
        for i, filter_size in enumerate(self.filter_sizes):
            with tf.name_scope('conv-pool-%s' % filter_size):
                filter_shape = [filter_size, filter_size, 1, self.num_filters]
                input_dim = 2
                fan_in = np.prod(filter_shape[:-1])
                fan_out = (filter_shape[-1] * np.prod(filter_shape[:2]))
                s = 1. / fan_in
                rng = RandomState(23455)
                modulus = rng.rayleigh(scale=s, size=filter_shape)
                phase = rng.uniform(low=-np.pi, high=np.pi, size=filter_shape)
                W_real = modulus * np.cos(phase)
                W_imag = modulus * np.sin(phase)
                W_real = tf.Variable(W_real, dtype='float32')
                W_imag = tf.Variable(W_imag, dtype='float32')
                self.kernels_real.append(W_real)
                self.kernels_imag.append(W_imag)
                # self.para.append(W_real)
                # self.para.append(W_imag)
        self.num_filters_total = self.num_filters * len(self.filter_sizes)
        self.qa_real = self.narrow_convolution(
            tf.expand_dims(self.M_qa_real, -1),
            self.kernels_real) - self.narrow_convolution(
                tf.expand_dims(self.M_qa_imag, -1), self.kernels_imag)
        print(self.qa_real)
        self.qa_imag = self.narrow_convolution(
            tf.expand_dims(self.M_qa_imag, -1),
            self.kernels_real) + self.narrow_convolution(
                tf.expand_dims(self.M_qa_real, -1), self.kernels_imag)
        print(self.qa_imag)
Example #34
def test_npc_callable_combine():
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=500).reshape(100, 5)
    size = np.array([2, 4, 6, 4, 2])
    combine = lambda p: inverse_n_weight(p, size)
    res = npc(pvalues, distr, combine, "greater")
    np.testing.assert_equal(res, 0.39)
Example #35
def test_npc_callable_combine():
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=500).reshape(100, 5)
    size = np.array([2, 4, 6, 4, 2])
    combine = lambda p: inverse_n_weight(p, size)
    res = npc(pvalues, distr, combine, "greater", plus1=False)
    np.testing.assert_equal(res, 0.39)
Example #36
def test_pixel_to_cel():

    prng = RandomState(24)

    n_evt = 100000

    sky_center = YTArray([30.0, 45.0], "deg")

    rr = YTQuantity(100.0, "kpc")*prng.uniform(size=n_evt)
    theta = 2.0*np.pi*prng.uniform(size=n_evt)
    xx = rr*np.cos(theta)
    yy = rr*np.sin(theta)

    D_A = YTQuantity(100.0, "Mpc")

    d_a = D_A.to("kpc").v

    xx = xx.d / d_a
    yy = yy.d / d_a

    xsky1 = xx.copy()
    ysky1 = yy.copy()

    pixel_to_cel(xsky1, ysky1, sky_center.d)

    xx = np.rad2deg(xx) * 3600.0  # to arcsec
    yy = np.rad2deg(yy) * 3600.0  # to arcsec

    # We set a dummy pixel size of 1 arcsec just to compute a WCS
    dtheta = 1.0 / 3600.0

    wcs = pywcs.WCS(naxis=2)
    wcs.wcs.crpix = [0.0, 0.0]
    wcs.wcs.crval = list(sky_center)
    wcs.wcs.cdelt = [-dtheta, dtheta]
    wcs.wcs.ctype = ["RA---TAN", "DEC--TAN"]
    wcs.wcs.cunit = ["deg"]*2

    xsky2, ysky2 = wcs.wcs_pix2world(xx, yy, 1)

    assert_allclose(xsky1, xsky2)
    assert_allclose(ysky1, ysky2)
Example #37
class ParticleBetaModelSource(object):
    def __init__(self):

        self.prng = RandomState(35)
        self.kT = kT
        self.Z = Z

        num_particles = 1000000

        rr = np.linspace(0.0, R, 10000)
        # This formula assumes beta = 2/3
        M_r = 4*np.pi*rho_c*r_c*r_c*(rr-r_c*np.arctan(rr/r_c))
        M_r *= cm_per_mpc**3

        pmass = M_r[-1]*np.ones(num_particles)/num_particles
        M_r /= M_r[-1]
        u = self.prng.uniform(size=num_particles)

        radius = np.interp(u, M_r, rr, left=0.0, right=1.0)
        dens = rho_c*(1.+(radius/r_c)**2)**(-1.5*beta)
        radius /= (2.*R)
        theta = np.arccos(self.prng.uniform(low=-1.,high=1.,size=num_particles))
        phi = 2.*np.pi*self.prng.uniform(size=num_particles)

        temp = self.kT*K_per_keV*np.ones(num_particles)
        velz = self.prng.normal(loc=v_shift,scale=v_width,size=num_particles)

        bbox = np.array([[-0.5,0.5],[-0.5,0.5],[-0.5,0.5]])

        data = {}
        data["io", "density"] = (dens, "g/cm**3")
        data["io", "temperature"] = (temp, "K")
        data["io", "particle_position_x"] = (radius*np.sin(theta)*np.cos(phi), "code_length")
        data["io", "particle_position_y"] = (radius*np.sin(theta)*np.sin(phi), "code_length")
        data["io", "particle_position_z"] = (radius*np.cos(theta), "code_length")
        data["io", "particle_velocity_x"] = (np.zeros(num_particles), "cm/s")
        data["io", "particle_velocity_y"] = (np.zeros(num_particles), "cm/s")
        data["io", "particle_velocity_z"] = (velz, "cm/s")
        data["io", "particle_mass"] = (pmass, "g")

        self.ds = load_particles(data, length_unit=(2*R, "Mpc"), bbox=bbox)
Example #38
    def _initializeWeights(self, size, fanIn, fanOut, randomNumGen) :
        '''Initialize the weights according to the activation type selected.

           Distributions :
           sigmoid : (-sqrt(6/(fanIn+fanOut))*4, sqrt(6/(fanIn+fanOut))*4)
           tanh    : (-sqrt(6/(fanIn+fanOut)), sqrt(6/(fanIn+fanOut)))
           relu    : (-rand()*sqrt(2/fanIn), rand()*sqrt(2/fanIn))

           size         : Shape of the weight buffer
           fanIn        : Number of neurons in the previous layer
           fanOut       : Number of neurons in the this layer
           randomNumGen : generator for the initial weight values - type is 
                          numpy.random.RandomState
        '''
        import numpy as np
        import theano.tensor as t
        from theano import shared, config

        # create a rng if it's needed
        if randomNumGen is None :
            from numpy.random import RandomState
            from time import time
            randomNumGen = RandomState(int(time()))

        if self._activation == t.nnet.relu :
            scaleFactor = np.sqrt(2. / fanIn)
            initialWeights = np.resize(np.asarray(
                randomNumGen.randn(np.prod(np.array(size))) * scaleFactor, 
                dtype=config.floatX), size)

        elif self._activation == t.nnet.sigmoid or \
             self._activation == t.tanh or \
             self._activation is None :

            scaleFactor = np.sqrt(6. / (fanIn + fanOut))

            # re-adjust for sigmoid
            if self._activation == t.nnet.sigmoid :
                scaleFactor *= 4.

            initialWeights = np.asarray(randomNumGen.uniform(
                low=-scaleFactor, high=scaleFactor, size=size),
                dtype=config.floatX)

        else :
            raise ValueError('Unsupported activation encountered. Add weight-'\
                             'initialization support for this activation type')

        # load the weights into shared variables
        self._weights = shared(value=initialWeights, borrow=True)

        initialThresholds = np.zeros((fanOut,), dtype=config.floatX)
        self._thresholds = shared(value=initialThresholds, borrow=True)
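Example #39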
def add_timecourse(topo_view, y, fixed_points=8, change_magnitude=0.5):
    # topo view should be bc01
    from scipy.interpolate import interp1d
    rng = RandomState(np.uint64(hash('decreasetimecourse')))
    trials = topo_view.shape[0]
    samples = topo_view.shape[2]
    fixed_points_x = np.linspace(0, samples, fixed_points)
    fixed_points_y = [rng.uniform(1 - change_magnitude, 1 + change_magnitude,
                                  fixed_points) for _ in xrange(trials)]
    interp_fun = interp1d(fixed_points_x, fixed_points_y, 'linear')
    timecourses = interp_fun(np.arange(samples))  # shape trials x samples
    timecourses = timecourses[:, np.newaxis, :, np.newaxis]

    topo_view = topo_view * timecourses
    return topo_view
Example #40
    def dowork(self, work_index):
        '''
        This can return anything, but note that it will be binary serialized (pickleable), and you don't want to have more than is required there for reduce
        '''
        import scipy as sp
        from numpy.random import RandomState
        # seed a local random state with work_index xor'd with an arbitrary constant
        randomstate = RandomState(work_index ^ 284882)
        sum = 0.0
        for i in xrange(self.dart_count):
            # throw a dart uniformly into the 2x2 square centered at (1, 1)
            x = randomstate.uniform(0, 2)
            y = randomstate.uniform(0, 2)
            is_in_circle = sp.sqrt((x - 1)**2 + (y - 1)**2) < 1
            if is_in_circle:
                sum += 1
        fraction_in_circle = sum / self.dart_count
        return fraction_in_circle
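The dart experiment above estimates the area ratio of the unit circle inscribed in a 2x2 square, which is pi/4, so the returned fraction yields a Monte Carlo estimate of pi. A standalone sketch of the same idea:

from numpy.random import RandomState

rs = RandomState(0)
darts = 100000
x = rs.uniform(0, 2, darts)
y = rs.uniform(0, 2, darts)
hits = ((x - 1)**2 + (y - 1)**2 < 1).sum()
pi_estimate = 4.0 * hits / darts  # approaches pi as the dart count grows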
Example #41
def test_npc():
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=500).reshape(100, 5)
    res = npc(pvalues, distr, "fisher", "greater")
    np.testing.assert_almost_equal(res, 0.33)
    res = npc(pvalues, distr, "fisher", "less")
    np.testing.assert_almost_equal(res, 0.33)
    res = npc(pvalues, distr, "fisher", "two-sided")
    np.testing.assert_almost_equal(res, 0.31)
    res = npc(pvalues, distr, "liptak", "greater")
    np.testing.assert_almost_equal(res, 0.35)
    res = npc(pvalues, distr, "tippett", "greater")
    np.testing.assert_almost_equal(res, 0.25)
    res = npc(pvalues, distr, "fisher",
              alternatives=np.array(["less", "greater", "less",
                                     "greater", "two-sided"]))
    np.testing.assert_almost_equal(res, 0.38)
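Example #42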
    def perform_optimization(self, training_strategy):

        OutputLog().write('----------------------------------------------------------')
        OutputLog().write('layer_sizes correlations cca_correlations time')

        hyper_parameters = self.hyper_parameters.copy()
        random_rng = RandomState()
        best_correlation = 0


        for layer_number in xrange(int(self.layer_number_start), int(self.layer_number_end + 1)):

            for iteration in xrange(int(self.rounds_number)):

                hyper_parameters.layer_sizes = random_rng.uniform(self.start_value,
                                                                  self.end_value,
                                                                  layer_number)

                hyper_parameters.layer_sizes = [int(round(layer_size)) for layer_size in hyper_parameters.layer_sizes]

                if self.symmetric_layers:
                    for i in xrange(layer_number):
                        hyper_parameters.layer_sizes[(layer_number - 1) - i] = hyper_parameters.layer_sizes[i]

                correlation, execution_time = self.train(training_strategy=training_strategy, hyper_parameters=hyper_parameters)

                if correlation > best_correlation:

                    best_correlation = correlation
                    self.hyper_parameters.layer_sizes = hyper_parameters.layer_sizes

                OutputLog().write('%s, %f, %f\n' % (print_list(hyper_parameters.layer_sizes),
                                                    correlation,
                                                    execution_time))


        OutputLog().write('----------------------------------------------------------')

        return True
Example #43
def generateDegradation(args, seed):
    from numpy.random import RandomState
    from numpy.linalg import norm

    rs = RandomState(seed)

    if args.D == 2:
        rotation = (rs.uniform(*args.rotate),)
    elif args.D == 3:
        angle = rs.uniform(*args.rotate)
        axis = rs.uniform(size=3)
        axis = axis/norm(axis)
        rotation = angle, axis
    translation = rs.uniform(*args.translate, size=args.D)
    scale = rs.uniform(*args.scale)
    if args.drop[0] == args.drop[1]:
        ndrops = args.drop[0]
    else:
        ndrops = rs.randint(*sorted(args.drop))
    drops = rs.choice(range(args.N), size=ndrops, replace=False)
    duplications = rs.choice(range(args.duplicate[0], args.duplicate[1] + 1), size=args.N - ndrops)
    noise = rs.uniform(*args.noise) * rs.randn(sum(duplications), args.D)

    return rotation, translation, scale, drops, duplications, noise
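Example #44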
def mkdir(directory):
    if not os.path.exists(directory):
        os.makedirs(directory)

SAVEDIR = SAVEDIR + RUNID + '/'
WDIR = SAVEDIR + 'W/'
mkdir(SAVEDIR)
mkdir(WDIR)
PRINTTO = SAVEDIR + 'generated_output.txt'


SEQLEN = rng.randint(20, 150)
SHUF_DATA = rng.randint(2)
NLAYER = rng.randint(1, 4)
NHID = [rng.randint(50, 500) for _ in xrange(NLAYER)]
PDROP = [rng.rand() for _ in xrange(NLAYER)]
BATCHSIZE = rng.randint(50, 200)
LRINIT = rng.uniform(0.001, 0.1)
RNGSEED = rng.randint(4525348)

rng = RandomState(RNGSEED)

runparams = {'SEQLEN': SEQLEN,
             'SHUF_DATA': SHUF_DATA,
             'NLAYER': NLAYER,
             'NHID': NHID,
             'PDROP': PDROP,
             'BATCHSIZE': BATCHSIZE,
             'LRINIT': LRINIT,
             'RNGSEED': RNGSEED}

pickle(runparams, SAVEDIR+'runparams.pkl')
Example #45
    def __init__ (self, layerID, input, inputSize, kernelSize, 
                  downsampleFactor, learningRate=0.001, momentumRate=0.9,
                  dropout=None, initialWeights=None, initialThresholds=None,
                  activation=tanh, randomNumGen=None) :
        Layer.__init__(self, layerID, learningRate, momentumRate, dropout)

        # TODO: this check is likely unnecessary
        if inputSize[2] == kernelSize[2] or inputSize[3] == kernelSize[3] :
            raise ValueError('ConvolutionalLayer Error: ' +
                             'inputSize cannot equal kernelSize')
        if inputSize[1] != kernelSize[1] :
            raise ValueError('ConvolutionalLayer Error: ' +
                             'Number of Channels must match in ' +
                             'inputSize and kernelSize')
        from theano.tensor.nnet.conv import conv2d
        from theano.tensor.signal.downsample import max_pool_2d

        # theano variables don't actually preserve buffer sizing
        self.input = input if isinstance(input, tuple) else (input, input)

        self._inputSize = inputSize
        self._kernelSize = kernelSize
        self._downsampleFactor = downsampleFactor

        # setup initial values for the weights -- if necessary
        if initialWeights is None :
            # create a rng if its needed
            if randomNumGen is None :
                from numpy.random import RandomState
                from time import time
                randomNumGen = RandomState(int(time()))

            # this creates optimal initial weights by randomizing them
            # to an appropriate range around zero, which leads to better
            # convergence.
            downRate = np.prod(self._downsampleFactor)
            fanIn = np.prod(self._kernelSize[1:])
            fanOut = self._kernelSize[0] * \
                     np.prod(self._kernelSize[2:]) / downRate
            scaleFactor = np.sqrt(6. / (fanIn + fanOut))
            initialWeights = np.asarray(randomNumGen.uniform(
                    low=-scaleFactor, high=scaleFactor, size=self._kernelSize),
                    dtype=config.floatX)
        self._weights = shared(value=initialWeights, borrow=True)

        # setup initial values for the thresholds -- if necessary
        if initialThresholds is None :
            initialThresholds = np.zeros((self._kernelSize[0],),
                                         dtype=config.floatX)
        self._thresholds = shared(value=initialThresholds, borrow=True)

        def findLogits(input, weights, 
                       inputSize, kernelSize, downsampleFactor, thresholds) :
            # create a function to perform the convolution
            convolve = conv2d(input, weights, inputSize, kernelSize)

            # create a function to perform the max pooling
            pooling = max_pool_2d(convolve, downsampleFactor, True)

            # the output buffer is now connected to a sequence of operations
            return pooling + thresholds.dimshuffle('x', 0, 'x', 'x')

        outClass = findLogits(self.input[0], self._weights,
                              self._inputSize, self._kernelSize,
                              self._downsampleFactor, self._thresholds)
        outTrain = findLogits(self.input[1], self._weights,
                              self._inputSize, self._kernelSize,
                              self._downsampleFactor, self._thresholds)

        # determine dropout if requested
        if self._dropout is not None :
            # here there are two possible paths --
            # outClass : path of execution intended for classification. Here
            #            all neurons are present and weights must be scaled by
            #            the dropout factor. This ensures resultant 
            #            probabilities fall within intended bounds when all
            #            neurons are present.
            # outTrain : path of execution for training with dropout. Here each
            #            neuron's output goes through a Bernoulli Trial. This
            #            retains a neuron with the probability specified by the
            #            dropout factor.
            outClass = outClass / self._dropout
            outTrain = switch(self._randStream.binomial(
                size=self.getOutputSize()[1:], p=self._dropout), outTrain, 0)

        # activate the layer --
        # output is a tuple to represent two possible paths through the
        # computation graph. 
        self.output = (outClass, outTrain) if activation is None else \
                      (activation(outClass), activation(outTrain))

        # we can call this method to activate the layer
        self.activate = function([self.input[0]], self.output[0])
Example #46
    def __init__ (self, layerID, input, inputSize, numNeurons,
                  learningRate=0.001, momentumRate=0.9, dropout=None,
                  initialWeights=None, initialThresholds=None, activation=tanh,
                  randomNumGen=None) :
        Layer.__init__(self, layerID, learningRate, momentumRate, dropout)

        # adjust the input for the correct number of dimensions        
        if isinstance(input, tuple) :
            if input[1].ndim > 2 : input = input[0].flatten(2), \
                                           input[1].flatten(2)
        else :
            if input.ndim > 2 : input = input.flatten(2)

        # store the input buffer -- this can either be a tuple or scalar
        # The input layer will only have a scalar so its duplicated here
        self.input = input if isinstance(input, tuple) else (input, input)
        self._inputSize = inputSize
        if isinstance(self._inputSize, six.integer_types) or \
           len(self._inputSize) != 2 :
            self._inputSize = (1, inputSize)
        self._numNeurons = numNeurons

        # setup initial values for the weights
        if initialWeights is None :
            # create a rng if its needed
            if randomNumGen is None :
               from numpy.random import RandomState
               from time import time
               randomNumGen = RandomState(int(time()))

            initialWeights = np.asarray(randomNumGen.uniform(
                low=-np.sqrt(6. / (self._inputSize[1] + self._numNeurons)),
                high=np.sqrt(6. / (self._inputSize[1] + self._numNeurons)),
                size=(self._inputSize[1], self._numNeurons)),
                dtype=config.floatX)
            if activation == sigmoid :
                initialWeights *= 4.
        self._weights = shared(value=initialWeights, borrow=True)

        # setup initial values for the thresholds
        if initialThresholds is None :
            initialThresholds = np.zeros((self._numNeurons,),
                                         dtype=config.floatX)
        self._thresholds = shared(value=initialThresholds, borrow=True)

        # create the logits
        def findLogit(input, weights, thresholds) :
            return dot(input, weights) + thresholds
        outClass = findLogit(self.input[0], self._weights, self._thresholds)
        outTrain = findLogit(self.input[1], self._weights, self._thresholds)

        # determine dropout if requested
        if self._dropout is not None :
            # here there are two possible paths --
            # outClass : path of execution intended for classification. Here
            #            all neurons are present and weights must be scaled by
            #            the dropout factor. This ensures resultant 
            #            probabilities fall within intended bounds when all
            #            neurons are present.
            # outTrain : path of execution for training with dropout. Here each
            #            neuron's output goes through a Bernoulli Trial. This
            #            retains a neuron with the probability specified by the
            #            dropout factor.
            outClass = outClass / self._dropout
            outTrain = switch(self._randStream.binomial(
                size=(self._numNeurons,), p=self._dropout), outTrain, 0)

        # activate the layer --
        # output is a tuple to represent two possible paths through the
        # computation graph. 
        self.output = (outClass, outTrain) if activation is None else \
                      (activation(outClass), activation(outTrain))

        # create a convenience function
        self.activate = function([self.input[0]], self.output[0])
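The weight initialisation above follows the Glorot/Bengio uniform scheme:
draws from [-sqrt(6/(fan_in+fan_out)), +sqrt(6/(fan_in+fan_out))], scaled
by 4 when the activation is a sigmoid. A minimal standalone sketch (the
name glorot_uniform is hypothetical):

import numpy as np
from numpy.random import RandomState

def glorot_uniform(fan_in, fan_out, for_sigmoid=False, rng=None):
    # bound from Glorot & Bengio (2010); 4x scaling for sigmoid units
    if not isinstance(rng, RandomState):
        rng = RandomState(rng)
    bound = np.sqrt(6. / (fan_in + fan_out))
    weights = rng.uniform(low=-bound, high=bound, size=(fan_in, fan_out))
    return 4. * weights if for_sigmoid else weights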
Beispiel #47
0
def test_npc_bad_distr():
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=20).reshape(10, 2)
    npc(pvalues, distr, "fisher", "greater")
Beispiel #48
0
def test_npc_bad_alternative():
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=50).reshape(10, 5)
    npc(pvalues, distr, "fisher", np.array(["greater", "less"]))
Beispiel #49
0
from numpy.random import RandomState
import matplotlib.pyplot as plt
random_generator = RandomState(seed=55)
r = random_generator.uniform(0., 1., size=2000000)
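The snippet imports matplotlib but stops before plotting; presumably it
goes on to histogram the two million uniform draws. A hedged completion
(the bin count is an assumption):

plt.hist(r, bins=100)   # should be approximately flat over [0, 1)
plt.xlabel('value')
plt.ylabel('count')
plt.show()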
Beispiel #50
0
def test_mono_checker_in_npc():
    prng = RandomState(55)
    pvalues = np.linspace(0.05, 0.9, num=5)
    distr = prng.uniform(low=0, high=10, size=500).reshape(100, 5)
    bad_comb_function = lambda p: -1*fisher(p)
    npc(pvalues, distr, bad_comb_function)
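The fisher referenced above is Fisher's combining function, -2 * sum(log p),
which grows as the p-values shrink; negating it (as bad_comb_function does)
reverses that monotonicity, which is what the checker should reject. A
minimal sketch, assuming this is the definition the permute library uses:

import numpy as np

def fisher(pvalues):
    # Fisher's combining function: larger when the p-values are smaller
    return -2 * np.log(np.asarray(pvalues)).sum()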
Beispiel #51
0
def randomu(seed, di=None, binomial=None, double=False, gamma=False,
            normal=False, poisson=False):
    """
    Replicates the randomu function available within IDL
    (Interactive Data Language, EXELISvis).
    Returns an array of uniformly distributed random numbers of the
    specified dimensions.
    The randomu function returns one or more pseudo-random numbers
    with one or more of the following distributions:
    Uniform (default)
    Gaussian
    binomial
    gamma
    poisson

    :param seed:
        If seed is not an instance of numpy.random.RandomState, then a
        new state is initialised. Otherwise, seed will be used to
        generate the random values.

    :param di:
        A list specifying the dimensions of the resulting array. If di
        is None then randomu returns a scalar.
        Dimensions are D1, D2, D3...D8 (x,y,z,lambda...).
        The list will be inverted to suit Python's inverted dimensions
        i.e. (D3,D2,D1).

    :param binomial:
        Set this keyword to a list of length 2, [n,p], to generate
        random deviates from a binomial distribution. If an event
        occurs with probability p, with n trials, then the number of
        times it occurs has a binomial distribution.

    :param double:
        If set to True, then randomu will return double precision
        random numbers.

    :param gamma:
        Set this keyword to an integer order i > 0 to generate random
        deviates from a gamma distribution.

    :param Long:
        If set to True, then randomu will return integer uniform
        random deviates in the range [0...2^31-1], using the Mersenne
        Twister algorithm. All other keywords will be ignored.
        (Currently disabled in this implementation due to Python
        integer overflow; see the commented-out code in the source.)

    :param normal:
        If set to True, then random deviates will be generated from a
        normal distribution.

    :param poisson:
        Set this keyword to the mean number of events occurring during
        a unit of time. The poisson keyword returns a random deviate
        drawn from a poisson distribution with that mean.

    :param ULong:
        If set to True, then randomu will return unsigned integer
        uniform deviates in the range [0..2^32-1], using the Mersenne
        Twister algorithm. All other keywords will be ignored.
        (Currently disabled in this implementation due to Python
        integer overflow; see the commented-out code in the source.)

    :return:
        A tuple of (res, seed). res is a NumPy array (or a scalar when
        di is None) of random numbers drawn from the selected
        distribution; seed is the RandomState used, which can be passed
        back in to continue the same stream.

    Example:
        >>> seed = None
        >>> x, sd = randomu(seed, [10,10])
        >>> x, sd = randomu(seed, [100,100], binomial=[10,0.5])
        >>> x, sd = randomu(seed, [100,100], gamma=2)
        >>> # 200x by 100y array of normally distributed values
        >>> x, sd = randomu(seed, [200,100], normal=True)
        >>> # 1000 deviates from a poisson distribution with a mean of 1.5
        >>> x, sd = randomu(seed, [1000], poisson=1.5)
        >>> # Return a scalar from a uniform distribution
        >>> x, sd = randomu(seed)

    :author:
        Josh Sixsmith, [email protected], [email protected]

    :copyright:
        Copyright (c) 2014, Josh Sixsmith
        All rights reserved.

        Redistribution and use in source and binary forms, with or without
        modification, are permitted provided that the following conditions are met:

        1. Redistributions of source code must retain the above copyright notice, this
           list of conditions and the following disclaimer.
        2. Redistributions in binary form must reproduce the above copyright notice,
           this list of conditions and the following disclaimer in the documentation
           and/or other materials provided with the distribution.

        THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
        ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
        WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
        DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
        ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
        (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
        LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
        ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
        (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
        SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

        The views and conclusions contained in the software and documentation are those
        of the authors and should not be interpreted as representing official policies,
        either expressed or implied, of the FreeBSD Project.
    """

    # Initialise the data type
    if double:
        dtype = 'float64'
    else:
        dtype = 'float32'

    # Check the seed
    # http://stackoverflow.com/questions/5836335/consistenly-create-same-random-numpy-array
    if not isinstance(seed, RandomState):
        seed = RandomState()

    if di is not None:
        if type(di) is not list:
            raise TypeError("Dimensions must be a list or None.")
        if len(di) > 8:
            raise ValueError("Error. More than 8 dimensions specified.")
        # Invert the dimensions list
        dims = di[::-1]
    else:
        dims = 1

    # Python has issues with overflow:
    # OverflowError: Python int too large to convert to C long
    # Occurs with Long and ULong
    #if Long:
    #    res = seed.random_integers(0, 2**31-1, dims)
    #    if di is None:
    #        res = res[0]
    #    return res, seed

    #if ULong:
    #    res = seed.random_integers(0, 2**32-1, dims)
    #    if di is None:
    #        res = res[0]
    #    return res, seed

    # Check for other keywords
    distributions = 0
    kwds = [binomial, gamma, normal, poisson]
    for kwd in kwds:
        if kwd:
            distributions += 1

    if distributions > 1:
        raise ValueError("Conflicting keywords; only one distribution "
                         "may be requested per call.")

    if binomial:
        if len(binomial) != 2:
            msg = "Error. binomial must contain [n,p] trials & probability."
            raise ValueError(msg)

        n = binomial[0]
        p = binomial[1]

        res = seed.binomial(n, p, dims)

    elif gamma:
        res = seed.gamma(gamma, size=dims)

    elif normal:
        res = seed.normal(0, 1, dims)

    elif poisson:
        res = seed.poisson(poisson, dims)

    else:
        res = seed.uniform(size=dims)

    res = res.astype(dtype)

    if di is None:
        res = res[0]

    return res, seed
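Because randomu returns the RandomState alongside the deviates, the state
can be threaded through successive calls, mirroring IDL's seed-variable
semantics. A short usage sketch:

from numpy.random import RandomState

x1, sd = randomu(None, [5])        # fresh, unseeded stream
x2, sd = randomu(sd, [5])          # continues the same stream as x1
# seeding explicitly gives a reproducible stream; note that di=[3, 4]
# is inverted, so y has NumPy shape (4, 3)
y, sd2 = randomu(RandomState(42), [3, 4], normal=True)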