Example #1
def randomRotation():
    """
    Get random rotation matrix.

    Written by Michael Habeck.

    :return: 3 x 3 array of float
    :rtype: array
    """
    alpha = R.random_sample() * 2 * N0.pi
    gamma = R.random_sample() * 2 * N0.pi
    beta  = N0.arccos(2*(R.random_sample() - 0.5))

    return eulerRotation(alpha, beta, gamma)
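This Biskit snippet relies on the module aliases `R` (NumPy's random module) and `N0` (a Numeric/oldnumeric shim) and on an `eulerRotation` helper that is not shown. A self-contained sketch of the same sampling scheme in plain NumPy, with a ZYZ Euler convention chosen only for illustration (Biskit's `eulerRotation` may use a different convention):

import numpy as np

def random_rotation(rng=None):
    """Sample angles as above: alpha, gamma uniform on [0, 2*pi),
    cos(beta) uniform on [-1, 1]."""
    rng = np.random.default_rng() if rng is None else rng
    alpha = rng.random() * 2 * np.pi
    gamma = rng.random() * 2 * np.pi
    beta = np.arccos(2 * (rng.random() - 0.5))

    # Illustrative ZYZ rotation built from the three angles.
    ca, sa = np.cos(alpha), np.sin(alpha)
    cb, sb = np.cos(beta), np.sin(beta)
    cg, sg = np.cos(gamma), np.sin(gamma)
    rz1 = np.array([[ca, -sa, 0.0], [sa, ca, 0.0], [0.0, 0.0, 1.0]])
    ry = np.array([[cb, 0.0, sb], [0.0, 1.0, 0.0], [-sb, 0.0, cb]])
    rz2 = np.array([[cg, -sg, 0.0], [sg, cg, 0.0], [0.0, 0.0, 1.0]])
    return rz1 @ ry @ rz2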
Example #2
def randomRotation():
    """
    Get random rotation matrix.

    Written by Michael Habeck.

    :return: 3 x 3 array of float
    :rtype: array
    """
    alpha = R.random_sample() * 2 * N0.pi
    gamma = R.random_sample() * 2 * N0.pi
    beta = N0.arccos(2 * (R.random_sample() - 0.5))

    return eulerRotation(alpha, beta, gamma)
Example #3
def test_change(dset_key, conn, fun):
    """Takes in a data set key and computes g(r) with different amounts of noise added
    to the positions returns a list of the g(r)"""
    max_rng = 100
    nbins = 1000
    buff = 1
    cull_rate = 0.5

    (fname, comp_num) = conn.execute(
        "select fout,comp_key from comps where dset_key =? and function = 'Iden'",
        (dset_key,),
    ).fetchone()
    F = h5py.File(fname, "r")
    frame = 10

    x = F["/frame%(#)06d" % {"#": frame} + "/x_%(#)07d" % {"#": comp_num}]
    y = F["/frame%(#)06d" % {"#": frame} + "/y_%(#)07d" % {"#": comp_num}]

    parts = [a for a in itertools.izip(x, y) if nrm.random_sample() < cull_rate]

    steps = 5

    gs = []

    for j in range(0, steps):
        hc = sc.hash_case((np.ceil(np.max(x)), np.ceil(np.max(y))), max_rng)
        gofr = sc.gofr_comp(nbins, max_rng)
        for p in parts:
            hc.add_particle(fun(p, j))
        hc.compute_corr(buff, gofr.add_particle)

        gs.append(cord_pairs(gofr.bin_edges, gofr.vals))

    return gs
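`test_change` delegates the noise model to the caller via `fun(p, j)`, which receives one `(x, y)` pair and the step index `j`. A hypothetical callback (name and scaling are illustrative, not from the source) that adds Gaussian jitter growing with `j`:

import numpy.random as nrm

def gaussian_jitter(p, j, sigma=0.1):
    """Perturb an (x, y) position with Gaussian noise of width j * sigma."""
    x, y = p
    return (x + j * sigma * nrm.standard_normal(),
            y + j * sigma * nrm.standard_normal())

# gs = test_change(dset_key, conn, gaussian_jitter)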
Example #4
    def generate(self):
        matrix = 8 * random_sample((3, 8)) - 4
        for i in range(3):
            for j in range(8):
                if i + 6 == j:
                    matrix[i][j] = 0
        return matrix
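`8 * random_sample((3, 8)) - 4` fills the matrix with values drawn uniformly from [-4, 4). The same affine map `(b - a) * random_sample(shape) + a` recurs throughout these examples; a minimal sketch:

import numpy as np

def uniform_matrix(a, b, shape, rng=None):
    """Uniform samples on [a, b), using the affine map from above."""
    rng = np.random.default_rng() if rng is None else rng
    return (b - a) * rng.random(shape) + a

m = uniform_matrix(-4.0, 4.0, (3, 8))   # entries uniform on [-4, 4), as in generate()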
Example #5
def random_Butter(n=(5, 10),
                  Wc=(0.1, 0.8),
                  W1=(0.1, 0.5),
                  W2=(0.5, 0.8),
                  form=None,
                  onlyEven=True,
                  seed=None):
    """
	Generate a n-th order Butterworh filters
	Parameters
		----------
		- n: (int) The order of the filter
		- Wc: used if btype is 'lowpass' or 'highpass'
			Wc is a tuple (min,max) for the cut frequency
		- W1 and W2: used if btype is ‘bandpass’, ‘bandstop’
			W1 and W2 are tuple (min,max) for the two start/stop frequencies
		- form: (string) {None, ‘lowpass’, ‘highpass’, ‘bandpass’, ‘bandstop’}. Gives the type of filter. If None, the type is randomized
		- onlyEven: if True, only even order filter are generated
		- seed: if not None, indicates the seed toi use for the random part (in order to be reproductible, the seed is stored in the name of the filter)
	"""
    # change the seed if asked
    if seed:
        numpy_seed(seed)
    # choose a form if asked
    if form is None:
        form = choice(("lowpass", "highpass", "bandpass", "bandstop"))
    # choose Wn
    if form in ("bandpass", "bandstop"):
        # choose 2 frequencies
        if W2[1] <= W1[0]:
            raise ValueError("iter_random_Butter: W1 should be lower than W2")
        Wn1 = (W1[1] - W1[0]) * random_sample() + W1[0]
        Wn2 = (W2[1] - W2[0]) * random_sample() + W2[0]
        while Wn2 <= Wn1:
            Wn2 = (W2[1] - W2[0]) * random_sample() + W2[0]
        W = [Wn1, Wn2]
    else:
        # choose 1 frequency
        W = (Wc[1] - Wc[0]) * random_sample() + Wc[0]
    # choose order
    order = randint(*n)
    if onlyEven and order % 2 == 1:
        order += 1  # bump odd orders so that only even-order filters are produced

    return Butter(order, W, form, name='Butterworth-random-%s' % seed)
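A hedged usage sketch; `Butter` and the imports (`numpy_seed`, `random_sample`, `randint`, `choice`) are assumed to come from the surrounding filter-generation package and are not defined here:

# Reproducible draw: the seed ends up in the filter's name,
# e.g. 'Butterworth-random-12345'.
f = random_Butter(n=(4, 8), form='lowpass', seed=12345)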
Example #6
def random_TF(n=(5, 10), Wc=(0.1, 0.8), W1=(0.1, 0.5), W2=(0.5, 0.8)):
    """Generate one n-th order stable butterworth filter ((num, den) of the transfer function)"""
    # choose a form
    form = choice(['lowpass', 'highpass', 'bandpass', 'bandstop'])
    # choose Wn
    if form in ("bandpass", "bandstop"):
        # choose 2 frequencies such that Wn1 < Wn2
        Wn1 = (W1[1] - W1[0]) * random_sample() + W1[0]
        Wn2 = (W2[1] - W2[0]) * random_sample() + W2[0]
        while Wn2 <= Wn1:
            Wn2 = (W2[1] - W2[0]) * random_sample() + W2[0]
        W = [Wn1, Wn2]
    else:
        # choose 1 frequency
        W = (Wc[1] - Wc[0]) * random_sample() + Wc[0]
    # choose order
    order = randint(*n)
    num, den = butter(order, W, form)
    return num, den
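`random_TF` appears to rely on `scipy.signal.butter` plus the usual random helpers. A usage sketch that inspects the drawn transfer function with `scipy.signal.freqz` (both functions exist in SciPy; the surrounding imports are assumed):

from scipy.signal import freqz

num, den = random_TF(n=(4, 8))
w, h = freqz(num, den)            # frequency response of the random filter
print(len(den) - 1)               # drawn order (doubled for bandpass/bandstop)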
Example #7
    def test_FuzzyCluster( self):
        """FuzzyCluster test"""
        import gnuplot as G

        x1 = R.random_sample((500,2))
        x2 = R.random_sample((500,2)) + 1
        x3 = R.random_sample((500,2)) + 2

        self.x = N0.concatenate((x1, x2, x3))

        self.fuzzy = FuzzyCluster(self.x, n_cluster=5, weight=1.5)

        self.centers = self.fuzzy.go(1.e-30, n_iterations=50, nstep=10,
                                     verbose=self.local)

        if self.local:
            print "cluster centers are displayed in green"
            G.scatter( self.x, self.centers )

        self.assertEqual( N0.shape(self.centers), (5, 2) )
Example #8
    def test_FuzzyCluster( self):
        """FuzzyCluster test"""
        import biskit.gnuplot as G

        x1 = R.random_sample((500,2))
        x2 = R.random_sample((500,2)) + 1
        x3 = R.random_sample((500,2)) + 2

        self.x = N0.concatenate((x1, x2, x3))

        self.fuzzy = FuzzyCluster(self.x, n_cluster=5, weight=1.5)

        self.centers = self.fuzzy.go(1.e-30, n_iterations=50, nstep=10,
                                     verbose=self.local)

        if self.local:
            print("cluster centers are displayed in green")
            G.scatter( self.x, self.centers )

        self.assertEqual( N0.shape(self.centers), (5, 2) )
Example #9
def random_Elliptic(n=(5, 10),
                    rp=(10, 50),
                    rs=(10, 50),
                    Wc=(0.1, 0.8),
                    W1=(0.1, 0.5),
                    W2=(0.5, 0.8),
                    form=None,
                    seed=None,
                    quant=None):
    """
	Generate a n-th order Butterworh filters
	Parameters
		----------
		- n: (int) The order of the filter
		- Wc: used if btype is 'lowpass' or 'highpass'
			Wc is a tuple (min,max) for the cut frequency
		- W1 and W2: used if btype is ‘bandpass’, ‘bandstop’
			W1 and W2 are tuple (min,max) for the two start/stop frequencies
		- form: (string) {None, ‘lowpass’, ‘highpass’, ‘bandpass’, ‘bandstop’}. Gives the type of filter. If None, the type is randomized
		- quant: quantized the coefficients with quant bits (None by default -> no quantization)
		- seed: if not None, indicates the seed toi use for the random part (in order to be reproductible, the seed is stored in the name of the filter)
	"""
    # change the seed if asked
    if seed:
        numpy_seed(seed)
    # choose a form if asked
    if form is None:
        form = choice(("lowpass", "highpass", "bandpass", "bandstop"))
    # choose Wn
    if form in ("bandpass", "bandstop"):
        # choose 2 frequencies
        if W2[1] <= W1[0]:
            raise ValueError(
                "random_Elliptic: W1 should be lower than W2")
        Wn1 = (W1[1] - W1[0]) * random_sample() + W1[0]
        Wn2 = (W2[1] - W2[0]) * random_sample() + W2[0]
        while Wn2 <= Wn1:
            Wn2 = (W2[1] - W2[0]) * random_sample() + W2[0]
        W = [Wn1, Wn2]
    else:
        # choose 1 frequency
        W = (Wc[1] - Wc[0]) * random_sample() + Wc[0]
    # choose rp and rs
    rip = (rp[1] - rp[0]) * random_sample() + rp[0]
    rst = (rs[1] - rs[0]) * random_sample() + rs[0]
    # choose order
    order = randint(*n)
    # quantize the parameters if asked (e.g. so they can be printed exactly)
    if quant:
        rip = quantify(rip, quant)
        rst = quantify(rst, quant)
        if isinstance(W, list):
            W = [quantify(W[0], quant), quantify(W[1], quant)]

    return Elliptic(order,
                    rp=rip,
                    rs=rst,
                    Wn=W,
                    etype=form,
                    name='Elliptic-random-%s' % seed)
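The `Elliptic` wrapper is presumably built on `scipy.signal.ellip`, which takes exactly the quantities drawn here (order, passband ripple `rp` in dB, stopband attenuation `rs` in dB, and the critical frequency); a minimal sketch of the equivalent direct call:

from numpy.random import randint, random_sample
from scipy.signal import ellip

rip = (50 - 10) * random_sample() + 10       # passband ripple in dB
rst = (50 - 10) * random_sample() + 10       # stopband attenuation in dB
W = (0.8 - 0.1) * random_sample() + 0.1      # normalized cut-off in (0, 1)
order = randint(5, 10)
num, den = ellip(order, rip, rst, W, btype='lowpass')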
Example #10
    def __random_translation( self ):
        """
        Random translation on a sphere around 0,0,0 with fixed radius
        The radius is the sum of the (max) radius of receptor and ligand

        @return: translation array 3 x 1 of float
        @rtype: array
        """
        radius = (self.d_max_rec + self.d_max_lig) / 2.0
        xyz = R.random_sample( 3 ) - 0.5

        scale = radius*1.0 / N0.sqrt( N0.sum( xyz**2 ) )

        return scale * xyz
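A plain-NumPy sketch of the same construction (the `R`/`N0` aliases are Biskit's NumPy/Numeric shims): draw a direction by sampling a point in the cube around the origin, then rescale it to the target radius:

import numpy as np

def random_translation(radius, rng=None):
    """Return a point on the sphere of the given radius around the origin."""
    rng = np.random.default_rng() if rng is None else rng
    xyz = rng.random(3) - 0.5                 # direction from a unit cube around 0
    return radius * xyz / np.sqrt(np.sum(xyz ** 2))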
Example #11
    def __random_translation( self ):
        """
        Random translation on a sphere around 0,0,0 with fixed radius
        The radius is the sum of the (max) radius of receptor and ligand

        @return: translation array 3 x 1 of float
        @rtype: array
        """
        radius = (self.d_max_rec + self.d_max_lig) / 2.0
        xyz = R.random_sample( 3 ) - 0.5

        scale = radius*1.0 / N0.sqrt( N0.sum( xyz**2 ) )

        return scale * xyz
Example #12
    def create_membership_matrix(self):
        """
        Create a random membership matrix.

        @return: random array of shape length of data to
                 cluster times number of clusters
        @rtype: array('f')
        """
        ## default signature has changed oldnumeric->numpy
        if (self.seedx == 0 or self.seedy == 0):
            R.seed()
        else:
            R.seed((self.seedx, self.seedy))

        r = R.random_sample((self.npoints, self.n_cluster))
        return N0.transpose(r / N0.sum(r))
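`N0.sum(r)` uses the oldnumeric default of reducing over axis 0, so each column (one cluster) is normalized over the data points before the transpose. The same construction in modern NumPy, kept deliberately faithful to the code above:

import numpy as np

def create_membership_matrix(npoints, n_cluster, rng=None):
    """Random (n_cluster, npoints) membership matrix, mirroring the
    oldnumeric version: columns are normalized before transposing."""
    rng = np.random.default_rng() if rng is None else rng
    r = rng.random((npoints, n_cluster))
    return (r / r.sum(axis=0)).T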
Example #13
    def create_membership_matrix(self):
        """
        Create a random membership matrix.

        @return: random array of shape length of data to
                 cluster times number of clusters
        @rtype: array('f')
        """
        ## default signature has changed oldnumeric->numpy
        if (self.seedx == 0 or self.seedy == 0):
            R.seed()
        else:
            R.seed((self.seedx, self.seedy))

        r = R.random_sample((self.npoints, self.n_cluster))
        return N0.transpose(r / N0.sum(r))
Example #14
    def compute_clusters(self, pXY, qXhatYhat, qXhat, qXxhat, cXY, axis):
        """ Compute the best cluster assignment along a single axis, given all the distributions and clusters on other axes.

        Args:
            pXY: the original probability distribution matrix
            qXhatYhat: the joint distribution over the clusters
            qXhat: the marginal distributions of qXhatYhat
            qXxhat: the distribution conditioned on the clustering in a list
            cXY: current cluster assignments along each dimension
            axis: the axis (dimension) over which clusters are being computed

        Return:
            Best cluster assignment along a single axis as a list
        """
        if not isinstance(pXY, SparseMatrix) or not isinstance(qXhatYhat, SparseMatrix):
            raise Exception("Arguments to compute_clusters not an instance of SparseMatrix.")
        # To assign clusters, we calculate argmin_xhat D(p(Y,Z|x) || q(Y,Z|xhat)),
        # where D(P|Q) = \sum_i P_i log (P_i / Q_i)
        dPQ = np.zeros(shape=(pXY.N[axis], qXhatYhat.N[axis]))
        # iterate though all non-zero elements; here we are making use of the sparsity to reduce computation
        for coords, p_i in pXY.nonzero_elements.iteritems():
            coord_this_axis = coords[axis]
            px = self.pX[axis][coord_this_axis]
            p_i = 1 if px == 0 else p_i / px  # calculate p(y|x) = p(x,y)/p(x), but we should be careful if px == 0
            current_cluster_assignments = [cXY[i][coords[i]] for i in
                                           xrange(self.dim)]  # cluster assignments on each axis
            for xhat in xrange(self.K[axis]):
                current_cluster_assignments[axis] = xhat  # temporarily assign dth dimension to this xhat
                current_qXhatYhat = qXhatYhat.get(tuple(current_cluster_assignments))
                current_qXhat = qXhat[axis][xhat]
                q_i = 1.0
                if current_qXhatYhat == 0 and current_qXhat == 0:
                    q_i = 0  # Here we define 0/0=0
                else:
                    q_i *= current_qXhatYhat / current_qXhat
                    for i in xrange(self.dim):
                        if i == axis: continue
                        q_i *= qXxhat[i][coords[i]]
                if q_i == 0:  # this can definitely happen if cluster joint distribution has zero element
                    dPQ[coord_this_axis, xhat] = INFINITE
                else:
                    dPQ[coord_this_axis, xhat] += p_i * math.log(p_i / q_i)

        # add random jitter to break ties
        dPQ += self.jitter_max * random_sample(dPQ.shape)
        return list(dPQ.argmin(1))  # return the closest cluster assignment under KL-divergence
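The last two lines are a generic tie-breaking trick: add uniform noise far smaller than any real difference, then take the argmin, so equal divergences are resolved at random rather than always in favor of the lowest index. A standalone sketch (`jitter_max` is just a small illustrative constant):

import numpy as np
from numpy.random import random_sample

jitter_max = 1e-9
dPQ = np.array([[0.4, 0.4, 0.7],
                [0.2, 0.9, 0.2]])
dPQ = dPQ + jitter_max * random_sample(dPQ.shape)
assignments = list(dPQ.argmin(1))   # ties within each row broken at random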
Example #15
    def solve(self):
        self.t = 0
        self.initialize()
        self.evaluate()
        best = np.max(self.fitness)
        newpop1 = self.population
        bestIndex = np.argmax(self.fitness)
        self.best = copy.deepcopy(self.population[bestIndex])
        while (self.t < self.MAXGEN and best != 32):
            self.t += 1
            newpop = []
            arr = []
            fitness_sum = []
            population_sum = []
            for i in range(self.sizepop):
                bestmatrix = self.best.matrix
                bestmatrix1 = 8 * random_sample((3, 8)) - 4
                ind = GAIndividual()
                ind.matrix = bestmatrix1
                newpop.append(ind)
            population_sum = newpop1 + newpop
            for i in range(2 * self.sizepop):
                fitness_sum.append(population_sum[i].calculateFitness())
            fitness_sum = np.array(fitness_sum)
            arr = np.argsort(-fitness_sum)
            for i in range(self.sizepop):
                newpop[i] = population_sum[arr[i]]
            self.population = []
            self.population = newpop
            self.evaluate()
            best = np.max(self.fitness)
            bestIndex = np.argmax(self.fitness)
            newpop1 = self.population
            if best == 32:
                break
        matrix2 = self.population[bestIndex].matrix

        for i in range(3):
            for j in range(8):
                print(matrix2[i][j], end=' ')
            print("")
        print(self.iteration)
Example #16
    def initialize_cluster_centers(self, pXY, K):
        """ Initializes the cluster assignments along each axis, by first selecting k centers, 
        and then map each row to its closet center under cosine similarity.

        Args:
            pXY: original data matrix
            K: numbers of clusters desired in each dimension

        Return:
            new_C: a list of list of cluster id that the current index in the current axis is assigned to.
        """
        if not isinstance(pXY, SparseMatrix):
            raise Exception("Matrix argument to initialize_cluster_centers is not an instance of SparseMatrix.")
        new_C = [[-1] * Ni for Ni in pXY.N]

        for axis in xrange(len(K)): # loop over each dimension
            # choose cluster centers
            axis_length = pXY.N[axis]
            center_indices = random.sample(xrange(axis_length), K[axis])
            cluster_ids = {}
            for i in xrange(K[axis]):  # assign identifiers to clusters
                center_index = center_indices[i]
                cluster_ids[center_index] = i
            centers = defaultdict(lambda: defaultdict(float))  # all nonzero indices for each center
            for coords in pXY.nonzero_elements:
                coord_this_axis = coords[axis]
                if coord_this_axis in cluster_ids: # is a center
                    reduced_coords = tuple([coords[i] for i in xrange(len(coords)) if i != axis]) # coords without the current axis
                    centers[cluster_ids[coord_this_axis]][reduced_coords] = pXY.nonzero_elements[coords] # (cluster_id, other coords) -> value

            # assign rows to clusters
            scores = np.zeros(shape=(pXY.N[axis], K[axis])) # scores: axis_size x cluster_number
            denoms_P = np.zeros(shape=(pXY.N[axis]))
            denoms_Q = np.zeros(shape=(K[axis]))
            for coords in pXY.nonzero_elements:
                coord_this_axis = coords[axis]
                if coord_this_axis in center_indices:
                    continue  # don't reassign cluster centers, please
                reduced_coords = tuple([coords[i] for i in xrange(len(coords)) if i != axis])
                for cluster_index in cluster_ids:
                    xhat = cluster_ids[cluster_index]  # need cluster ID, not the axis index
                    if reduced_coords in centers[xhat]:  # overlapping point
                        P_i = pXY.nonzero_elements[coords]
                        Q_i = centers[xhat][reduced_coords]
                        scores[coords[axis]][xhat] += P_i * Q_i  # now doing based on cosine similarity
                        denoms_P[coords[axis]] += P_i * P_i  # magnitude of this slice of original matrix
                        denoms_Q[xhat] += Q_i * Q_i  # magnitude of cluster centers

            # normalize scores
            scores = divide(scores, outer(sqrt(denoms_P), sqrt(denoms_Q)))
            scores[scores == 0] = -1.0

            # add random jitter to scores to handle tie-breaking
            scores += self.jitter_max * random_sample(scores.shape)
            new_cXYi = list(scores.argmax(1))  # this needs to be argmax because cosine similarity

            # make sure to assign the cluster centers to themselves
            for center_index in cluster_ids:
                new_cXYi[center_index] = cluster_ids[center_index]

            # ensure numbers of clusters are correct
            self.ensure_correct_number_clusters(new_cXYi, K[axis])
            new_C[axis] = new_cXYi
        return new_C
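The normalization step is plain cosine similarity: every accumulated dot product is divided by the product of the two vector norms, computed for all (row, center) pairs at once with an outer product. A compact dense-data sketch of that step:

import numpy as np
from numpy import divide, outer, sqrt

rows = np.array([[1.0, 0.0, 2.0],
                 [0.0, 3.0, 0.0]])
centers = np.array([[2.0, 0.0, 4.0],
                    [0.0, 1.0, 0.0]])

scores = rows @ centers.T               # raw dot products
denoms_P = (rows ** 2).sum(axis=1)      # squared row norms
denoms_Q = (centers ** 2).sum(axis=1)   # squared center norms
cosine = divide(scores, outer(sqrt(denoms_P), sqrt(denoms_Q)))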
Example #17
std_deviation = 0.3
noise = std_deviation * randn(N)
t = tlab + noise

for idx, val in enumerate(mDegrees):
    w = polyfit(x, t, val)
    row = idx // 2  # integer division so the subplot index stays an int
    col = idx % 2
    fig = ax[row, col]
    fig.set_title('M=%d' % val)
    fig.plot(xlab, ylab)
    fig.plot(x, t, 'ro')
    fig.plot(xlab, polynomial(w, xlab), 'g')

NTest = 8
xTest = random_sample(NTest)
yTest = sin(2 * pi * xTest) + randn(NTest) * std_deviation

test_err = []
train_err = []

maxOrder = 10

figError = plt.figure()

for m in range(0, maxOrder):
    weights = polyfit(x, t, m)
    train_err.append(rms(weights, x, t))
    test_err.append(rms(weights, xTest, yTest))

plt.xlabel("M")
Example #18
def random(shape=[]):
    "random(n) or random([n, m, ...]) returns array of random numbers"
    if shape == []:
        shape = None
    return mt.random_sample(shape)
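This is the Numeric-compatibility wrapper from NumPy's oldnumeric layer; `mt` is presumably `numpy.random.mtrand`, and passing `None` as the shape makes `random_sample` return a single float rather than an array:

random()          # -> one float, since the empty-list default becomes None
random(4)         # -> 1-D array of 4 floats
random([2, 3])    # -> 2 x 3 array of floats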
Example #20
t = tlab + noise

for idx, val in enumerate(mDegrees):
    w = polyfit(x, t, val)
    row = idx // 2  # integer division so the subplot index stays an int
    col = idx % 2
    fig = ax[row, col]
    fig.set_title('M=%d' % val)
    fig.plot(xlab, ylab)
    fig.plot(x, t, 'ro')
    fig.plot(xlab, polynomial(w, xlab), 'g')



NTest = 8
xTest = random_sample(NTest)
yTest = sin(2 * pi * xTest) + randn(NTest) * std_deviation


test_err = []
train_err = []

maxOrder = 10

figError = plt.figure()

for m in range(0, maxOrder):
    weights = polyfit(x, t, m)
    train_err.append(rms(weights, x, t))
    test_err.append(rms(weights, xTest, yTest))