Example 1
    def zernike(self, G, N):
        V = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for a, b, c, alpha in nest(lambda: range(int(N / 2) + 1),
                                   lambda _a: range(N - 2 * _a + 1),
                                   lambda _a, _b: range(N - 2 * _a - _b + 1),
                                   lambda _a, _b, _c: range(_a + _c + 1),
                                   ):
            V[a, b, c] += np.power(IMAG_CONST, alpha) * \
                nchoosek(a + c, alpha) * G[2 * a + c - alpha, alpha, b]

        W = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for a, b, c, alpha in nest(lambda: range(int(N / 2) + 1),
                                   lambda _a: range(N - 2 * _a + 1),
                                   lambda _a, _b: range(N - 2 * _a - _b + 1),
                                   lambda _a, _b, _c: range(_a + 1),
                                   ):
            W[a, b, c] += np.power(-1, alpha) * np.power(2, a - alpha) * \
                nchoosek(a, alpha) * V[a - alpha, b, c + 2 * alpha]

        X = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for a, b, c, alpha in nest(lambda: range(int(N / 2) + 1),
                                   lambda _a: range(N - 2 * _a + 1),
                                   lambda _a, _b: range(N - 2 * _a - _b + 1),
                                   lambda _a, _b, _c: range(_a + 1),
                                   ):
            X[a, b, c] += nchoosek(a, alpha) * W[a - alpha, b + 2 * alpha, c]

        Y = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for l, nu, m, j in nest(lambda: range(N + 1),
                                lambda _l: range(int((N - _l) / 2) + 1),
                                lambda _l, _nu: range(_l + 1),
                                lambda _l, _nu, _m: range(int((_l - _m) / 2) + 1),
                                ):
            Y[l, nu, m] += self.Yljm(l, j, m) * X[nu + j, l - m - 2 * j, m]

        Z = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for n, l, m, nu, in nest(lambda: range(N + 1),
                                 lambda _n: range(_n + 1),
                                 # there's an if...mod missing in this but it
                                 # still works?
                                 lambda _n, _l: range(_l + 1),
                                 lambda _n, _l, _m: range(int((_n - _l) / 2) + 1),
                                 ):
            # integer required for k when used as power in Qklnu below:
            k = int((n - l) / 2)
            Z[n, l, m] += (3 / (4 * PI_CONST)) * \
                self.Qklnu(k, l, nu) * np.conj(Y[l, nu, m])

        for n, l, m in nest(lambda: range(N + 1),
                            lambda _n: range(_n + 1),
                            lambda _n, _l: range(_l + 1),
                            ):
            if np.mod(np.sum([n, l, m]), 2) == 0:
                Z[n, l, m] = np.real(
                    Z[n, l, m]) - np.imag(Z[n, l, m]) * IMAG_CONST
            else:
                Z[n, l, m] = -np.real(Z[n, l, m]) + \
                    np.imag(Z[n, l, m]) * IMAG_CONST

        return Z
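The `nest` helper that drives these quadruple loops is defined elsewhere in the module. A minimal sketch of what it presumably does, assuming each factory takes the indices chosen so far and returns the iterable for the next level (a hypothetical reconstruction, not the original helper):

def nest(*range_factories):
    """Yield index tuples from nested, dependent ranges."""
    def recurse(prefix, factories):
        if not factories:
            yield tuple(prefix)
            return
        head, *rest = factories
        for value in head(*prefix):  # inner range may depend on outer indices
            yield from recurse(prefix + [value], rest)
    return recurse([], list(range_factories))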
Example 2
def p_read(theta, N, X, B, mutation):
    # Parse the mutation string
    if isinstance(mutation,(str)):
        if mutation.isdigit():
            mutation = float(mutation)
        else:
            mutation = mutation.upper()
            # turn string input into numeric cases
            mutation = {
                'DEL': 1.0,
                'UPD': 2.0
            }.get(mutation,mutation)
    # raise exception if the result is not numeric
    if not isinstance(mutation, (int, float)):  # `long` existed only in Python 2
        raise Exception("Unrecognized mutation type %s" % (mutation))
    # probability of reads based on type of mutation
    if mutation == 1.0: # DEL
        p_A = B / (1.0-theta+B*theta)
        p_B = (B-B*theta) / (1.0-B*theta)
    elif mutation == 2.0: # UPD
        p_A = (B+B*theta) / (1.0-theta+2.0*B*theta)
        p_B = (B-B*theta) / (1.0+theta-2.0*B*theta)
    elif mutation > 2.0: # AMPLIFICATION
        p_A = ((mutation-2.0)*B*theta+theta) / ((mutation-2.0)*B*theta+1.0)
        p_B = ((mutation-1.0)*theta+(2.0-mutation)*B*theta) / ((mutation-2.0)*theta+(2.0-mutation)*B*theta+1.0)
    else: # catch other invalid copy numbers / mutations input
        raise Exception("Invalid copy number mutation %d" %(mutation))
    f_A = nchoosek(N,X) * (p_A**X) * ((1.0-p_A)**(N-X))
    f_B = nchoosek(N,X) * (p_B**X) * ((1.0-p_B)**(N-X))
    return (p_A, p_B, f_A, f_B)
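f_A and f_B are binomial likelihoods C(N, X) * p**X * (1 - p)**(N - X) evaluated at the mutation-specific read probabilities p_A and p_B. A quick sanity check, assuming nchoosek is scipy.special.comb (the import is not shown in this listing): with theta = 0 a deletion is indistinguishable from a neutral site, so both probabilities collapse to the baseline B.

from scipy.special import comb as nchoosek
p_A, p_B, f_A, f_B = p_read(theta=0.0, N=10, X=5, B=0.5, mutation='DEL')
assert abs(p_A - 0.5) < 1e-12 and abs(p_B - 0.5) < 1e-12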
Example 3
def find_most_distant(input_sample, N, num_params, k_choices, groups=None):
    '''
    Finds the 'k_choices' most distant choices from the
    'N' trajectories contained in 'input_sample'
    '''
    # Now evaluate the (N choose k_choices) possible combinations
    if nchoosek(N, k_choices) >= sys.maxsize:
        raise ValueError("Number of combinations is too large")
    number_of_combinations = int(nchoosek(N, k_choices))

    # First compute the distance matrix for each possible pairing
    # of trajectories and store in a shared-memory array
    distance_matrix = compute_distance_matrix(input_sample, N, num_params,
                                              groups)

    # Initialise the output array

    chunk = int(1e6)
    if chunk > number_of_combinations:
        chunk = number_of_combinations

    counter = 0
    # Generate a list of all the possible combinations
    # combos = np.array([x for x in combinations(range(N),k_choices)])
    combo_gen = combinations(list(range(N)), k_choices)
    scores = np.empty(number_of_combinations, dtype=np.float32)
    # Generate the pairwise indices once
    pairwise = np.array([y for y in combinations(list(range(k_choices)), 2)])

    for combos in grouper(chunk, combo_gen):
        scores[(counter * chunk):((counter + 1) * chunk)] = mappable(
            combos, pairwise, distance_matrix)
        counter += 1
    return scores
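`grouper` and `mappable` are helpers from the same module and are not part of this listing. `grouper` presumably batches the combination generator into chunks of size `chunk`; a sketch under that assumption:

from itertools import islice

def grouper(n, iterable):
    """Yield successive lists of up to n items (assumed behaviour of the
    helper used above)."""
    it = iter(iterable)
    while True:
        batch = list(islice(it, n))
        if not batch:
            return
        yield batch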
Example 4
 def Qklnu(self, k, l, nu):
     aux_1 = np.power(-1, k + nu) / np.power(4.0, k)
     aux_2 = np.sqrt((2 * l + 4 * k + 3) / 3.0)
     aux_3 = self.trinomial(
         nu, k - nu, l + nu + 1) * nchoosek(2 * (l + nu + 1 + k), l + nu + 1 + k)
     aux_4 = nchoosek(2.0 * (l + nu + 1), l + nu + 1)
     return (aux_1 * aux_2 * aux_3) / aux_4
Example 5
def p_read(theta, N, X, B, mutation):
    # Parse the mutation string
    if isinstance(mutation, (str)):
        if mutation.isdigit():
            mutation = float(mutation)
        else:
            mutation = mutation.upper()
            # turn string input into numeric cases
            mutation = {'DEL': 1.0, 'UPD': 2.0}.get(mutation, mutation)
    # raise exception if the result is not numeric
    if not isinstance(mutation, (int, float)):  # `long` existed only in Python 2
        raise Exception("Unrecognized mutation type %s" % (mutation))
    # probability of reads based on type of mutation
    if mutation == 1.0:  # DEL
        p_A = B / (1.0 - theta + B * theta)
        p_B = (B - B * theta) / (1.0 - B * theta)
    elif mutation == 2.0:  # UPD
        p_A = (B + B * theta) / (1.0 - theta + 2.0 * B * theta)
        p_B = (B - B * theta) / (1.0 + theta - 2.0 * B * theta)
    elif mutation > 2.0:  # AMPLIFICATION
        p_A = ((mutation - 2.0) * B * theta + theta) / (
            (mutation - 2.0) * B * theta + 1.0)
        p_B = ((mutation - 1.0) * theta + (2.0 - mutation) * B * theta) / (
            (mutation - 2.0) * theta + (2.0 - mutation) * B * theta + 1.0)
    else:  # catch other invalid copy numbers / mutations input
        raise Exception("Invalid copy number mutation %d" % (mutation))
    f_A = nchoosek(N, X) * (p_A**X) * ((1.0 - p_A)**(N - X))
    f_B = nchoosek(N, X) * (p_B**X) * ((1.0 - p_B)**(N - X))
    return (p_A, p_B, f_A, f_B)
Example 6
 def Qklnu(self, k, l, nu):
     aux_1 = np.power(-1, k + nu) / np.power(4.0, k)
     aux_2 = np.sqrt((2 * l + 4 * k + 3) / 3.0)
     aux_3 = self.trinomial(nu, k - nu, l + nu + 1) * nchoosek(
         2 * (l + nu + 1 + k), l + nu + 1 + k)
     aux_4 = nchoosek(2.0 * (l + nu + 1), l + nu + 1)
     return (aux_1 * aux_2 * aux_3) / aux_4
Example 7
def equidistant_barycentric_weights( n ):
    w = numpy.zeros( n, numpy.double )
    for i in xrange( 0, n - n%2, 2 ):
        w[i] = 1. * nchoosek( n-1, i )
        w[i+1] = -1. * nchoosek( n-1, i+1 )
    if ( n%2 == 1 ): 
        w[n-1] = 1.
    return w
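The loop fills w[i] = (-1)**i * nchoosek(n - 1, i): the barycentric weights for equally spaced nodes are signed binomial coefficients, with the trailing element handled separately when n is odd. For example:

# equidistant_barycentric_weights(5)  ->  array([ 1., -4.,  6., -4.,  1.])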
Example 8
 def likelihood_func_deriv(theta, N, X, B, z_A, z_B, mutation):
     # derivative of the likelihood of the function for theta
     p_A, p_B, f_A, f_B = p_read(theta, N, X, B, mutation)
     p_A_deriv = B * (1.0-B) * ((1-theta+B*theta) **(-2.0))
     p_B_deriv = ((1.0-B*theta) * (-B) - (B-B*theta) * (-B))/ ((1.0-B*theta)**2)
     # product rule: d/dtheta [C(N,X) * p**X * (1-p)**(N-X)]
     f_A_deriv = nchoosek(N,X) * (X*p_A**(X-1)*(1.0-p_A)**(N-X) - (N-X)*p_A**X*(1.0-p_A)**(N-X-1)) * p_A_deriv
     f_B_deriv = nchoosek(N,X) * (X*p_B**(X-1)*(1.0-p_B)**(N-X) - (N-X)*p_B**X*(1.0-p_B)**(N-X-1)) * p_B_deriv
     l_deriv = -1.0*nsum(1.0/(z_A*f_A + z_B*f_B) * (z_A * f_A_deriv + z_B * f_B_deriv))
     return l_deriv
Example 9
    def find_most_distant(self,
                          input_sample,
                          num_samples,
                          num_params,
                          k_choices,
                          num_groups=None):
        """
        Finds the 'k_choices' most distant choices from the
        'num_samples' trajectories contained in 'input_sample'

        Arguments
        ---------
        input_sample : numpy.ndarray
        num_samples : int
            The number of samples to generate
        num_params : int
            The number of parameters
        k_choices : int
            The number of optimal trajectories
        num_groups : int, default=None
            The number of groups

        Returns
        -------
        numpy.ndarray
        """
        # Now evaluate the (N choose k_choices) possible combinations
        if nchoosek(num_samples, k_choices) >= sys.maxsize:
            raise ValueError("Number of combinations is too large")
        number_of_combinations = int(nchoosek(num_samples, k_choices))

        # First compute the distance matrix for each possible pairing
        # of trajectories and store in a shared-memory array
        distance_matrix = self.compute_distance_matrix(input_sample,
                                                       num_samples, num_params,
                                                       num_groups)

        # Initialise the output array
        chunk = int(1e6)
        if chunk > number_of_combinations:
            chunk = number_of_combinations

        counter = 0
        # Generate a list of all the possible combinations
        combo_gen = combinations(list(range(num_samples)), k_choices)
        scores = np.zeros(number_of_combinations, dtype=np.float32)
        # Generate the pairwise indices once
        pairwise = np.array(
            [y for y in combinations(list(range(k_choices)), 2)])

        for combos in self.grouper(chunk, combo_gen):
            scores[(counter * chunk):((counter + 1) * chunk)] \
                = self.mappable(combos, pairwise, distance_matrix)
            counter += 1
        return scores
Example 10
    def find_most_distant(self, input_sample, num_samples,
                          num_params, k_choices, num_groups=None):
        """
        Finds the 'k_choices' most distant choices from the
        'num_samples' trajectories contained in 'input_sample'

        Arguments
        ---------
        input_sample : numpy.ndarray
        num_samples : int
            The number of samples to generate
        num_params : int
            The number of parameters
        k_choices : int
            The number of optimal trajectories
        num_groups : int, default=None
            The number of groups

        Returns
        -------
        numpy.ndarray
        """
        # Now evaluate the (N choose k_choices) possible combinations
        if nchoosek(num_samples, k_choices) >= sys.maxsize:
            raise ValueError("Number of combinations is too large")
        number_of_combinations = int(nchoosek(num_samples, k_choices))

        # First compute the distance matrix for each possible pairing
        # of trajectories and store in a shared-memory array
        distance_matrix = self.compute_distance_matrix(input_sample,
                                                       num_samples,
                                                       num_params,
                                                       num_groups)

        # Initialise the output array
        chunk = int(1e6)
        if chunk > number_of_combinations:
            chunk = number_of_combinations

        counter = 0
        # Generate a list of all the possible combinations
        combo_gen = combinations(list(range(num_samples)), k_choices)
        scores = np.zeros(number_of_combinations, dtype=np.float32)
        # Generate the pairwise indices once
        pairwise = np.array(
            [y for y in combinations(list(range(k_choices)), 2)])

        for combos in self.grouper(chunk, combo_gen):
            scores[(counter * chunk):((counter + 1) * chunk)] \
                = self.mappable(combos, pairwise, distance_matrix)
            counter += 1
        return scores
Example 11
 def likelihood_func_deriv(theta, N, X, B, z_A, z_B, mutation):
     # derivative of the likelihood of the function for theta
     p_A, p_B, f_A, f_B = p_read(theta, N, X, B, mutation)
     p_A_deriv = B * (1.0 - B) * ((1 - theta + B * theta)**(-2.0))
     p_B_deriv = ((1.0 - B * theta) * (-B) - (B - B * theta) *
                  (-B)) / ((1.0 - B * theta)**2)
     # product rule: d/dtheta [C(N,X) * p**X * (1-p)**(N-X)]
     f_A_deriv = nchoosek(N, X) * (X * p_A**(X - 1) * (1.0 - p_A)**(N - X) -
                                   (N - X) * p_A**X *
                                   (1.0 - p_A)**(N - X - 1)) * p_A_deriv
     f_B_deriv = nchoosek(N, X) * (X * p_B**(X - 1) * (1.0 - p_B)**(N - X) -
                                   (N - X) * p_B**X *
                                   (1.0 - p_B)**(N - X - 1)) * p_B_deriv
     l_deriv = -1.0 * nsum(1.0 / (z_A * f_A + z_B * f_B) *
                           (z_A * f_A_deriv + z_B * f_B_deriv))
     return l_deriv
Example 12
def build_pce_regression( pts_filename, vals_filename, rv_trans ):
    # Must be a ( num_dims x num_pts ) matrix
    pts = numpy.loadtxt( pts_filename, delimiter = ',' )
    # must be a ( num_pts x 1 ) vector
    vals = numpy.loadtxt( vals_filename, delimiter = ',' )
    vals = vals.reshape( vals.shape[0], 1 )

    num_dims, num_pts = pts.shape 

    # find degree of PCE
    degree = 2
    while ( True ):
        num_basis_terms = nchoosek( degree + num_dims, num_dims )
        if ( num_basis_terms > num_pts ):
            break
        degree += 1
    degree -= 1

    # define the parameters of the PCE
    pce = PolynomialChaosExpansion()
    pce.set_random_variable_transformation( rv_trans )
    pce.define_isotropic_expansion( degree, 1. )

    # form matrices needed for normal equations 
    V, build_vals = pce.build_linear_system( pts, vals, 
                                             False )
    assert V.shape[1] <= V.shape[0]

    # Solve least squares to find PCE coefficients    
    coeff = numpy.linalg.solve( numpy.dot( V.T, V ), 
                                numpy.dot( V.T, build_vals ) )
    pce.set_coefficients( coeff.reshape( coeff.shape[0], 1 ) )

    return pce
Example 13
    def bootstrappable_pce_regression(pts, vals):
        ## bootstrap gives this function a tuple of arrays of shape (N,...)
        ## but PCE expects pts to be of shape (...,N), so we transpose
        pts=pts.transpose()
        num_dims, num_pts = pts.shape
        #num_dims-= 1
        #pts = data[:,range(num_dims)]
        #vals = data[:,num_dims]

        # find degree of PCE
        degree = 2
        while ( True ):
            num_basis_terms = nchoosek( degree + num_dims, num_dims )
            if ( num_basis_terms > num_pts ):
                break
            degree += 1
        degree -= 1

        # define the parameters of the PCE
        pce = PolynomialChaosExpansion()
        pce.set_random_variable_transformation( rv_trans )
        pce.define_isotropic_expansion( degree, 1. )

        # form matrices needed for normal equations
        V, build_vals = pce.build_linear_system( pts, vals,
                                                 False )
        assert V.shape[1] <= V.shape[0]

        # Solve least squares to find PCE coefficients
        coeff = numpy.linalg.solve( numpy.dot( V.T, V ),
                                    numpy.dot( V.T, build_vals ) )
        pce.set_coefficients( coeff.reshape( coeff.shape[0], 1 ) )
        return get_tsi(pce,qoi=0)
Example 14
 def Yljm(self, l, j, m):
     aux_1 = np.power(-1, j) * (np.sqrt(2 * l + 1) / np.power(2, l))
     aux_2 = self.trinomial(m, j, l - m - 2 * j) * nchoosek(
         2 * (l - j), l - j)
     aux_3 = np.sqrt(self.trinomial(m, m, l - m))
     y = (aux_1 * aux_2) / aux_3
     return y
Example 15
def model(N, k_choices, distance_matrix):
    
    if k_choices >= N:
        raise ValueError("k_choices must be less than N")

    m = Model("distance1")
    I = range(N)

    distance_matrix = np.array(distance_matrix / distance_matrix.max(), dtype=np.float64)

    dm = distance_matrix ** 2

    y, x = {}, {}
    for i in I:
        y[i] = m.addVar(vtype="B", obj=0, name="y[%s]" % i)
        for j in range(i + 1, N):
            x[i, j] = m.addVar(vtype="B", obj=1.0, name="x[%s,%s]" % (i, j))
    m.update()

    m.setObjective(quicksum([x[i, j] * dm[j][i] for i in I for j in range(i + 1, N)]))

    # Add constraints to the model     
    m.addConstr(quicksum([y[i] for i in I]) <= k_choices, "27")
    
    for i in I: 
        for j in range(i + 1, N):
            m.addConstr(x[i, j] <= y[i], "28-%s-%s" % (i, j))
            m.addConstr(x[i, j] <= y[j], "29-%s-%s" % (i, j))
            m.addConstr(y[i] + y[j] <= 1 + x[i, j], "30-%s-%s" % (i, j))
    
    m.addConstr(quicksum([x[i, j] for i in I for j in range(i + 1, N)]) <= nchoosek(k_choices, 2), "Cut_1")
    m.update()
    return m
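Constraints "28", "29" and "30" are the standard linearization of a product of binary variables: together they force x[i, j] == y[i] * y[j], so the objective counts squared distances only between pairs of selected trajectories, while "Cut_1" caps the number of active pairs at nchoosek(k_choices, 2).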
Example 16
 def Yljm(self, l, j, m):
     aux_1 = np.power(-1, j) * (np.sqrt(2 * l + 1) / np.power(2, l))
     aux_2 = self.trinomial(
         m, j, l - m - 2 * j) * nchoosek(2 * (l - j), l - j)
     aux_3 = np.sqrt(self.trinomial(m, m, l - m))
     y = (aux_1 * aux_2) / aux_3
     return y
Example 17
def hasse_deriv1(g, x, r, m):
    val = GF.GF(0, m)
    for i in range(r, len(g)):
        binom = nchoosek(i, r)
        tmp = (binom - 2 * math.floor(binom / 2))
        if abs(tmp - 1) < 1e-8:
            val = val + g[i] * (x**(i - r))
    return val
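The expression binom - 2 * math.floor(binom / 2) is simply binom mod 2, so the Hasse derivative keeps only the terms whose binomial coefficient is odd, as expected in characteristic 2. The floating-point round trip can lose precision for large i; an exact integer test follows from Lucas' theorem (a sketch, not part of the original module):

def binom_is_odd(i, r):
    # C(i, r) is odd iff every bit of r is also set in i (Lucas' theorem mod 2)
    return (i & r) == r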
Example 18
def compute_true_delta_rand(ctable, n1, n2, n):
    """Compute change in RI obtained by merging rows n1 and n2.

    This function assumes ctable is normalized to sum to 1.
    """
    localct = n*ctable[(n1,n2),]
    delta_sxy = 1.0/2*((localct.sum(axis=0)**2).sum()-(localct**2).sum())
    delta_sx = 1.0/2*(localct.sum()**2 - (localct.sum(axis=1)**2).sum())
    return (2*delta_sxy - delta_sx) / nchoosek(n,2)
Example 19
def compute_true_delta_rand(ctable, n1, n2, n):
    """Compute change in RI obtained by merging rows n1 and n2.

    This function assumes ctable is normalized to sum to 1.
    """
    localct = n * ctable[(n1, n2), ]
    delta_sxy = 1.0 / 2 * ((localct.sum(axis=0)**2).sum() - (localct**2).sum())
    delta_sx = 1.0 / 2 * (localct.sum()**2 - (localct.sum(axis=1)**2).sum())
    return (2 * delta_sxy - delta_sx) / nchoosek(n, 2)
Example 20
def find_largest_degree_overdetermined( num_dims , num_pts ):
     # find degree of PCE
    degree = 1
    while ( True ):
        num_basis_terms = nchoosek( degree + num_dims, num_dims )
        if ( num_basis_terms > num_pts ):
            break
        degree += 1
    degree -= 1
    return degree
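For example, with num_dims = 2 and num_pts = 20 the basis sizes are C(3, 2) = 3, C(4, 2) = 6, C(5, 2) = 10, C(6, 2) = 15 and C(7, 2) = 21, so the loop breaks at degree 5 and the function returns 4, the largest degree for which the least-squares system stays overdetermined.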
Example 21
def _compute_minimum_spanning_tree(shapes, root_vertex, level_str, verbose):
    # initialize edges and weights matrix
    n_vertices = shapes[0].n_points
    n_edges = nchoosek(n_vertices, 2)
    weights = np.zeros((n_vertices, n_vertices))
    edges = np.empty((n_edges, 2), dtype=np.int32)

    # fill edges and weights
    e = -1
    for i in range(n_vertices - 1):
        for j in range(i + 1, n_vertices, 1):
            # edge counter
            e += 1

            # print progress
            if verbose:
                print_dynamic(
                    '{}Computing complete graph`s weights - {}'.format(
                        level_str,
                        progress_bar_str(float(e + 1) / n_edges,
                                         show_bar=False)))

            # fill in edges
            edges[e, 0] = i
            edges[e, 1] = j

            # create data matrix of edge
            diffs_x = [s.points[i, 0] - s.points[j, 0] for s in shapes]
            diffs_y = [s.points[i, 1] - s.points[j, 1] for s in shapes]
            coords = np.array([diffs_x, diffs_y])

            # compute mean
            m = np.mean(coords, axis=1)

            # compute covariance
            c = np.cov(coords)

            # get weight
            for im in range(len(shapes)):
                weights[i, j] += -np.log(
                    multivariate_normal.pdf(coords[:, im], mean=m, cov=c))
            weights[j, i] = weights[i, j]

    # create undirected graph
    complete_graph = UndirectedGraph(edges)

    if verbose:
        print_dynamic('{}Minimum spanning graph computed.\n'.format(level_str))

    # compute minimum spanning graph
    return complete_graph.minimum_spanning_tree(weights, root_vertex)
Example 22
def _compute_minimum_spanning_tree(shapes, root_vertex, level_str, verbose):
    # initialize edges and weights matrix
    n_vertices = shapes[0].n_points
    n_edges = nchoosek(n_vertices, 2)
    weights = np.zeros((n_vertices, n_vertices))
    edges = np.empty((n_edges, 2), dtype=np.int32)

    # fill edges and weights
    e = -1
    for i in range(n_vertices-1):
        for j in range(i+1, n_vertices, 1):
            # edge counter
            e += 1

            # print progress
            if verbose:
                print_dynamic('{}Computing complete graph`s weights - {}'.format(
                    level_str,
                    progress_bar_str(float(e + 1) / n_edges,
                                     show_bar=False)))

            # fill in edges
            edges[e, 0] = i
            edges[e, 1] = j

            # create data matrix of edge
            diffs_x = [s.points[i, 0] - s.points[j, 0] for s in shapes]
            diffs_y = [s.points[i, 1] - s.points[j, 1] for s in shapes]
            coords = np.array([diffs_x, diffs_y])

            # compute mean
            m = np.mean(coords, axis=1)

            # compute covariance
            c = np.cov(coords)

            # get weight
            for im in range(len(shapes)):
                weights[i, j] += -np.log(multivariate_normal.pdf(coords[:, im],
                                                                 mean=m, cov=c))
            weights[j, i] = weights[i, j]

    # create undirected graph
    complete_graph = UndirectedGraph(edges)

    if verbose:
        print_dynamic('{}Minimum spanning graph computed.\n'.format(level_str))

    # compute minimum spanning graph
    return complete_graph.minimum_spanning_tree(weights, root_vertex)
Example 23
def find_most_distant(input_sample, N, num_params, k_choices):
    '''
    Finds the 'k_choices' most distant choices from the
    'N' trajectories contained in 'input_sample'
    '''

    # Now evaluate the (N choose k_choices) possible combinations
    if nchoosek(N, k_choices) >= sys.maxsize:
        raise ValueError("Number of combinations is too large")
    number_of_combinations = int(nchoosek(N, k_choices))

    # First compute the distance matrix for each possible pairing
    # of trajectories and store in a shared-memory array
    distance_matrix = compute_distance_matrix(input_sample,
                                              N,
                                              num_params)


    # Initialise the output array

    chunk = int(1e6)
    if chunk > number_of_combinations:
        chunk = number_of_combinations

    counter = 0
    # Generate a list of all the possible combinations
    #combos = np.array([x for x in combinations(range(N),k_choices)])
    combo_gen = combinations(list(range(N)),k_choices)
    scores = np.empty(number_of_combinations,dtype=np.float32)
    # Generate the pairwise indices once
    pairwise = np.array([y for y in combinations(list(range(k_choices)),2)])

    for combos in grouper(chunk, combo_gen):
        scores[(counter*chunk):((counter+1)*chunk)] = mappable(combos, pairwise, distance_matrix)
        counter += 1
    return scores
Example 24
def model(N, k_choices, distance_matrix):

    if k_choices >= N:
        raise ValueError("k_choices must be less than N")

    m = Model("distance1")
    I = range(N)
    big_M = k_choices + 1

    distance_matrix = distance_matrix / distance_matrix.max()

    dm = distance_matrix**2

    y, x = {}, {}
    for i in I:
        y[i] = m.addVar(vtype="B", obj=0, name="y[%s]" % i)
        for j in range(i + 1, N):
            x[i, j] = m.addVar(vtype="B", obj=1.0, name="x[%s,%s]" % (i, j))
    m.update()

    m.setObjective(
        quicksum([x[i, j] * dm[j][i] for i in I for j in range(i + 1, N)]))

    m.addConstr(
        quicksum([x[i, j] for i in I
                  for j in range(i + 1, N)]) == nchoosek(k_choices, 2), "All")

    # Each selected trajectory must appear in exactly k_choices - 1 of the chosen pairs
    for i in I:
        m.addConstr(
            quicksum(x[i, j] for j in range(i + 1, N)) +
            quicksum(x[k, i] for k in range(0, i)) - (y[i] * big_M), '<=',
            (k_choices - 1), "a:Only k-1 scores in any row/column for %s" % i)
        m.addConstr(
            quicksum(x[i, j] for j in range(i + 1, N)) +
            quicksum(x[k, i] for k in range(0, i)) + (y[i] * big_M), '>=',
            (k_choices - 1), "b:Only k-1 scores in any row/column for %s" % i)

    m.addConstr(quicksum(y[i] for i in I),
                "==",
                N - k_choices,
                name="Only %s hold" % (N - k_choices))
    m.update()
    return m
Example 25
def model(N, k_choices, distance_matrix):
    
    if k_choices >= N:
        raise ValueError("k_choices must be less than N")

    m = Model("distance1")
    I = range(N)
    big_M = k_choices + 1

    distance_matrix = distance_matrix / distance_matrix.max()

    dm = distance_matrix ** 2

    y, x = {}, {}
    for i in I:
        y[i] = m.addVar(vtype="B", obj=0, name="y[%s]" % i)
        for j in range(i + 1, N):
            x[i, j] = m.addVar(vtype="B", obj=1.0, name="x[%s,%s]" % (i, j))
    m.update()

    m.setObjective(quicksum([x[i, j] * dm[j][i] for i in I for j in range(i + 1, N)]))

    m.addConstr(quicksum([x[i, j] for i in I for j in range(i + 1, N)]) == nchoosek(k_choices, 2), "All")

    # Each selected trajectory must appear in exactly k_choices - 1 of the chosen pairs
    for i in I:
        m.addConstr(quicksum(x[i, j] for j in range(i + 1, N)) + quicksum(x[k, i] for k in range(0, i)) - (y[i] * big_M),
                    '<=',
                    (k_choices - 1),
                    "a:Only k-1 scores in any row/column for %s" % i)
        m.addConstr(quicksum(x[i, j] for j in range(i + 1, N)) + quicksum(x[k, i] for k in range(0, i)) + (y[i] * big_M),
                    '>=',
                    (k_choices - 1),
                    "b:Only k-1 scores in any row/column for %s" % i)

    m.addConstr(quicksum(y[i] for i in I), "==", N - k_choices, name="Only %s hold" % (N - k_choices))
    m.update()
    return m
Example 26
def central_moments_from_noncentral_sums(a):
    """Compute moments about the mean from sums of x**i, for i=0, ..., len(a).

    The first two moments about the mean (1 and 0) would always be 
    uninteresting so the function returns n (the sample size) and mu (the 
    sample mean) in their place.
    """
    a = a.astype(np.double)
    if len(a) == 1:
        return a
    N = a.copy()[0]
    a /= N
    mu = a.copy()[1]
    ac = np.zeros_like(a)
    for n in range(2,len(a)):
        js = np.arange(n+1)
        if a.ndim > 1: js = js[:,np.newaxis]
        # Formula found in Wikipedia page for "Central moment", 2011-07-31
        ac[n] = (nchoosek(n,js) * 
                    (-1)**(n-js) * a[js.ravel()] * mu**(n-js)).sum(axis=0)
    ac[0] = N
    ac[1] = mu
    return ac
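The loop implements the standard raw-to-central moment identity mu_n = sum_{j=0}^{n} C(n, j) * (-1)**(n - j) * m_j * mu**(n - j), where m_j = a[j] / N is the j-th raw moment. A quick check against the direct definition, assuming nchoosek is an array-capable binomial such as scipy.special.comb:

import numpy as np
x = np.array([1.0, 2.0, 4.0, 7.0])
sums = np.array([np.sum(x**i) for i in range(4)])  # sums of x**i, i = 0..3
ac = central_moments_from_noncentral_sums(sums)
assert np.isclose(ac[2], np.mean((x - x.mean())**2))  # second central moment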
Example 27
def central_moments_from_noncentral_sums(a):
    """Compute moments about the mean from sums of x**i, for i=0, ..., len(a).

    The first two moments about the mean (1 and 0) would always be 
    uninteresting so the function returns n (the sample size) and mu (the 
    sample mean) in their place.
    """
    a = a.astype(double)
    if len(a) == 1:
        return a
    N = a.copy()[0]
    a /= N
    mu = a.copy()[1]
    ac = zeros_like(a)
    for n in range(2,len(a)):
        js = arange(n+1)
        if a.ndim > 1: js = js[:,newaxis]
        # Formula found in Wikipedia page for "Central moment", 2011-07-31
        ac[n] = (nchoosek(n,js) * 
                    (-1)**(n-js) * a[js.ravel()] * mu**(n-js)).sum(axis=0)
    ac[0] = N
    ac[1] = mu
    return ac
Example 28
def compute_local_rand_change(s1, s2, n):
    """Compute change in rand if we merge disjoint sizes s1,s2 in volume n."""
    return float(s1*s2)/nchoosek(n,2)
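For instance, merging two disjoint clusters of sizes 3 and 4 in a volume of 10 changes the Rand index by 3 * 4 / C(10, 2) = 12 / 45, roughly 0.267.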
Example 29
    def zernike(self, G, N):
        V = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for a, b, c, alpha in nest(
                lambda: xrange(N // 2 + 1),
                lambda _a: xrange(N - 2 * _a + 1),
                lambda _a, _b: xrange(N - 2 * _a - _b + 1),
                lambda _a, _b, _c: xrange(_a + _c + 1),
        ):
            V[a, b, c] += np.power(IMAG_CONST, alpha) * \
                nchoosek(a + c, alpha) * G[2 * a + c - alpha, alpha, b]

        W = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for a, b, c, alpha in nest(
                lambda: xrange(N // 2 + 1),
                lambda _a: xrange(N - 2 * _a + 1),
                lambda _a, _b: xrange(N - 2 * _a - _b + 1),
                lambda _a, _b, _c: xrange(_a + 1),
        ):
            W[a, b, c] += np.power(-1, alpha) * np.power(2, a - alpha) * \
                nchoosek(a, alpha) * V[a - alpha, b, c + 2 * alpha]

        X = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for a, b, c, alpha in nest(
                lambda: xrange(N // 2 + 1),
                lambda _a: xrange(N - 2 * _a + 1),
                lambda _a, _b: xrange(N - 2 * _a - _b + 1),
                lambda _a, _b, _c: xrange(_a + 1),
        ):
            X[a, b, c] += nchoosek(a, alpha) * W[a - alpha, b + 2 * alpha, c]

        Y = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for l, nu, m, j in nest(
                lambda: xrange(N + 1),
                lambda _l: xrange((N - _l) // 2 + 1),
                lambda _l, _nu: xrange(_l + 1),
                lambda _l, _nu, _m: xrange((_l - _m) // 2 + 1),
        ):
            Y[l, nu, m] += self.Yljm(l, j, m) * X[nu + j, l - m - 2 * j, m]

        Z = np.zeros([N + 1, N + 1, N + 1], dtype=complex)
        for n, l, m, nu, in nest(
                lambda: xrange(N + 1),
                lambda _n: xrange(_n + 1),
                # there's an if...mod missing in this but it
                # still works?
                lambda _n, _l: xrange(_l + 1),
                lambda _n, _l, _m: xrange((_n - _l) // 2 + 1),
        ):
            k = (n - l) // 2
            Z[n, l, m] += (3 / (4 * PI_CONST)) * \
                self.Qklnu(k, l, nu) * np.conj(Y[l, nu, m])

        for n, l, m in nest(
                lambda: xrange(N + 1),
                lambda _n: xrange(_n + 1),
                lambda _n, _l: xrange(_l + 1),
        ):
            if np.mod(np.sum([n, l, m]), 2) == 0:
                Z[n, l, m] = np.real(Z[n, l, m]) - \
                    np.imag(Z[n, l, m]) * IMAG_CONST
            else:
                Z[n, l, m] = -np.real(Z[n, l, m]) + \
                    np.imag(Z[n, l, m]) * IMAG_CONST

        return Z
Example 30
 def NumCoefficients(self):
     """
      There are (n + d) choose n coefficients, where n is the degree of the polynomial and d is the dimension.
      :return: The number of coefficients of a polynomial with the given degree and dimension
     """
     return nchoosek(self.degree + self.dimension, self.degree, exact=True)
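The exact=True keyword matches scipy.special.comb, which is presumably what nchoosek is bound to here (the import is not shown). For a degree-2 polynomial in 3 dimensions this gives C(5, 2) = 10 coefficients: 1, x, y, z, x**2, y**2, z**2, xy, xz and yz.

from scipy.special import comb as nchoosek
assert nchoosek(2 + 3, 2, exact=True) == 10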
Example 31
def compute_local_rand_change(s1, s2, n):
    """Compute change in rand if we merge disjoint sizes s1,s2 in volume n."""
    return float(s1 * s2) / nchoosek(n, 2)