Example #1
def find_caught_instances(X_pos_rule, X_neg_rule, remain_pos, remain_neg):
    caught_pos_rule = X_pos_rule & remain_pos
    caught_neg_rule = X_neg_rule & remain_neg

    ncaught_pos_rule = gmpy.popcount(caught_pos_rule)
    ncaught_neg_rule = gmpy.popcount(caught_neg_rule)

    return caught_pos_rule, caught_neg_rule, ncaught_pos_rule, ncaught_neg_rule
Example #2
def find_remain_instances(caught_pos_rule, caught_neg_rule, remain_pos,
                          remain_neg):
    remain_pos_rule = remain_pos - caught_pos_rule
    remain_neg_rule = remain_neg - caught_neg_rule

    nremain_pos_rule = gmpy.popcount(remain_pos_rule)
    nremain_neg_rule = gmpy.popcount(remain_neg_rule)

    return remain_pos_rule, remain_neg_rule, nremain_pos_rule, nremain_neg_rule
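Both helpers treat arbitrary-precision integers as bitsets over the training data, with bit i marking instance i, so gmpy.popcount counts instances per mask in one call. A minimal usage sketch (the toy masks below are illustrative, not from the source):

import gmpy

X_pos_rule, X_neg_rule = 0b1011, 0b0100  # instances covered by the rule
remain_pos, remain_neg = 0b1111, 0b1111  # instances no earlier rule caught

caught_pos, caught_neg, n_pos, n_neg = find_caught_instances(
    X_pos_rule, X_neg_rule, remain_pos, remain_neg)
assert (n_pos, n_neg) == (3, 1)
# arithmetic subtraction acts as set difference because caught is a subset of remain
remain = find_remain_instances(caught_pos, caught_neg, remain_pos, remain_neg)
assert remain[:2] == (0b0100, 0b1011)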
Example #4
def gen(token_bits = 5, degree=6, initial_vector=0b111111, coefficients=0b0000110):
    """
    LFSR keystream generator

    Parameters:
        degree: the number of terms in the LFSR
        initial_vector: an integer, the bits of which comprise the initial values of each term in the LFSR (the LSB will be the first bit yielded)
        coefficients: an integer, the bits of which comprise the coefficients of each term (where the LSB is the constant term)

    In other words, the feedback to the LFSR is computed as the XOR of all of the bits in ((vector << 1) | 1) & coefficients.

    Returns: a generator which yields ints corresponding to the tokens of the keystream
    """
    from gmpy import popcount
    x = initial_vector
    out_byte = 0
    out_ind = 0
    while True:
        out_byte |= (x & 1) << (token_bits - 1 - out_ind)
        out_ind = (out_ind + 1) % token_bits
        if out_ind == 0:
            yield out_byte
            out_byte = 0
        feedback = popcount(((x << 1) | 1) & coefficients) % 2
        x = (x >> 1) | (feedback << (degree - 1))
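A quick usage sketch: with the defaults above, each yielded token packs five successive output bits, most significant bit first (assuming gmpy is installed):

ks = gen()
tokens = [next(ks) for _ in range(4)]
assert all(0 <= t < 1 << 5 for t in tokens)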
Example #6
    def add(self, key):
        if self.pivot is None:
            self.bucket.append(key)

            if len(self.bucket) > self.bktree.bucket_sz:
                self.pivot = self.bucket.pop()

                self.childs = {}
                for k in self.bucket:
                    self.childs.setdefault(gmpy.popcount(self.pivot ^ k),
                                           BKNode(self.bktree)).add(k)

                del self.bucket
        else:
            self.childs.setdefault(gmpy.popcount(self.pivot ^ key),
                                   BKNode(self.bktree)).add(key)
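add is a method of a BK-tree node; the surrounding classes are not shown in this snippet. A minimal sketch of the scaffolding it assumes (the class layout and the bucket_sz default are guesses):

class BKTree(object):
    def __init__(self, bucket_sz=16):
        self.bucket_sz = bucket_sz     # bucket capacity before a node splits
        self.root = BKNode(self)

class BKNode(object):
    def __init__(self, bktree):
        self.bktree = bktree
        self.pivot = None              # node stays a leaf until the bucket overflows
        self.bucket = []

Keys are integers (e.g. hashes) compared by Hamming distance, so child edges are labeled with gmpy.popcount(pivot ^ key).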
Example #7
def get_selection_transition_matrix(selection, nchromosomes, npositions):
    """
    Note that this includes only selection and not recombination or mutation.
    Therefore the transition matrix will be very sparse.
    @param selection: a fitness ratio
    @param nchromosomes: number of chromosomes in the population
    @param npositions: number of positions per chromosome
    """
    nstates = 1 << (nchromosomes * npositions)
    P = np.zeros((nstates, nstates))
    for parent_chroms in product(range(1 << npositions), repeat=nchromosomes):
        # define the source index
        source_index = chroms_to_index(parent_chroms, npositions)
        # get the distribution over indices into the parental population
        parent_index_distn = np.zeros(nchromosomes)
        for i, chrom in enumerate(parent_chroms):
            parent_index_distn[i] = selection**gmpy.popcount(chrom)
        parent_index_distn /= np.sum(parent_index_distn)
        # choose child chromosomes independently
        for parent_idxs in product(range(nchromosomes), repeat=nchromosomes):
            # define the sink index and conditional probability
            p = 1
            sink_index = 0
            for i in parent_idxs:
                p *= parent_index_distn[i]
                child_chrom = parent_chroms[i]
                sink_index <<= npositions
                sink_index |= child_chrom
            P[source_index, sink_index] += p
    return P
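chroms_to_index is referenced but not defined here; a definition consistent with how sink_index is assembled inside the loop (shift by npositions, then OR in each chromosome) would be:

def chroms_to_index(chroms, npositions):
    # assumed helper: concatenate the chromosomes' bit patterns
    # into a single population-state index
    index = 0
    for chrom in chroms:
        index = (index << npositions) | chrom
    return index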
Example #8
def get_selection_transition_matrix_s(ci_to_short, short_to_count,
                                      sorted_chrom_lists, selection,
                                      nchromosomes, npositions):
    """
    Note that this includes only selection and not recombination or mutation.
    Therefore the transition matrix will be very sparse.
    @param selection: a fitness ratio
    @param nchromosomes: number of chromosomes in the population
    @param npositions: number of positions per chromosome
    """
    nstates = len(sorted_chrom_lists)
    P = np.zeros((nstates, nstates))
    for parent_short, parent_chroms in enumerate(sorted_chrom_lists):
        parent_index_distn = np.zeros(nchromosomes)
        for i, chrom in enumerate(parent_chroms):
            parent_index_distn[i] = selection**gmpy.popcount(chrom)
        parent_index_distn /= np.sum(parent_index_distn)
        for parent_idxs in product(range(nchromosomes), repeat=nchromosomes):
            child_chroms = [parent_chroms[i] for i in parent_idxs]
            p = 1
            for i in parent_idxs:
                p *= parent_index_distn[i]
            child_ci = chroms_to_index(sorted(child_chroms), npositions)
            child_short = ci_to_short[child_ci]
            P[parent_short, child_short] += p
    return P
Example #11
    def run(self, data):
        """Compute biclustering.

        Parameters
        ----------
        data : numpy.ndarray
        """
        data = check_array(data, dtype=bool, copy=True)
        self._validate_parameters()

        data = [np.packbits(row) for row in data]
        biclusters = []
        patterns_found = set()

        for ri, rj in combinations(data, 2):
            pattern = np.bitwise_and(ri, rj)
            pattern_cols = sum(popcount(int(n)) for n in pattern)

            if pattern_cols >= self.min_cols and self._is_new(
                    patterns_found, pattern):
                rows = [
                    k for k, r in enumerate(data) if self._match(pattern, r)
                ]

                if len(rows) >= self.min_rows:
                    cols = np.where(np.unpackbits(pattern) == 1)[0]
                    biclusters.append(Bicluster(rows, cols))

        return Biclustering(biclusters)
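run is a method of a bit-pattern biclustering model whose class definition is not shown. A hypothetical usage sketch (the class name and constructor parameters are assumptions):

import numpy as np

data = np.array([[1, 1, 0, 1],
                 [1, 1, 0, 1],
                 [0, 1, 1, 0]], dtype=bool)
model = BitPatternBiclustering(min_rows=2, min_cols=2)  # name and params assumed
result = model.run(data)  # Biclustering holding the detected Bicluster rows/cols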
Example #13
    def nn_search(self):
        if self.pivot is None:
            #self.bktree.xx += len(self.bucket)
            dist = min(gmpy.popcount(self.bktree.key ^ k) for k in self.bucket)

            self.bktree.min_dist = min(self.bktree.min_dist, dist)
        else:
            #self.bktree.xx += 1
            dist_root = gmpy.popcount(self.bktree.key ^ self.pivot)

            self.bktree.min_dist = min(self.bktree.min_dist, dist_root)

            for dist in sorted(self.childs, key=lambda x: abs(x - dist_root)):
                if abs(dist_root - dist) > self.bktree.min_dist:
                    break

                self.childs[dist].nn_search()
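The early break is justified by the triangle inequality: every key in the child subtree labeled dist lies at least |dist_root - dist| from the query key, and the children are visited in increasing order of that lower bound. The query state lives on the tree object; a sketch of the assumed driver (method and attribute names are guesses):

    def nn_distance(self, key):
        self.key = key                # query key for this search
        self.min_dist = float('inf')  # best distance found so far
        self.root.nn_search()
        return self.min_dist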
Example #14
def precalculate_probabilities(p, bits=8):
    """
    Pre-calculates the probability of drawing each of the 2^bits patterns,
    so that the probability that any bit will be 1, is p.
    Returns the patterns and the corresponding stochastic vector.
    """

    weights = np.array([
        scipy.special.binom(bits, i) * (p**i) * ((1 - p)**(bits - i))
        for i in xrange(bits + 1)
    ])
    values = np.arange(2**bits, dtype=np.ubyte)
    probabilities = np.zeros(values.shape)
    for i, v in enumerate(values):
        probabilities[i] = weights[gmpy.popcount(i)] / (
            scipy.special.binom(bits, gmpy.popcount(i)) + 0.0)
    return values, probabilities
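Dividing the binomial weight by binom(bits, k) leaves each individual pattern with k ones the probability p**k * (1 - p)**(bits - k), so the vector sums to one and can be passed directly to np.random.choice. A quick usage sketch (assuming the numpy/scipy/gmpy imports the snippet relies on):

values, probabilities = precalculate_probabilities(0.3)
assert abs(probabilities.sum() - 1.0) < 1e-12
samples = np.random.choice(values, size=5, p=probabilities)  # random biased bytes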
Example #15
def bitphase_to_nchanges(bitphase, npositions):
    """
    @param bitphase: a python integer
    @param npositions: length of binary array represented by the bitphase
    @return: the number of state changes along the binary array
    """
    nboundaries = npositions - 1
    return gmpy.popcount(gmpy.lowbits(bitphase ^ (bitphase >> 1), nboundaries))
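For example, over four positions the bitphase 0b0110 changes state twice, and a constant phase not at all (assuming gmpy is importable):

assert bitphase_to_nchanges(0b0110, 4) == 2
assert bitphase_to_nchanges(0b1111, 4) == 0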
Example #16
def compute_rule_update(d_star, R_star, d_old, R_old, N_old, unused_old,
                        newItemPosition, X, Y):
    global trainingSize
    N_star = zeros((R_star + 1, 2))
    #  print "newItemPosition is ", newItemPosition, R_star, len(N_old), len(N_star)
    N_star[:newItemPosition] = N_old[:newItemPosition]
    remaining_unused = unused_old[:newItemPosition + 1]
    i = min(newItemPosition, len(unused_old) - 1)
    while remaining_unused[i]:
        j = d_star[i]
        usedj = remaining_unused[i] & X[j]
        remaining_unused.append(remaining_unused[i] - usedj)
        N_star[i, 0] = gmpy.popcount(Y[0] & usedj)
        N_star[i, 1] = gmpy.popcount(Y[1] & usedj)
        i += 1
    if int(sum(N_star)) != trainingSize:
        raise Exception  #bug check
    return N_star, remaining_unused
Example #17
def hammingDist(s1, s2):
    ''' Count the Hamming distance between two equal-length sequences. '''
    assert len(s1) == len(s2)

    if type(s1) == StringType:
        s1 = map(ord, s1)
    if type(s2) == StringType:
        s2 = map(ord, s2)

    return sum([popcount(x ^ y) for x, y in zip(s1, s2)])
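For example (assuming the popcount and StringType imports this Python 2 snippet relies on): 'abc' and 'abd' differ only in their last characters, and ord('c') ^ ord('d') == 0b111, three bits:

assert hammingDist('abc', 'abd') == 3
assert hammingDist([0b1010, 7], [0b0101, 7]) == 4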
Example #18
def sz_sector(L, sz):
    cn = 0
    sec = {}
    invsec = []
    for i in range(2**L):
        if 2 * gmpy.popcount(i) - L == sz:
            sec[i] = cn
            invsec.append(i)
            cn += 1
    return (L, sec, invsec)
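For example, the Sz = 0 sector of a 4-site chain contains the C(4, 2) = 6 basis states with exactly two up-spins (assuming gmpy is importable):

L, sec, invsec = sz_sector(4, 0)
assert len(invsec) == 6
assert all(gmpy.popcount(s) == 2 for s in invsec)
assert [sec[s] for s in invsec] == list(range(6))  # sec inverts invsec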
Example #20
def get_chromosome_distn_fast(selection, recombination, K):
    """
    This is a faster version with more bitwise cleverness.
    """
    nchromosomes, npositions = K.shape
    chromos = [bin_to_int(row) for row in K]
    distn = np.zeros(1<<npositions)
    # sum over all ways to independently pick parental chromosomes
    # this is (nchromosomes)^2 things because repetition is allowed
    for chromo_a in chromos:
        weight_a = selection**gmpy.popcount(chromo_a)
        for chromo_b in chromos:
            weight_b = selection**gmpy.popcount(chromo_b)
            for phase in range(1<<npositions):
                nchanges = bitphase_to_nchanges(phase, npositions)
                weight_phase = 1
                weight_phase *= recombination**nchanges
                weight_phase *= (1-recombination)**(npositions-1-nchanges)
                chromo_c = (chromo_a & phase) | (chromo_b & ~phase)
                distn[chromo_c] += weight_a * weight_b * weight_phase
    return distn / np.sum(distn)
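bin_to_int is not shown; a plausible definition consistent with the bitwise layout used above (an assumption) is:

def bin_to_int(row):
    # pack a 0/1 row of K into a python integer
    x = 0
    for b in row:
        x = (x << 1) | int(b)
    return x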
Example #21
def compute_rule_usage(d_star, R_star, X, Y):
    global trainingSize
    #N_star = zeros((R_star+1,Y.shape[1]))
    #remaining_unused = 2**(Y.shape[0]) - 1
    N_star = zeros((R_star + 1, 2))
    remaining_unused = [(1 << trainingSize) - 1]
    i = 0
    #print X
    while remaining_unused[i]:
        j = d_star[i]
        usedj = remaining_unused[i] & X[j]
        remaining_unused.append(remaining_unused[i] - usedj)
        N_star[i, 0] = gmpy.popcount(Y[0] & usedj)
        N_star[i, 1] = gmpy.popcount(Y[1] & usedj)
        i += 1
    if int(sum(N_star)) != trainingSize:
        print "############"
        print "not equal!!!", int(
            sum(N_star)), trainingSize, v1train, v2train, v3train
        raise Exception  #bug check
    return N_star, remaining_unused
Example #23
def main(args):
    # check args
    if gmpy.popcount(args.ntiles) != 1:
        raise ValueError('the number of tiles should be a power of two')
    # set up the logger
    f = logging.getLogger('toplevel.logger')
    h = logging.StreamHandler()
    h.setFormatter(logging.Formatter('%(message)s %(asctime)s'))
    f.addHandler(h)
    if args.verbose:
        f.setLevel(logging.DEBUG)
    else:
        f.setLevel(logging.WARNING)
    f.info('(local) read the xml contents')
    if args.infile is None:
        xmldata = sys.stdin.read()
    else:
        with open(args.infile) as fin:
            xmldata = fin.read()
    f.info('(local) modify the log filename and chain length xml contents')
    xmldata = beast.set_nsamples(xmldata, args.mcmc_id, args.nsamples)
    xmldata = beast.set_log_filename(xmldata, args.log_id, args.log_filename)
    xmldata = beast.set_log_logevery(xmldata, args.log_id, args.log_logevery)
    f.info('(local) define the hierarchically nested intervals')
    start_stop_pairs = tuple(
        (a + 1, b) for a, b in beasttiling.gen_hierarchical_slices(
            args.tile_width, args.offset, args.tile_width * args.ntiles))
    f.info('(local) run BEAST serially locally and build the R stuff')
    table_string, full_table_string, scripts = get_table_strings_and_scripts(
        xmldata, args.alignment_id, start_stop_pairs, args.nsamples)
    if args.full_table_out:
        f.info('(local) create the verbose R table')
        with open(args.full_table_out, 'w') as fout:
            fout.write(full_table_string)
    f.info('(local) create the composite R script')
    out = StringIO()
    print >> out, 'library(ggplot2)'
    print >> out, 'par(mfrow=c(3,1))'
    for script in scripts:
        print >> out, script
    comboscript = out.getvalue()
    f.info('(local) run R to create the pdf')
    device_name = Form.g_imageformat_to_r_function['pdf']
    retcode, r_out, r_err, image_data = RUtil.run_plotter(
        table_string, comboscript, device_name, keep_intermediate=True)
    if retcode:
        raise RUtil.RError(r_err)
    f.info('(local) write the .pdf file')
    with open(args.outfile, 'wb') as fout:
        fout.write(image_data)
    f.info('(local) return from toplevel')
Example #24
    def min_distance(self, query_address):
        if query_address in self.locations:
            return 0

        min_distance = float('inf')

        for address in self.locations:
            min_distance = min(
                gmpy.popcount(address ^ query_address), min_distance)

            if min_distance == 1:
                break

        return min_distance
Example #25
 def check(self):
     print(self.state)
     n_snake_pieces = gmpy.popcount(self.state)  # number of 1's in state
     n_neighbors = np.zeros(pow(2, self.dimensions))
     for n, bit in enumerate(self.bin_state):
         if bit == '1':
             for d in range(self.dimensions):
                 n_neighbors[n] += (self.bin_state[self.partner_index(
                     n, d)] == '1')
     n_endpoints = len(n_neighbors[n_neighbors == 1])
     n_middles = len(n_neighbors[n_neighbors == 2])
     n_crowded = len(n_neighbors[n_neighbors > 2])
     n_isolated = n_snake_pieces - n_endpoints - n_middles - n_crowded
     return n_snake_pieces, n_endpoints, n_middles, n_crowded, n_isolated
Example #26
    def answer(self, query_address):
        if query_address in self.locations:
            return [self.locations[query_address]], 0

        min_distance = float('inf')

        for address, value in self.locations.viewitems():
            distance = gmpy.popcount(address ^ query_address)
            if distance < min_distance:
                min_distance = distance
                answer_values = [value]
            elif min_distance == distance:
                answer_values.append(value)

        return answer_values, min_distance
Example #28
 def __init__(self, key, mask):
     self._key = key & ~mask  # ensure all masked bits are 0 in _key
     self._mask = mask
     from gmpy import popcount
     self._len = 1 << popcount(mask)
     self._blocks = []
     mask_bits = bin(mask)[::-1][:-2]
     start = None
     for i in range(len(mask_bits) + 1):
         if i < len(mask_bits) and mask_bits[i] == '1':
             if start is None:
                 start = i
         else:
             if start is not None:
                 self._blocks.append((start, i))
                 start = None
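A small worked check of the block extraction (standalone, since the enclosing class is not shown): for mask 0b0110 the reversed bit string is LSB-first, the set spans 1 << popcount(mask) keys, and the single run of mask bits becomes one (start, end) block:

from gmpy import popcount

mask = 0b0110
assert bin(mask)[::-1][:-2] == '011'  # LSB-first bits, '0b' prefix stripped
assert 1 << popcount(mask) == 4       # number of concrete keys the mask spans
# the loop above turns the run of 1-bits at positions 1..2 into the block (1, 3)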
Example #29
 def r0(self, V, mask, *flag):
     self.c += 1
     if not V:
         return 0
     m = -1
     max_deg = 0
     #        print V
     #        print bin(mask)[2:].zfill(self.n)
     for v in V:
         s = gmpy.popcount(self.G[v] & mask)  # - (1<<(self.n - 1 - v)))
         if s == 0:
             mask = self.remove(v, V, mask)
             return 1 + self.r0(V, mask)
         '''if s == 2: #R2
             mask = self.remove(v, V, mask)
             u = self.next_nb(v, mask)
             mask = self.remove(u, V, mask)
             w = self.next_nb(v, mask)
             mask = self.remove(w, V, mask)
             print 's: %d, v: %d, u: %d, w: %d' % (s, v, u, w)
             if self.is_nb(u, w):
                 return  1 + self.r0(V, mask)
             G = list(self.G)
             mask = self.add_z((v, u, w), V, mask)
             a = 1 + self.r0(V, mask)
             self.del_z((v, u, w))
             if G != self.G:
                 print 'Old G:'
                 self.print_g(G)
                 print 'New G:'
                 self.print_g(self.G)
             return a'''
         if s == 1:  #R1
             mask = self.remove(v, V, mask)
             mask = self.remove(self.next_nb(v, mask), V, mask)
             return 1 + self.r0(V, mask)
         if s > max_deg:
             m = v
             max_deg = s
     mask = self.remove(m, V, mask)
     a = self.r0(list(V), mask)
     V1 = list(V)
     for nb in V:
         if self.is_nb(m, nb):
             mask = self.remove(nb, V1, mask)
     return max(1 + self.r0(V1, mask), a)
Example #30
def _gen_parental_triples(population, selection):
    """
    This gives a sparse distribution over pairs of parental chromosomes.
    Yield a sequence of (chra, chrb, probability) triples.
    @param population: a sequence of chromosomes each as python integers
    @param selection: a fitness ratio
    """
    n = len(population)
    # get the distribution over indices into the parental population
    distn = np.zeros(n)
    for i, chrom in enumerate(population):
        distn[i] = selection**gmpy.popcount(chrom)
    distn /= np.sum(distn)
    # Define the triples assuming that parental chromosomes
    # are drawn independently according to relative fitness.
    for chra, pa in zip(population, distn):
        for chrb, pb in zip(population, distn):
            yield chra, chrb, pa*pb
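A quick check that the yielded probabilities form a distribution (the fitness weights 2**popcount normalize to 1/7, 2/7, 4/7 here):

population = [0b00, 0b01, 0b11]
triples = list(_gen_parental_triples(population, selection=2.0))
assert len(triples) == 9
assert abs(sum(p for _, _, p in triples) - 1.0) < 1e-12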
Example #31
 def check(self):
     bin_state = "".join([
         j for (i, j) in enumerate(self.bin_state)
         if self.valid_markers[i] == '1'
     ])
     bin_state = bin_state[:pow(2, dimensions)].zfill(pow(2, dimensions))
     state = int(bin_state, 2)
     n_snake_pieces = gmpy.popcount(state)  # number of 1's in state
     n_neighbors = np.zeros(pow(2, self.dimensions))
     for n, bit in enumerate(bin_state):
         if bit == '1':
             for d in range(self.dimensions):
                 n_neighbors[n] += (bin_state[self.partner_index(n,
                                                                 d)] == '1')
     n_endpoints = len(n_neighbors[n_neighbors == 1])
     n_middles = len(n_neighbors[n_neighbors == 2])
     n_crowded = len(n_neighbors[n_neighbors > 2])
     n_isolated = n_snake_pieces - n_endpoints - n_middles - n_crowded
     return n_snake_pieces, n_endpoints, n_middles, n_crowded, n_isolated
Example #32
 def i_sets(self):
     if not self.vertices:
         return []
     sets = [(0, 0)]
     w = len(self.vertices)
     for i in xrange(1, 1 << w):  # xrange is non-inclusive
         t = i
         j = 0
         mask = 0
         ind = True
         while t:
             if t & 1:
                 mask += 1 << self.vertices[j]
                 if self.G[self.vertices[j]] & mask:
                     ind = False
                     break
             t >>= 1
             j += 1
         if ind:
             sets += [(gmpy.popcount(mask), mask)]
     return sets
Example #34
def gen_hierarchical_slices(tile_width, start_index_in, sentinel_index_in):
    """
    @param tile_width: width of the smallest tile
    @param start_index_in: index of the first column
    @param sentinel_index_in: index of the sentinel column
    """
    ncolumns = sentinel_index_in - start_index_in
    if ncolumns < 1:
        raise ValueError('bad interval')
    if ncolumns % tile_width:
        raise ValueError('the tiles should exactly cover the interval')
    if gmpy.popcount(ncolumns / tile_width) != 1:
        raise ValueError('the number of tiles should be a power of two')
    nlevels = gmpy.scan1(ncolumns / tile_width) + 1
    for i in range(nlevels):
        width = tile_width * 2**i
        ntiles = ncolumns / width
        for j in range(ntiles):
            a = start_index_in + j * width
            b = start_index_in + (j + 1) * width
            yield a, b
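For example, four width-2 tiles over columns 0..8 yield three nested levels:

assert list(gen_hierarchical_slices(2, 0, 8)) == [
    (0, 2), (2, 4), (4, 6), (6, 8),
    (0, 4), (4, 8),
    (0, 8),
]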
Example #35
File: consolidate.py Project: jofas/csp
    def __init__(self, n):
        self.n = n
        self.sides = [[] for _ in range(n)]
        self.sides_ = [[] for _ in range(n)]
        self.nodes = []
        self.rotation = 0
        self.direction = 1

        for i in range(2**n):

            x = _DirectedNode(i)
            for node in self.nodes:
                if popcount(x.id ^ node.id) == 1:
                    node.edges_out.append(x)
            self.nodes.append(x)

            for j in range(n):
                mask = 0b1 << j
                if x.id & mask == mask:
                    self.sides[j].append(x)
                else:
                    self.sides_[j].append(x)
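_DirectedNode is not shown; the constructor above only needs an id and an outgoing edge list, so a minimal assumed shape is:

class _DirectedNode(object):
    def __init__(self, id):
        self.id = id
        self.edges_out = []   # nodes one bit-flip away, filled in later

Each pair of vertices differing in exactly one bit (popcount of the XOR is 1) gets a directed hypercube edge.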
Example #36
 def post_order(self, node):
     for c in node.children:
         self.post_order(c)
     i_sets = node.i_sets()
     if not node.children:
         self.table[node.index] = i_sets
         return
     for U in i_sets:
         c_sum = [0, 0]
         for c in node.children:
             temp = []
             UcVt = U[1] & c.Vt
             for Ui in self.table[c.index]:
                 if Ui[1] & node.Vt == UcVt:
                     wU = Ui[0] - gmpy.popcount(Ui[1] & U[1])
                     temp += [(wU, Ui[1] - (Ui[1] & U[1]))]
             if temp:
                 t = max(temp)
                 c_sum[0] += t[0]
                 c_sum[1] += t[1]
         self.table[node.index] += [(U[0] + c_sum[0], U[1] + c_sum[1])]
     return
Example #37
File: consolidate.py Project: jofas/csp
 def _compute_layers(self):
     for i in range(2**(self.d - 1)):
         k = popcount(i)
         self.layers[k].append(i)
         self.layers[self.d - k]\
             .append(self._complement(i))
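A standalone check of the layering, assuming _complement(i) == i ^ (2**d - 1): every hypercube vertex then lands in the layer indexed by its popcount, with each vertex placed exactly once:

from gmpy import popcount

d = 3
layers = {k: [] for k in range(d + 1)}
for i in range(2**(d - 1)):
    k = popcount(i)
    layers[k].append(i)
    layers[d - k].append(i ^ (2**d - 1))
assert sorted(sum(layers.values(), [])) == list(range(2**d))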
Example #38
File: angupweight.py Project: Srheft/HOD
    match2 = match2[a]

    num_fib = len(match1)

    print('number of DDfib counts within ', sep, ' degrees = ', num_fib)

    PIP_fib = np.zeros(num_fib)

    for i in range(num_fib):

        for j in range(60):

            a = (catalog[match1[i]]['WEIGHT_BW'][j]) & (
                catalog[match2[i]]['WEIGHT_BW'][j])

            PIP_fib[i] += gmpy.popcount(int(a))

    DD_fib_PIP = np.sum(1860. / PIP_fib)

    print('wDD_ang= DD_par/DD_fib_PIP= ',
          num_par / DD_fib_PIP)  ### definition in equation 9 of PIP paper

    avg_pip_bin[k] = np.sum(1860. / PIP_fib)

    DD_fib[k] = num_fib

    DD_par[k] = num_par

    wDD_angup[k] = num_par / DD_fib_PIP

write_table = True
Example #39
def binDist(s1, s2):
    # popcount(int(x == y)) is 1 exactly when the elements match, so this
    # counts the number of positions where the two sequences agree
    return sum([popcount(int(x == y)) for x, y in zip(s1, s2)])
Example #40
 def get_energies(self, X):
     self._check_params(X)
     g, = X
     popcounts = [gmpy.popcount(i) for i in range(self.get_nstates())]
     return np.array([g if p in (0, self.d) else 0 for p in popcounts])
Example #42
File: wvlib.py Project: fginter/wvlib
    def hash_similarity(self, h1, h2):
        """Return approximate cosine similarity of given hashes."""

        return self._hd_to_cos[popcount(h1^h2)]
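_hd_to_cos is a precomputed lookup table not shown here. Under the standard SimHash estimate, a Hamming distance d between two b-bit hashes corresponds to an angle of pi * d / b, so a plausible construction (the bit width is an assumption) is:

import math

b = 64  # assumed hash width in bits
_hd_to_cos = [math.cos(math.pi * d / b) for d in range(b + 1)]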
Example #43
def hammingWeight(vertex):
    return gmpy.popcount(vertex)
Example #44
 def hammingWeight(self):
     return gmpy.popcount(self.vertex)
Example #45
 def __init__(self, vertex=None, dimension=None):
     self.hw = gmpy.popcount(vertex)
     self.dimension = dimension
Example #46
     pass
 else:
     ints = range(400)
     for n in ints:
         try:
             assert gmpy.digits(n, 2) == digits(n)
         except AssertionError:
             print 'digits fail %d' % n
             raise
         try:
             assert gmpy.numdigits(n, 2) == numdigits(n)
         except AssertionError:
             print 'numdigits fail %d' % n
             raise
         try:
             assert gmpy.popcount(n) == popcount(n)
         except AssertionError:
             print 'popcount fail %d' % n
             raise
     for n in list(ints):
         for i in ints:
             try:
                 assert gmpy.getbit(n, i) == getbit(n, i)
             except AssertionError:
                 print 'getbit fail %d, %d' % (n, i)
                 raise
             try:
                 assert gmpy.setbit(n, i) == setbit(n, i)
             except AssertionError:
                 print 'setbit fail %d' % n
                 raise
Example #47
def popcount(a):
    return gmpy.popcount(a) #bin(a).count('1')
Example #48
    def potential(self):
        # return gmpy.popcount(self.vertex) / float(self.dimension)
        if self.hammingWeight() != 2:
            return gmpy.popcount(self.vertex) / float(self.dimension)
        return 1 / math.sqrt(self.dimension)