Example No. 1
def vice(charset, n_chars, n_threads: int):
    rev_charset = reverse_charset(charset)
    charsets = [charset[i:] + charset[:i] for i in range(0, len(charset), int(len(charset)/n_threads))]
    rev_charsets = [rev_charset[i:] + rev_charset[:i] for i in range(0, len(rev_charset), int(len(rev_charset)/n_threads))]
    # charsets = charsets + rev_charsets
    # gens = [prod(charset, n_chars) for c_set in charsets]
    gens = [prod(charset, repeat=n_chars), prod(rev_charset, repeat=n_chars)]
    return gens
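For context, a minimal self-contained sketch of the brute-force idiom this helper builds on, assuming prod is itertools.product and that reverse_charset simply reverses the string (both are assumptions, not taken from the original project):

from itertools import product as prod

def reverse_charset(charset):
    # assumed helper: reverse the character order
    return charset[::-1]

# every candidate string of length 2 over a tiny charset, forwards and backwards
forward = [''.join(c) for c in prod("abc", repeat=2)]
backward = [''.join(c) for c in prod(reverse_charset("abc"), repeat=2)]
print(forward)   # ['aa', 'ab', 'ac', 'ba', ..., 'cc']
print(backward)  # ['cc', 'cb', 'ca', 'bc', ..., 'aa']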
Example No. 2
def solve(limit):
    #Declare variables
    start = time.time()
    numLen = limit
    primes = [[] for x in range(10)]
    
    #Solve the problem
    #(by building candidates and testing for primeness)
    for d in range(0, 10):
        if d > 0: m = numLen - 1
        else:     m = numLen - 2
        digits = [i for i in range(0, 10) if i != d]		#get all non-d digits
        while primes[d] == []:
            for x in comb(range(0, numLen), numLen - m):
                if d == 0 and 0 not in x: continue		#Skip if the number starts with 0
                num = [d for i in range(0, numLen)]
                for y in prod(digits, repeat = numLen - m):
                    if x[0] == 0 and y[0] == 0: continue
                    z = 0
                    for k in x:
                        num[k] = y[z]
                        z += 1
                    numInt = int(''.join(map(str, num)))
                    if isPrime(numInt):
                        primes[d].append(numInt)
            m -= 1
	
    ans = str(sum(sum(x) for x in primes))
    
    #Print the results
    print('For d = 0 to 9, the sum of all S(' + str(limit) + ', d) is ' + ans + '.')
    print('This took ' + str(time.time() - start) + ' seconds to calculate.')
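The snippet above relies on comb/prod (presumably itertools.combinations/itertools.product aliases), the time module, and an isPrime helper that is not shown; a minimal sketch of those assumed imports and of a trial-division isPrime:

import time
from itertools import combinations as comb, product as prod

def isPrime(n):
    # simple trial-division check; enough for a sketch, not for large limits
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    f = 3
    while f * f <= n:
        if n % f == 0:
            return False
        f += 2
    return True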
Example No. 3
def getBodyByPower(power):
    #get all possible terms without coefficient with given power

    if power in getBodyByPowerRec:
        return getBodyByPowerRec[power]
    else:
        if power == 0:
            ret = [Poly(Term(1, []))]
        else:
            p = prod(
                getBodyByPower(power - 1),
                [
                    Poly(Term(1, [j == i for j in range(vary)]))
                    for i in range(vary)
                ]  # vary=3 -> [x, y, z]
            )

            ret = []
            for tup in p:
                if not (tup[0] * tup[1] in ret):
                    ret.append(tup[0] * tup[1])

        getBodyByPowerRec[power] = ret
        #record

        return ret
Example No. 4
    def assignBikes(self, workers: List[List[int]], bikes: List[List[int]]) -> \
    List[int]:
        def dist(w, b):
            return abs(workers[w][0] - bikes[b][0]) + abs(workers[w][1] -
                                                          bikes[b][1])

        n, m = len(workers), len(bikes)
        pairs = defaultdict(list)
        for w, b in prod(range(n), range(m)):
            d = dist(w, b)
            pairs[d].append((w, b))
        n = len(workers)
        ans = [None] * n
        used_w, used_b = [False] * n, [False] * m
        asg_w = 0
        for d in range(2000):
            for w, b in pairs[d]:
                if used_w[w] or used_b[b]:
                    continue
                ans[w] = b
                used_w[w] = True
                used_b[b] = True
                asg_w += 1
            if asg_w == n:
                break
        return ans
Example No. 5
def catspace(rank, dim):
    """Makes a categorical space of the discrete
    combinations of rank each holding dim elements

    USAGE
    -----
    > catspace(3, 2)
    > [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1),
       (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]

    PARAMETERS
    ----------
    * rank: int
    The length of the tuples ( width )

    * dim: int
    The number of elements in each position of
    the tuple ( depth )

    RETURNS
    -------
    * space: list of tuples
    Each tuple is of length rank, and each
    tuple element is an integer in 0...dim-1
    """
    return [
        tuple(e)
        for e in prod(range(dim), repeat=rank)]
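Assuming prod is itertools.product, the call from the (corrected) docstring can be reproduced directly:

from itertools import product as prod

# length-3 tuples over {0, 1}; same output as catspace(3, 2)
space = [tuple(e) for e in prod(range(2), repeat=3)]
print(space)
# [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]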
Example No. 6
    def min_distances(self, r_1_components, r_2_components):

        min_separation_distances = []

        for relation_1, relation_2 in prod(r_1_components, r_2_components):
            min_separation_distance = self.d_min_separation(
                relation_1, relation_2)
            min_separation_distances.append(min_separation_distance)

        # the reshape below assumes the product order matches (rows, cols),
        # i.e. len(r_1_components) <= len(r_2_components)
        dim_1 = min(len(r_1_components), len(r_2_components))
        dim_2 = max(len(r_1_components), len(r_2_components))

        min_separation_distances = np.array(min_separation_distances,
                                            dtype=np.float32).reshape(
                                                (dim_1, dim_2))

        min_dists_per_row = np.argmin(min_separation_distances, axis=1)
        n = 0
        min_dist_sum = 0
        while n < dim_1:
            min_dist_sum = min_dist_sum + min_separation_distances[
                n, min_dists_per_row[n]]
            min_separation_distances[:, min_dists_per_row[n]] = np.inf
            min_dists_per_row = np.argmin(min_separation_distances, axis=1)
            n = n + 1

        return min_dist_sum / n
Example No. 7
def InitializeInterTimestamp(timestamp):  # Fast learning / detection
    ''' Generate Inter-timestamp Correlation.
        Investigates patterns associated with samples from one time group to the next. '''

    # Initialize Inter's Timestamp History list
    if len(normalProfile["Inter"]["hist"]) == 0:
        # need two timestamps, so return on only one
        normalProfile["Inter"]["hist"] = [timestamp]
        return

    # Generate the differences between all dbm samples from one timestamp to the next
    diff = [
        a - b for a, b in prod(normalProfile["Inter"]["hist"][-1], timestamp)
    ]

    # hold the history of all differences in indices
    for i in range(len(diff)):
        if len(normalProfile["Inter"]["diff"]) < len(diff):
            normalProfile["Inter"]["diff"][i] = [diff[i]]
            normalProfile["Inter"]["score"][i] = 0
        else:
            normalProfile["Inter"]["diff"][i].append(diff[i])

    # Store timestamp for next iteration (and other model uses)
    normalProfile["Inter"]["hist"].append(timestamp)

    return
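As an illustration (not from the original project), prod pairs every sample in the previous timestamp with every sample in the current one, so the difference list has len(previous) * len(current) entries:

from itertools import product as prod

prev, curr = [10, 12], [11, 13, 15]
diff = [a - b for a, b in prod(prev, curr)]
print(diff)  # [-1, -3, -5, 1, -1, -3]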
Example No. 8
 def infer_mutation_equation(self, data_point, symbolic_mutation_rate):
     mutation_equation = 0
     for slots in prod(*[
             list(
                 combi_with_replacement(
                     self.mutable_nodes_by_mutype[mutype],
                     data_point[mutype])) for mutype in MUTYPES
     ]):
         mutypes_by_idx = defaultdict(list)
         for mutype, slot in zip(MUTYPES, slots):
             if slot:
                 for idx in list(slot):
                     mutypes_by_idx[idx].append(mutype)
         mutation_part = 1
         for node_idx, mutypes in mutypes_by_idx.items():
             mutype_counter = Counter(mutypes)
             node_multinomial = multinomial(mutype_counter.values())
             mutation_part *= node_multinomial
             nodeObj = self.nodeObjs[node_idx]
             denominator = self.path_denominators[node_idx]
             for mutype in mutypes:
                 mutation_part *= ((nodeObj.mutype_counter[mutype] *
                                    symbolic_mutation_rate[mutype]) /
                                   denominator)
         mutation_equation += mutation_part
     return mutation_equation
Example No. 9
    def assignBikes(self, workers: List[List[int]], bikes: List[List[int]]) -> \
    List[int]:
        pairs = []

        def dist(w, b):
            return abs(workers[w][0] - bikes[b][0]) + abs(
                workers[w][1] - bikes[b][1])

        n, m = len(workers), len(bikes)
        for w, b in prod(range(n), range(m)):
            key = (dist(w, b), w, b)
            pairs.append((key, w, b))
        pairs.sort()
        n = len(workers)
        ans = [None] * n
        used_w, used_b = [False] * n, [False] * m
        asg_w = 0
        for _, w, b in pairs:
            if used_w[w] or used_b[b]:
                continue
            ans[w] = b
            used_w[w] = True
            used_b[b] = True
            asg_w += 1
        return ans
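A standalone sketch of the same greedy idea on a small, hypothetical input (not taken from the original solution); the closest free worker/bike pair is assigned first:

from itertools import product as prod

workers = [[0, 0], [2, 1]]
bikes = [[1, 2], [3, 3]]
# sort all (distance, worker, bike) triples, then assign greedily
pairs = sorted((abs(wx - bx) + abs(wy - by), w, b)
               for (w, (wx, wy)), (b, (bx, by)) in prod(enumerate(workers), enumerate(bikes)))
ans, used_b = [None] * len(workers), set()
for _, w, b in pairs:
    if ans[w] is None and b not in used_b:
        ans[w] = b
        used_b.add(b)
print(ans)  # [1, 0]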
Example No. 10
def getPolyByMesh(mesh, power):
    #generator

    body = getBodyByPowerAbove(power)
    #get body

    p = prod(*([mesh] * len(body)))
    #mesh=[-1,0,1] len(body)=4 -> [(-1,-1,-1,-1), (-1,-1,-1,0), (-1,-1,-1,1), (-1,-1,0,-1), (-1,-1,0,0), ..., (1,1,1,1)]

    ret = []
    loader = 0
    loaderMax = len(mesh)**(
        (3**(power + 1) - 1) // 2)  #mesh_length^[(3^(power+1)-1)/2]

    for tup in p:
        #tup: (-1,-1,-1,-1) -> (-1,-1,-1,0) -> (-1,-1,-1,1) -> ...
        s = Poly(Term(0, []))
        for i in range(len(body)):

            s += tup[i] * body[i]
            #e.g.
            #tup[i]: -1 -> 1 -> 0 -> -1  body[i]: x -> y -> z -> (const)
            #tup[i]*body[i]: -x -> y -> 0 -> -1

            #s: -x+y-1

        loader += 1
        #polynomial generated

        if loader % 1000 == 0:
            print("[*] Big F progress:{this}/{all_}".format(
                this=loader // 1000, all_=loaderMax // 1000 + 1))
            #log progress

        yield s
Example No. 11
    def cal_sim_user(self, u1, u2):
        products1 = u1['products']
        products2 = u2['products']
        product_id1 = {p['id'] for p in products1}
        product_id2 = {p['id'] for p in products2}

        common_ids = product_id1 & product_id2
        union_ids = product_id1 | product_id2
        diff_u1u2 = [p for k in product_id1 - product_id2 for p in products1 if p['id'] == k]
        diff_u2u1 = [p for k in product_id2 - product_id1 for p in products2 if p['id'] == k]

        # The ratio of the length of common products on all union products.
        intersection_factor = len(common_ids) / len(union_ids)

        # After calculating the intersection factor we calculate the similarity factor,
        # which is the mean of the medians of the similarities of each product of user_1
        # to each product of user_2
        combination = prod(products1, products2)

        d = defaultdict(list)
        for p1, p2 in combination:
            d[p1['id']].append(self.get_product_sim(p1, p2))
        medians = map(median, d.values())
        similarity_factor = mean(medians)

        return mean((similarity_factor, intersection_factor)), diff_u1u2, diff_u2u1
Example No. 12
    def assertSubgrid(self, grid, subgrid, ijk_bound):
        sijk_space = prod(*[range(d) for d in subgrid.getDims()[:-1:]])
        for sijk in sijk_space:
            gijk = tuple([a + b for a, b in zip(sijk, zip(*ijk_bound)[0])])

            self.assertEqual(
                [subgrid.getCellCorner(i, ijk=sijk) for i in range(8)],
                [grid.getCellCorner(i, ijk=gijk) for i in range(8)])

            self.assertEqual(grid.active(ijk=gijk), subgrid.active(ijk=sijk))
Example No. 13
 def countryBreakDown(self, country):
     #Data processing is getting more difficult than I thought. Let's hope that they stop changing the data format as the day goes on.
     df_df = {
         d: self.df[d][self.df[d][(
             self.c_format & set(self.df[d].columns)).pop()] == country][[
                 (self.s_format & set(self.df[d].columns)).pop(),
                 sorted(self.coords_format & set(self.df[d].columns))[-1],
                 sorted(self.coords_format & set(self.df[d].columns))[0],
                 'Confirmed', 'Recovered', 'Deaths'
             ]].set_index((self.s_format & set(self.df[d].columns)).pop())
         for d in self.cDates
     }
     #Add multi-Index
     for k, v in df_df.items():
         coords = list(prod(['Coordinates'], v.columns[:2]))
         coords.extend(list(prod([k], v.columns[2:])))
         v.columns = pd.MultiIndex.from_tuples(coords)
         v.index.names = ['State/Province'
                          ]  #Rename index level for consistency
     return df_df
Example No. 14
    def assertSubgrid(self, grid, subgrid, ijk_bound):
        sijk_space = prod(*[range(d) for d in subgrid.getDims()[:-1:]])
        for sijk in sijk_space:
            gijk = tuple([a + b for a, b in
                         zip(sijk, list(zip(*ijk_bound))[0])])

            self.assertEqual(
                [subgrid.getCellCorner(i, ijk=sijk) for i in range(8)],
                [grid.getCellCorner(i, ijk=gijk) for i in range(8)]
                )

            self.assertEqual(grid.active(ijk=gijk), subgrid.active(ijk=sijk))
Example No. 15
def get_correlation_matrix(saccades, vars, delays, type):
    ''' Returns correlation matrix, p-value matrix, and labels.

        type can be 'pearson', 'spearman', or 'kendall'.
     '''
    maximum_delay = numpy.max(delays)    
    
    labels = []
    xs = []
    for delay in delays:
        for i, var in enumerate(vars):
            label = "%s[%+d]" % (var.letter, -delay) if delay > 0 else \
                var.letter + ''
            labels.append(label)
            
            x = saccades[var.field]
            T = len(x)
            
            x_delayed = x[delay:]
            target_length = T - maximum_delay
            if len(x_delayed) > target_length:
                # truncate to the common length
                x_cut = x_delayed[0:target_length]
            else:
                x_cut = x_delayed
                
            # if delay != 0:
            #    assert x_cut[0] != x[0]
            
            #print var.field, x[:10]
            #print "delayed", delay, x_cut[:10]
            
            assert len(x_cut) == target_length
            
            xs.append(x_cut)
    
    N = len(xs)
    R = numpy.ndarray((N, N), dtype='float64')
    P = numpy.ndarray((N, N), dtype='float64')
    
    types = {'kendall': fast_kendall_tau,
             'pearson': scipy.stats.pearsonr,
             'spearman': scipy.stats.spearmanr}
    if type not in types:
        raise ValueError('Unknown correlation type "%s". Try: %s' % 
                         (type, types.keys()))
    function = types[type]

    for i, j in prod(range(N), range(N)):
        r, p = function(xs[i], xs[j])
        R[i, j] = r
        P[i, j] = p
    return R, P, labels
Example No. 16
def divisors(m):
    factors = rho_factor(m)
    primes = list(factors)
    lists = [list(range(factors[x] + 1)) for x in primes]
    exponents = prod(*lists)
    div_set = set()
    for exp in exponents:
        divisor = 1
        for e in range(len(exp)):
            divisor *= primes[e] ** exp[e]
        div_set.add(divisor)
    return div_set
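rho_factor is assumed to return the prime factorisation as a {prime: exponent} dict (the name suggests Pollard's rho); a trial-division sketch of such a helper, for illustration only:

def rho_factor(m):
    # assumed helper: factorisation as {prime: exponent}, e.g. 12 -> {2: 2, 3: 1}
    factors = {}
    d = 2
    while d * d <= m:
        while m % d == 0:
            factors[d] = factors.get(d, 0) + 1
            m //= d
        d += 1
    if m > 1:
        factors[m] = factors.get(m, 0) + 1
    return factors

With a helper like that in place, divisors(12) yields {1, 2, 3, 4, 6, 12}.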
Example No. 17
File: probs.py Project: DRL/gIMble
 def generate_data_space(self):
     start_time = timer()
     print("[+] Generating base data points (based on MAX_MUTYPES: %s) ..." % (", ".join(["%s=%s" % (mutype, self.max_by_mutype[mutype]) for mutype in MUTYPES])))
     # works only for equal max_mutypes ...
     for i, data_point in enumerate(prod(*[range(0, self.max_by_mutype[mutype] + 1) for mutype in MUTYPES])):
         counter = Counter()
         for mutype, count in zip(MUTYPES, data_point):
             counter[mutype] = count
         # Four-gamete-test : exclude data points w/ counter['fixed'] > 0 AND counter['hetAB'] > 0
         if not (counter['fixed'] > 0 and counter['hetAB'] > 0):
             self.data.append(counter)
     print("[=] Generated %s base data points in %s seconds." % (len(self.data), timer() - start_time))
Example No. 18
def euler87():
    upper = 50 * 10 ** 6
    squares = [x ** 2 for x in primes_gen(int(upper ** (1.0 / 2)))]
    cubes = [x ** 3 for x in primes_gen(int(upper ** (1.0 / 3)))]
    quads = [x ** 4 for x in primes_gen(int(upper ** (1.0 / 4)))]

    ret = set()
    for a, b, c in itertools.product(squares, cubes, quads):
        res = a + b + c
        if res < upper:
            ret.add(res)
    return len(ret)
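primes_gen is assumed to yield the primes up to its argument; a small sieve-based sketch of that missing helper (an assumption, not the original implementation):

def primes_gen(limit):
    # assumed helper: all primes p <= limit (simple sieve of Eratosthenes)
    sieve = [True] * (limit + 1)
    sieve[0:2] = [False, False]
    for p in range(2, int(limit ** 0.5) + 1):
        if sieve[p]:
            sieve[p * p::p] = [False] * len(sieve[p * p::p])
    return [i for i, is_prime in enumerate(sieve) if is_prime]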
Example No. 19
def create_model_matching(A, r, types, resources, B, Z):
    pairs = list(prod(resources, types))
    mod = Model("LP")
    mod.setParam('OutputFlag', False)  #No output message
    x = mod.addVars(pairs, name="x")
    mod.setObjective(sum(r[i, j] * A[i, j] * x[i, j] for (i, j) in pairs),
                     GRB.MAXIMIZE)
    bud_const = mod.addConstrs(
        (quicksum(x[i, j] for j in types) <= B[i] for i in resources))
    dem_const = mod.addConstrs(
        (quicksum(x[i, j] for i in resources) <= Z[j] for j in types))

    return mod, x, bud_const, dem_const
Example No. 20
    def vals_generator(domain, variables):
        def rotator(listed):
            rotations = [listed]
            i = 0
            while i < len(listed) - 1:
                listed = listed[1:] + listed[:1]
                rotations.append(listed)
                i += 1
            return rotations

        combos = prod(rotator(domain), repeat=len(variables))
        combolist = [list(ele) for ele in combos]

        return combolist
Example No. 21
def recurse(arranged):
    sets = dict()

    for i in range(1, 10):
        temp_set = {(n, ) for n in arranged[i]}
        for j in range(1, i // 2 + 1):
            k = i - j
            for a, b in prod(sets[j], sets[k]):
                flat = a + b
                if len(set(''.join(flat))) == i:
                    temp_set.add(tuple(sorted(flat)))
        sets[i] = temp_set

    return sets
Example No. 22
    def solve_all(self) -> tuple:
        curr_length = 0
        for l in range(1, len(self.numbers) + 1):
            for subset in comb(self.numbers, l):
                for each_possible in perm(subset):

                    if curr_length != len(each_possible):
                        curr_length = len(each_possible)
                        signs_permutations = tuple(
                            prod(self.signs, repeat=curr_length - 1))

                    for answer in self._solve(each_possible,
                                              signs_permutations):
                        yield answer
Example No. 23
def get_inputPatterns(units, dimensions):
    
    base = np.zeros(units)
    base[0] = 1.0
    
    base_perm = copy.copy(base)
        
    for i in range(1, len(base)):
        base_perm = np.vstack((base_perm, np.roll(base, i)))
    
    patterns = np.array(list(prod(base_perm, repeat = dimensions)))    
    
    patterns = patterns.reshape(patterns.shape[0], -1)
    
    return patterns
Example No. 24
def gen_kmers(kmin, kmax, alphabet="ACGT"):
    """
    generates possible k-mers of range(kmin, kmax + 1)

    :param kmin: int, minimum kmer length
    :param kmax: int, maximum kmer length
    :param alphabet: str, accepted sequence alphabet for DNA, RNA, Amino Acids

    :return list of str, possible kmers
    """

    kmer_lis = []
    for n in range(kmin, kmax + 1):
        kmer_lis.extend(''.join(mer) for mer in prod(alphabet, repeat=n))

    return kmer_lis
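Assuming prod is itertools.product, a quick check of the expected output sizes (illustrative only):

from itertools import product as prod

two_mers = [''.join(mer) for mer in prod("ACGT", repeat=2)]
print(two_mers[:4], len(two_mers))  # ['AA', 'AC', 'AG', 'AT'] 16
# gen_kmers(1, 2) should therefore return 4 + 16 = 20 strings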
Example No. 25
 def judgePoint24(self, nums: List[int]) -> bool:
     ops = [add, sub, mul, div]
     evals = [
         self.evalType1, self.evalType2, self.evalType3, self.evalType4
     ]
     for p in perm(nums):
         for q in prod(ops, ops, ops):
             for ev in evals:
                 try:
                     if abs(ev(p, q) - 24) < EPS:
                         return True
                 except ZeroDivisionError:
                     pass
                 except:
                     raise
     return False
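The snippet assumes perm/prod aliases for itertools.permutations/product, the four basic operators, an EPS tolerance, and evalType1..4 helpers that apply the three chosen operators to the four permuted numbers under different bracketings; a sketch of the simpler assumed pieces (names and values are assumptions):

from itertools import permutations as perm, product as prod
from operator import add, sub, mul

EPS = 1e-6  # assumed floating-point tolerance

def div(a, b):
    # plain division; the ZeroDivisionError is caught by the caller above
    return a / b

def evalType1(p, q):
    # one plausible bracketing helper: ((a op1 b) op2 c) op3 d
    a, b, c, d = p
    op1, op2, op3 = q
    return op3(op2(op1(a, b), c), d)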
Example No. 26
    def integration_result(self):
        """Generate the list of all possible integration result
        and the associated parameter set
        """
        if self.verbose:
            print("Computing synaptic integration \n")
        ipt = np.rot90(self.ipt)
        w_min = self.w_min
        w_max = self.w_max
        n = self.n
        order = self.order

        # Generate all possible set of vectors
        if order:
            self.weights = np.array([i for i in prod(range(w_min, w_max + 1), repeat=n)], dtype=int)
        else:
            self.weights = np.array([i for i in comb(range(w_min, w_max + 1), n)], dtype=int)
        self.integration = np.dot(self.weights, ipt)
Example No. 27
def counts_pseudo(kmer_count, order, alphabet, start_char, stop_char,
    pseudo):
    """
    counts all observed kmers (from the training data) and all possible kmers
    (each initialised with a pseudocount), combined into a dict of dicts

    :param kmer_count: dict, kmer counts as {'kmer':int}
    :param order: int, context length for Markov chain
    :param alphabet: str, accepted sequence alphabet for DNA, RNA, Amino Acids
    :param start_char: str, single start char defaulting to '^'
    :param stop_char: str, single stop char defaulting to '$'
    :param pseudo: float or int, non-negative pseudocount value
    :return: dict of dict, context counts as{'context':{'finalchar':int}}
    """

    # tracks every final letter of each kmer
    char_count = {}
    for letter in alphabet + stop_char:  # creates pseudocounts from alphabet
        char_count[letter] = pseudo

    # tracks contexts of each final character
    contexts = {}

    # cartesian product of all possible kmers
    if order >= 0:
        for start_pos in range(order + 1):
            for mer in prod(alphabet, repeat=order - start_pos):
                contexts[start_char * start_pos + ''.join(mer)] = dc(
                    char_count)

    for kmer, freq in kmer_count.items():
        # prevents stop_char in context or start_char last (invalid seqs)
        if((order and kmer[0:-1][-1] != stop_char and kmer[-1] !=
            start_char) or order == 0):
            if kmer[0:-1] not in contexts.keys():
                raise Exception(
                    "K-mer context {} contains letters not in alphabet!"
                    .format(kmer[0:-1]))
            if kmer[-1] not in contexts[kmer[0:-1]].keys():
                raise Exception("K-mer contains {} but is not in alphabet!"
                    .format(kmer[-1]))
            contexts[kmer[0:-1]][kmer[-1]] += freq

    return contexts
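dc is presumably copy.deepcopy and prod itertools.product; for illustration, the pseudocount table built for order=1 over a two-letter alphabet has one row per possible one-character context plus the padded '^' context:

from copy import deepcopy as dc
from itertools import product as prod

alphabet, start_char, stop_char, pseudo, order = "AC", "^", "$", 0.5, 1
char_count = {letter: pseudo for letter in alphabet + stop_char}
contexts = {}
for start_pos in range(order + 1):
    for mer in prod(alphabet, repeat=order - start_pos):
        contexts[start_char * start_pos + ''.join(mer)] = dc(char_count)
print(sorted(contexts))  # ['A', 'C', '^']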
Example No. 28
    async def _solve(self, nums: tuple, permut_tmp: tuple) -> tuple:
        for each in permut_tmp:
            permutations = set()
            eqs = "".join([
                str(x) + str(y if y is not None else "")
                for x, y in [temp for temp in z(nums, each)]
            ])
            try:
                if hash(eqs) not in permutations:
                    permutations.add(hash(eqs))
                    yield (eqs, eval(eqs))
            except GeneratorExit:
                return
            except ZeroDivisionError:
                pass

            m = self.regex.search(eqs)
            if m:
                r = self.regex.sub(r' (\1\3) ', eqs)
                spl = [([x[1:-1], x] if '(' in x else x) for x in r.split()]
                count = sum(isinstance(i, list) for i in spl)
                for binar in prod([0, 1], repeat=count):
                    temp_r = ''
                    counter = 0
                    for rep in spl:
                        if isinstance(rep, list):
                            state = binar[counter]
                            temp_r += rep[state]
                            counter += 1
                        else:
                            temp_r += rep
                    try:
                        if hash(temp_r) not in permutations:
                            permutations.add(hash(temp_r))
                            yield (temp_r, eval(temp_r))
                    except GeneratorExit:
                        return
                    except ZeroDivisionError:
                        pass
Example No. 29
def get_inputPatterns(units, dimensions, sc):

    base = np.zeros((1, units))
    base[0, 0] = 1.0

    if sc:

        scale = np.arange(0.25, 1, 0.25)

        base = np.reshape(scale, (-1, 1)) @ np.reshape(base, (1, -1))

    ## Add a second loop for scale dimension

    base_perm = copy.copy(base)

    for i in range(1, units):
        base_perm = np.vstack((base_perm, np.roll(base, i, axis=1)))

    patterns = np.array(list(prod(base_perm, repeat=dimensions)))

    patterns = patterns.reshape(patterns.shape[0], -1)

    return patterns
Example No. 30
def get_inputPatterns(units, dimensions):
    
    # generate a base vector with a single node active
    
    base = np.zeros((1, units))
    base[0, 0] = 1.0
    
    base_perm = copy.copy(base)   # make a copy as to not modify the base vector
    
    
    # roll the vector to get all possible nodes active in a single input dimension
    
    for i in range(1, units):
        base_perm = np.vstack((base_perm, np.roll(base, i, axis = 1)))
    
    
    # get all possible combinations of input dimensions
    
    patterns = np.array(list(prod(base_perm, repeat = dimensions)))  
    
    patterns = patterns.reshape(patterns.shape[0], -1)
    
    return patterns
Example No. 31
    def isInCFL(self, st):
        self.table = np.empty((len(st), len(st)), dtype=object)
        for i in range(len(st)):
            l = []
            for j in range(len(self.g)):
                if st[i] == self.g[j][1]:
                    l.append(self.g[j][0])
            self.table[0][i] = l

        for i in range(1, len(st)):
            for j in range(0, len(st) - i):
                l = []
                for k in range(0, i):
                    self.p = prod(self.table[k][j],
                                  self.table[i - 1 - k][j + k + 1])
                    for q in self.p:
                        for z in range(len(self.g)):
                            if ''.join(q) == self.g[z][1]:
                                l.append(self.g[z][0])
                self.table[i][j] = l

        output = self.check_final(st)
        return output
Example No. 32
from itertools import product as prod
from random import randint as rand

combos = [list(prod(range(1,7), repeat=r)) for r in range(2,6)]

wins = 0
for combo in combos[0]:
  if combo[0] > combo[1]:
    wins += 1
print "1 on 1 : ", wins, len(combos[0]), 1.0 * wins / len(combos[0])

wins = 0
for combo in combos[1]:
  if combo[0] > combo[2]:
    wins += 1
  elif combo[1] > combo[2]:
    wins += 1
print "2 on 1 : ", wins, len(combos[1]), 1.0 * wins / len(combos[1])

wins = 0
for combo in combos[2]:
  if combo[0] > combo[3]:
    wins += 1
  elif combo[1] > combo[3]:
    wins += 1
  elif combo[2] > combo[3]:
    wins += 1
print "3 on 1 : ", wins, len(combos[2]), 1.0 * wins / len(combos[2])


wins = 0
Example No. 33
def add_comparisons(all_experiments, outdir):
        
    r = Report('comparisons')

    fmain = r.figure('summary', cols=3,
                     caption="Summary of projection analysis")

    n = r.node('odds analysis')
    fodds_real = n.figure('odds_real', cols=3,
                     caption="Summary of odds analysis (real)")
    fodds_hall = n.figure('odds_hall', cols=3,
                     caption="Summary of odds analysis (hall)")
    fodds_rel = n.figure('rel', cols=3, caption="Relative odds")

    generic = r.node('generic')
    if 1:
        key = ('posts', 'contrast_w')
        real_traj = all_experiments[key]
        key = ('noposts', 'hcontrast_w')
        hall_traj = all_experiments[key]
        diff = real_traj.mean - hall_traj.mean
        
        max_value = max(numpy.max(real_traj.mean),
                        numpy.max(hall_traj.mean))
        add_scaled(generic, 'real_traj', real_traj.mean, max_value=max_value)
        add_scaled(generic, 'hall_traj', hall_traj.mean, max_value=max_value)
        add_posneg(generic, 'diff_traj', diff)
        with generic.data_pylab('diff_traj_plot') as pylab:
            pylab.plot(real_traj.mean, 'b.', label='real')
            pylab.plot(hall_traj.mean, 'r.', label='hallucination')
        
        
        f = generic.figure(cols=2)
        f.sub('real_traj', caption='signal over all trajectory (real)')
        f.sub('hall_traj', caption='signal over all trajectory (hallucinated)')
        f.sub('diff_traj_plot', caption='real - hallucination')
        f.sub('diff_traj', caption='real - hallucination')
        
    for view, dir, saccades_set in \
        prod(['start' ],
             ['left', 'right', 'alldir'],
             ['allsac', 'center', 'border']):
        # statistics over the whole trajectory
        
        
        key = Exp(image='contrast_w', group='posts',
                  view=view, dir=dir, saccades_set=saccades_set)
        real = all_experiments[key]
        
        key = Exp(image='hcontrast_w', group='noposts',
                  view=view, dir=dir, saccades_set=saccades_set)
        hall = all_experiments[key]
         
        
        case = r.node('analysis_%s_%s_%s' % (view, dir, saccades_set))
        
        cased = " (%s, %s, %s)" % (dir, view, saccades_set)
        
        diff = real.mean - hall.mean

        max_value = max(numpy.max(real.mean),
                        numpy.max(hall.mean))
        add_scaled(case, 'real', real.mean, max_value=max_value)
        add_scaled(case, 'hall', hall.mean, max_value=max_value)
        add_posneg(case, 'diff', diff)
    
    
        max_value = max(numpy.max(real.var),
                        numpy.max(hall.var))
        add_scaled(case, 'real_var', real.var, max_value=max_value)
        add_scaled(case, 'hall_var', hall.var, max_value=max_value)
    
        
        real_minus_traj = real.mean - real_traj.mean
        hall_minus_traj = hall.mean - hall_traj.mean
        rmt_minus_hmt = numpy.maximum(0, real_minus_traj) - \
                        numpy.maximum(0, hall_minus_traj)
        M = max(numpy.abs(real_minus_traj).max(),
                numpy.abs(hall_minus_traj).max())
        add_posneg(case, 'real_minus_traj', real_minus_traj, max_value=M)
        add_posneg(case, 'hall_minus_traj', hall_minus_traj, max_value=M)
        add_posneg(case, 'rmt_minus_hmt', rmt_minus_hmt)
        
        from scipy import polyfit
        (ar, br) = polyfit(real.mean, hall.mean, 1)
        case.text('case', 'Best linear fit: a = %f, b = %f.' % (ar, br))
        
        diffr = (ar * real.mean + br) - hall.mean
        add_posneg(case, 'diffr', diffr)
        
        diffn = diff / numpy.sqrt(hall.var)
        add_posneg(case, 'diffn', diffn)
        
        with case.data_pylab('diffn_plot') as pylab:
            pylab.plot(diffn, 'k.')
        
        with case.data_pylab('linear-fit') as pylab:
            pylab.plot(real.mean, hall.mean, '.')
            pylab.plot(real.mean, real.mean, 'k.')
            
            pylab.plot(real.mean, real.mean * ar + br, 'r.')
            pylab.axis('equal')

        with case.data_pylab('mean') as pylab:
            x = range(len(real.mean))
            pylab.plot(x, real.mean, 'b.', label='real')
            pylab.plot(x, hall.mean, 'r.', label='hallucinated')
            #yerr = numpy.sqrt(var_cwnd) * 2
            #pylab.errorbar(x, cwnd, yerr=yerr, color='k', label='hallucinated')
            pylab.legend()
            pylab.title('mean')

        with case.data_pylab('var') as pylab:
            x = range(len(real.var))
            pylab.plot(x, numpy.sqrt(real.var) * 2, 'b.', label='real')
            pylab.plot(x, numpy.sqrt(hall.var) * 2, 'r.', label='hallucinated')
            pylab.legend()
            pylab.title('variance')

    
        f = case.figure(cols=2)
        f.sub('real', caption='real response (mean)' + cased)
        f.sub('hall', caption='hallucinated response (mean)' + cased)
        f.sub('real_var', caption='real response (var)' + cased)
        f.sub('hall_var', caption='hallucinated response (var)' + cased)
        f.sub('mean', caption='mean comparison' + cased)
        f.sub('var', caption='variance comparison' + cased)
        f.sub('linear-fit', caption='Linear fit between the two (a=%f, b=%f)' 
              % (ar, br))
        f.sub('diff', caption='difference (real - hallucinated)' + cased)
        f.sub('diffr', caption='difference (real*a- hallucinated)' + cased)
        f.sub('diffn', caption='Normalized difference' + cased)
        f.sub('diffn_plot', caption='Normalized difference' + cased)
        
        f.sub('real_minus_traj',
              'Difference between saccading and trajectory (real)' + cased)
        f.sub('hall_minus_traj',
              'Difference between saccading and trajectory (hall)' + cased)
        f.sub('rmt_minus_hmt', 'diff-diff')
        
        
        proj, err, alpha = compute_projection(real.mean, hall.mean)
        
        add_scaled(case, 'proj', proj)
        money = add_posneg(case, 'err', err)
        case.text('stats', 'alpha = %f' % alpha)
        
        f = case.figure(cols=2)
        
        f.sub('proj', caption='projection' + cased)
        f.sub('err', caption='error' + cased)
        
        fmain.sub(money, caption='mismatch' + cased)
       
       
        f = case.figure(cols=2, caption="relative odds")
        
        real_ratio = numpy.log(real.mean / real_traj.mean)
        hall_ratio = numpy.log(hall.mean / hall_traj.mean)
        rel = real_ratio - hall_ratio
        
        a = add_posneg(case, 'real_ratio', (real_ratio))
        b = add_posneg(case, 'hall_ratio', (hall_ratio))
        c = add_posneg(case, 'rel', rel)
        
        f.sub('real_ratio', caption='real relative' + cased)
        f.sub('hall_ratio', caption='hall relative' + cased)
        f.sub('rel')
        
        fodds_real.sub(a, cased)
        fodds_hall.sub(b, cased)
        fodds_rel.sub(c, cased)
        
    output_file = os.path.join(outdir, '%s.html' % r.id)
    resources_dir = os.path.join(outdir, 'images')
    r.to_html(output_file, resources_dir=resources_dir) 
Example No. 34
def product(*vars):
    return map(''.join, prod(*vars))
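Assuming prod is itertools.product, the wrapper joins each tuple of characters back into a string:

from itertools import product as prod

print(list(map(''.join, prod("ab", "12"))))  # ['a1', 'a2', 'b1', 'b2']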
Example No. 35
def main():
    parser = OptionParser(usage=description)
    parser.add_option("--flydra_db", default="saccade_data_flydradb", help="Main data directory")

    parser.add_option(
        "--interactive", action="store_true", default=False, help="Starts an interactive compmake session."
    )

    parser.add_option("--report", default="saccade_report", help="Saccade report directory")

    parser.add_option("--groups", default=None, help="Which groups to consider")

    parser.add_option("--configurations", default=None, help="Which configurations to consider")

    parser.add_option("--combid", default=None, help="How to name this combination of groups/configs.")

    (options, args) = parser.parse_args()  # @UnusedVariable
    if args:
        raise Exception("Spurious arguments %r." % args)

    db = FlydraDB(options.flydra_db)

    robust_split = lambda s: filter(lambda x: x, s.split(","))

    if not options.groups in [None, "all"]:
        groups = robust_split(options.groups)
        if not groups:
            raise Exception("No groups specified.")
        groupset = "_".join(groups)
    else:
        groups = db.list_groups()
        groupset = "all"
        if not groups:
            raise Exception("No groups found.")

    if not options.configurations in [None, "all"]:
        configurations = robust_split(options.configurations)
        if not configurations:
            raise Exception("No configuration specified")
        confset = "_".join(configurations)
    else:
        configurations = db.list_all_versions_for_table(SACCADES_TABLE)
        confset = "all"

        configurations = set()
        for group in groups:
            configurations.update(db.list_versions_for_table_in_group(group, SACCADES_TABLE))
        configurations = natsorted(configurations)

        if not configurations:
            raise Exception("No valid versions of table %r found." % SACCADES_TABLE)

    print ("I will consider the configurations: %r" % configurations)

    if options.combid is None:
        combination = "%s_%s" % (groupset, confset)
    else:
        combination = options.combid
    print ("I call this combination %r." % combination)

    output_dir = os.path.join(options.report, combination)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    set_namespace("master_plot_%s" % combination)

    # we maintain several indices

    # key = (group, configuration, plot)
    index_group_plots = {}
    # key = (sample, plot)
    index_sample_expdata_plots = {}
    # key = (sample, configuration, plot)
    index_sample_saccades_plots = {}

    # First we index the DB
    print ("Looking for data in database...")
    all_samples = set()
    group2samples = {}
    configurations_for_group = {}
    group_has_exp_data = {}
    for group in groups:
        group2samples[group] = db.list_samples_for_group(group)
        all_samples.update(group2samples[group])

        if not group2samples[group]:
            raise Exception("Empty group %r." % group)

        available = db.list_versions_for_table_in_group(group=group, table=SACCADES_TABLE)
        configurations_for_group[group] = set(configurations).intersection(available)

        if not configurations_for_group[group]:
            print ("No configurations found for %r; available %r" % (group, available))

        group_has_exp_data[group] = db.group_has_table(group, EXP_DATA_TABLE)
    all_samples = natsorted(all_samples)

    # print info
    print ("Summary:")
    for group in groups:
        msg = "  group {group:>20}  samples: {nsamples:3} " " configurations: {nconf:3} raw data? {data}".format(
            group=group,
            nsamples=len(group2samples[group]),
            nconf=len(configurations_for_group[group]),
            data=group_has_exp_data[group],
        )
        print (msg)

    # TODO: iterate by sample, not by group
    for group in groups:

        for configuration in configurations_for_group[group]:

            for plot in group_plots:
                job_id = "%s-%s-%s" % (group, configuration, plot.id)

                index_group_plots[(group, configuration, plot.id)] = comp(
                    wrap_group_plot, options.flydra_db, group, configuration, plot.command, plot.args, job_id=job_id
                )

            for sample, plot in prod(group2samples[group], sample_saccades_plots):
                job_id = "%s-%s-%s" % (sample, configuration, plot.id)
                key = (sample, configuration, plot.id)
                if key in index_sample_saccades_plots:
                    # we already did it as part of another group
                    continue
                index_sample_saccades_plots[key] = comp(
                    wrap_sample_saccades_plot,
                    options.flydra_db,
                    sample,
                    configuration,
                    plot.command,
                    plot.args,
                    job_id=job_id,
                )

        if group_has_exp_data[group]:
            for sample, plot in prod(group2samples[group], sample_expdata_plots):
                job_id = "%s-%s" % (sample, plot.id)
                key = (sample, plot.id)
                if key in index_sample_expdata_plots:
                    # we already did it as part of another group
                    continue
                index_sample_expdata_plots[key] = comp(
                    wrap_sample_expdata_plot, options.flydra_db, sample, plot.command, plot.args, job_id=job_id
                )

    # now we create the indices
    # fix configuration, function; iterate groups
    for configuration, plot in itertools.product(configurations, group_plots):
        subs = []
        descs = []

        page_id = "%s.%s" % (configuration, plot.id)

        for group, group_desc in order_groups(groups):
            if not configuration in configurations_for_group[group]:
                continue

            descs.append(group_desc)
            subs.append(index_group_plots[(group, configuration, plot.id)])

        if not subs:
            raise Exception("no groups for configuration %r." % configuration)

        job_id = page_id
        comp(combine_reports, subs, descs, page_id, output_dir, job_id=job_id)

    comp(
        create_gui,
        filename=os.path.join(output_dir, "group_plots.html"),
        menus=[
            ("Detector", configurations, configurations),
            ("Plot/table", map(lambda x: x.id, group_plots), map(lambda x: x.description, group_plots)),
        ],
        job_id="gui-group_plots",
    )

    # fix group, function; iterate samples
    for group in groups:
        if not group_has_exp_data[group]:
            continue

        for plot in sample_expdata_plots:
            subs = []
            descs = []
            for sample in group2samples[group]:
                descs.append(sample)
                subs.append(index_sample_expdata_plots[(sample, plot.id)])

            page_id = "%s.%s" % (group, plot.id)

            job_id = page_id
            comp(combine_reports, subs, descs, page_id, output_dir, job_id=job_id)

    # get the ordered group lists and desc
    ordered_groups = map(lambda t: t[0], order_groups(groups))
    ordered_groups_desc = map(lambda t: t[1], order_groups(groups))

    comp(
        create_gui,
        filename=os.path.join(output_dir, "expdata_plots.html"),
        menus=[
            ("Group", ordered_groups, ordered_groups_desc),
            (
                "Plot/table",
                map(lambda x: x.id, sample_expdata_plots),
                map(lambda x: x.description, sample_expdata_plots),
            ),
        ],
        job_id="gui-expdata_plots",
    )

    # fix configuration, group, function; iterate samples
    for group in groups:

        for configuration in configurations:

            if not configuration in configurations_for_group[group]:
                for plot in sample_saccades_plots:
                    page_id = "%s.%s.%s" % (configuration, group, plot.id)
                    comp(
                        write_empty,
                        page_id,
                        output_dir,
                        "Group %r has not been processed with algorithm %r." % (group, configuration),
                        job_id=page_id,
                    )
                continue

            for plot in sample_saccades_plots:

                subs = []
                descs = []
                for sample in group2samples[group]:
                    descs.append(sample)
                    r = index_sample_saccades_plots[(sample, configuration, plot.id)]
                    subs.append(r)

                page_id = "%s.%s.%s" % (configuration, group, plot.id)

                job_id = page_id
                comp(combine_reports, subs, descs, page_id, output_dir, job_id=job_id)

    comp(
        create_gui,
        filename=os.path.join(output_dir, "saccade_plots.html"),
        menus=[
            ("Detector", configurations, configurations),
            ("Group", ordered_groups, ordered_groups_desc),
            (
                "Plot/table",
                map(lambda x: x.id, sample_saccades_plots),
                map(lambda x: x.description, sample_saccades_plots),
            ),
        ],
        job_id="gui-saccade_plots",
    )

    # fix configuration, sample; plot fullscreen

    for group in groups:
        for configuration in configurations:

            for sample in group2samples[group]:

                # XXX make it cleaner
                if not configuration in configurations_for_group[group]:
                    for plot in sample_fullscreen_plots:
                        page_id = "%s.%s.%s" % (sample, configuration, plot.id)
                        comp(
                            write_empty,
                            page_id,
                            output_dir,
                            'Group %s has not been processed with algorithm "%s".' % (group, configuration),
                            job_id=page_id,
                        )
                    # print "skipping sample %s group %s config %s" %\
                    #     (sample,group, configuration)
                    continue

                if not group_has_exp_data[group]:
                    for plot in sample_fullscreen_plots:
                        page_id = "%s.%s.%s" % (sample, configuration, plot.id)
                        comp(
                            write_empty,
                            page_id,
                            output_dir,
                            "Group %r does not have raw experimental data." % (group),
                            job_id=page_id,
                        )
                    continue

                for plot in sample_fullscreen_plots:

                    job_id = "%s-%s-%s" % (sample, configuration, plot.id)

                    # FIXME: error if sample in 2 groups
                    job = comp(
                        wrap_sample_saccades_plot,
                        options.flydra_db,
                        sample,
                        configuration,
                        plot.command,
                        plot.args,
                        job_id=job_id,
                    )

                    page_id = "%s.%s.%s" % (sample, configuration, plot.id)
                    comp(write_report, job, output_dir, page_id, job_id=job_id + "-write_report")

    comp(
        create_gui,
        filename=os.path.join(output_dir, "sample_fullscreen_plots.html"),
        menus=[
            ("Sample", all_samples, all_samples),
            ("Detector", configurations, configurations),
            (
                "Plot/table",
                map(lambda x: x.id, sample_fullscreen_plots),
                map(lambda x: x.description, sample_fullscreen_plots),
            ),
        ],
        job_id="gui-sample_fullscreen_plots",
    )

    tabs = [
        (
            "group_plots",
            "By group",
            "This set displays one plot/table for each group of samples. "
            "You have the further choice of detection algorithm and plot/table to display.",
        ),
        (
            "saccade_plots",
            "By sample",
            "This set displays one plot/table for each individual sample. "
            "You have the further choice of which group to consider, which "
            "detection algorithm, and which plot/table to display.",
        ),
        (
            "expdata_plots",
            "By sample (raw)",
            "This set displays one plot/table for each individual sample, "
            " produced from the raw data (no saccade detection, so no choice of detector). "
            "You have the further choice of which group to consider, "
            "and which plot/table to display."
            " Note that some samples might be missing; for example, we don't use "
            " raw orientation data for the Mamarama samples.",
        ),
        (
            "sample_fullscreen_plots",
            "By sample, single page",
            "This set displays one entire page for each sample. "
            "You have the further choice of sample, "
            "detection algorithm, and which plot/table to display.",
        ),
    ]

    comp(create_main_gui, tabs, filename=os.path.join(output_dir, "main.html"), job_id="gui-main")

    if options.interactive:
        # start interactive session
        compmake_console()
    else:
        # batch mode
        # try to do everything
        batch_command("make all")
        # start the console if we are not done
        # (that is, make all failed for some reason)
        todo = list(parse_job_list("todo"))
        if todo:
            print ("Still %d jobs to do." % len(todo))
            sys.exit(-2)
Example No. 36
    Plot("levy_vs_exp", levy_exp, desc="Levy and exponential fits for saccade interval"),
    Plot("interval_histogram", interval_histogram, desc="Normalized bin histogram for interval"),
]

sample_saccades_plots = [
    Plot(
        "simulated_trajectories",
        plot_simulated_sample_trajectories,
        desc="Plots of simulated trajectories (using saccades)",
    )
]


var_group = [v for v in variables if v.percentiles]

for delays, type in prod([[0, 1], [0, 2]], ["pearson", "spearman", "kendall"]):

    name = "timecorr%d%d%s" % (delays[0], delays[1], type)
    desc = "Correlation analysis (%s, delay: %d)" % (type, delays[-1])
    args = {"variables": var_group, "delays": delays, "type": type}

    group_plots.append(Plot(name, group_var_time_correlation, args, desc))
    sample_saccades_plots.append(Plot(name, sample_var_time_correlation, args, desc))


for var in variables:
    group_plots.append(Plot("hist_%s" % var.id, group_var_hist, {"variable": var}, desc="Histograms of %s" % var.name))
    if var.percentiles:
        group_plots.append(
            Plot(
                "percentiles_%s" % var.id,
Example No. 37
##########

N = 64
L = .1
x = np.linspace(0, L, N)
y = np.linspace(0, L, N)
XX, YY = np.meshgrid(x, y)
Ex, Ey = np.zeros([N, N]), np.zeros([N, N])

# exponential spacing, to better sample larger beta
xx = np.linspace(0.001, 1, 50)
bet_arr = 1. - np.exp(-5 * xx)
t = 0
print('Computing field frames for Ex/Ey animation')
for k, bet in enumerate(bet_arr):
    for i, j in prod(range(N), range(N)):
        Ex[i, j], Ey[i, j] = electric_field(q, XX[i, j], YY[i, j], t, bet)

    # replace NaNs and zeros with the second-smallest value
    # (aesthetic purposes only)
    Exmin = np.sort(list(set(Ex.flatten())))[1]
    Eymin = np.sort(list(set(Ey.flatten())))[1]
    Ex[np.isnan(Ex) + (Ex == 0)] = Exmin
    Ey[np.isnan(Ey) + (Ey == 0)] = Eymin

    # Ex image
    img = np.hstack([Ex[:, ::-1], Ex])
    img = np.vstack([img[::-1, :], img])

    plt.figure(figsize=(7, 5))
    plt.contourf(np.log10(img), origin='lower', levels=[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6], \
Example No. 38
def disambiguate(aName):
    ''' This function accepts a <name hash> and using the fast community detection algorithm creates groups of names
        that are likely to be the same author. It then either enters them in the database or writes their info to files
        for verification
    '''

    def return_handle(cur, aNamePrint, code):
        '''This function takes care of cleaning files before returning the disambiguate function
           code: 0 - remove all files
                 1 - remove .debug and .wpairs files
                 2 - remove .debug file
        '''
        #Remove .groups and .wpairs file
        try:
            ### Debug ###
            if code in [0,1,2]: os.remove('%s.debug'%aNamePrint)
            ### End Debug ###
            if code in [0,1]: os.remove('%s.wpairs'%aNamePrint) 
            if code == 0: os.remove(group_file)
        except OSError as e:
            print(e.errno, e.filename, e.strerror)
            pass
        conn.commit()
        cur.close()
        conn.close()

    # aName contains a space, e.g. 'adams j'. This is how names are stored in the database
    # aNamePrint is the stripped version that contains only alpha-num chars. 'adamsj'
    aNamePrint = aName[:-2]+aName[-1]
    patt = re.compile(r'[\W_]+')
    aNamePrint = patt.sub('',aNamePrint)

    ### For Debugging ###
    open("%s.debug"%aNamePrint,'a').close()
    ### End Debug ###

    itemA = dict()
    conn=psycopg2.connect(database="PubData",port=5433)
    cur = conn.cursor()
    cur.execute("SELECT B.author_id, A.item_id FROM Item_author A INNER JOIN Author_Hash B ON A.author_id=B.author_id WHERE name_hash=(%s);",(aName,))
    #print 'No. of Author names:', cur.rowcount
    bucket_size = cur.rowcount
    for row in cur.fetchall(): itemA[row[0]]=row[1]
    
    # Fetching information about the authors and storing them in data structures
    iAuthor, iAddress, iItem, iIssue, iCoAuthor = dict(), dict(), dict(), dict(), dict()
    for author in itemA.keys():
        # Information related to the author
        data = list()
        cur.execute("SELECT init2,suffix,first_name FROM Author_Hash A INNER JOIN Author_Name B ON A.author_id=B.author_id WHERE A.author_id=(%s);",(author,))
        if cur.rowcount == 0 : data = [' ',' ',' ']
        else:
            temp = cur.fetchone()
            for i in temp:
                if i is None: data.append(' ')
                else: data.append(i)
        # Extract second initial from fullname
        cur.execute("SELECT fullname FROM Author WHERE author_id=(%s);",(author,))
        if cur.rowcount == 0 : "Author %s does not show up. This should not be happening."
        else: 
            fullname = cur.fetchone()[0]
            initials = fullname.split(',')[1].strip()
            if len(initials) > 1:
                data[0] = initials[1].lower()
        # Author keywords
        cur.execute("SELECT author_keyword FROM Author_Keyword WHERE author_id=(%s);",(author,))
        keywords = set()
        if cur.rowcount > 0 :
            for t in cur.fetchall() : keywords.add(t[0])
        ai = sm.AuthorInfo(data[0],data[1],data[2],keywords)
        iAuthor[author] = ai

        # Information related to the Address
        cur.execute("SELECT full_address,email FROM Author_Address A INNER JOIN Address B ON A.address_id = B.address_id WHERE A.author_id=(%s);",(author,))
        if cur.rowcount == 0 : data = [' ',' ']
        else:
            temp = cur.fetchone()
            data = list()
            for i in temp:
                if i is None: data.append(' ')
                else: data.append(i)
        ai = sm.AddressInfo(data[0],data[1])
        iAddress[author] = ai
        
        # Information related to Item
        data = [None] * 5  # five fields filled in below
        cur.execute("SELECT title FROM Item WHERE item_id=(%s);",(itemA[author],))
        if cur.rowcount == 0: data[0] = ' '
        else : data[0] = cur.fetchone()[0]
        cur.execute("SELECT language FROM Item_Language WHERE item_id=(%s);",(itemA[author],))
        if cur.rowcount == 0: data[1] = set()
        else : 
            tset = set()
            for x in cur.fetchall() : tset.add(x[0])
            data[1] = tset
        cur.execute("SELECT item_keyword FROM Item_Keyword WHERE item_id=(%s);",(itemA[author],))
        if cur.rowcount == 0: data[2] = set()
        else :
            tset = set()
            for x in cur.fetchall():tset.add(x[0])
            data[2] = tset
        cur.execute("SELECT B.org_name FROM Research_org A INNER JOIN Organization B ON A.org_id = B.org_id WHERE A.item_id=(%s);",(itemA[author],))
        if cur.rowcount == 0: data[3] = set()
        else :
            tset = set()
            for x in cur.fetchall() : tset.add(x[0])
            data[3] = tset
        cur.execute("SELECT pref_name FROM Item_PrefName WHERE item_id=(%s);",(itemA[author],))
        if cur.rowcount == 0 : data[4] = set()
        else :
            tset = set()
            for x in cur.fetchall() : tset.add(x[0])
            data[4] = tset

        ii = sm.ItemInfo(data[0],data[1],data[2],data[3],data[4])
        iItem[author] = ii     

        # Information about Issue
        cur.execute('''SELECT D.full_title, D.subject, D.year FROM Item A INNER JOIN 
                       (SELECT B.issue_id, B.full_title, C.subject, B.year  FROM 
                       Issue B INNER JOIN Subject_Cat C ON B.issue_id=C.issue_id) 
                       AS D ON A.issue_id=D.issue_id WHERE item_id=(%s);''',(itemA[author],))
        data = [None] * 3
        if cur.rowcount == 0: data = [' ',set(),' '] 
        else : 
            tset = set()
            temp = cur.fetchone()
            data[0] = temp[0]
            tset.add(temp[1])
            data[2] = int(temp[2])
            for x in cur.fetchall(): tset.add(x[1])
            data[1] = tset
        iIssue[author] = sm.IssueInfo(data[0],data[1],data[2])

        # Information about CoAuthors
        tset = set()
        cur.execute('''SELECT name_hash FROM
                       Item_Author A INNER JOIN Author_Hash B
                       ON A.author_id=B.author_id
                       WHERE A.item_id=(%s)
                       AND NOT A.author_id=(%s);''',(itemA[author],author))    
        if cur.rowcount > 0:
            for x in cur.fetchall() : tset.add(x[0])
        tset.discard(aName)
        iCoAuthor[author] = sm.CoAuthorInfo(tset)

#    for key in itemA.keys(): 
#        print key, iIssue[key].title
#        print iIssue[key].subject
    

    # Compute edge scores
    if bucket_size > 1000 :
        x = (bucket_size - 1000.0)/1000.0
        a, b = 7.0/552, 79.0/552 
        cutoff = ceil(a*x*x + b*x + 5)
    elif bucket_size <= 20:
        cutoff = 3
    elif bucket_size <= 100:
        cutoff = 4
    else:
        cutoff = 5
    #print '%s cutoff = %d'%(aNamePrint,cutoff)
    edges_exist = False
    wpairs = dict()
    cite_set = set() # Stores pairs of authors with a citation between them
    coauthor_set = set() # Stores pairs of authors that share coauthors with the same last name and first initial
    negative = list() # Stores negative edges
    # singles contains nodes not present in the graph
    singles = set(itemA.keys())
    for a in comb(itemA.keys(),2):
        au_score = sm.authorScore(iAuthor[a[0]],iAuthor[a[1]])
        ad_score = sm.addressScore(iAddress[a[0]],iAddress[a[1]])
        it_score = sm.itemScore(iItem[a[0]],iItem[a[1]])
        is_score = sm.issueScore(iIssue[a[0]],iIssue[a[1]])
        co_score = sm.coAuthorScore(iCoAuthor[a[0]],iCoAuthor[a[1]])
        if co_score > 0 : coauthor_set.add((a[0],a[1]))
        ci_score = sm.citeScore(itemA[a[0]],itemA[a[1]],cur)
        if ci_score > 0 : cite_set.add((a[0],a[1]))
        in_score = sm.interaction(iItem[a[0]],iItem[a[1]],iIssue[a[0]],iIssue[a[1]],iCoAuthor[a[0]],iCoAuthor[a[1]])
        score = au_score + ad_score + it_score + is_score + co_score + ci_score + in_score
        # Change : subtracting cutoff from edge weight
        wpairs[(a[0],a[1])] = score - cutoff
        if score < 0 : negative.append([a[0],a[1]])
        if score > cutoff : 
            singles.difference_update([a[0],a[1]])
            edges_exist = True

    # If no edge exists, skip rest of disambiguate function
    # and call individual()
    if not edges_exist:
        for author in singles:
            au = iAuthor[author]
            f, s = name(au)
            aff = {iIssue[author].year:iItem[author].pref_name.pop()}
            grp = set([author])
            individual(cur,grp,aff,f,s,aName)

        # Insert completed hashes in Hash_Done
        cur.execute("INSERT INTO Hash_Done (name_hash) VALUES (%s);",(aName,))            
        return_handle(cur,aNamePrint,2) 
        return
        # disambiguate() ends

    # Map node ids to sequential numbers. Helps increase computation speed immensely
    # Also write new edge file to be used by fast community algorithm
    i = 1
    f = open('%s.wpairs'%aNamePrint,'w')
    nodemap = dict()
    for e in wpairs.keys():
        if wpairs[e] > 0 :
            n1,n2,w = e[0],e[1],wpairs[e]
            if n1 in nodemap : n1 = nodemap[n1]
            else : 
                nodemap[n1] = i
                n1 = i
                i += 1
            if n2 in nodemap : n2 = nodemap[n2]
            else : 
                nodemap[n2] = i
                n2 = i
                i += 1
            f.write('%d\t%d\t%d\n'%(n1,n2,w))
    f.close() # Edge file written
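    # The .wpairs file written above is a tab-separated edge list of the form
    # "node1<TAB>node2<TAB>weight" using the remapped integer ids, e.g. a line
    # "1\t2\t3" for an edge of adjusted weight 3 (illustrative values only).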

    # Reverse mapping dictionary
    revmap = dict()
    for k in nodemap.keys() : revmap[nodemap[k]] = k

    # Running fast community algorithm
    command = "sudo /media/RaidDisk1/disamb/fast_comm/FastCommunity_wMH -f %s.wpairs -l 1"%aNamePrint
    try:
        p = subprocess.Popen(command,shell=True)
        p.wait() # Waiting for algorithm to finish
    except OSError as e:
        print e.strerror 
        print 'except', aNamePrint
        return_handle(cur, aNamePrint, 1)
        return

    # Reading group file written by fast_community algorithm
    group_file = '%s-fc_1.groups'%aNamePrint
    gs, groups = list(),list()
    g = set()
    gdict= dict()
    code = -1 #Code used to remove files at the end
    try:
        # Opening group file
        gfile = open(group_file,'r')
    except:
        # Groups file does not exist
        # Perform simple grouping based on connected components
        simple_cutoff = 10 - cutoff
        G = dict()
        for e in wpairs.keys():
            n1, n2, w = e[0], e[1], wpairs[e]
            if not n1 in G : G[n1] = set()
            if not n2 in G : G[n2] = set()
            if w > simple_cutoff:
                G[n1].add(n2)
                G[n2].add(n1)
        gs = components(G)
        code = 1        
    else:
        # Groups file exists
        for line in gfile.readlines():
            if line[:5] == 'GROUP' :
                if len(g) > 0 : gs.append(g)
                g = set()
            else :
                node = revmap[int(line.split()[0])] 
                g.add(node)
        gs.append(g)
        gfile.close()
        code = 0

    # Generic group handling begins
    counter = 0
    for g in gs:
        if len(g) == 1: singles.update(g)
        else: 
            new_g = set()
            for node in g:
                gdict[node] = counter
                new_g.add(node)
            groups.append(new_g)
            counter += 1 
            
    # Assigning group numbers to singletons
    for node in singles:
        if not node in gdict:
            counter += 1
            gdict[node] = counter

#---Removing negative edge weights from within a group
    for e in negative:
        if gdict[e[0]] == gdict[e[1]]:
            group_no = gdict[e[0]]
            rnode = removeNode(e,groups[group_no],wpairs)
            groups[group_no].discard(rnode)
            singles.add(rnode)
            counter += 1
            gdict[rnode] = counter

#---Post clustering group merge
    iGroup = list()
    # Create group info for pairwise comparison
    for i in xrange(len(groups)):
        f_name, s_init = list(), list() #First Name and Second initial
        address = set()
        for author in groups[i]:
            # Name
            au = iAuthor[author]
            f, s = name(au)
            if not f == '': f_name.append(f)
            if not s == '':s_init.append(s)
            # Address
            ad = iAddress[author]
            if ad.fullAdd.endswith('USA'):
                city, state, postal = sm.usAddress(ad.fullAdd.lower())
                address.add((city, state))
        if len(f_name) == 0: first = ''
        else : first = Counter(f_name).most_common(1)[0][0]
        if len(s_init) == 0: init2 = ''
        else : init2 = Counter(s_init).most_common(1)[0][0]
        aff = affiliation(cur,groups[i],iItem,iIssue,iAddress)
        ginfo = sm.GroupInfo(i,(first,init2),address,aff)
        iGroup.append(ginfo)

    # Create graph to merge connected components
    G = dict() # Adjacency list
    for i in xrange(len(groups)): G[i] = set()
    for g in comb(iGroup,2):
        # Group Number
        i1, i2 = g[0].i, g[1].i
        # Name
        name_score, addr_score, cite_score, co_score, aff_score = 0,0,0,0,0
        f1, s1 = g[0].name
        f2, s2 = g[1].name
        if f1 == '' or f2 == '' : pass
        elif f1 == f2 : name_score += 5
        else : continue
        if s1 == '' or s2 == '' : pass
        elif s1 == s2 : name_score += 5
        else : continue
        if name_score < 10 : name_score = 0 
        # Address
        if len(g[0].address & g[1].address) > 0 : addr_score = 10
        # Citation and CoAuthor
        cite_found, coauthor_found = False, False 
        for x,y in prod(groups[i1], groups[i2]):
            if (x,y) in cite_set or (y,x) in cite_set:
                cite_found = True
                cite_score = 10
                if coauthor_found: break
            if (x,y) in coauthor_set or (y,x) in coauthor_set:
                coauthor_found = True
                co_score = 10
                if cite_found: break        
        # Affiliation
        aff1, aff2 = g[0].affiliation, g[1].affiliation
        for y in aff1.keys():
            if aff1[y] == aff2.get(y,'NIL'):
                aff_score = 10
                break
        score = name_score+addr_score+cite_score+co_score+aff_score
        #print 'Groups', i1, i2
        #print 'Name:', name_score, 'Address:', addr_score, 'Cite:', cite_score, \
        #      'CoAuthor:', co_score, 'Affiliation:', aff_score, 'Total:', score
        if score >= 20:
            G[i1].add(i2)
            G[i2].add(i1)
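    # Worked example of the merge threshold: two groups whose most common first
    # name and second initial both match (name_score 10) and that share a US
    # (city, state) address (addr_score 10) reach score 20 and get an edge in G.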

    # Forming new, possibly merged, groups 
    group_list = components(G)
    #print 'group_list:', group_list
    new_groups = list()
    for gl in group_list:
        g = set()
        for x in gl: g.update(groups[x])
        new_groups.append(g)

    '''        
#############################################################
# Printing group information for manual curation
    #Add the singles group to the end
    new_groups.append(singles)
    # Make directory for author hash and store groups inside
    os.mkdir(aNamePrint)
    i = 0
    for group in new_groups:
        file = ''
        file = open("%s/Group%s.txt"%(aNamePrint,i),'w')
        i += 1
        file.write("Author_id\tSec Initial\tSuffix\tFirst\tKeywords\tCo Authors\tAddress\tEmail\tItem Title\tLanguage\tItem Keywords\tOrganizations\tPref Names\tYear\tIssue Title\tIssue Subjects \n")
        for author in group:
            au = iAuthor[author]
            ad = iAddress[author]
            it = iItem[author]
            iu = iIssue[author]
            co = iCoAuthor[author]
            keywords = ', '.join(au.keywords)
            au_string = " %s\t%s\t%s\t%s"%(au.init2,au.suffix,au.first,keywords)
            ad_string = " %s\t%s"%(ad.fullAdd,ad.email)
            language = ', '.join(it.lang)
            keywords = ', '.join(it.keywords)
            org = ', '.join(it.org)
            pref_name = ', '.join(it.pref_name)
            it_string = " %s\t%s\t%s\t%s\t%s"%(it.title,language,keywords,org,pref_name)
            subject = ', '.join(iu.subject)
            iu_string = " %s\t%s\t%s"%(iu.year,iu.title,subject)
            co_string = ', '.join(co.authors)
            file.write("%d\t%s\t%s\t%s\t%s\t%s\n"%(author,au_string,co_string,ad_string,it_string,iu_string))
        file.close()

#############################################################

    '''
#---Populate Individual details using individual function
    for ng in new_groups:
        f_name, s_init = list(), list() #First Name and Second initial
        for author in ng:
            # Name
            au = iAuthor[author]
            f, s = name(au)
            if not f == '': f_name.append(f)
            if not s == '': s_init.append(s)
        if len(f_name) == 0: first = ''
        else : first = Counter(f_name).most_common(1)[0][0]
        if len(s_init) == 0: init2 = ''
        else : init2 = Counter(s_init).most_common(1)[0][0]
        aff = affiliation(cur,ng,iItem,iIssue,iAddress)
        #print ' '.join(['%d:%s'%(y,aff[y])for y in sorted(aff.keys())]) 

        individual(cur,ng,aff,first,init2,aName)

    for author in singles:
        au = iAuthor[author]
        f, s = name(au)
        aff = {iIssue[author].year:iItem[author].pref_name.pop()}
        grp = set([author])
        individual(cur,grp,aff,f,s,aName)

    # Insert completed hashes in Hash_Done
    cur.execute("INSERT INTO Hash_Done (name_hash) VALUES (%s);",(aName,))                
    return_handle(cur,aNamePrint,code)
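
# components() and removeNode() are called in the function above but are not
# part of this excerpt. Minimal sketches of the assumed behaviour (in the
# original module they would be defined before the function that uses them):

def components(G):
    # Connected components of an undirected graph given as an adjacency dict
    # {node: set(neighbours)}; returns a list of sets of nodes.
    seen = set()
    comps = list()
    for start in G:
        if start in seen:
            continue
        comp, stack = set(), [start]
        while stack:
            node = stack.pop()
            if node in seen:
                continue
            seen.add(node)
            comp.add(node)
            stack.extend(G[node] - seen)
        comps.append(comp)
    return comps

def removeNode(edge, group, wpairs):
    # Assumption: given a negative edge inside a group, return the endpoint that
    # is more weakly connected to the rest of the group (lower total edge weight
    # to the other members), so that it can be ejected into singles.
    def weight_to_group(node):
        total = 0
        for other in group:
            if other != node:
                total += wpairs.get((node, other), wpairs.get((other, node), 0))
        return total
    return min(edge, key=weight_to_group)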
Exemplo n.º 39
0
from itertools import product as prod
from sys import argv

with open("string_list.txt", "r") as file:
    test_cases = [x.strip() for x in file.read().split("\n") if x]

for case in test_cases:
    case = case.split(",")
    if len(case) != 2:
        continue
    N, S = case
    options = prod(S, repeat = int(N))
    options = set(options)
    options = sorted(["".join(x) for x in options])
    print(",".join(options))
Exemplo n.º 40
0
def generate_ijk_bounds(dims):
    ibounds = [(l, u) for l in range(dims[0]) for u in range(l, dims[0])]
    jbounds = [(l, u) for l in range(dims[1]) for u in range(l, dims[1])]
    kbounds = [(l, u) for l in range(dims[2]) for u in range(l, dims[2])]
    return prod(ibounds, jbounds, kbounds)
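# Worked example (assuming prod is itertools.product, as in the other snippets):
# generate_ijk_bounds((2, 1, 1)) iterates over the product of
# ibounds = [(0, 0), (0, 1), (1, 1)], jbounds = [(0, 0)] and kbounds = [(0, 0)],
# i.e. three ((il, iu), (jl, ju), (kl, ku)) triples.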
def count(L):
    # Count the elements of the Cartesian product of the iterables in L
    # that pass validate() (validate is defined elsewhere in the original).
    applicants = prod(*L)
    return sum(1 for a in applicants if validate(a)) % 10000
Exemplo n.º 42
0
import sys
import hashlib

try:
    import psyco

    psyco.full()
except:
    pass

if len(sys.argv) != 2:
    print "speedtouchkey.py &lt;SSID>"
    sys.exit(1)

chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
ssid = sys.argv[1].lower().strip()

try:
    int(ssid, 16)
except ValueError:
    print "%s is not a valid SSID." % ssid
    sys.exit(1)

for year in range(8, 11):
    print "Searching year %02d..." % year
    for week in range(1, 53):
        for xxx in prod(chars, chars, chars):
            xx = "".join(xxx)
            serial = "CP%02d%02d%s" % (year, week, hexl(xx).upper())
            sha = hashlib.sha1(serial).hexdigest()
            if sha.endswith(ssid):
                print "  Likely key: %s (serial %s)." % (sha[:10], "CP%02d%02d??%s" % (year, week, xx))
def variation_acc2_local_disturb(sess,
                                 network,
                                 x_,
                                 keep_prob,
                                 saver,
                                 noise_name,
                                 gamma,
                                 alpha,
                                 controls_nb,
                                 test_input,
                                 test_target,
                                 n_ts,
                                 evo_time,
                                 eps,
                                 accept_err):

    # restoring saved model
    saver.restore(sess, "weights/dim_{}/{}/gam_{}_alfa_{}.ckpt".format(model_dim, noise_name, gamma, alpha))

    # initializing the result tensor; the first two dimensions correspond to the coordinate
    # being disturbed, and the variation of the outputs is collected along the last dimension
    results = np.zeros((n_ts, controls_nb, len(test_input)))

    print(len(test_input))
    print(np.shape(results))
    iter = -1

    for sample_nb in range(len(test_input)):

        # taking sample NCP
        origin_NCP = test_input[sample_nb]
        # taking target superoperator corresponding to the NCP
        origin_superoperator = test_target[sample_nb]
        tf_result = False


        # calculating nnDCP corresponding to input NCP
        pred_DCP = get_prediction(sess, network, x_, keep_prob, np.reshape(origin_NCP, [1, n_ts, controls_nb]))
        # calculating superoperator from nnDCP
        sup_from_pred_DCP = integrate_lind(pred_DCP[0], (alpha, gamma), n_ts, evo_time, noise_name, tf_result)

        print("sanity check")
        acceptable_error = fidelity_err([origin_superoperator, sup_from_pred_DCP], dim, tf_result)
        print("predicted DCP", acceptable_error)
        print("---------------------------------")

        ############################################################################################################
        # the experiment is performed only if the sanity-check error is within the accepted error
        if acceptable_error <= accept_err:
            iter += 1
            # iteration over all coordinates
            for (t, c) in prod(range(n_ts), range(controls_nb)):
                new_NCP = np.copy(origin_NCP)  # copy so the perturbation does not mutate the original NCP
                if new_NCP[t, c] < (1 - eps):
                    new_NCP[t, c] += eps
                else:
                    new_NCP[t, c] -= eps

                sup_from_new_NCP = integrate_lind(new_NCP, (alpha, 0.), n_ts, evo_time, noise_name, tf_result)
                new_DCP = get_prediction(sess, network, x_, keep_prob,
                                         np.reshape(new_NCP, [1, n_ts, controls_nb]))
                sup_form_new_DCP = integrate_lind(new_DCP[0], (alpha, gamma), n_ts, evo_time, noise_name, tf_result)
                error = fidelity_err([sup_from_new_NCP, sup_form_new_DCP], dim, tf_result)

                #print(error)
                # if the predicted nnDCP gives a wrong superoperator, store a sentinel label (-1) instead of the output variation
                if error <= accept_err:
                    results[t, c, iter] = np.linalg.norm(pred_DCP - new_DCP)
                else:
                    results[t, c, iter] = -1

        print(iter)

    print(np.shape(results))
    return results
Exemplo n.º 44
0
# Number of ways to place the desks and racks in an x * y grid
def dl_pattern(x, y):
    xy = x * y
    if xy < d + l:
        return 0
    return C(xy, d) * C(xy - d, l)

n = x * y
m = 10**9 + 7
# If x * y == d + l, then (ways to place desks) == (ways to place racks)
if n == d + l:
    desk_pattern = C(n, d) % m

# Apply inclusion-exclusion over the cases where one of the top/bottom/left/right edges is empty.
# Two or more empty rows or columns cannot occur (that would violate the conditions on x and y),
# so it is enough to consider the patterns where an entire edge is empty.
else:
    desk_pattern = 0
    # us: top edge, ds: bottom edge, ls: left edge, rs: right edge
    for us, ds, ls, rs in prod([0, 1], repeat=4):
        pattern = dl_pattern(x - ls - rs, y - us - ds)
        if (us + ds + ls + rs) % 2 == 0:
            desk_pattern += pattern
        else:
            desk_pattern -= pattern

ans = pos_pattern * desk_pattern
ans = ans % m
print(ans)
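
# C(n, k) is used above but is not part of this excerpt. A minimal sketch,
# assuming it computes the binomial coefficient modulo the prime m (it would
# need to be defined before its first use in the original script):
def C(n, k, m=10**9 + 7):
    if k < 0 or k > n:
        return 0
    num, den = 1, 1
    for i in range(k):
        num = num * (n - i) % m
        den = den * (i + 1) % m
    # Fermat's little theorem gives the modular inverse since m is prime
    return num * pow(den, m - 2, m) % m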
Exemplo n.º 45
0
def generate_ijk_bounds(dims):
    ibounds = [(l, u) for l in range(dims[0]) for u in range(l, dims[0])]
    jbounds = [(l, u) for l in range(dims[1]) for u in range(l, dims[1])]
    kbounds = [(l, u) for l in range(dims[2]) for u in range(l, dims[2])]
    return prod(ibounds, jbounds, kbounds)
def main():
    parser = OptionParser(usage=description)

    parser.add_option("--db", default='flydra_db', help="Data directory")

    parser.add_option("--interactive", default=False, action="store_true",
                      help="Start a compmake interactive session."
                      " Otherwise run in batch mode") 

    parser.add_option("--empty_group_ok",
                      default=False, action="store_true",
                      help="do not give up if one group does not have samples ")


    (options, args) = parser.parse_args() #@UnusedVariable
    
    if options.db is None:
        logger.error('Please specify a directory using --db.')
        sys.exit(-1)

    outdir = os.path.join(options.db, 'out/saccade_view_joint_analysis')

    db = FlydraDB(options.db, False)
        
    set_namespace('saccade_view_joint_analysis')
    
    # for each image we do a different report
    data = {}
    for image in images:
        
        # For each image we have different tables
        tables = ["saccades_view_%s_%s" % (view.id, image.id) for view in views]
 
        all_available = [x for x in db.list_samples() if db.has_saccades(x) and 
                          all([db.has_table(x, table) for table in tables])] 
        
        # We further divide these in post and nopost
        groups_samples = {
            'posts':
                filter(lambda s: db.get_attr(s, 'stimulus') != 'nopost', all_available),
            'noposts':
                filter(lambda s: db.get_attr(s, 'stimulus') == 'nopost', all_available)
        }
        
        # now, for each group
        for group in groups:
             
            is_hallucination = image.id.startswith('h')
            white_arena = image.id.endswith('_w') 
        
            if (not is_hallucination) and white_arena and (group.id == 'noposts'):
                # if there are no posts, it's useless
                continue
        
            samples = groups_samples[group.id] 
            if not samples:
                print "Warning: no samples for %s/%s" % (image.id, group.id)
                continue 
      
            # global statistics
            key = (group.id, image.id)
            job_id = "%s-%s" % key
            data[key] = comp(compute_stats, options.db,
                             samples, image.id, job_id=job_id)

            for saccades_set, direction in prod(saccades_sets, dirs):             
                view2result = {}
                for i, view in enumerate(views):                
                    table = tables[i]
                    key = Exp(image=image.id, group=group.id,
                              view=view.id, dir=direction.id,
                              saccades_set=saccades_set.id)
                    job_id = "%s-%s-%s-%s-%s" % key
                    
                    result = comp(compute_saccade_stats, options.db,
                             samples, table,
                             [direction.args, saccades_set.args],
                             job_id=job_id)
                    
                    data[key] = result
                    view2result[view.id] = result
          
                page_id = make_page_id(image=image.id, group=group.id,
                           dir=direction.id, saccades_set=saccades_set.id)
                
                comp(render_page, view2result, outdir, page_id, job_id=page_id)
            
            for saccades_set in saccades_sets:
                table = "saccades_view_start_%s" % (image.id)
                exp_id = '%s_%s_%s' % (image.id, group.id, saccades_set.id)
                
                results = comp(bet_on_flies, options.db, samples, table, saccades_set,
                               job_id='lasvegas-' + exp_id + '-bet')
                page_id = exp_id
                comp(las_vegas_report, os.path.join(outdir, 'lasvegas'), page_id, results,
                              job_id='lasvegas-' + exp_id + '-report')
            
            
    db.close()
    

    comp(add_comparisons, data, outdir)
    

    filename = os.path.join(outdir, 'gui.html')   
    comp(create_gui_new, filename, menus)
    
    
    if options.interactive:
        # start interactive session
        compmake_console()
    else:
        # batch mode
        # try to do everything
        batch_command('make all')
        # exit with error if we are not done
        # (that is, make all failed for some reason)
        todo = list(parse_job_list('todo')) 
        if todo:
            logger.info('Still %d jobs to do.' % len(todo))
            sys.exit(-2)