Example #1
    def nextIteration(self, Lp):
        size = len(Lp[0]) + 1
        # generate all possible pairwise combinations from L(n-1)
        C = [list(val) for val in itertools.combinations(Lp, 2)]

        C = map(lambda a:reduce(lambda x,y:x.union(y), a, set()), C)

        # size check
        C = [val for val in C if len(val) == size]

        # remove duplicate sets
        tmp = list()
        for val in C:
            if val not in tmp:
                tmp.append(val)
        C = tmp

        # check no duplicate columns
        C = [s for s in C if len(set([self.cols[val] for val in s])) == size]

        # Prune Step We find all the n-1 length combinations of the items in
        # candidate set. If any of the n-1 length combinations of an item is not a
        # part of L(n-1) then we eliminate that item from the candidate set
        for val in list(C):
            if any(set(temp) not in Lp for temp in itertools.combinations(val, len(val) - 1)):
                C.remove(val)

        # Finding all the large frequency items
        L = [val for val in C if self.getSupport(val) >= self.minSupport]
        print "Generated " + `len(L)` + " " + `len(L[0])` + "-sized frequent itemsets"
        return L
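The prune step above relies on the Apriori property that every (n-1)-subset of a frequent itemset must itself be frequent. A minimal standalone sketch of that step (hypothetical helper, assuming the previous level is kept as a list of sets):

import itertools

def prune_candidates(candidates, prev_frequent):
    # keep a candidate only if every (n-1)-subset appears in the previous level
    pruned = []
    for cand in candidates:
        subsets = itertools.combinations(cand, len(cand) - 1)
        if all(set(sub) in prev_frequent for sub in subsets):
            pruned.append(cand)
    return pruned

print(prune_candidates([{1, 4}, {2, 3}], [{1}, {4}, {2}]))  # [{1, 4}]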
Example #2
def Process():
    
    for thisXConfiguration in product(range(MAX_X_CARDINALITY), repeat = BASE_CYCLE_SIZE):
        
        baseGraph = make_cycle(7)
        baseGraph, x_sets = ConstructGraph(thisXConfiguration, baseGraph)
        thisGraph = deepcopy(baseGraph)
        print(thisGraph.edges())
        #Try all possible combos of X and X_i+2 edges
        for xSetIndex in range(0,BASE_CYCLE_SIZE):
            s1 = x_sets[xSetIndex]
            s2 = x_sets[(xSetIndex + 2) % BASE_CYCLE_SIZE]
        
            for i in range(0,4):
                for thisEdgeCombo in combinations(product(s1,s2),i):
                    for thisEdge in thisEdgeCombo:
                        thisGraph.add_edge(thisEdge[0], thisEdge[1])      
                         
                    WriteToLogFile(FinalProcessGraph(thisGraph))
                
                    #for each of these combos, try it with all combos of X and X_i+5
                    thisGraphMoreJoins = deepcopy(thisGraph)
                    s1 = x_sets[xSetIndex]
                    s2 = x_sets[(xSetIndex + 5) % BASE_CYCLE_SIZE]
                    for i in range(0,4):
                        for thisEdgeCombo in combinations(product(s1,s2),i):
                            for thisEdge in thisEdgeCombo:
                                thisGraphMoreJoins.add_edge(thisEdge[0], thisEdge[1])      
                             
                            WriteToLogFile(FinalProcessGraph(thisGraphMoreJoins))
                    
                thisGraph = deepcopy(baseGraph)
                
        baseGraph.clear()
    return
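For reference, combinations(product(s1, s2), i) as used above enumerates every i-edge subset of the possible cross edges between two node sets; a tiny illustration with made-up node names:

from itertools import combinations, product

s1, s2 = ['a', 'b'], ['x', 'y']
# all 2-edge subsets of the 4 possible edges between s1 and s2
for edge_combo in combinations(product(s1, s2), 2):
    print(edge_combo)
# (('a', 'x'), ('a', 'y')), (('a', 'x'), ('b', 'x')), ... 6 subsets in total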
Example #3
 def done(self, one):
     customers, products = one.split(';')
     customers = customers.split(',')
     products = list(map(get_letters, products.split(',')))  # list() so it can be re-iterated for every customer
     for customer in customers:
         row = []
         letters = get_letters(customer)
         vowels = get_vowels(customer)
         for product in products:
             row.append(get_ss(product, letters, vowels))
         self.matrix.append(row)
     row_num = len(self.matrix)
     col_num = len(row)
     max_ss = 0
     less = row_num
     if row_num > col_num:
         less = col_num 
         for one in itertools.combinations(range(row_num), less):
             for two in itertools.permutations(range(col_num)):
                 total = self.get_total(one, two, less)
                 max_ss = total if max_ss < total else max_ss
     else:
         for one in itertools.combinations(range(col_num), less):
             for two in itertools.permutations(range(row_num)):
                 total = self.get_total(two, one, less)
                 max_ss = total if max_ss < total else max_ss
     return max_ss
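The nested combinations/permutations search above scores every assignment of rows to columns and keeps the best total. A self-contained sketch of the same idea on a hypothetical score matrix:

import itertools

matrix = [[1.0, 0.5], [0.25, 1.5], [0.75, 0.0]]   # 3 customers x 2 products
less = min(len(matrix), len(matrix[0]))
best = 0.0
for rows in itertools.combinations(range(len(matrix)), less):
    for cols in itertools.permutations(range(len(matrix[0]))):
        total = sum(matrix[r][c] for r, c in zip(rows, cols))
        best = max(best, total)
print(best)  # 2.5 (customer 0 -> product 0, customer 1 -> product 1)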
Example #4
def retornaSignificadoCadena(cadenaSimbolos,xmlEntradaObject,distractores,solucion,cantidadAlternativas,**kwuargs):
    listaCombinatoria=list()
    listaConjuntoAlternativas=list()
    listaConjuntoDistractores=None
    # Case where the string contains several commands
    if '+' in cadenaSimbolos:
        for simbolos in quitaDuplicados(cadenaSimbolos.split('+')):
            for conjunto in retornaSignificadoSimbolo(simbolos, xmlEntradaObject, distractores, solucion):
                if conjunto not in listaCombinatoria:
                    listaCombinatoria.append(conjunto)
        listaConjuntoDistractores=list(itertools.combinations(listaCombinatoria, cantidadAlternativas))
    else:
        listaConjuntoDistractores=list(itertools.combinations(retornaSignificadoSimbolo(cadenaSimbolos, xmlEntradaObject, distractores, solucion), cantidadAlternativas))
    if len(listaConjuntoDistractores)>0:
        for conjunto in listaConjuntoDistractores:
            conjunto=list(conjunto)
            conjunto.append(comprimeAlternativasSingle(solucion))
            if 'orderBy' in kwuargs.keys():
                if kwuargs['orderBy'].lower()=='largocreciente':
                    conjunto.sort(key=lambda x:len(x.glosa))
                elif kwuargs['orderBy'].lower()=='largodecreciente':
                    conjunto.sort(key=lambda x:len(x.glosa),reverse=True)
                elif kwuargs['orderBy'].lower()=='alfabeticocreciente':
                    conjunto.sort(key=lambda x:x.glosa.lower())
                elif kwuargs['orderBy'].lower()=='alfabeticodecreciente':
                    conjunto.sort(key=lambda x:x.glosa.lower(),reverse=True)
                else:
                    # Not sorted, so the solution alternative stays at the end
                    pass
                # Then add the set to the collection of valid alternatives
                listaConjuntoAlternativas.append(conjunto)
            # If the command is not given, do not sort, so the solution alternative stays at the end
            else:
                listaConjuntoAlternativas.append(conjunto)
    return listaConjuntoAlternativas
Example #5
def binomial_coefficient_calibration_set( ideals, measured, n,  *args, **kwargs):
	'''
	Produces an ensemble of calibration instances based on choosing
	sub-sets of the ideal/measurement lists from an overdetermined 
	calibration. This concept is described in 'De-embedding and 
	Un-terminating' by Penfield and Baurer. 
	
	so, if the calibration ideals and measured lists have length 'm' 
	then the resultant ensemble of calibrations is 'm choose n' long.
	
		
	takes:
		ideals: list of ideal Networks
		measured: list of measured Networks
		n: length of ideal/measured lists to pass to calibrations
			(must be < len(ideals) )
		*args,**kwargs: passed to Calibration initializer
	
	returns:
		cal_ensemble: a list of Calibration instances.
		
		

	'''
	if n >= len(ideals):
		raise ValueError('n must be smaller than the number of standards')
	
	ideal_subsets = \
		[ ideal_subset for ideal_subset in combinations(ideals,n)]
	measured_subsets = \
		[ measured_subset for measured_subset in combinations(measured,n)]

	return 	[Calibration(ideals = list(k[0]), measured=list(k[1]),\
		*args, **kwargs) for k in zip(ideal_subsets, measured_subsets)]
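As the docstring notes, the ensemble length is 'm choose n'; a quick check of the expected size (hypothetical numbers):

from math import comb

m, n = 6, 4            # six redundant standards, calibrations built from four
print(comb(m, n))      # 15 calibration instances in the resulting ensemble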
Example #6
def apriori_gen(L_kmin1, k):
    ''' Candidate generation for the apriori algorithm. First, L[k-1] is
    joined with itself to obtain a superset of the final set of candidates.
    The union of itemsets p and q from L[k-1] is inserted in the final set
    if they share their k-2 first items.

    Then, all itemsets c from C_k are pruned so that some (k-1)-subset of
    c is not present in L[k-1].

    L_kmin1: The set of all large k-1 itemsets.
    k:       Integer indicating sweep phase.

    Returns the candidate set for the current k.
    '''
    C_k = dict()

    list_of_sets = [set(key) for key in L_kmin1.iterkeys()]

    for p, q in combinations(L_kmin1, 2):
        if p[:k-1] == q[:k-1]:
            candidate = p[:k-1] + tuple(sorted((p[k-1], q[k-1])))
            # TODO perhaps implement this more efficiently? Useful for large
            # candidate sets, as generating all the subsets via combinations can be expensive
            if all(set(subset_c) in list_of_sets for subset_c in
                   combinations(candidate, k)):
                C_k[candidate] = 0
    return C_k
Example #7
    def AllBipartitionsAllSizes(data):
        """Given a dataset, it finds all bipartitions of all sizes.
        """
        # 1) FIND ALL THE COMBINATIONS UP TO LENGTH < len(data)/2
        # Nmax, setdata and iseven are not defined in this excerpt;
        # the values below are inferred from how they are used.
        Nmax = len(data) // 2
        setdata = set(data)
        iseven = (len(data) % 2 == 0)
        bipartitions = []
        for n in range(1, Nmax):
            # 1.1) Find all combinations of given size out of dataset
            combinations = [c for c in itertools.combinations(data, n)]
            # 1.2) Sort and find complementary sets
            for comb in combinations:
                complementary = tuple(setdata - set(comb))
                bipartitions.append((comb, complementary))

        # 2) FIND AND SORT THE BIPARTITIONS OF SIZE Nmax
        # 2.1) Find all combinations of size Nmax
        combinations = [c for c in itertools.combinations(data, Nmax)]
        # 2.2) Sort and find complementary sets
        ncombs = len(combinations)
        # Ignore repeated combinations if both subsets are of size = Nmax
        if iseven: ncombs = ncombs // 2

        for i in range(ncombs):
            comb = combinations[i]
            combset = set(comb)
            complementary = setdata - combset
            bipartitions.append((comb, tuple(complementary)))

        return bipartitions
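A small usage check, assuming the helper (with the inferred definitions above) is available at module level: a 4-element dataset has 2^(4-1) - 1 = 7 distinct bipartitions.

data = ('a', 'b', 'c', 'd')
print(len(AllBipartitionsAllSizes(data)))  # 7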
Example #8
def get_squares(grid, player_pt=None):
    # Return every found square in a grid
    # might be faster to pre-compute, but...ehhh.
    moves = grid.get_moves()
    # split into different players
    players = dict()
    for pt, player in moves:
        players[player] = players.get(player, []) + [pt]
    # for each, get every combination of 4
    squares = dict()
    # only do one player if necessary 
    if player_pt:
        player, pt = player_pt
        plays = players.get(player, [])
        possibilities = [[pt]+list(l) for l in list(combinations(plays, 3))]
        # calculate squares
        for square in possibilities:
            if check_square(square):
                squares[player] = squares.get(player, []) + [list(square)]
    else:
        # otherwise do all players
        for player, plays in players.items():
            possibilities = list(combinations(plays, 4))
            # calculate squares
            for square in possibilities:
                if check_square(square):
                    squares[player] = squares.get(player, []) + [list(square)]
    return squares
Example #9
    def find_all_hands(self):
        ## Make sure the table is dealt or errors will show up.
        #try:
        #    self.match_settings['table']
        #except:
        #    return [7, self.bots['me']['pocket']]

        hands = []

        ## Stderr full self variables
        stderr.write('full self: ' + str(vars(self)) + '\n')

        #hand = self.parse_cards(self.bots['me']['hand'])
        hand = self.bots['me']['pocket']
        #table = self.match_settings['table']
        table = Table(self.parse_cards(self.match_settings['table']))

        #stderr.write('hand: ' + str(hand) + '\n')
        #stderr.write('hand2:' + str())
        #stderr.write('table: ' + str(table) + '\n')

        for h in itertools.combinations(hand, 2):
            for t in itertools.combinations(table, 3):
                hands += [h + t]
        ranked_hands = [Ranker.rank_five_cards(hand) for hand in hands]
        #stderr.write(str(max(ranked_hands)) + '\n')
        return max(ranked_hands)
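The nested loops above enumerate C(len(hand), 2) * C(len(table), 3) five-card hands before ranking them; for example, two pocket cards and a five-card table give:

from math import comb

print(comb(2, 2) * comb(5, 3))  # 10 candidate hands to rank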
Example #10
def hadamard(n):
	# Hadamard matrices exist only for even n
	assert n % 2 == 0;
	if(n % 2 != 0):
		return prop.Fls();
		
	l = [];
	
	# assumption - the first row is all True
	l.append(prop.And(["v0s%d" % i for i in range(n)]));
	
	# generate the possible combinations so that n/2 entries are True and n/2 are False
	a = list(itertools.combinations(range(n), n/2)); #print a;
	#print a;
	# generate all possible rows | moz[i] holds all possible combinations of variables for row i
	moz = [[]];
	for i in range(1,n):
		moz.append([]);
		for j in range(len(a)):
			moz[i].append(prop.And([prop.Not("v"+str(i)+"s%d" % k) if (k not in a[j]) else "v"+str(i)+"s%d" % k for k in range(n)]));
	#print moz;
	# generate all possible matrices whose rows could form a Hadamard matrix
	#b = list(itertools.combinations(range(1,n*len(a)), n-1)); # from all possible rows we must pick n-1, since we already have the first one
	#print b;
	moz2 = [];
	
	b = list(itertools.combinations(range(len(moz[1])), n-1));
	for j in range(1):#range(len(b)):
		moz2.append(prop.And([l[0], prop.And([moz[i][b[j][i-1]] for i in range(1,n)])]));
	print moz2;
	return prop.Or(moz2);
	"""
Example #11
def evaluation(clusters, cluster_name):
    # Get pairs for gold standard
    gold_std = {}
    for name in clusters:
        key = clusters[name].pop()
        gold_std.setdefault(key, [])
        gold_std[key].append(int(name))
    gold_pair = []
    for key in gold_std:
        for i in combinations(gold_std[key], 2):
            gold_pair.append(sorted(i))

    # Get pairs for gotten result
    result_pair = []
    for i in cluster_name:
        for j in combinations(i, 2):
            result_pair.append(sorted(j))

    # Calculate matched amount
    match = 0
    for i in gold_pair:
        for j in result_pair:
            if i == j:
                match += 1

    precision = float(match)/float(len(result_pair))
    recall = float(match)/float(len(gold_pair))

    return [precision, recall]
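A tiny worked example of the pairwise precision/recall computed above, assuming combinations has been imported from itertools: gold clusters {1, 2} and {3}, predicted as one cluster {1, 2, 3}.

clusters = {'1': ['A'], '2': ['A'], '3': ['B']}   # name -> gold cluster label
cluster_name = [[1, 2, 3]]                        # predicted clustering
print(evaluation(clusters, cluster_name))         # [0.3333..., 1.0]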
Example #12
 def generateRules(self, tup, tmp=0, size=0, part=0):
     rules = []
     if tmp == 0:  # for template 1
         for i in range(1, len(tup)):
             combines = itertools.combinations(list(tup), i)
             for e in combines:
                 rules.append((e, tuple(set(tup) - set(e))))
     else:
         if part == 0:
             if len(tup) < size:
                 pass
             else:
                 for i in range(1, len(tup)):
                     combines = itertools.combinations(list(tup), i)
                     for e in combines:
                         rules.append((e, tuple(set(tup) - set(e))))
         elif part == 1:
             for i in range(size, len(tup)):
                 combines = itertools.combinations(list(tup), i)
                 for e in combines:
                     rules.append((e, tuple(set(tup) - set(e))))
         else:
             for i in range(size, len(tup)):
                 combines = itertools.combinations(list(tup), i)
                 for e in combines:
                     rules.append((tuple(set(tup) - set(e)), e))
     return rules
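For template 1 (tmp=0) the method above emits every non-empty proper split of the itemset into an antecedent and a consequent; a standalone sketch of that split:

import itertools

def rule_splits(itemset):
    rules = []
    for i in range(1, len(itemset)):
        for lhs in itertools.combinations(itemset, i):
            rhs = tuple(x for x in itemset if x not in lhs)
            rules.append((lhs, rhs))
    return rules

print(rule_splits(('A', 'B', 'C'))[:2])  # [(('A',), ('B', 'C')), (('B',), ('A', 'C'))]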
Example #13
def createboards(card1,card2,card3=None,card4=None,flop1=(None,None),flop2=(None,None),flop3=(None,None)):
    '''Returns all possible 5 card boards for 2-4 hole cards. You can also
       specify a flop, and instead create all possible turn/river cards.'''
    deck = createdeck()
    # Remove the four specified cards from the deck.
    deck.remove(card1)
    deck.remove(card2)
    if card3:
        deck.remove(card3)
        deck.remove(card4)
    # Check to see if a flop was specified. If so, remove those 3 from the deck.
    if flop1 != (None,None):
        deck.remove(flop1)
        deck.remove(flop2)
        deck.remove(flop3)
        c = list(combinations(deck,2))
        boards = {}
        for j in range(len(c)):
            boards[j] = c[j]
        return boards
    # Using itertools.combinations we create all 5 card combinations from
    # the remaining cards in the deck. Then place each of those
    # combinations into a dictionary for quick lookup.        
    c = list(combinations(deck,3))
    boards = {}
    for j in range(len(c)):
        boards[j] =c[j]
    return boards
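A quick sanity check on the sizes involved: with only two hole cards removed the deck has 50 cards, and with a flop also removed it has 47, so the combinations calls above produce:

from math import comb

print(comb(50, 3))  # 19600 boards when no flop is specified
print(comb(47, 2))  # 1081 turn/river combinations once a flop is fixed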
Example #14
def calculate_switch_stats(mappable, linkage_map_file, linkage_map_format, MST_grouping_threshold):
    genotypes_of_locus = mappable
    if linkage_map_format.lower() == 'mst':
        ini_map, loci_on_lg = parse_map_file_MST(linkage_map_file)
    elif linkage_map_format.lower() == 'rqtl':   
        ini_map, loci_on_lg = parse_map_file_rqtl(linkage_map_file)
    else:
        raise ValueError("unknown linkage_map_format")
    
    int_arr = convert_genotypes_to_int_array(genotypes_of_locus, ini_map)
    num_loci = int_arr.shape[0]
    num_pairs =  int((num_loci * (num_loci-1))/2)
    pairs = itertools.combinations(int_arr, 2)
    R = numpy.fromiter(getR(pairs), dtype = numpy.float64, count = num_pairs)
    pairs = itertools.combinations(int_arr, 2)
    NR = numpy.fromiter(getNR(pairs), dtype = numpy.float64, count = num_pairs)
    ml_R_frac = get_ml_R_frac(R = R, NR = NR)
    Z = get_LOD(R = R, NR = NR, R_frac = ml_R_frac)
    NR_matrix = get_NR_matrix(NR)
    #rf = get_rf_matrix(ml_R_frac)
    lod = get_lod_matrix(Z)
    index_of_lg = get_index_of_LG(loci_on_lg)
    lgs_longer_than_1 = find_LGs_with_multiple_loci(index_of_lg, loci_on_lg)
    #mean_rf = get_LG_pairwise_mean_rf(lgs_longer_than_1, rf, index_of_lg)
    #mean_lod = get_LG_pairwise_mean_lod(lgs_longer_than_1,lod, index_of_lg)
    sum_lod = get_LG_pairwise_sum_lod(lgs_longer_than_1,lod, index_of_lg)
    sq_sum_lod = get_square_form(sum_lod, lgs_longer_than_1)
    n = len(mappable.items()[0][1]) #number of individuals
    NR_threshold = get_threshold_recombinants_for_same_LGs(n, MST_grouping_threshold)
    NR_under_threshold = get_LG_pairwise_count_NR_threshold(lgs_longer_than_1, NR_matrix, index_of_lg, threshold = NR_threshold)
    sq_NR_matrix = get_square_form(NR_under_threshold, lgs_longer_than_1)
    return(ini_map, sq_sum_lod, sq_NR_matrix, R, NR, lgs_longer_than_1)
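count=num_pairs is safe to pass to numpy.fromiter above because itertools.combinations over num_loci rows yields exactly num_loci*(num_loci-1)/2 pairs:

from itertools import combinations

num_loci = 5
assert sum(1 for _ in combinations(range(num_loci), 2)) == num_loci * (num_loci - 1) // 2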
Example #15
def n_table(n):
    i = tuple(range(1, n + 1))
    table = {i: 0}
    ring = set([i])
    new_ring = set()
    perms = set([x for x in permutations(range(1, n + 1))])
    n_fac = len(perms)
    while len(table) < n_fac:
        for p in ring:
            dep = table[p]
            for i, j in combinations(range(n), 2):
                pr = p_rev(p, i, j)
                if not pr in table:
                    new_ring.add(pr)
                    table[pr] = dep + 1
        ring, new_ring = new_ring, set()
    perms = perms - set(table.keys())
    new_perms = perms.copy()
    # Search from permutation to table entry - unused
    while len(table) < n_fac:
        for p in perms:
            if p in table:
                continue
            best_dist = float("inf")
            for i, j in combinations(range(n), 2):
                pr = p_rev(p, i, j)
                if pr in table and table[pr] + 1 < best_dist:
                    best_dist = table[pr] + 1
            if best_dist < float("inf"):
                table[p] = best_dist
                new_perms.remove(p)
        perms = new_perms
    return table
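p_rev is not shown in this excerpt; a hypothetical definition consistent with how it is used above (reversing the slice between positions i and j, with i < j coming from combinations) would be:

def p_rev(p, i, j):
    # reverse the segment of permutation p between positions i and j (inclusive)
    return p[:i] + tuple(reversed(p[i:j + 1])) + p[j + 1:]

print(p_rev((1, 2, 3, 4), 1, 3))  # (1, 4, 3, 2)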
Example #16
    def numSimilarGroups(self, A):
        def isSimilar(a, b):
            diff = 0
            for x, y in itertools.izip(a, b):
                if x != y:
                    diff += 1
                    if diff > 2:
                        return False
            return diff == 2

        N, L = len(A), len(A[0])
        union_find = UnionFind(N)
        if N < L*L:
            for (i1, word1), (i2, word2) in \
                    itertools.combinations(enumerate(A), 2):
                if isSimilar(word1, word2):
                    union_find.union_set(i1, i2)
        else:
            buckets = collections.defaultdict(list)
            lookup = set()
            for i in xrange(len(A)):
                word = list(A[i])
                if A[i] not in lookup:
                    buckets[A[i]].append(i)
                    lookup.add(A[i])
                for j1, j2 in itertools.combinations(xrange(L), 2):
                    word[j1], word[j2] = word[j2], word[j1]
                    buckets["".join(word)].append(i)
                    word[j1], word[j2] = word[j2], word[j1]
            for word in A:  # Time:  O(n * l^4)
                for i1, i2 in itertools.combinations(buckets[word], 2):
                    union_find.union_set(i1, i2)
        return union_find.size()
Example #17
def create_next_candidates(prev_candidates, length):
    """
    Returns the apriori candidates as a list.

    Arguments:
        prev_candidates -- Previous candidates as a list.
        length -- The lengths of the next candidates.
    """
    # Solve the items.
    item_set = set()
    for candidate in prev_candidates:
        for item in candidate:
            item_set.add(item)
    items = sorted(item_set)

    # Create the temporary candidates. These will be filtered below.
    tmp_next_candidates = (frozenset(x) for x in combinations(items, length))

    # Return all the candidates if the length of the next candidates is 2
    # because their subsets are the same as items.
    if length < 3:
        return list(tmp_next_candidates)

    # Filter candidates that all of their subsets are
    # in the previous candidates.
    next_candidates = [
        candidate for candidate in tmp_next_candidates
        if all(
            frozenset(x) in prev_candidates
            for x in combinations(candidate, length - 1))
    ]
    return next_candidates
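A small usage sketch: three frequent 2-itemsets produce a single 3-item candidate, since every 2-subset of it is present.

prev = [frozenset(['a', 'b']), frozenset(['a', 'c']), frozenset(['b', 'c'])]
print(create_next_candidates(prev, 3))  # [frozenset({'a', 'b', 'c'})]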
Example #18
    def add_hard_constraints(self, items, get_similarity):
        """Add hard constraints

        If sim(I, J) = 0, then I|J.
        If sim(I, J) = 1, then I~J.
        """

        if self.solver == 'gurobi':

            for I, J in itertools.combinations(items, 2):

                s = get_similarity(I, J)
                if s in [0, 1]:

                    constr = self.x[I, J] == s
                    self.problem.addConstr(constr)

            self.problem.update()

        if self.solver == 'pulp':

            for I, J in itertools.combinations(items, 2):

                s = get_similarity(I, J)
                if s in [0, 1]:

                    name = "Hard (%s / %s)" % (I, J)
                    constr = self.x[I, J] == s
                    self.problem += constr, name

        return self
Example #19
    def extract_buoys_from_features(self, features):

        if len(features) < 3:
            return None, None

        for tripplet in combinations(features, 3):

            # Sort features by x value
            feature1, feature2, feature3 = sorted(tripplet, key=lambda x: x[0])

            # Test that center feature is in middle of outside features
            center = (feature1[0] + feature3[0]) / 2
            if abs(center - feature2[0]) > CENTER_ERROR_PERCENT * (feature3[0] - feature1[0]):
                continue

            # Go through every pair
            tripplet_is_valid = True
            for a, b in combinations((feature1, feature2, feature3), 2):

                # Ignore tripplets with pairs with large y separations
                if abs(a[1] - b[1]) > MAX_Y_SEPARATION:
                    tripplet_is_valid = False
                    break
            if not tripplet_is_valid:
                continue

            return feature2, feature3[0] - feature1[0]

        return None, None
Example #20
 def hill_climb(self, chairs_per_table):
     """
     Swaps seats until no move will increase the score.
     Always take the best move available
     """
     names = pad_names(self.friend_counter.keys(), chairs_per_table)
     tables = partition_randomly(names, chairs_per_table)
     num_tables = len(tables)
     pinnacle_score = 0
     while True:
         best_indices, best_score = [], 0
         for t1, t2 in itertools.combinations(range(num_tables), 2):
             for s1, s2 in itertools.combinations(range(chairs_per_table), 2):
                 indices = [t1, s1, t2, s2]
                 swap_seats(tables, indices)
                 score = self.total_value(tables)
                 if score > best_score:
                     best_score = score
                     best_indices = indices
                 swap_seats(tables, indices)
         if best_score > pinnacle_score:
             pinnacle_score = best_score
             swap_seats(tables, best_indices)
         else:
             return tables, pinnacle_score
Example #21
    def add_transitivity_constraints(self, items):
        """Add transitivity contraints

        For any triplet (I,J,K), I~J and J~K implies I~K
        """

        if self.solver == 'gurobi':

            for I, J, K in itertools.combinations(items, 3):

                constr = self.x[J, K]+self.x[I, K]-self.x[I, J] <= 1
                self.problem.addConstr(constr)

                constr = self.x[I, J]+self.x[I, K]-self.x[J, K] <= 1
                self.problem.addConstr(constr)

                constr = self.x[I, J]+self.x[J, K]-self.x[I, K] <= 1
                self.problem.addConstr(constr)

            self.problem.update()

        if self.solver == 'pulp':

            for I, J, K in itertools.combinations(items, 3):

                name = "Transitivity (%s / %s / %s)" % (I, K, J)
                self.problem += self.x[J, K]+self.x[I, K]-self.x[I, J] <= 1, name

                name = "Transitivity (%s / %s / %s)" % (I, J, K)
                self.problem += self.x[I, J]+self.x[I, K]-self.x[J, K] <= 1, name

                name = "Transitivity (%s / %s / %s)" % (J, I, K)
                self.problem += self.x[I, J]+self.x[J, K]-self.x[I, K] <= 1, name

        return self
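Why the three inequalities above encode transitivity: each one rules out exactly the binary assignment in which two of the pairwise variables are 1 and the implied third is 0. A quick brute-force check of one of them:

from itertools import product

for x_ij, x_jk, x_ik in product((0, 1), repeat=3):
    if x_ij + x_jk - x_ik > 1:
        print(x_ij, x_jk, x_ik)  # only (1, 1, 0) violates the constraint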
Example #22
def main():
	pyramid_sum = []
	for i in range(1, 10):
		for j in range(1, 5):
			pyramid_sum.append(j)

	cubic_sum = []
	for i in range(1, 7):
		for j in range(1, 7):
			cubic_sum.append(j)

	pyramid_dic = {}
	for i in itertools.combinations(pyramid_sum, 9):
		key = sum(i)
		if key in pyramid_dic:
			pyramid_dic[key] += 1
		else:
			pyramid_dic[key] = 1

	cubic_dic = {}
	for i in itertools.combinations(cubic_sum, 6):
		key = sum(i)
		if key in cubic_dic:
			cubic_dic[key] += 1
		else:
			cubic_dic[key] = 1
	
	win = 0
	for k1, v1 in pyramid_dic.items():
		for k2, v2 in cubic_dic.items():
			if k1 > k2:
				win += pyramid_dic[k1]/sum(pyramid_dic.values()) * cubic_dic[k2]/sum(cubic_dic.values())

	print(win)
Example #23
def lsh_combinations(width, bandwidth, ramp):
    """Generate indices for overlapping LSH band selectors

    :param width: expected signature length
    :type width: int
    :param bandwidth: band size
    :type bandwidth: int
    :param ramp: For each integer value between 1 and bandwidth, return
                 (preferably uniformly) sampled combinations such that their
                 number corresponds to (width choose ramp) combinations
    :type ramp: int
    :return: a sequence of tuples with elements representing indices
    :rtype: list

    """
    master = list(combinations(range(width), bandwidth))
    cols = set(range(bandwidth))
    left_cols = list(combinations(cols, ramp))
    right_cols = [tsorted(cols - s) for s in imap(set, left_cols)]
    left_getters = create_getters(left_cols)
    right_getters = create_getters(right_cols)
    mapping = collections.defaultdict(list)
    for get_left, get_right in izip(left_getters, right_getters):
        for element in master:
            mapping[tsorted(wrap_scalar(get_left(element)))].append(
                tsorted(wrap_scalar(get_right(element))))
    return sorted(set(tsorted(k + v[0]) for k, v in mapping.iteritems()))
Example #24
def bsuccessors(state):
    """Return a dict of {state:action} pairs. A state is a (here, there, t) tuple,
    where here and there are frozensets of people (indicated by their times) and/or
    the 'light', and t is a number indicating the elapsed time. Action is represented
    as a tuple (person1, person2, arrow), where arrow is '->' for here to there and
    '<-' for there to here."""
    here, there, t = state
    # your code here
    import itertools
    # choose 2 from here to there, choose 1 from there to here
    successor = {}

    if 'light' in here:
        iterations = itertools.combinations(here.difference(['light']), 1)
        for tt in iterations:
            i = tt[0]
            successor[(frozenset(here.difference([i, 'light'])), frozenset(there.union([i, 'light'])), i+t)] = (i, i, '->')

    elif 'light' in there:
        iterations = itertools.combinations(there.difference(['light']), 1)
        for tt in iterations:
            i = tt[0]
            successor[(frozenset(here.union([i, 'light'])), frozenset(there.difference([i, 'light'])), i+t)] = (i, i, '<-')

    return successor
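A usage sketch of the one-person moves implemented above, with both people (identified by their crossing times) starting on the 'here' side:

state = (frozenset([1, 2, 'light']), frozenset(), 0)
succs = bsuccessors(state)
print(len(succs))  # 2: person 1 crosses (elapsed time 1) or person 2 crosses (elapsed time 2)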
Example #25
File: PE_0090.py Project: mbh038/PE
def p90():
    
    t=time.clock()
    
    digits=set([0,1,2,3,4,5,6,7,8,9])
    
    pairs=set()
    for set1 in it.combinations(digits,6):
        set1x=set(set1)
        if 6 in set1x: set1x.add(9)
        if 9 in set1x: set1x.add(6)
        for set2 in it.combinations(digits,6):
            set2x=set(set2)
            if 6 in set2x: set2x.add(9)
            if 9 in set2x: set2x.add(6)
            squares={'01':0,'04':0,'09':0,'16':0,'25':0,'36':0,'49':0,'64':0,'81':0}
            for d1 in set1x:
                for d2 in set2x:
                    n1=str(d1)+str(d2)
                    n2=str(d2)+str(d1)
                    if n1 in squares:
                        squares[n1]+=1
                    if n2 in squares:
                        squares[n2]+=1
            if all([v>0 for k,v in squares.items()]):
                pairs.add(tuple(sorted((set1,set2))))
                
    print(len(pairs),time.clock()-t)
Example #26
def complement_edges(G):
    """Returns only the edges in the complement of G

    Parameters
    ----------
    G : NetworkX Graph

    Yields
    ------
    edge : tuple
        Edges in the complement of G

    Example
    -------
    >>> G = nx.path_graph((1, 2, 3, 4))
    >>> sorted(complement_edges(G))
    [(1, 3), (1, 4), (2, 4)]
    >>> G = nx.path_graph((1, 2, 3, 4), nx.DiGraph())
    >>> sorted(complement_edges(G))
    [(1, 3), (1, 4), (2, 1), (2, 4), (3, 1), (3, 2), (4, 1), (4, 2), (4, 3)]
    >>> G = nx.complete_graph(1000)
    >>> sorted(complement_edges(G))
    []
    """
    if G.is_directed():
        for u, v in it.combinations(G.nodes(), 2):
            if v not in G.adj[u]:
                yield (u, v)
            if u not in G.adj[v]:
                yield (v, u)
    else:
        for u, v in it.combinations(G.nodes(), 2):
            if v not in G.adj[u]:
                yield (u, v)
Example #27
def powerset(A,nonTrivial=False):
	''' powerset(A) -> iterator -- yields all subsets of A as tuples; if nonTrivial=True, yields all subsets except the empty set and A itself '''
	from itertools import chain, combinations
	if nonTrivial:
		return chain.from_iterable( combinations(A,i) for i in range(1,len(A)) )
	else:	
		return chain.from_iterable( combinations(A,i) for i in range(0,len(A)+1) )
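Usage sketch (element order inside the tuples depends on set iteration order):

print(list(powerset({1, 2, 3}, nonTrivial=True)))
# [(1,), (2,), (3,), (1, 2), (1, 3), (2, 3)]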
Example #28
def get_candidate(unique_elements,count_k):
    output_list = []
    unique_item=[]
    dict_all_elements = {}
    for element in unique_elements:
        element.sort()
        dict_all_elements[tuple(element)] = 1
        for inner_element in element:
            if not inner_element in unique_item:
                unique_item.append(inner_element)
    frequentk_itemset=list(itertools.combinations(unique_item, count_k))

    if count_k > 1:
        for item in frequentk_itemset:
            item=set(item)
            is_valid = True
            possible_combination=list(itertools.combinations(item, count_k-1))
            for combination_item in possible_combination:
                combination_item = list(combination_item)
                combination_item.sort()
                if tuple(combination_item) not in dict_all_elements:
                    is_valid = False
                    break
            if is_valid:
                output_list.append(list(item))
    else:
        for item in frequentk_itemset:
            item=list(item)
            output_list.append(item)
    return output_list
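A small usage check: three frequent 2-itemsets yield one 3-item candidate because all of its 2-subsets are present (items are sorted here since the function returns them in set order):

prev = [['A', 'B'], ['A', 'C'], ['B', 'C']]
print([sorted(c) for c in get_candidate(prev, 3)])  # [['A', 'B', 'C']]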
Example #29
def close_obj(coord, size):
    coord = coord_pack(coord)
    ba, ab = np.indices((len(coord), len(coord)), dtype=np.int16)
    sep = coord[ab].separation(coord[ba])
    c = np.where(sep < size)
    close = np.where(c[0] < c[1])
    pairs = np.vstack((c[0][close],c[1][close]))
    samefov = np.delete(np.arange(len(coord), dtype=np.int16), np.hstack((c[0][close],c[1][close])))
    samefov = samefov.reshape(len(samefov),1).tolist()
    n, m = np.unique(pairs[0], return_counts=True)
    y = np.in1d(pairs[0], n[np.where(m == 1)])
    n1, m1 = np.unique(pairs[1], return_counts=True)
    y1 = np.in1d(pairs[1], n1[np.where(m1 == 1)])
    samefov = samefov + pairs.T[y*y1].tolist()
    q = pairs.T[~(y*y1)]  # invert the boolean mask with ~ (unary minus is not supported on bool arrays)
    for z in np.unique(q.T[0]):
        b = q.T[1][np.where(q.T[0] == z)]
        combs = []
        for i in np.arange(len(b),0, -1, dtype=np.int16):
            els = [[z] + list(x) for x in itertools.combinations(b, i)]
            combs.append(els)
        for i in combs:
            for a in i:
                d = False
                w = [list(x) for x in itertools.combinations(a,2)]
                if not np.all([i in q.tolist() for i in w]):
                    continue
                for v in samefov:
                    e = all(k in v for k in a)
                    d = bool(d + e)
                if d == False:
                    samefov = samefov + [a]
    return np.sort(samefov).tolist()
Example #30
def gen_policy_chain_allpair(filename, k):
	"""
	update Dec 1, 2012
	create a policy chain for every pair of hosts;
		for each pair, randomly pick K logical sequences
	"""
	debug = 1
	f = open(filename, "w+")
	f.write("## Ingress Egress PolicyChain Volume\n")

	allpair = list(itertools.combinations(HOST_LIST, 2))
	allmbpair = list(itertools.combinations(list(set(MB_TYPE_ATTACH)), MB_CHAIN_DEPTH))
	#print "All mb pair ", allmbpair, MB_TYPE_ATTACH
	for pair_str in allpair:
		(frm, to) = pair_str
		pair = frm + " " + to

		mb_pairs = random.sample(allmbpair, k)
		#print "sample ", mb_pairs
		for mb_pair in mb_pairs:
			f.write(pair + " ")
			f.write(",".join(mb_pair) + " ")

			# lookup the traffic matrix to get the volume
			#if key in TRAFFIC_MATRIX.keys():
			volume = 0
			tm_pair = "%d %d" % (convert_node2id(frm), convert_node2id(to))

			if tm_pair in TRAFFIC_MATRIX.keys():
				volume = TRAFFIC_MATRIX[tm_pair]
			f.write("%d" % volume)	
			f.write("\n")
	f.close();
Example #31
def optimize(inputFile, outputFile):
    # Read input file
    sections = pd.read_excel(inputFile,
                             sheet_name='Classes').set_index('SECTION')
    rooms = pd.read_excel(inputFile, sheet_name='Rooms', index_col=1)
    block = pd.read_excel(inputFile, sheet_name='Black Outs')

    # Step 1: Pair up half-semester sections
    print("Step 1: Pair up half-semester sections")
    # šŗ : set of first-half-semester sections
    # š»: set of second-half-semester sections
    G = sections[sections['Sem Type'] == 1].index
    H = sections[sections['Sem Type'] == 2].index

    # phi: set of section pairs (g, h) that do not meet at the same time or with the same frequency every week
    # seatsDiffDF: seats offered difference between a first-half-semester section and a second-half-semester section
    # sameProfDF: whether a first-half-semester section and a second-half-semester section have the same instructor
    phi = []
    seatsDiffDF = pd.DataFrame(index=G, columns=H)  #u
    sameProfDF = pd.DataFrame(index=G, columns=H)  #m
    for g in G:
        for h in H:
            if sections.loc[g,'FIRST DAYS']!=sections.loc[h,'FIRST DAYS'] or \
            sections.loc[g,'FIRST BEGIN TIME']!=sections.loc[h,'FIRST BEGIN TIME'] or\
            sections.loc[g,'FIRST END TIME']!=sections.loc[h,'FIRST END TIME']:
                phi.append((g, h))
            seatsDiffDF.loc[g, h] = max(
                sections.loc[g, 'REG COUNT'] - sections.loc[h, 'REG COUNT'],
                sections.loc[h, 'REG COUNT'] - sections.loc[g, 'REG COUNT'])
            if sections.loc[g,'FIRST INSTRUCTOR']==sections.loc[h,'FIRST INSTRUCTOR'] or\
            sections.loc[g,'FIRST INSTRUCTOR']==sections.loc[h,'SECOND INSTRUCTOR'] or\
            sections.loc[g,'SECOND INSTRUCTOR']==sections.loc[h,'SECOND INSTRUCTOR'] or\
            sections.loc[g,'SECOND INSTRUCTOR']==sections.loc[h,'FIRST INSTRUCTOR']:
                sameProfDF.loc[g, h] = 1
    sameProfDF = sameProfDF.fillna(0)

    # Set up GUROBI model for pairing up half-semester sections
    alpha = 5  # extra weight on same professor pairs
    mod1 = Model()
    l = mod1.addVars(G, H, vtype=GRB.BINARY)
    paired_secs = sum(l[g, h] for g in G for h in H)
    paired_secs_same_prof = sum(l[g, h] * sameProfDF.loc[g, h] for g in G
                                for h in H)
    tot_seats_diff = sum(l[g, h] * seatsDiffDF.loc[g, h] for g in G for h in H)
    # Maximize the number of paired sections, with extra weights put on pair of sections taught by the same professor
    mod1.setObjective(paired_secs + alpha * paired_secs_same_prof,
                      sense=GRB.MAXIMIZE)
    # Only sections that meet at the same time and frequency every week are allowed to be paired
    for pair in phi:
        mod1.addConstr(l[pair[0], pair[1]] == 0)
    # Each section can only be paired once
    for h in H:
        mod1.addConstr(sum(l[g, h] for g in G) <= 1)
    for g in G:
        mod1.addConstr(sum(l[g, h] for h in H) <= 1)
    mod1.optimize()

    # Print pairing results
    print('# paired sections', paired_secs.getValue())
    print('# paired sections w/ same instructor',
          paired_secs_same_prof.getValue())
    print('Total seats offered differences', tot_seats_diff.getValue())

    # Step 2: Assign classrooms
    print(
        "-------------------------------------------------------------------")
    print("Step 2: Assign classrooms")
    sections['Core'] = sections['Type General'].apply(
        lambda x: 1 if x != 'Elective' else 0)
    secs = sections[[
        'Course',
        'FIRST DAYS',
        'FIRST BEGIN TIME',
        'FIRST END TIME',
        'FIRST INSTRUCTOR',
        'SECOND INSTRUCTOR',
        'REG COUNT',
        'Core',
    ]].copy()

    C = secs.Course.unique()  # C: List of courses
    courseSecDict = {}  # I_c: set of sections that belong to course c in C
    courseSeatsDict = {}  # r_c: total seats offered by course c in C
    courseCoreDict = {}  # w_c: whether course c is a core course
    for course in C:
        courseSecDict[course] = set(secs[secs['Course'] == course].index)
        courseSeatsDict[course] = secs[secs['Course'] ==
                                       course]['REG COUNT'].sum()
        courseCoreDict[course] = secs[secs['Course'] ==
                                      course]['Core'].unique()[0]

    # P: set of professors
    P = set(
        list(secs['FIRST INSTRUCTOR'].unique()) +
        list(secs['SECOND INSTRUCTOR'].unique()))
    P.remove(np.nan)
    profSecDict = {}  # I_p: set of sections taught by professor p in P
    for prof in P:
        profSecDict[prof]=list(set(list(secs[secs['FIRST INSTRUCTOR']==prof].index)+\
                              list(secs[secs['SECOND INSTRUCTOR']==prof].index)))

    # Combine paired half-semester sections into full semester sections
    # Unpaired half-semester sections are treated as full-semester sections
    for g in G:
        for h in H:
            if l[g, h].x == 1:
                sec_name = str(g) + '/' + str(h)
                course1 = secs.loc[g, 'Course']
                course2 = secs.loc[h, 'Course']
                # Update course-section dictionary
                courseSecDict[course1].add(sec_name)
                courseSecDict[course2].add(sec_name)
                courseSecDict[course1].remove(g)
                courseSecDict[course2].remove(h)
                # Update professor-section dictionary
                for prof in [
                        secs.loc[g, 'FIRST INSTRUCTOR'],
                        secs.loc[g, 'SECOND INSTRUCTOR']
                ]:
                    if isinstance(prof, str):
                        profSecDict[prof].append(sec_name)
                        profSecDict[prof].remove(g)
                for prof in [
                        secs.loc[h, 'FIRST INSTRUCTOR'],
                        secs.loc[h, 'SECOND INSTRUCTOR']
                ]:
                    if isinstance(prof, str):
                        profSecDict[prof].append(sec_name)
                        profSecDict[prof].remove(h)
                secs.loc[g, 'Combined'] = 1
                secs.loc[h, 'Combined'] = 1
                secs.loc[sec_name, 'FIRST DAYS'] = secs.loc[g, 'FIRST DAYS']
                secs.loc[sec_name,
                         'FIRST BEGIN TIME'] = secs.loc[g, 'FIRST BEGIN TIME']
                secs.loc[sec_name,
                         'FIRST END TIME'] = secs.loc[g, 'FIRST END TIME']

    final_secs = secs[secs['Combined'].isnull()][[
        'FIRST DAYS', 'FIRST BEGIN TIME', 'FIRST END TIME'
    ]].copy()
    I = final_secs.index  # I: list of sections
    K = rooms.index  # K: list of classrooms
    roomLimit = 30
    # Limit classroom capacity to 30 person
    cap = rooms['6ft'].fillna(10).apply(
        lambda x: roomLimit if x > roomLimit else x).astype(int)  #m_k

    # This function translates time into minutes since 12:00am for easy comparison
    def minutes(text):
        return int(text[:2]) * 60 + int(text[3:5])

    # š‘‡ : set of section pairs (š‘–1,š‘–2) that are held at the same or overlapping time slots.
    # š¹ : set of section pairs (š‘–1,š‘–2) such that section š‘–1 and š‘–2 are taught by the same professor and the interval between two sections is less than 20 minutes.
    T = []
    F = []
    secPairList = list(combinations(I, 2))
    for (i, ii) in secPairList:
        start1 = final_secs.loc[i, 'FIRST BEGIN TIME']
        start2 = final_secs.loc[ii, 'FIRST BEGIN TIME']
        end1 = final_secs.loc[i, 'FIRST END TIME']
        end2 = final_secs.loc[ii, 'FIRST END TIME']
        day1 = final_secs.loc[i, 'FIRST DAYS']
        day2 = final_secs.loc[ii, 'FIRST DAYS']
        if day1 == day2 or day1 in day2 or day2 in day1:
            if (minutes(start1)>=minutes(start2) and minutes(start1)<minutes(end2)) or\
            (minutes(start2)>=minutes(start1) and minutes(start2)<minutes(end1)):
                T.append((i, ii))
            elif (minutes(start1)-minutes(end2)>=0 and minutes(start1)-minutes(end2)<=20) or \
            (minutes(start2)-minutes(end1)>=0 and minutes(start2)-minutes(end1)<=20):
                profList1 = set()
                profList2 = set()
                for prof, sec in profSecDict.items():
                    if i in sec:
                        profList1.add(prof)
                    if ii in sec:
                        profList2.add(prof)
                if len(profList1 & profList2) > 0:
                    F.append((i, ii))

    # š‘ : set of section-classroom pairs (š‘–,š‘˜) such that the time when section š‘– is held is blocked out for classroom š‘˜
    N = []
    for idx in block.index:
        k = block.loc[idx, 'Room']
        block_start = minutes(block.loc[idx, 'Start Time'])
        block_end = minutes(block.loc[idx, 'End Time'])
        block_days = block.loc[idx, 'Days']
        for day in block_days:
            pop_slots = final_secs[final_secs['FIRST DAYS'].str.contains(
                day)].copy()
            pop_slots['Block Start'] = block_start
            pop_slots['Block End'] = block_end
            pop_slots['Start'] = pop_slots['FIRST BEGIN TIME'].apply(
                lambda x: minutes(x))
            pop_slots['End'] = pop_slots['FIRST END TIME'].apply(
                lambda x: minutes(x))
            pop_slots['Blocked'] = pop_slots[[
                'Block Start', 'Block End', 'Start', 'End'
            ]].apply(lambda x: 1 if (x[2] >= x[0] and x[2] < x[1]) or
                     (x[0] >= x[2] and x[0] < x[3]) else 0,
                     axis=1)
            for i in pop_slots[pop_slots['Blocked'] == 1].index:
                N.append((i, k))

    B = rooms.Building.unique().tolist()  # B : set of buildings
    bldgRoomDict = {}  # K_b: set of classrooms in building b in B
    for b in B:
        bldgRoomDict[b] = rooms[rooms['Building'] == b].index.tolist()

    sigma1 = 2  # base weight for Hybrid50 mode
    sigma2 = 5  # additional weight for Hybrid50 mode if it's a core course
    sigma3 = 1  # weight for Hybrid33 mode
    e = 10000  # arbitrary large number (larger than the sum of seats offered for any given course)

    # Set up GUROBI model to assign classrooms
    start_time = pd.datetime.now()
    mod2 = Model()
    x = mod2.addVars(I, K, vtype=GRB.BINARY)
    y = mod2.addVars(C, vtype=GRB.BINARY)
    z = mod2.addVars(C, vtype=GRB.BINARY)
    q = mod2.addVars(I, B, vtype=GRB.BINARY)
    hybrid50_seats = sum(y[c] * courseSeatsDict[c] for c in C)
    hybrid50_core_seats = sum(y[c] * courseCoreDict[c] * courseSeatsDict[c]
                              for c in C)
    hybrid33_seats = sum((z[c] - y[c]) * courseSeatsDict[c] for c in C)
    # Maximize the seats in Hybrid50 and Hybrid33 mode classes (weighted differently based on 50/33, core/elective)
    mod2.setObjective(sigma1 * hybrid50_seats + sigma2 * hybrid50_core_seats +
                      sigma3 * hybrid33_seats,
                      sense=GRB.MAXIMIZE)
    print('Model initiated and variables added:',
          pd.datetime.now() - start_time,
          flush=True)

    #1 Classroom capacity
    start_time = pd.datetime.now()
    for c in C:
        mod2.addConstr(2*sum(x[i,k]*cap[k] for i in courseSecDict[c] for k in K)\
                       >=courseSeatsDict[c]*y[c])
        mod2.addConstr(3 * sum(x[i, k] * cap[k] for i in courseSecDict[c]
                               for k in K) <= e * z[c])
        mod2.addConstr(3*sum(x[i,k]*cap[k] for i in courseSecDict[c] for k in K)\
                       >=courseSeatsDict[c]*z[c])
    print('Constraint #1 added:', pd.datetime.now() - start_time, flush=True)

    #2 Core hybrid50
    start_time = pd.datetime.now()
    for c in C:
        if courseCoreDict[c] == 1:
            mod2.addConstr(z[c] == y[c])
    print('Constraint #2 added:', pd.datetime.now() - start_time, flush=True)

    #3 Same section conflict
    start_time = pd.datetime.now()
    for i in I:
        mod2.addConstr(sum(x[i, k] for k in K) <= 1)
    print('Constraint #3 added:', pd.datetime.now() - start_time, flush=True)

    #4 Same classroom conflict
    start_time = pd.datetime.now()
    for k in K:
        for (i1, i2) in T:
            mod2.addConstr(x[i1, k] + x[i2, k] <= 1)
    print('Constraint #4 added:', pd.datetime.now() - start_time, flush=True)

    #5 Blackout time
    start_time = pd.datetime.now()
    for (i, k) in N:
        mod2.addConstr(x[i, k] == 0)
    print('Constraint #5 added:', pd.datetime.now() - start_time, flush=True)

    # 6 Proximity
    start_time = pd.datetime.now()
    for i in I:
        for b in B:
            mod2.addConstr(q[i, b] == sum(x[i, k] for k in bldgRoomDict[b]))
    # Back-to-back sections taught by the same professor must be placed in the same building
    for (i1, i2) in F:
        for b in B:
            BB = B.copy()
            BB.remove(b)
            mod2.addConstr(q[i1, b] + sum(q[i2, bb] for bb in BB) <= 1)
    print('Constraint #6 added:', pd.datetime.now() - start_time, flush=True)

    start_time = pd.datetime.now()
    mod2.optimize()
    print('Model optimized:', pd.datetime.now() - start_time, flush=True)

    # Read assigned classrooms
    for i in I:
        for k in K:
            if x[i, k].x == 1:
                if isinstance(i, str):
                    sec_pair = i.split('/')
                    for sec in sec_pair:
                        sections.loc[int(sec), 'Room'] = k
                else:
                    sections.loc[i, 'Room'] = k

    # Determine hybrid mode
    hybrid = pd.Series(index=C, name='Type')
    for c in C:
        if y[c].x == 1:
            hybrid[c] = 'hybrid50'
        elif z[c].x == 1:
            hybrid[c] = 'hybrid33'
        else:
            hybrid[c] = 'online'

    mg = sections.merge(cap, how='left', left_on='Room', right_index=True)
    mg = mg.merge(hybrid, left_on='Course', right_index=True, how='left')
    mg['Core'] = mg['Core'].map({0: 'Elective', 1: 'Core'})

    # Create seat count summary
    summarySeats = mg.groupby(
        ['Core', 'Type']).sum()['REG COUNT'].reset_index(name='Seats Offered')
    summarySeats = pd.pivot_table(summarySeats,
                                  index=['Core'],
                                  columns='Type',
                                  values='Seats Offered').fillna(0)
    summarySeats = summarySeats.astype(int)
    summarySeats.loc['Total'] = summarySeats.sum()
    summarySeats['Total'] = summarySeats.sum(axis=1)
    summarySeats['hybrid33 %'] = (
        summarySeats['hybrid33'] /
        summarySeats['Total']).apply(lambda x: f'{x*100:.1f}%')
    summarySeats['hybrid50 %'] = (
        summarySeats['hybrid50'] /
        summarySeats['Total']).apply(lambda x: f'{x*100:.1f}%')
    summarySeats['online %'] = (
        summarySeats['online'] /
        summarySeats['Total']).apply(lambda x: f'{x*100:.1f}%')

    # Create course count summary
    summaryCourses = mg.groupby(
        ['Core', 'Type']).nunique()['Course'].reset_index(name='# Courses')
    summaryCourses = pd.pivot_table(summaryCourses,
                                    index=['Core'],
                                    columns='Type',
                                    values='# Courses').fillna(0)
    summaryCourses = summaryCourses.astype(int)
    summaryCourses.loc['Total'] = summaryCourses.sum()
    summaryCourses['Total'] = summaryCourses.sum(axis=1)
    summaryCourses['hybrid33 %'] = (
        summaryCourses['hybrid33'] /
        summaryCourses['Total']).apply(lambda x: f'{x*100:.1f}%')
    summaryCourses['hybrid50 %'] = (
        summaryCourses['hybrid50'] /
        summaryCourses['Total']).apply(lambda x: f'{x*100:.1f}%')
    summaryCourses['online %'] = (
        summaryCourses['online'] /
        summaryCourses['Total']).apply(lambda x: f'{x*100:.1f}%')

    print(
        "-------------------------------------------------------------------")
    print("Optimization Complete")
    # Print summaries to screen
    print(summarySeats)
    print(summaryCourses)

    # Write results to output file
    writer = pd.ExcelWriter(outputFile)
    mg.to_excel(writer, sheet_name='Schedules', index=True)
    summarySeats.to_excel(writer, sheet_name='Seats Summary')
    summaryCourses.to_excel(writer, sheet_name='Courses Summary')
    writer.save()
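For clarity, the pairwise time-overlap test used to build T above, pulled out as a standalone check:

def minutes(text):
    return int(text[:2]) * 60 + int(text[3:5])

def overlap(start1, end1, start2, end2):
    return (minutes(start1) >= minutes(start2) and minutes(start1) < minutes(end2)) or \
           (minutes(start2) >= minutes(start1) and minutes(start2) < minutes(end1))

print(overlap('09:00', '10:15', '10:00', '11:15'))  # True  (overlapping slots)
print(overlap('09:00', '10:00', '10:00', '11:15'))  # False (back-to-back, no overlap)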
Example #32
def main():
    parser = argparse.ArgumentParser(
        description='A deep learning powered face recognition app.',
        epilog=
        'Written for the INSA Lyon DEEP Project by Anh Pham, Mathilde du Plessix, '
        'Romain Latron, BeonƮt Zhong, Martin Haug. 2018 All rights reserved.')

    parser.add_argument('image', help='Image to recognize faces on')
    parser.add_argument('--epoch',
                        help='Number of epoch',
                        type=int,
                        default=15)
    parser.add_argument('-m',
                        '--model',
                        help='Pass a model file to skip training')
    parser.add_argument('-t',
                        '--train',
                        help='A folder with correctly labeled training data. '
                        'Will save at model path if option is specified.')
    parser.add_argument(
        '--training-preview',
        help='Will preview a batch of images from the training set',
        action='store_true')
    parser.add_argument(
        '--test-preview',
        help='Will preview a batch of images from the test set',
        action='store_true')
    parser.add_argument(
        '--outliers',
        help='Will include the outlier detections in the result as squares',
        action='store_true')
    parser.add_argument(
        '--color',
        help='Runs the network on a color version of the image',
        action='store_true')
    parser.add_argument('-e',
                        '--test',
                        help='A folder with labeled testing data')
    args = parser.parse_args()

    transform = transforms.Compose([
        transforms.Resize(224),
        transforms.ToTensor(),
        transforms.Normalize((.5, .5, .5), (.5, .5, .5))
    ])

    classes = ('noface', 'face')
    net = torchvision.models.resnet18(pretrained=True)
    num_features = net.fc.in_features
    net.fc = nn.Linear(num_features, len(classes))

    if torch.cuda.is_available():
        net.to("cuda:0")
        print("On GPU!")
    else:
        print("On CPU :(")

    datasets = {
        'train':
        torchvision.datasets.ImageFolder(
            root=args.train, transform=transform, loader=image_loader)
        if args.train is not None else None,
        'val':
        torchvision.datasets.ImageFolder(
            root=args.test, transform=transform, loader=image_loader)
        if args.test is not None else None
    }

    loaders = {
        k: torch.utils.data.DataLoader(
            v, batch_size=4, shuffle=True, num_workers=2)
        if v is not None else None
        for (k, v) in datasets.items()
    }

    if (loaders['train'] is None
            or loaders['val'] is None) and args.model is None:
        print(
            "You have to specify a model or a training and testing set to use the net"
        )
    elif loaders['train'] is not None and loaders['val'] is not None:
        if args.training_preview:
            preview(loaders['train'], classes)

        net = train_net(net, loaders, args.epoch,
                        {k: len(v)
                         for (k, v) in datasets.items()},
                        torch.cuda.is_available())

        if args.model is not None:
            torch.save(net.state_dict(), args.model)
            print("Saved model at {}".format(args.model))

    else:
        net.load_state_dict(torch.load(args.model))
        print("Loaded model from {}".format(args.model))

    if args.test is not None:
        if args.test_preview:
            preview(loaders['val'], classes)

        test_net(net, loaders['val'], classes, torch.cuda.is_available())

    orig, bw_img = None, None
    try:
        orig, bw_img = load_single_image(args.image)
    except FileNotFoundError:
        print("Could not open image!")
        exit(-1)

    window_array = image_mover(
        orig if args.color else bw_img,
        0.4,
        0.6,
        terminate_size=max(np.sqrt(orig.size[0] * orig.size[1] * 0.00138), 36))

    fig, ax = plt.subplots(1)
    ax.imshow(orig)
    count = 0
    lowest_score = float("inf")
    highest_score = 0
    with torch.no_grad():
        total_iters = 0
        for window_wrap in window_array:
            crop_real = transform(
                transforms.functional.crop(orig if args.color else bw_img,
                                           window_wrap.y, window_wrap.x,
                                           window_wrap.height,
                                           window_wrap.width))
            if torch.cuda.is_available():
                crop_real = crop_real.to("cuda:0")
            outputs = net(crop_real.unsqueeze(0))
            _, predicted = torch.max(outputs.data, 1)
            if outputs.data[0][1] > 1.2 and predicted[0] == 1:
                window_wrap.is_face(outputs.data[0][1])
                if outputs.data[0][1] > highest_score:
                    highest_score = outputs.data[0][1]

                if outputs.data[0][1] < lowest_score:
                    lowest_score = outputs.data[0][1]

                count += 1
            total_iters += 1
            terminal.print_progress(
                total_iters,
                len(window_array),
                prefix='Processing image: ',
                suffix='Complete ({} candidates)'.format(count),
                bar_length=80)
        print("{} faces detected".format(count))

    med_height = np.median([x.height for x in window_array])
    scan = DBSCAN(eps=med_height * .75, min_samples=2)

    matches = [i for i in window_array if i.face]
    points = np.array([i.get_center() for i in matches])
    clusters = scan.fit(points)

    cl_labels = clusters.labels_

    class_dict = {}

    for i in range(len(matches)):
        if cl_labels[i] not in class_dict:
            class_dict[cl_labels[i]] = []
        class_dict[cl_labels[i]].append(matches[i])

    if -1 in class_dict:
        med = float(np.median([x.score for x in class_dict[-1]]))
        print("Outlier median score {}".format(med))

        circles = []
        for window_wrap in class_dict[-1]:
            if window_wrap.score > med > 1.3:
                circles.append(
                    (*(window_wrap.get_center()), window_wrap.height / 2,
                     float(window_wrap.score)))
            elif args.outliers:
                edge = colors.hsv_to_rgb(
                    (0, (lowest_score - window_wrap.score) /
                     (lowest_score - highest_score), 1))
                circle_outl = patches.Rectangle(window_wrap.get_top_left(),
                                                window_wrap.width,
                                                window_wrap.height,
                                                linewidth=2,
                                                edgecolor=edge,
                                                facecolor='none')
                ax.add_patch(circle_outl)

        overlaps = {}
        for pair in itertools.combinations(circles, 2):
            c1, c2 = pair
            if c1 not in overlaps:
                overlaps[c1] = []
            d = np.linalg.norm(
                np.array([c1[0], c1[1]]) - np.array([c2[0], c2[1]]))
            if (c1[2] - c2[2]) ** 2 <= (c1[0] - c2[0]) ** 2 + (c1[1] - c2[1]) ** 2 <= (c1[2] + c2[2]) ** 2\
                    or c1[2] > (d+c2[2]) or c2[2] > (d+c1[2]):
                overlaps[c1].append(c2)

        groups = get_groups(overlaps)

        for group in groups:
            circle = group[0]
            max_score = float("-inf")
            for candidate in group:
                if candidate[3] > max_score:
                    max_score = candidate[3]
                    circle = candidate

            edge = colors.hsv_to_rgb((.15, (lowest_score - circle[3]) /
                                      (lowest_score - highest_score), 1))
            circle_outl = patches.Circle((circle[0], circle[1]), (circle[2]),
                                         linewidth=2,
                                         edgecolor=edge,
                                         facecolor='none')
            ax.add_patch(circle_outl)
        print("Added {} outliers as faces".format(len(groups)))
    for i in range(max(cl_labels) + 1):
        cluster = Cluster(class_dict[i])
        edge = colors.hsv_to_rgb(
            (.38, (lowest_score - cluster.get_max_score()) /
             (lowest_score - highest_score), 1))
        circle_cluster = patches.Circle(cluster.get_center(),
                                        cluster.get_radius(),
                                        linewidth=2,
                                        edgecolor=edge,
                                        facecolor='none')
        ax.add_patch(circle_cluster)
    plt.show()
Example #33
0
def get_sum(num):
    for comb in combinations(lines, num):
        if sum(comb) == 2020:
            print(prod(comb))

def r_adjust(R2, k, n):
    if n == 1:
        return R2

    r_adj = (R2 - k / (n - 1)) * ((n - 1) / (n - k - 1))
    return r_adj


k = len(x_str)
n = len(X)

r_square_adjust = r_adjust(r_square, k, n)

coef = model.coef_
intercept = model.intercept_

predict = model.predict(X)


def sum_squares(predict, y):
    stack_ = np.column_stack((predict, y))
    squares = [(row[0] - row[1])**2 for row in stack_]
    #error = np.sqrt(squares)
    #se = error/np.sqrt(n)
    return (sum(squares))


ssq = sum_squares(predict, y)

test = list(itertools.combinations(x_str, 2))
print(test)
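
# Not part of the original snippet: a quick check that the r_adjust formula
# above is just the textbook adjusted R-squared written differently,
# 1 - (1 - R2) * (n - 1) / (n - k - 1).
def adjusted_r2_standard(R2, k, n):
    if n == 1:
        return R2
    return 1 - (1 - R2) * (n - 1) / (n - k - 1)

# both forms agree, e.g. for R2 = 0.9 with k = 3 predictors and n = 50 samples
assert abs(adjusted_r2_standard(0.9, 3, 50) - r_adjust(0.9, 3, 50)) < 1e-12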
Example #35
0
    def get_dict(self):
        return dict(
            doc="""General Hexahedron, define with nodes 1 to 20{} and node 27 present.

   Position of node in node array for element node numbers > 20:

{}""".format(self.gen_extnodes(), self.gen_nodepos(20 * " ")),
            procs=(general, Sestra),
            nnodes=20 + len(self.pos),
            eltyp=self.eltyp)

    def get_name(self):
        return "ghex{:03d}".format(self.eltyp)

for j in range(7):
    for k in itertools.combinations((1, 2, 4, 8, 16, 32), j):
        g = ghex(k)
        _ELEMENTS[g.get_name()] = g.get_dict()

[_ELEMENTS[i].setdefault("base", "elem") for i in _ELEMENTS]


def s_key(i):
    return i[1]['eltyp']

ELEMENTS = sorted([(i, _ELEMENTS[i]) for i in _ELEMENTS], key=s_key)
ENUMS = [(i[0].upper(), i[1]['eltyp']) for i in ELEMENTS]


def list_init(start, stop):
    if start == stop:
def powerset(iterable):
    s = list(iterable)
    return list(itertools.chain.from_iterable(itertools.combinations(s, r) for r in
        range(len(s) + 1)))
Example #37
0
from itertools import combinations
N, M = map(int, input().split())
link = [[0 for i in range(N)] for j in range(N)]
for m in range(M):
    x, y = map(int, input().split())
    link[x - 1][y - 1] = 1
    link[y - 1][x - 1] = 1

for i in range(N, 1, -1):
    for comb in combinations(range(N), i):
        for c in combinations(comb, 2):
            if (link[c[0]][c[1]] == 0):
                break
        else:
            print(i)
            exit()
print(1)
Example #38
0
    def get_clue(self):
        cos_dist = scipy.spatial.distance.cosine
        red_words = []
        bad_words = []

        # Creates Red-Labeled Word arrays, and everything else arrays
        for i in range(25):
            if self.words[i][0] == '*':
                continue
            elif self.maps[i] == "Assassin" or self.maps[i] == "Blue" or self.maps[i] == "Civilian":
                bad_words.append(self.words[i].lower())
            else:
                red_words.append(self.words[i].lower())
        print("RED:\t", red_words)

        all_vectors = (self.word_vectors,)
        bests = {}

        if not self.bad_word_dists:
            self.bad_word_dists = {}
            for word in bad_words:
                self.bad_word_dists[word] = {}
                for val in self.cm_wordlist:
                    b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
                    self.bad_word_dists[word][val] = b_dist

            self.red_word_dists = {}
            for word in red_words:
                self.red_word_dists[word] = {}
                for val in self.cm_wordlist:
                    b_dist = cos_dist(self.concatenate(val, all_vectors), self.concatenate(word, all_vectors))
                    self.red_word_dists[word][val] = b_dist

        else:
            to_remove = set(self.bad_word_dists) - set(bad_words)
            for word in to_remove:
                del self.bad_word_dists[word]
            to_remove = set(self.red_word_dists) - set(red_words)
            for word in to_remove:
                del self.red_word_dists[word]

        for clue_num in range(1, 3 + 1):
            best_per_dist = np.inf
            best_per = ''
            best_red_word = ''
            for red_word in list(itertools.combinations(red_words, clue_num)):
                best_word = ''
                best_dist = np.inf
                for word in self.cm_wordlist:
                    if not self.arr_not_in_word(word, red_words + bad_words):
                        continue

                    bad_dist = np.inf
                    worst_bad = ''
                    for bad_word in self.bad_word_dists:
                        if self.bad_word_dists[bad_word][word] < bad_dist:
                            bad_dist = self.bad_word_dists[bad_word][word]
                            worst_bad = bad_word
                    worst_red = 0
                    for red in red_word:
                        dist = self.red_word_dists[red][word]
                        if dist > worst_red:
                            worst_red = dist

                    if worst_red < best_dist and worst_red < bad_dist:
                        best_dist = worst_red
                        best_word = word
                        # print(worst_red,red_word,word)

                        if best_dist < best_per_dist:
                            best_per_dist = best_dist
                            best_per = best_word
                            best_red_word = red_word
            bests[clue_num] = (best_red_word, best_per, best_per_dist)

        print("BESTS: ", bests)
        li = []
        pi = []
        chosen_clue = bests[1]
        chosen_num = 1
        for clue_num, clue in bests.items():
            best_red_word, combined_clue, combined_score = clue
            worst = -np.inf
            best = np.inf
            worst_word = ''
            for word in best_red_word:
                dist = cos_dist(self.concatenate(word, all_vectors), self.concatenate(combined_clue, all_vectors))
                if dist > worst:
                    worst_word = word
                    worst = dist
                if dist < best:
                    best = dist
            if worst < 0.7 and worst != -np.inf:
                print(worst, chosen_clue, chosen_num)
                chosen_clue = clue
                chosen_num = clue_num

            li.append((worst / best, best_red_word, worst_word, combined_clue,
                       combined_score, combined_score ** len(best_red_word)))

        if chosen_clue[2] == np.inf:
            chosen_clue = ('', li[0][3], 0)
            chosen_num = 1
        # print("LI: ", li)
        # print("The clue is: ", li[0][3])
        print('chosen_clue is:', chosen_clue)
        # return in array styled: ["clue", number]
        return chosen_clue[1], chosen_num  # [li[0][3], 1]
Example #39
0
fighters.append(Player('Formiga', 130, 7600, 56.0, 7))
fighters.append(Player('S. Pettis', -155, 8600, 56.0, 7))
fighters.append(Player('Luque', -829, 9400, 85.4, 8))
fighters.append(Player('Turner', 566, 6800, 70, 8))
fighters.append(Player('Ladd', -171, 8200, 94, 9))
fighters.append(Player('Evinger', 143, 8000, 21, 9))
fighters.append(Player('Kunitskaya', -212, 8500, 10.5, 10))
fighters.append(Player('Lansberg', 174, 7700, 39.4, 10))
fighters.append(Player('Patrick', -268, 9100, 77.1, 11))
fighters.append(Player('Holtzman', 216, 7100, 82.5, 11))
fighters.append(Player('Lentz', -241, 9000, 69.1, 12))
fighters.append(Player('Maynard', 195, 7200, 56.7, 12))
fighters.append(Player('LaFlare', -136, 8800, 70.1, 13))
fighters.append(Player('Martin', 113, 7400, 52.2, 13))

my_combos = combinations(fighters, 6)

best_score = 99999
real_lineups = 0
for lineup in my_combos:
    total_score = 0
    points = 0
    salary = 0
    for player in lineup:
        points += player.fppf
        salary += player.salary
        if player.odds < 0:
            total_score += float(100) / (player.odds * float(-1))
        else:
            total_score += player.odds / float(100)
    if salary > 50000 or salary < 49500:
#Combination of a given length
import itertools
li = ['23', '97', '26', '27'] #list of which combination is to be found
r = 3 #length of combinations
list(itertools.combinations(li,r))

#OUTPUT
[('23', '97', '26'), ('23', '97', '27'), ('23', '26', '27'), ('97', '26', '27')]



#All possible combinations except null combination. To include null combination replace 1 by 0 in range i.e. change range(1,len(li)+1) to range(0,len(li)+1)
import itertools
li = ['23', '97', '26', '27']
li_of_li = []
for k in range(1,len(li)+1):
    for j in [list(i) for i in list(itertools.combinations(li,k))]:
        li_of_li.append(j)

print(li_of_li)

#OUTPUT
[['23'], ['97'], ['26'], ['27'], ['23', '97'], ['23', '26'], ['23', '27'], ['97', '26'], ['97', '27'], ['26', '27'], ['23', '97', '26'], ['23', '97', '27'], ['23', '26', '27'], ['97', '26', '27'], ['23', '97', '26', '27']]
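
#The same non-empty powerset can also be built in one line with
#itertools.chain.from_iterable; start the range at 0 instead of 1 to include
#the null combination. (Extra illustration, not part of the original snippet.)
import itertools
li = ['23', '97', '26', '27']
li_of_li = [list(c) for c in itertools.chain.from_iterable(
    itertools.combinations(li, k) for k in range(1, len(li) + 1))]
print(li_of_li)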
Example #41
0
def get_upper_inds(d):
    return list(combinations(range(d), 2))
Example #42
0
# build list of possible muzzle device orientations
while True:
    orientation = {
        'Angle': inputs.degrees + cnt * 360 / inputs.orientations,
        'Shims': [],
        'Error': None
    }
    if orientation['Angle'] * pitch / 360 > inputs.max_height:
        break
    orientations.append(orientation)
    cnt += 1

# test all possible shim combinations for best fit at given muzzle device orientation
for L in range(0, len(inputs.shims)+1):
    for combo in itertools.combinations(inputs.shims, L):
        for orientation in orientations:
            error = orientation['Angle'] * pitch / 360 - sum(combo)
            if not orientation['Shims'] or abs(error) < abs(orientation['Error']):
                orientation['Shims'] = combo
                orientation['Error'] = error

# sort by lowest (absolute) error
orientations = sorted(orientations, key=lambda i: abs(i['Error']))

# output
for orientation in orientations:
    if orientation['Shims'] and abs(orientation['Error'] / pitch * 360) < inputs.degrees:
        print('Angle: {}\nShims: {}\nError: {}º\n\n'.format(
            orientation['Angle'],
            ' + '.join(str(x) for x in orientation['Shims']),
            orientation['Error']))
Example #43
0
# Solution using combinations
from itertools import combinations
while True:
    k, *s = list(map(int, input().split()))
    if k == 0: break
    for c in combinations(s, 6):
        print(' '.join(map(str, c)))
    print()


# Brute-force (exhaustive search) solution
def go(s, i, lotto):
    if len(lotto)==6:
        print(' '.join(map(str, lotto)))
        return
    if i==len(s):
        return
    go(s, i+1, lotto+[s[i]])
    go(s, i+1, lotto)

while True:
    k,*s=list(map(int, input().split()))
    if k == 0: break
    go(s, 0, [])
    print()

# the lotto array given as input, the index at which we decide whether or not
# to include s[i], and the count of numbers chosen so far
# go(s, i, cnt):

# 1) the case where an answer has been found
# cnt == 6
Example #44
0

testSquareDf = testDf
testSquareDf=testSquareDf.drop('Y',1)
# print testSquareDf
testSquareDf = testSquareDf.applymap(np.square)
featuresList = np.array(boston.feature_names)
headers = ['s' + i for i in featuresList ]
# print headers
testSquareDf.columns = headers
# trainSquareDf.insert(0,'X0',1.0)
# print testSquareDf

#--------
select = boston.feature_names
comb = list(itertools.combinations(select, 2))
# print len(comb)
# print comb
trainDf2=trainDf
trainDf2=trainDf2.drop('Y',1)
trainCombDf = pd.DataFrame()
for i in comb:
    trainCombDf[i] = trainDf2[i[0]]*trainDf2[i[1]]


trainCombDf = pd.concat([trainDf2,trainSquareDf,trainCombDf],axis=1)
# print len(trainCombDf.ix[0])
# print trainCombDf
trainCombDf['Y'] = trainDf['Y']
# print trainCombDf
Example #45
0
    def build_skeleton(nodes, independencies):
        """Estimates a graph skeleton (UndirectedGraph) from a set of independencies
        using (the first part of) the PC algorithm. The independencies can either be
        provided as an instance of the `Independencies`-class or by passing a
        decision function that decides any conditional independency assertion.
        Returns a tuple `(skeleton, separating_sets)`.

        If an Independencies-instance is passed, the contained IndependenceAssertions
        have to admit a faithful BN representation. This is the case if
        they are obtained as a set of d-separations of some Bayesian network or
        if the independence assertions are closed under the semi-graphoid axioms.
        Otherwise the procedure may fail to identify the correct structure.

        Parameters
        ----------
        nodes: list, array-like
            A list of node/variable names of the network skeleton.

        independencies: Independencies-instance or function.
            The source of independency information from which to build the skeleton.
            The provided Independencies should admit a faithful representation.
            Can either be provided as an Independencies()-instance or by passing a
            function `f(X, Y, Zs)` that returns `True` when X _|_ Y | Zs,
            otherwise `False`. (X, Y being individual nodes and Zs a list of nodes).

        Returns
        -------
        skeleton: UndirectedGraph
            An estimate for the undirected graph skeleton of the BN underlying the data.

        separating_sets: dict
            A dict containing for each pair of not directly connected nodes a
            separating set ("witnessing set") of variables that makes then
            conditionally independent. (needed for edge orientation procedures)

        Reference
        ---------
        [1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
            http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
        [2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
            Section 3.4.2.1 (page 85), Algorithm 3.3

        Examples
        --------
        >>> from pgm.estimators import ConstraintBasedEstimator
        >>> from pgm.models import BayesianModel
        >>> from pgm.independencies import Independencies

        >>> # build skeleton from list of independencies:
        ... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
        >>> # we need to compute closure, otherwise this set of independencies doesn't
        ... # admit a faithful representation:
        ... ind = ind.closure()
        >>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton("ABCD", ind)
        >>> print(skel.edges())
        [('A', 'D'), ('B', 'D'), ('C', 'D')]

        >>> # build skeleton from d-separations of BayesianModel:
        ... model = BayesianModel([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
        >>> skel, sep_sets = ConstraintBasedEstimator.build_skeleton(model.nodes(), model.get_independencies())
        >>> print(skel.edges())
        [('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
        """

        nodes = list(nodes)

        if isinstance(independencies, Independencies):
            def is_independent(X, Y, Zs):
                return IndependenceAssertion(X, Y, Zs) in independencies
        elif callable(independencies):
            is_independent = independencies
        else:
            raise ValueError("'independencies' must be either Independencies-instance " +
                             "or a ternary function that decides independencies.")

        graph = UndirectedGraph(combinations(nodes, 2))
        lim_neighbors = 0
        separating_sets = dict()
        while not all([len(graph.neighbors(node)) < lim_neighbors for node in nodes]):
            for node in nodes:
                for neighbor in graph.neighbors(node):
                    # search if there is a set of neighbors (of size lim_neighbors)
                    # that makes X and Y independent:
                    for separating_set in combinations(set(graph.neighbors(node)) - set([neighbor]), lim_neighbors):
                        if is_independent(node, neighbor, separating_set):
                            separating_sets[frozenset((node, neighbor))] = separating_set
                            graph.remove_edge(node, neighbor)
                            break
            lim_neighbors += 1

        return graph, separating_sets
Example #46
0
def main():
    header = ["level", "lang", "tweets", "phd", "interviewed_well"]
    table = [
            ["Senior", "Java", "no", "no", "False"],
            ["Senior", "Java", "no", "yes", "False"],
            ["Mid", "Python", "no", "no", "True"],
            ["Junior", "Python", "no", "no", "True"],
            ["Junior", "R", "yes", "no", "True"],
            ["Junior", "R", "yes", "yes", "False"],
            ["Mid", "R", "yes", "yes", "True"],
            ["Senior", "Python", "no", "no", "False"],
            ["Senior", "R", "yes", "no", "True"],
            ["Junior", "Python", "yes", "no", "True"],
            ["Senior", "Python", "yes", "yes", "True"],
            ["Mid", "Python", "no", "yes", "True"],
            ["Mid", "Java", "yes", "no", "True"],
            ["Junior", "Python", "no", "yes", "False"]
        ]

    # task: Define a function that prepends the 
    # attribute label and "=" before each attribute value in the table
    prepend_attribute_labels(table, header)
    for row in table:
        print(row)
    # why did we do this?
    # if we treat each row (instance) as a set
    # we will lose a no and/or a yes if tweets and phd
    # have the same value
    # summary: sets have no order and have no duplicates

    # unsupervised learning
    # we are going to cover association rule
    # mining (ARM) and k means clustering
    # we don't have a special attribute whose
    # value we are trying to predict
    # so no more "class label"
    # looking for patterns, groups, associations
    # etc.

    # ARM
    # recall: decision trees give us classification rules
    # IF att1 = val1 AND att2 = val2 ... THEN class = classlabel1
    # let the "left hand side" or LHS be 
    # everything to the left of the THEN
    # let the RHS be everything to the right of the THEN
    # classification rules only have one "term"
    # attribute/value pair in its RHS
    # and at least one term in its LHS
    # for ARM we relax the constraint of one term in RHS
    # for ARM we have at least one term in LHS and
    # at least one term in RHS
    # for example
    # IF att1 = val1 AND att2 = val2 ... 
    # THEN att10 = val10 AND att11 = val11 AND ...
    # how to generate these rules?
    # one approach... brute force combinations
    # VERY computationally expensive
    # the approach we are going to use is Apriori
    # a few notes about ARM/Apriori
    # 1. even with tricks STILL VERY computationally expensive
    # 2. association does not imply causality
    # 3. we need to define new ways to evaluate
    # our rules... with apriori we will get 
    # a lot of rules... some are super weak
    # some are super rare

    # our game plan for learning ARM
    # 1. given some rules, use them/evaluate them
    # 2. introduce apriori via lab examples
    # 3. starter code for apriori
    # 4. full implementation of apriori (PA8 :) last one)

    # Rule 1: interviewed_well=False => tweets=no
    # Rule 2: phd=no AND tweets=yes => interviewed_well=True
    # how to represent rules in Python?
    rule1 = {"lhs": ["interviewed_well=False"], "rhs": ["tweets=no"]}
    rule2 = {"lhs": ["phd=no", "tweets=yes"], "rhs": ["interviewed_well=True"]}
    # task: what are the confidence, support, and completeness measures
    # for these two rules? desk check then code it up!
    # (a sketch of this helper is included after main() below)
    # Nleft, Nright, Nboth, Ntotal, confidence, support, completeness
    # rule1 desk check: 5, 7, 4, 14, 4/5, 4/14, 4/7
    # rule2 desk check: 4, 9, 4, 14, 4/4, 4/14, 4/9
    for rule in [rule1, rule2]:
        compute_rule_interestingness(rule, table)
        print(rule)

    # set theory basics
    # set: an unordered list with no duplicates
    # python has a set type
    # for example
    transaction = ["eggs", "milk", "milk", "sugar"]
    transaction_set = set(transaction)
    print(transaction_set)
    # note: order was lost, but we could just use a list
    # with no duplicates as a set
    transaction = list(transaction_set)
    print(transaction)
    # A union B: set of all items in A or B or both
    # A intersect B: set of all items in both A and B
    # can use set methods union() and intersection()
    # we need union for apriori
    # let's say we have an LHS set and an RHS set
    # LHS intersect RHS should be 0
    # LHS union RHS is sorted(LHS + RHS)
    # A is a subset of B: if all elements in A are also in B
    # check_row_match(A, B) will return 1 if A is a subset of B, 0 otherwise
    # powerset of A: the set of all possible subsets of A, including 0 and A
    import itertools
    powerset = []
    for i in range(0, len(transaction) + 1):
        # i represents the size of our subsets
        subsets = list(itertools.combinations(transaction, i))
        powerset.extend([s for s in subsets])
    print(powerset)

    # intro to market basket analysis (MBA)
    # find associations between products customers buys
    # IF {"milk=true", "sugar=true"} THEN {"eggs=true"}
    # we are only interested in products purchased, not products not purchased
    # e.g. =true not the =false
    # for shorthand we can drop the =true
    # IF {"milk", "sugar"} THEN {"eggs"}
    # {"milk", "sugar"} -> {"eggs"}
    # terminology
    # each row in our dataset is now called a "transaction"
    # a transaction is an "itemset" 

    transactions = [
        ["b", "c", "m"],
        ["b", "c", "e", "m", "s"],
        ["b"],
        ["c", "e", "s"],
        ["c"],
        ["b", "c", "s"],
        ["c", "e", "s"],
        ["c", "e"]
    ]
    I = compute_unique_values(table)
    print(I)
    I = compute_unique_values(transactions)
    print(I)

    subsets = compute_k_1_subsets(transactions[3])
    print(subsets)

    rules = apriori(transactions, 0.25, 0.8)
    print(rules)
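
# A minimal sketch of the compute_rule_interestingness() helper called in
# main() above (the original implementation is not shown in this example); it
# assumes the table rows have already been passed through
# prepend_attribute_labels(), and it reproduces the desk-check numbers:
# confidence = Nboth / Nleft, support = Nboth / Ntotal, completeness = Nboth / Nright.
def check_row_match(terms, row):
    # 1 if every term in the rule part appears in the row (subset test), else 0
    return 1 if set(terms).issubset(set(row)) else 0

def compute_rule_interestingness(rule, table):
    Nleft = sum(check_row_match(rule["lhs"], row) for row in table)
    Nright = sum(check_row_match(rule["rhs"], row) for row in table)
    Nboth = sum(check_row_match(rule["lhs"] + rule["rhs"], row) for row in table)
    Ntotal = len(table)
    rule["Nleft"], rule["Nright"], rule["Nboth"], rule["Ntotal"] = Nleft, Nright, Nboth, Ntotal
    rule["confidence"] = Nboth / Nleft if Nleft else 0.0
    rule["support"] = Nboth / Ntotal
    rule["completeness"] = Nboth / Nright if Nright else 0.0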
Example #47
0
    for line in tqdm(fin, total=get_num_lines(infilename), desc="Generate document-level cooccurrence features (pass 1)"):
      sentInfo = json.loads(line)
      articleId = sentInfo['articleId']
      eidlist = [em['entityId'] for em in sentInfo['entityMentions']]
      articeID2eidlist[articleId].extend(eidlist)
      for eid in eidlist:
        eid2freq[eid] += 1


  eidPair2count = defaultdict(int)
  eidTotalCount = 0
  eidPairTotalCount = 0
  for articleId in tqdm(articeID2eidlist, desc="Generate document-level cooccurrence features (pass 2)"):
    eidlist = articeID2eidlist[articleId]
    eidTotalCount += len(eidlist)
    for pair in itertools.combinations(eidlist,2):
      eidPairTotalCount += 1
      if pair[0] == pair[1]:
        continue
      eidPair2count[frozenset(pair)] += 1


  with open(outfilename, 'w') as fout:
    for ele in eidPair2count:
      count = eidPair2count[ele]
      ele = list(ele)
      fout.write(str(ele[0]) + "\t" + str(ele[1]) + "\t" + str(count) + "\n")

  with open(outfilename2, 'w') as fout:
    for ele in eidPair2count:
      p_x_y =  eidPair2count[ele] / eidPairTotalCount
Example #48
0
def plot_results(grid,
                 film_x,
                 film_y,
                 Ep,
                 traces,
                 plot_fluence=True,
                 plot_quiver=False,
                 plot_traces=True,
                 save_images=False):
    """
    Plot proton fluence, vector field slice, and/or 3D particle traces

    Parameters
    ----------
    grid : Grid
        Grid object produced by load_grid()
    film_x : array of size NP
        'x' positions of protons on film plane (returned by push_protons)
    film_y : array of size NP
        'y' positions of protons on film plane (returned by push_protons)
    Ep : array of size NP
        Energies of protons at film plane (in MeV).
    traces : ndarray of shape (ntraces, nz_prot+2, 3)
        Record of particle traces for random subset of protons. First dimension is the proton being tracked,
        second dimension is the index of the step, third dimension is the coordinates (x,y,z) at each location
    plot_fluence : boolean, optional
        Whether to plot proton fluence as a 2D histogram
    plot_quiver : boolean, optional
        Whether to plot a 2D x-y slice of the magnetic or electric fields using a quiver plot.
        Also plots material Z using imshow. 
    plot_traces : boolean, optional
        Whether to plot particle traces in 3D using mpl_toolkits.mplot3d (if installed)
    save_images : boolean, optional
        Whether to save fluence and/or quiver plots in the 'outputs' directory. Will be saved as png images,
        and named according to the name of the input parameter file.

    """

    quiver_fig = None
    traces_fig = None
    fluence_fig = None
    cbar = None

    if plot_quiver:
        # TODO: Clean up this section

        # Plot material Z and field cross section
        quiver_fig = plt.figure(1)
        plt.axes().set_aspect('equal')

        # Material Z
        # if grid.cyl_coords is False:
        #     plt.imshow(grid.vals[:,:,grid.nz/2,7].T, origin='lower', cmap='Reds', interpolation='nearest') # plot material Z

        # x-y electric fields
        # quiverU = grid.vals[:,:,grid.nz/2,0]
        # quiverV = grid.vals[:,:,grid.nz/2,1]
        # x-y magnetic fields
        quiverU = grid.vals[:, :, grid.nz // 3, 3]
        quiverV = grid.vals[:, :, grid.nz // 3, 4]
        quiverX = range(len(quiverU[:, 0]))
        quiverY = range(len(quiverU[0, :]))
        quiverX, quiverY = np.meshgrid(quiverX, quiverY, indexing='ij')

        if grid.cyl_coords:
            # X-Y view
            quiverR = np.linspace(grid.dx / 2, grid.dx * grid.nx + grid.dx / 2,
                                  grid.nx)
            quiverTheta = np.linspace(0.0, 2 * np.pi - grid.dy, grid.ny)
            quiverR, quiverTheta = np.meshgrid(quiverR,
                                               quiverTheta,
                                               indexing='ij')
            quiverX = quiverR * np.cos(quiverTheta)
            quiverY = quiverR * np.sin(quiverTheta)

            # Z-X view (uncomment to activate)
            # quiverX = np.linspace(grid.zoffset, grid.zoffset+grid.lz, grid.nz)
            # quiverY = np.linspace(grid.dx/2,grid.dx*grid.nx+grid.dx/2,grid.nx)
            # quiverX, quiverY = np.meshgrid(quiverX,quiverY, indexing='ij')
            # quiverU = grid.vals[:,0,:,2]
            # quiverV = grid.vals[:,0,:,0]

        # Mask to get rid of the zero vectors
        quiverMask = ((quiverU != 0.0) | (quiverV != 0.0))
        plt.quiver(quiverX[quiverMask], quiverY[quiverMask],
                   quiverU[quiverMask], quiverV[quiverMask])
        plt.xlabel("x")
        plt.ylabel("y")
        plt.xlim([min(quiverX.flatten()), max(quiverX.flatten())])
        plt.ylim([min(quiverY.flatten()), max(quiverY.flatten())])

        if save_images:
            plt.savefig('outputs/' + sys.argv[1] + '_quiver.png',
                        bbox_inches='tight',
                        transparent=True,
                        dpi=200)

    if plot_traces:
        # Plot particle traces in 3D
        try:
            from mpl_toolkits.mplot3d import Axes3D
            traces_fig = plt.figure(2, figsize=(7, 7))
            ax = traces_fig.gca(projection='3d')
            for i in range(params.ntraces):
                #ax.plot(traces[i,0:params.nsteps+2,0],traces[i,0:params.nsteps+2,1],traces[i,0:params.nsteps+2,2])
                ax.plot(traces[i, :, 0], traces[i, :, 1], traces[i, :, 2])

            # Plot grid bounds.
            # NOTE: DO NOT MISTAKE GRID BOUNDS FOR WHERE YOUR HOHLRAUM IS, ESPECIALLY IN CYLINDRICAL CASE
            if grid.cyl_coords:
                # Plot 3d transparent cylinder
                import mpl_toolkits.mplot3d.art3d as art3d
                from matplotlib.patches import Circle
                radius = grid.lx

                # Cylinder top and bottom
                # cyl_bottom = Circle((0, 0), radius, color='k', alpha=0.2)
                # cyl_top = Circle((0, 0), radius, color='k', alpha=0.2)
                # ax.add_patch(cyl_bottom)
                # ax.add_patch(cyl_top)
                # art3d.pathpatch_2d_to_3d(cyl_top, z=grid.zoffset+grid.lz, zdir="z")
                # art3d.pathpatch_2d_to_3d(cyl_bottom, z=grid.zoffset, zdir="z")

                # Cylinder sides
                X, Z = np.meshgrid(
                    np.linspace(-radius, radius, 20),
                    np.linspace(grid.zoffset, grid.zoffset + grid.lz, 20))
                Y = np.sqrt(radius**2 - X**2)  # Pythagorean theorem
                ax.plot_surface(X, Y, Z, linewidth=1, color='k', alpha=0.2)
                ax.plot_surface(X, -Y, Z, linewidth=1, color='k', alpha=0.2)
            else:
                # Plot edges of 3d box as dotted black lines
                for s, e in itertools.combinations(
                        np.array(
                            list(
                                itertools.product([
                                    params.gridcorner[0],
                                    params.gridcorner[0] + grid.lx
                                ], [
                                    params.gridcorner[1],
                                    params.gridcorner[1] + grid.ly
                                ], [
                                    params.gridcorner[2],
                                    params.gridcorner[2] + grid.lz
                                ]))), 2):
                    if np.sum(np.abs(s - e)) in (grid.lx, grid.ly, grid.lz):
                        ax.plot3D(*zip(s, e), color='k', linestyle='--')

            if grid.cyl_coords:
                ax.set_xlim([grid.xoffset - grid.lx, grid.xoffset + grid.lx])
                ax.set_ylim([grid.xoffset - grid.lx, grid.xoffset + grid.lx])
                ax.set_zlim([grid.zoffset, grid.zoffset + grid.lz])
            else:
                ax.set_xlim([grid.xoffset, grid.xoffset + grid.lx])
                ax.set_ylim([grid.yoffset, grid.yoffset + grid.ly])
                ax.set_zlim([grid.zoffset, grid.zoffset + grid.lz])

            ax.set_xlabel('x')
            ax.set_ylabel('y')
            ax.set_zlabel('z')

        except ImportError:
            print(
                "Unable to plot particle traces--module mpl_toolkits.mplot3d is not installed."
            )

    if plot_fluence:
        fluence_fig = plt.figure(3)
        plt.clf()
        plt.axes().set_aspect('equal')
        xmax = linalg.norm(params.film_axis1)
        ymax = linalg.norm(params.film_axis2)
        maxfluence = 150.0
        try:
            maxfluence = params.hist_maxfluence
        except AttributeError:
            pass
        myhist = plt.hist2d(film_x * 10,
                            film_y * 10,
                            bins=300,
                            cmap='gray_r',
                            range=[[-xmax * 10, xmax * 10],
                                   [-ymax * 10, ymax * 10]],
                            vmin=0.0,
                            vmax=maxfluence)
        plt.xlabel('mm')
        plt.ylabel('mm')
        #plt.title('9.5 MeV deuteron')

        cbar = plt.colorbar(format='%05.2f')
        try:
            # Include interactive draggable colorbar, if available
            from libraries import draggable_cbar
            cbar = draggable_cbar.DraggableColorbar(cbar, myhist[3])
            cbar.connect()
        except ImportError:
            pass

        if save_images:
            plt.savefig('outputs/' + sys.argv[1] + '_fluence.png',
                        bbox_inches='tight')

    return cbar, fluence_fig, traces_fig, quiver_fig
Example #49
0
def generate_permutations(profile: Profile) -> Iterator[PermutedCharacter]:
    """Generate PermutedLoadouts for a profile."""
    parsed_gear = profile.all_gear()
    logging.debug(_("Parsed gear: {}").format(parsed_gear))
    # TODO: handle gems, handle Shards of Domination

    # Filter each slot to only have unique items, before doing any gem permutation.
    for key, value in parsed_gear.items():
        parsed_gear[key] = stable_unique(value)

    simple_permutation_gear = {
        k: v
        for k, v in parsed_gear.items()
        if k not in (GearType.FINGER, GearType.TRINKET)
    }
    simple_permutations = itertools.product(*simple_permutation_gear.values())

    # Get every ring pairing.
    if len(parsed_gear[GearType.FINGER]) > 2:
        rings = list(itertools.combinations(parsed_gear[GearType.FINGER], 2))
    else:
        rings = (parsed_gear[GearType.FINGER], )
    logging.debug(f'Rings: {rings}')

    # Get every trinket pairing.
    if len(parsed_gear[GearType.TRINKET]) > 2:
        trinkets = list(
            itertools.combinations(parsed_gear[GearType.TRINKET], 2))
    else:
        trinkets = (parsed_gear[GearType.TRINKET], )
    logging.debug(f'Trinkets: {trinkets}')

    for permutation in simple_permutations:
        # Build a base EquipmentLoadout
        base_loadout = EquipmentLoadout()
        for item in permutation:
            base_loadout.equip(item)

        # Check if the base loadout is a valid loadout
        # (this will let us skip a bunch of processing later!)
        if not base_loadout.valid_loadout() or not base_loadout.valid_weapons(
                profile.player_class, profile.spec):
            continue

        for ring_permutation in rings:
            ring_loadout = copy.copy(base_loadout)
            for ring in ring_permutation:
                ring_loadout.equip(ring)
            if not ring_loadout.valid_loadout():
                continue

            for trinket_permutation in trinkets:
                # Build a Loadout
                loadout = copy.copy(ring_loadout)
                for trinket in trinket_permutation:
                    loadout.equip(trinket)

                # Check if this is a valid loadout
                if not loadout.valid_loadout():
                    continue

                for talent in profile.talents:
                    yield PermutedCharacter(loadout, talent)
Example #50
0
    def skeleton_to_pdag(skel, separating_sets):
        """Orients the edges of a graph skeleton based on information from
        `separating_sets` to form a DAG pattern (DirectedGraph).

        Parameters
        ----------
        skel: UndirectedGraph
            An undirected graph skeleton as e.g. produced by the
            estimate_skeleton method.

        separating_sets: dict
            A dict containing for each pair of not directly connected nodes a
            separating set ("witnessing set") of variables that makes then
            conditionally independent. (needed for edge orientation)

        Returns
        -------
        pdag: DirectedGraph
            An estimate for the DAG pattern of the BN underlying the data. The
            graph might contain some nodes with both-way edges (X->Y and Y->X).
            Any completion by (removing one of the both-way edges for each such
            pair) results in a I-equivalent Bayesian network DAG.

        Reference
        ---------
        Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
        http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf


        Examples
        --------
        >>> import pandas as pd
        >>> import numpy as np
        >>> from pgm.estimators import ConstraintBasedEstimator
        >>> data = pd.DataFrame(np.random.randint(0, 4, size=(5000, 3)), columns=list('ABD'))
        >>> data['C'] = data['A'] - data['B']
        >>> data['D'] += data['A']
        >>> c = ConstraintBasedEstimator(data)
        >>> pdag = c.skeleton_to_pdag(*c.estimate_skeleton())
        >>> pdag.edges() # edges: A->C, B->C, A--D (not directed)
        [('B', 'C'), ('A', 'C'), ('A', 'D'), ('D', 'A')]
        """

        pdag = skel.to_directed()
        node_pairs = combinations(pdag.nodes(), 2)

        # 1) for each X-Z-Y, if Z not in the separating set of X,Y, then orient edges as X->Z<-Y
        # (Algorithm 3.4 in Koller & Friedman PGM, page 86)
        for X, Y in node_pairs:
            if not skel.has_edge(X, Y):
                for Z in set(skel.neighbors(X)) & set(skel.neighbors(Y)):
                    if Z not in separating_sets[frozenset((X, Y))]:
                        pdag.remove_edges_from([(Z, X), (Z, Y)])

        progress = True
        while progress:  # as long as edges can be oriented (removed)
            num_edges = pdag.number_of_edges()

            # 2) for each X->Z-Y, orient edges to Z->Y
            for X, Y in node_pairs:
                for Z in ((set(pdag.successors(X)) - set(pdag.predecessors(X))) &
                          (set(pdag.successors(Y)) & set(pdag.predecessors(Y)))):
                    pdag.remove(Y, Z)

            # 3) for each X-Y with a directed path from X to Y, orient edges to X->Y
            for X, Y in node_pairs:
                for path in nx.all_simple_paths(pdag, X, Y):
                    is_directed = True
                    for src, dst in zip(path, path[1:]):
                        if pdag.has_edge(dst, src):
                            is_directed = False
                    if is_directed:
                        pdag.remove(Y, X)
                        break

            # 4) for each X-Z-Y with X->W, Y->W, and Z-W, orient edges to Z->W
            for X, Y in node_pairs:
                for Z in (set(pdag.successors(X)) & set(pdag.predecessors(X)) &
                          set(pdag.successors(Y)) & set(pdag.predecessors(Y))):
                    for W in ((set(pdag.successors(X)) - set(pdag.predecessors(X))) &
                              (set(pdag.successors(Y)) - set(pdag.predecessors(Y))) &
                              (set(pdag.successors(Z)) & set(pdag.predecessors(Z)))):
                        pdag.remove(W, Z)

            progress = num_edges > pdag.number_of_edges()

        return pdag
Example #51
0
lines = cv2.HoughLines(thresh3, 1, np.pi / 180, 400)
line_equation_params = []
shape = img.shape
img_rows = shape[0]
img_cols = shape[1]

for line in lines:
    for rho, theta in line:
        a = np.cos(theta)
        b = np.sin(theta)
        line_equation_params.append([[a, b], rho])

# calculate intersection for all lines
# this can also be done with two nested for loops.
combinations = itertools.combinations(line_equation_params, 2)
intersections = []
for combination in combinations:

    a = np.array([combination[0][0], combination[1][0]])
    b = np.array([combination[0][1], combination[1][1]])
    x = None
    try:
        x = np.linalg.solve(a, b)
        x_coord = x[0]
        y_coord = x[1]

        if x_coord >= 0 and y_coord >= 0 and x_coord <= img_cols and y_coord <= img_rows:
            intersections.append((int(x_coord), int(y_coord)))
    except np.linalg.LinAlgError:
        # parallel lines have no unique intersection
        pass
Example #52
0
	def coinc_params(events, offsetvector, triangulators):
		#
		# check for coincs that have been vetoed entirely
		#

		if len(events) < 2:
			return None

		#
		# Initialize the parameter dictionary, sort the events by
		# instrument name (the multi-instrument parameters are defined for
		# the instruments in this order and the triangulators are
		# constructed this way too), and retrieve the sorted instrument
		# names
		#

		params = {}
		events = tuple(sorted(events, key = lambda event: event.ifo))
		instruments = tuple(event.ifo for event in events)

		#
		# zero-instrument parameters
		#

		ignored, ignored, ignored, rss_timing_residual = triangulators[instruments](tuple(event.peak + offsetvector[event.ifo] for event in events))
		# FIXME:  rss_timing_residual is forced to 0 to disable this
		# feature.  all the code to compute it properly is still here and
		# given suitable initializations, the distribution data is still
		# two-dimensional and has a suitable filter applied to it, but all
		# events are forced into the RSS_{\Delta t} = 0 bin, in effect
		# removing that dimension from the data.  We can look at this again
		# sometime in the future if we're curious why it didn't help.  Just
		# delete the next line and you're back in business.
		rss_timing_residual = 0.0
		params["instrumentgroup,rss_timing_residual"] = (frozenset(instruments), rss_timing_residual)

		#
		# one-instrument parameters
		#

		for event in events:
			params["%s_snr2_chi2" % evemt.ifo] = (event.snr**2.0, event.chisq / event.chisq_dof)

		#
		# two-instrument parameters.  note that events are sorted by
		# instrument
		#

		for event1, event2 in itertools.combinations(events, 2):
			assert event1.ifo != event2.ifo

			prefix = "%s_%s_" % (event1.ifo, event2.ifo)

			dt = float((event1.peak + offsetvector[event1.ifo]) - (event2.peak + offsetvector[event2.ifo]))
			params["%sdt" % prefix] = (dt,)

			dA = math.log10(abs(event1.amplitude / event2.amplitude))
			params["%sdA" % prefix] = (dA,)

			# f_cut = central_freq + bandwidth/2
			f_cut1 = event1.central_freq + event1.bandwidth / 2
			f_cut2 = event2.central_freq + event2.bandwidth / 2
			df = float((math.log10(f_cut1) - math.log10(f_cut2)) / (math.log10(f_cut1) + math.log10(f_cut2)))
			params["%sdf" % prefix] = (df,)

		#
		# done
		#

		return params
Example #53
0
    def __init__(self, range_=''):
        self._hands = set()
        self._combos = set()

        #print(type(range_), range_)
        for name, value in _RegexRangeLexer(range_):
            #print(name, value)
            if name == 'ALL':
                for card in itertools.combinations('AKQJT98765432', 2):
                    self._add_offsuit(card)
                    self._add_suited(card)
                for rank in 'AKQJT98765432':
                    self._add_pair(rank)

                # full range, no need to parse any more names
                break

            elif name == 'PAIR':
                self._add_pair(value)

            elif name == 'PAIR_PLUS':
                smallest = Rank(value)
                for rank in (rank.val for rank in Rank if rank >= smallest):
                    self._add_pair(rank)

            elif name == 'PAIR_MINUS':
                biggest = Rank(value)
                for rank in (rank.val for rank in Rank if rank <= biggest):
                    self._add_pair(rank)

            elif name == 'PAIR_DASH':
                first, second = Rank(value[0]), Rank(value[1])
                ranks = (rank.val for rank in Rank if first <= rank <= second)
                for rank in ranks:
                    self._add_pair(rank)

            elif name == 'BOTH':
                self._add_offsuit(value[0] + value[1])
                self._add_suited(value[0] + value[1])

            elif name == 'X_BOTH':
                for rank in (r.val for r in Rank if r < Rank(value)):
                    self._add_suited(value + rank)
                    self._add_offsuit(value + rank)

            elif name == 'OFFSUIT':
                self._add_offsuit(value[0] + value[1])

            elif name == 'SUITED':
                self._add_suited(value[0] + value[1])

            elif name == 'X_OFFSUIT':
                biggest = Rank(value)
                for rank in (rank.val for rank in Rank if rank < biggest):
                    self._add_offsuit(value + rank)

            elif name == 'X_SUITED':
                biggest = Rank(value)
                for rank in (rank.val for rank in Rank if rank < biggest):
                    self._add_suited(value + rank)

            elif name == 'BOTH_PLUS':
                smaller, bigger = Rank(value[0]), Rank(value[1])
                for rank in (rank.val for rank in Rank if smaller <= rank < bigger):
                    self._add_suited(value[1] + rank)
                    self._add_offsuit(value[1] + rank)

            elif name == 'BOTH_MINUS':
                smaller, bigger = Rank(value[0]), Rank(value[1])
                for rank in (rank.val for rank in Rank if rank <= smaller):
                    self._add_suited(value[1] + rank)
                    self._add_offsuit(value[1] + rank)

            elif name in ('X_PLUS', 'X_SUITED_PLUS', 'X_OFFSUIT_PLUS'):
                smallest = Rank(value)
                first_ranks = (rank for rank in Rank if rank >= smallest)

                for rank1 in first_ranks:
                    second_ranks = (rank for rank in Rank if rank < rank1)
                    for rank2 in second_ranks:
                        if name != 'X_OFFSUIT_PLUS':
                            self._add_suited(rank1.val + rank2.val)
                        if name != 'X_SUITED_PLUS':
                            self._add_offsuit(rank1.val + rank2.val)

            elif name in ('X_MINUS', 'X_SUITED_MINUS', 'X_OFFSUIT_MINUS'):
                biggest = Rank(value)
                first_ranks = (rank for rank in Rank if rank <= biggest)

                for rank1 in first_ranks:
                    second_ranks = (rank for rank in Rank if rank < rank1)
                    for rank2 in second_ranks:
                        if name != 'X_OFFSUIT_MINUS':
                            self._add_suited(rank1.val + rank2.val)
                        if name != 'X_SUITED_MINUS':
                            self._add_offsuit(rank1.val + rank2.val)

            elif name == 'COMBO':
                self._combos.add(Combo(value))

            elif name == 'OFFSUIT_PLUS':
                smaller, bigger = Rank(value[0]), Rank(value[1])
                for rank in (rank.val for rank in Rank if smaller <= rank < bigger):
                    self._add_offsuit(value[1] + rank)

            elif name == 'OFFSUIT_MINUS':
                smaller, bigger = Rank(value[0]), Rank(value[1])
                for rank in (rank.val for rank in Rank if rank <= smaller):
                    self._add_offsuit(value[1] + rank)

            elif name == 'SUITED_PLUS':
                smaller, bigger = Rank(value[0]), Rank(value[1])
                for rank in (rank.val for rank in Rank if smaller <= rank < bigger):
                    self._add_suited(value[1] + rank)

            elif name == 'SUITED_MINUS':
                smaller, bigger = Rank(value[0]), Rank(value[1])
                for rank in (rank.val for rank in Rank if rank <= smaller):
                    self._add_suited(value[1] + rank)

            elif name == 'BOTH_DASH':
                smaller, bigger = Rank(value[1]), Rank(value[2])
                for rank in (rank.val for rank in Rank if smaller <= rank <= bigger):
                    self._add_offsuit(value[0] + rank)
                    self._add_suited(value[0] + rank)

            elif name == 'OFFSUIT_DASH':
                smaller, bigger = Rank(value[1]), Rank(value[2])
                for rank in (rank.val for rank in Rank if smaller <= rank <= bigger):
                    self._add_offsuit(value[0] + rank)

            elif name == 'SUITED_DASH':
                smaller, bigger = Rank(value[1]), Rank(value[2])
                for rank in (rank.val for rank in Rank if smaller <= rank <= bigger):
                    self._add_suited(value[0] + rank)
Example #54
0
def all_intervals(notes):
    intervals_list = [intervals(n) for n in combinations(sorted(notes), 2)]
    return sorted(chain.from_iterable(intervals_list))
Example #55
0
def nearest_R_pair(vectors):
    l = []
    for i ,k  in combinations(range(len(vectors)),2):
        l.append(([i,k], vectors[i].DeltaR(vectors[k]) ))  
    l = sorted(l, key=itemgetter(1))
    return l[0][0]
def generate_rules_apriori(
    itemsets: typing.Dict[int, typing.Dict[tuple, int]],
    min_confidence: float,
    num_transactions: int,
    verbosity: int = 0,
):
    """
    Bottom up algorithm for generating association rules from itemsets, very
    similar to the fast algorithm proposed in the original 1994 paper by 
    Agrawal et al.
    
    The algorithm is based on the observation that for {a, b} -> {c, d} to
    hold, both {a, b, c} -> {d} and {a, b, d} -> {c} must hold, since in
    general conf( {a, b, c} -> {d} ) >= conf( {a, b} -> {c, d} ).
    In other words, if either of the two one-consequent rules do not hold, then
    there is no need to ever consider the two-consequent rule.
    
    Parameters
    ----------
    itemsets : dict of dicts
        The first level of the dictionary is of the form (length, dict of item
        sets). The second level is of the form (itemset, count_in_dataset)).
    min_confidence :  float
        The minimum confidence required for the rule to be yielded.
    num_transactions : int
        The number of transactions in the data set.
    verbosity : int
        The level of detail to print while the algorithm runs. Either 0, 1 or 2.
        
    Examples
    --------
    >>> itemsets = {1: {('a',): 3, ('b',): 2, ('c',): 1}, 
    ...             2: {('a', 'b'): 2, ('a', 'c'): 1}}
    >>> list(generate_rules_apriori(itemsets, 1.0, 3))
    [{b} -> {a}, {c} -> {a}]
    """
    # Validate user inputs
    if not ((0 <= min_confidence <= 1)
            and isinstance(min_confidence, numbers.Number)):
        raise ValueError("`min_confidence` must be a number between 0 and 1.")

    if not ((num_transactions >= 0)
            and isinstance(num_transactions, numbers.Number)):
        raise ValueError("`num_transactions` must be a number greater than 0.")

    def count(itemset):
        """
        Helper function to retrieve the count of the itemset in the dataset.
        """
        return itemsets[len(itemset)][itemset]

    if verbosity > 0:
        print("Generating rules from itemsets.")

    # For every itemset of a prescribed size
    for size in itemsets.keys():

        # Do not consider itemsets of size 1
        if size < 2:
            continue

        if verbosity > 0:
            print(" Generating rules of size {}.".format(size))

        # For every itemset of this size
        for itemset in itemsets[size].keys():

            # Special case to capture rules such as {others} -> {1 item}
            for removed in itertools.combinations(itemset, 1):

                # Compute the left hand side
                lhs = set(itemset).difference(set(removed))
                lhs = tuple(sorted(list(lhs)))

                # If the confidence is high enough, yield the rule
                conf = count(itemset) / count(lhs)
                if conf >= min_confidence:
                    yield Rule(
                        lhs,
                        removed,
                        count(itemset),
                        count(lhs),
                        count(removed),
                        num_transactions,
                    )

            # Generate combinations to start off of. These 1-combinations will
            # be merged to 2-combinations in the function `_ap_genrules`
            H_1 = list(itertools.combinations(itemset, 1))
            yield from _ap_genrules(itemset, H_1, itemsets, min_confidence,
                                    num_transactions)

    if verbosity > 0:
        print("Rule generation terminated.\n")
Example #57
0
def max_deltaeta_pair(vectors):
    l = []
    for i ,k  in combinations(range(len(vectors)),2):
        l.append( ([i,k], abs(vectors[i].Eta() - vectors[k].Eta())))
    l = sorted(l, key=itemgetter(1), reverse=True)
    return l[0][0]
Example #58
0
#!/usr/bin/env python
import argparse
psr = argparse.ArgumentParser("generate id pairs")
psr.add_argument("-o", dest='opt', help="output")
psr.add_argument('ipt', help="input")
psr.add_argument('--field',
                 default='org',
                 help="the field to count common entries in")

args = psr.parse_args()

import pandas as pd, itertools as it, h5py, numpy as np
au = pd.read_csv(args.ipt)

# the central function is sum((Counter(al[1]) & Counter(bl[1])).values())

# it counts the common org of a and b including duplications.  For
# example, if a has 3 "Tsinghua" and b has 2, the common org is
# counted as 2.

# this is expanded to be used with keywords as well

dl = ((al[0], bl[0])
      for (al, bl) in it.combinations(au.groupby('id')[args.field], 2))
x = np.array(list(dl), dtype=[('id1', 'S24'), ('id2', 'S24')])

with h5py.File(args.opt, 'w') as opt:
    opt.create_dataset('id_pairs', data=x, compression="gzip", shuffle=True)
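
# Standalone illustration (with made-up org lists, not from the original
# script) of the Counter-based common-entry count described in the comments
# above: shared entries are counted with multiplicity, so 3 vs. 2 "Tsinghua"
# gives a common count of 2.
from collections import Counter

a_orgs = ["Tsinghua", "Tsinghua", "Tsinghua", "MIT"]
b_orgs = ["Tsinghua", "Tsinghua", "Stanford"]

common = sum((Counter(a_orgs) & Counter(b_orgs)).values())
print(common)  # 2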
Example #59
0
import itertools
a = [1,2,3,4,5]
b = 'manan'
iter_a = []
iter_b = []
iter_a2 = []
iter_b2 = []
for i in itertools.combinations(a,2):
    iter_a.append(i)
print(iter_a)
for i in itertools.combinations(b,2):
    iter_b.append(''.join(i))
print(iter_b)
for i in itertools.permutations(a):
    iter_a2.append(i)
print(iter_a2)
for j in range(len(b)+1):    
    for i in itertools.combinations(b,j):
        if ''.join(i)!='':
            iter_b2.append(''.join(i))
print(iter_b2)
Example #60
0
def max_mjj_pair(vectors):
    l = []
    for i ,k  in combinations(range(len(vectors)),2):
        l.append( ([i,k], (vectors[i]+ vectors[k]).M() ))
    l = sorted(l, key=itemgetter(1), reverse=True)
    return l[0][0]