Example no. 1
0
# Assumed imports for this snippet; getfeat, normalize, percentilize and
# hourly_aggregation are project-specific helpers defined elsewhere.
import numpy as np
from datetime import datetime, timedelta
from itertools import combinations as combo


def get_composed_feat_1h():
	Q=getfeat()
	X=Q[0][:,:-1].T
	names=Q[2][:-1]
	couples=[list(y) for y in combo(list(range(len(X))), 2)]
	triples=[list(y) for y in combo(list(range(len(X))), 3)]
	Xn=normalize(X)
	for c in couples:
		X=np.vstack((X,np.sum(Xn[c],0),np.diff(Xn[c].T)[:,0]))
		names.append(str(c)+'+')
		names.append(str(c)+'-')
	for t in triples:
		X=np.vstack((X,np.sum(Xn[t],0)))
		names.append(str(t))
	n=len(X)
	for i in range(n):
	    f_p=percentilize(X[i])
	    f_p_100=np.round(f_p*100)
	    f_p_20=np.round(f_p*20)
	    f_p_10=np.round(f_p*10)
	    f_p_5=np.round(f_p*5)
	    X=np.vstack([X,f_p,f_p_100,f_p_20,f_p_10,f_p_5])
	    names+=[names[i]+'_q',names[i]+'_q100',names[i]+'_q20',names[i]+'_q10',names[i]+'_q5']
	X=np.vstack((X,Q[0][:,-1]))
	names.append('weather')
	start=datetime(2015,3,1,0)
	end=datetime(2015,5,1,0)
	timestep=3600
	n_timeslots=int((end-start).total_seconds()/timestep+1)
	H=np.array([(start+i*timedelta(0,timestep)).hour for i in range(n_timeslots)])
	names.append('hour')
	X=np.vstack((X,H))
	return [X,Q[1],names]
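percentilize is one of the project helpers not shown in this snippet; a minimal stand-in consistent with how it is used here (a rank-based map to [0, 1] that is then scaled by 100/20/10/5 and rounded) might look like this:

def percentilize(f):
    # Hypothetical stand-in, not the original implementation: map each value
    # to its rank, rescaled into [0, 1].
    ranks = np.argsort(np.argsort(f))
    return ranks / float(len(f) - 1)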
def featuresExtractor(h):
	start=datetime(2015,3,1,0)
	Q=getfeat()
	X=Q[0][:,:-1].T
	X=hourly_aggregation(X,h)
	names=Q[1][:-1]
	couples=[list(y) for y in combo(list(range(len(X))), 2)]
	triples=[list(y) for y in combo(list(range(len(X))), 3)]
	Xn=normalize(X)
	for c in couples:
		X=np.vstack((X,np.sum(Xn[c],0),np.diff(Xn[c].T)[:,0]))
		names.append(str(c)+'+')
		names.append(str(c)+'-')
	for t in triples:
		X=np.vstack((X,np.sum(Xn[t],0)))
		names.append(str(t))
	n=len(X)
	for i in range(n):
	    f=X[i]
	    f_p=percentilize(f)
	    f_p_100=np.round(f_p*100)
	    f_p_20=np.round(f_p*20)
	    f_p_10=np.round(f_p*10)
	    f_p_5=np.round(f_p*5)
	    X=np.vstack([X,f_p,f_p_100,f_p_20,f_p_10,f_p_5])
	    names+=[names[i]+'_q',names[i]+'_q100',names[i]+'_q20',names[i]+'_q10',names[i]+'_q5']

	weather=Q[0][:,-1]
	weather_h=np.array([weather[i] for i in range(len(weather)) if i%h==0])
	names.append('weather')
	X=np.vstack((X,weather_h[:-1]))
	
	date=datetime(2015,3,1,0)
	timestep=3600
	H=np.array([(start+i*timedelta(0,h*timestep)).hour for i in range(len(X[0]))])
	names.append('hour')
	X=np.vstack((X,H))
	
	outfile=open('../features/features_matrix','w')
	outfile.write('datetime,')
	for n in names:
		outfile.write(str(n)+',')
	outfile.write('\n')
	X=X.T
	for x in X:
		outfile.write(str(date)+',')
		for f in x:
			outfile.write(str(f)+',')
		outfile.write('\n')
		date+=timedelta(0,h*timestep)
	return
	#return [X,names]
Example no. 3
0
import numpy as np
import pandas as pd
from itertools import combinations as combo


def rss(simdf):
    """
    Produces an RSS matrix from a similarity matrix.
    :param simdf (pandas dataframe): A symmetric similarity matrix.
    :return (pandas dataframe): A symmetric RSS matrix.
    """
    # Fill diagonal with NaN so that self-similarity is not included in the
    # distribution.
    np.fill_diagonal(simdf.values, np.nan)

    # Gather mean and standard deviations of each objects similarities.
    mus = simdf.mean()
    stds = simdf.std()

    # Initialize matrix and fill the diagonal with NaN.
    rssmat = np.zeros((simdf.shape[0], simdf.shape[1]), float)
    np.fill_diagonal(rssmat, np.nan)

    # Loop over combos of object i, j in similarity matrix
    # and calculate individual RSS values. Assume symmetry.
    for i, j in combo(range(len(simdf.columns)), 2):
        sim_value = simdf.iloc[i, j]
        # Averages the zscore from perspective of i and j.
        rss_value = .5*(( (sim_value - mus.iloc[i]) / stds.iloc[i])\
                    + ( (sim_value - mus.iloc[j]) / stds.iloc[j]))
        # Enforces symmetry.
        rssmat[i, j] = rss_value
        rssmat[j, i] = rss_value

    rssdf = pd.DataFrame(rssmat, columns=simdf.columns, index=simdf.index)
    return rssdf
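A minimal usage sketch (not from the original source); the 3x3 similarity matrix below is made up for illustration:

labels = ["a", "b", "c"]
sim = pd.DataFrame([[1.0, 0.8, 0.2],
                    [0.8, 1.0, 0.4],
                    [0.2, 0.4, 1.0]], index=labels, columns=labels)
print(rss(sim).round(3))  # symmetric matrix of averaged z-scores, NaN on the diagonal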
Example no. 4
0
def get_composed_feat_h(h):
	Q=getfeat()
	X=Q[0][:,:-1].T
	X=aggrega_su_ore(X,h)
	names=Q[2][:-1]
	couples=[list(y) for y in combo(list(range(len(X))), 2)]
	triples=[list(y) for y in combo(list(range(len(X))), 3)]
	Xn=normalize(X)
	for c in couples:
		X=np.vstack((X,np.sum(Xn[c],0),np.diff(Xn[c].T)[:,0]))
		names.append(str(c)+'+')
		names.append(str(c)+'-')
	for t in triples:
		X=np.vstack((X,np.sum(Xn[t],0)))
		names.append(str(t))
	n=len(X)
	for i in range(n):
	    f=X[i]
	    f_p=percentilize(f)
	    f_p_100=np.round(f_p*100)
	    f_p_20=np.round(f_p*20)
	    f_p_10=np.round(f_p*10)
	    f_p_5=np.round(f_p*5)
	    X=np.vstack([X,f_p,f_p_100,f_p_20,f_p_10,f_p_5])
	    names+=[names[i]+'_q',names[i]+'_q100',names[i]+'_q20',names[i]+'_q10',names[i]+'_q5']

	weather=Q[0][:,-1]
	weather_h=np.array([weather[i] for i in range(len(weather)) if i%h==0])
	print len(weather_h[:-1])
	print len(X[-1])
	X=np.vstack((X,weather_h[:-1]))
	names.append('weather')
	
	start=datetime(2015,3,1,0)
	timestep=3600
	H=np.array([(start+i*timedelta(0,h*timestep)).hour for i in range(len(X[0]))])
	names.append('hour')
	X=np.vstack((X,H))
	
	Y=np.zeros(len(Q[1])/h)
	k=0
	j=0
	while j<len(Q[1])-h:
			Y[k]=sum(Q[1][j:j+h])
			k+=1
			j+=h
	return [X,Y,names]
Example no. 5
0
def all_corr(
    results, variables
):  #R. Henkin, “VA_brexit_practical_w7,” INM433 Visual Analytics (PRD1 A 2019/20), 2019.
    """
    Computes  local correlation coefficients (n, (((p+1)**2) + (p+1) / 2) within a geographically
    weighted design matrix
    Returns one array with the order and dimensions listed above where n
    is the number of locations used as calibrations points and p is the
    number of explanatory variables; +1 accounts for the dependent variable.
    Local correlation coefficient is not calculated for constant term.
    """
    #print(self.model)
    x = results.X
    y = results.y
    x = np.column_stack((x, y))
    w = results.W
    nvar = x.shape[1]
    nrow = len(w)
    if results.model.constant:
        ncor = (((nvar - 1)**2 + (nvar - 1)) / 2) - (nvar - 1)
        jk = list(combo(range(1, nvar), 2))
    else:
        ncor = (((nvar)**2 + (nvar)) / 2) - nvar
        jk = list(combo(range(nvar), 2))
    corr_mat = np.ndarray((nrow, int(ncor)), dtype=dict)

    for i in range(nrow):
        wi = w[i]
        sw = np.sum(wi)
        wi = wi / sw
        tag = 0

        for j, k in jk:
            val = corr(np.cov(x[:, j], x[:, k], aweights=wi))[0][1]
            corr_mat[i, tag] = {
                "var": variables[j - 1] + "_" + variables[k - 1],
                "var_1": variables[j - 1],
                "var_2": variables[k - 1],
                "value": val
            }
            tag = tag + 1

    return corr_mat
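A small sanity check (my addition, not part of the source): with a constant term, ncor is just the number of unordered column pairs excluding the constant, i.e. C(nvar-1, 2), which is also the length of jk.

from itertools import combinations as combo
from math import comb

nvar = 5  # hypothetical column count: constant + explanatory variables + y
ncor = (((nvar - 1)**2 + (nvar - 1)) / 2) - (nvar - 1)
jk = list(combo(range(1, nvar), 2))
assert int(ncor) == comb(nvar - 1, 2) == len(jk)  # 6 pairs for nvar = 5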
Example no. 6
0
def intsect(breps, thickness):
    combs = combo(breps, 2)
    for pair in combs:
        b1, b2 = pair
        res, curves, _ = Intersect.Intersection.BrepBrep(b1, b2, tol)
        if res:
            dir1 = b1.Surfaces[0].TryGetPlane()[1].Normal
            dir2 = b2.Surfaces[0].TryGetPlane()[1].Normal
            if dir1.IsParallelTo(dir2, 0.001) : continue
            width = _caculate_thick(dir1, dir2, thickness)
            cutlines[str(b1)].extend( make_notch(curves, width, b1, 1) )
            cutlines[str(b2)].extend( make_notch(curves, width, b2, -1) )
Example no. 7
0
    def _simulate(self):
        size = len(self._raw)

        def shorten(value):
            """
            Converts a boolean into either "T" or "F"
            """
            if value:
                return "T"
            return "F"

        self.results = []
        nodes = [
            value for value in self.items.keys()
            if self.items[value] == self.VAR
        ]

        vals = {}

        header = ""
        nodes.sort()
        for node in nodes:
            vals[node] = False
            header += "{} | ".format(node)

        header += self._raw

        for i in xrange(len(nodes) + 1):
            for comb in combo(nodes, i):
                for node in vals.keys():
                    if node in comb:
                        vals[node] = True
                    else:
                        vals[node] = False

                val = self._eval(vals)
                line = ""
                for node in nodes:
                    line += "{} | ".format(shorten(vals[node]))
                line += " " * (size / 2)
                line += shorten(val)
                line += " " * (size / 2)
                self.results.append(line)

        self.results.sort()
        self.results.reverse()

        self.results.insert(0, "-" * len(header))
        self.results.insert(0, header)

        #print nodes
        pass
Example no. 8
0
def e127(top):
    ret = 0
    l = 0
    for a,b in combo(range(1,top),2):
        if not a&1 and not b&1:continue
        c = a+b
        if not a&1 and not c&1:continue
        if not c&1 and not b&1:continue
        if a!=l:
            print(a,b,end='\r')
            l=a
        if a>b:continue
        if gcd(a,b)!=1 or gcd(b,c)!=1 or gcd(c,a)!=1:
            continue
        if rad(a,b,c)<c:
            ret+=c
    print()
    return ret
Example no. 9
0
from itertools import combinations as combo


def countGoodTriplets(arr, a, b, c):
    """
    Given an array of integers arr and three integers a, b and c,
    find the number of good triplets.
    A triplet (arr[i], arr[j], arr[k]) is good if the following conditions are true:
        0 <= i < j < k < arr.length
        |arr[i] - arr[j]| <= a
        |arr[j] - arr[k]| <= b
        |arr[i] - arr[k]| <= c
    Where |x| denotes the absolute value of x.
    Return the number of good triplets.
    """
    counter = 0
    for triplet in combo(arr, 3):
        if abs(triplet[0] - triplet[1]) <= a and abs(triplet[1] - triplet[2]) \
            <= b and abs(triplet[0] - triplet[2]) <= c:
            counter += 1
    return counter
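A quick usage check (my addition), using the example from the problem statement:

print(countGoodTriplets([3, 0, 1, 1, 9, 7], 7, 2, 3))  # -> 4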
Example no. 10
0
def e127(top):
    ret = 0
    l = 0
    for a, b in combo(range(1, top), 2):
        if not a & 1 and not b & 1: continue
        c = a + b
        if not a & 1 and not c & 1: continue
        if not c & 1 and not b & 1: continue
        if a != l:
            print(a, b, end='\r')
            l = a
        if a > b: continue
        if gcd(a, b) != 1 or gcd(b, c) != 1 or gcd(c, a) != 1:
            continue
        if rad(a, b, c) < c:
            ret += c
    print()
    return ret
Example no. 11
0
def journeyToMoon(n, astronaut):                   # Uses set-arithmetic & counting

    C = []                                          # partition (as array) of sets of astronauts by country

    for a,b in astronaut:
        p, m = {a,b}, len(C)                        # p is doubleton set, m is #countries

        i = next( (k for k in range(m) if p & C[k]), -1 )
        if i == -1:
            C.append( p )
            continue                                # form a new country

        j = next( (k for k in range(i+1,m) if p & C[k]), -1 )
        if j == -1:
            C[i] |= p                               # chain annexation of pair p
        else:
            C[i] |= C[j]
            del C[j]                                # merge countries along p

    nC = list(map( len, C ))
    s = n - sum( nC )                               # find subtotals

    return s*(s-1)//2 + s*(n-s) + sum( x*y for x,y in combo(nC,2))
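A small usage check (my addition), assuming combinations is imported as combo as in the other snippets: with 5 people and pairs (0, 1), (2, 3), (0, 4) the countries are {0, 1, 4} and {2, 3}, so there are 3 * 2 = 6 valid pairs.

print(journeyToMoon(5, [(0, 1), (2, 3), (0, 4)]))  # -> 6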
Example no. 12
0
from itertools import combinations as combo
from itertools import product as prod

weapons = [(8, 4, 0), (10, 5, 0), (25, 6, 0), (40, 7, 0), (74, 8, 0)]

armor = [(13, 0, 1), (31, 0, 2), (53, 0, 3), (75, 0, 4), (102, 0, 5)]

rings = [(25, 1, 0), (50, 2, 0), (100, 3, 0), (20, 0, 1), (40, 0, 2),
         (80, 0, 3)]

lowest_gold = 9999999999999999999999
max_gold = 0
part_1 = False

weapons_perms = list(combo(weapons, 1))
armor_perms = list(combo(armor, 1)) + list(combo(armor, 0))
rings_perms = list(combo(rings, 2)) + list(combo(rings, 1)) + list(
    combo(rings, 0))
all_perms = [weapons_perms, armor_perms, rings_perms]
tool_perms = list(prod(*all_perms))

for tool_groups in tool_perms:
    cost, play_dmg, play_armor = [
        sum(zipped)
        for zipped in zip(*[tool for group in tool_groups for tool in group])
    ]
    play_health = 100

    boss_health = 100
    boss_dmg = 8
    boss_armor = 2
Example no. 13
0
    import sys
    import pdb
    import itertools
    from itertools import combinations as combo

    f = open(sys.argv[1], 'r')

    test_cases = int(f.readline())
    for i in range(test_cases):

        N = f.readline()
        candies = map(int, f.readline().split())

        winner = [-1]
        candy_tries = (set(y) for x in range(1, len(candies))
                       for y in combo(range(len(candies)), x))
        for cset1 in candy_tries:
            cset2 = set(range(len(candies))) - cset1  # set difference

            real1 = trans(candies, cset1)  # go from index to the candy value
            real2 = trans(candies, cset2)

            big1, big2 = bigbro(real1), bigbro(real2)
            lil1, lil2 = lilbro(real2), lilbro(real1)

            if lil1 == lil2:
                winner[0] = max(big1, big2, winner[0])

        if winner[0] == -1:
            print 'Case #%d: NO' % ((i + 1))
        else:
toup1 = ('a', 'b', 'c')
toup2 = ('1', '2', '3')
comb_list = pro(toup1, toup2)
amount = 0
comb_string = ''
for n in comb_list:
    comb_string += str(n)
    amount += 1
print('If we combine {} and {}, we get {} combinations'.format(
    toup1, toup2, amount))
print('It looks like this: ')
print('{}'.format(comb_string))

# combine abcd in pairs

comb_list2 = list(combo('abcd', 2))
comb_string2 = ''
amount2 = 0
for n in comb_list2:
    comb_string2 += str(n)
    amount2 += 1

print('If we combine a,b,c,d in pairs, we get {} combinations'.format(amount2))
print('It looks like this: ')
print('{}'.format(comb_string2))

# heres a whole bunch of initializations

full_load = json.load(open('profiles.json'))
count_total = 0
count_active = 0
Example no. 15
0
#!/bin/python3
from itertools import combinations as combo
from math import factorial as fac
import sys

n = int(input().strip())
number = input().strip()
number = list(str(number))
count = number.count('8')

try:
    for i in combo(number, 2):
        if int("".join(i)) % 8 == 0:
            count += 1

    for i in combo(number, 3):
        if int("".join(i)) % 8 == 0:
            x = number.index(i[0])
            if x:
                count += fac(x) + 1
            else:
                count += 1
except:
    pass
m = 10**9 + 7
print(count % m)
Example no. 16
0
def multiplier(adpts, low, high):
    lst = []
    for i in range(1, 4):
        lst += [ele for ele in combo(adpts, i) if high - max(ele) <= 3]
    return len(lst) + sum((1 for ele in lst if min(ele) - low <= 3))
Example no. 17
0
def score(dice, section, player):
    if section == upper_rows:
        score_choice = {}
        for i in range(len(upper_rows)):
            score_choice[section[i]] = sum([x for x in dice if x == (i + 1)])
        print()
        print("Possible scores:-", "\n")
        print(score_choice, "\n")
        time.sleep(t)
        print("Make a selection, type 1-6. E.g. for 'Four's', enter: 4")
        print("(Note you can only choose a free option)")
        print()
        while True:
            try:
                choice = int(input("Choose: ")) - 1  # -1 to match PC counting
                if choice not in range(6):
                    print("Try again, type between 1-6: ")
                elif pd.notna(scoresheet.loc[section[choice], player.n]):
                    print("Score already filled-in, please choose again.\n")
                else:
                    return section[choice], score_choice[section[choice]]
            except (ValueError, IndexError):
                print("Try again, type between 1-6: ")
                continue
    elif section == lower_rows:
        score_choice = {}

        # we create a dictionary with the dice and their roll counts
        dice_count = {x: dice.count(x) for x in set(dice)}

        # a list of the dice that appear at least twice, one entry per available pair
        # (count floor-divided by 2); tuples of digit strings are used for the calc,
        # so we must convert to str and will convert back afterwards
        all_pairs = [tuple(str(x)) * (dice_count[x] // 2) for x in dice_count]

        # e.g. [1,1,1,1,2,3] -> [('1','1'), (), ()]
        # so we use list(set()) for a unique list of tuples
        # sorting saves code in the later cases
        all_pairs = sorted(list(set(all_pairs)))

        # then loop through the tuples and concatenate the values as int
        all_pairs = [int(x) for tup in all_pairs for x in tup]

        score_choice["1 pair"] = {
            # set() gives a unique list of dice for user readability
            "Dice": [x for x in set(all_pairs)],
            "Score":
            [0] if all_pairs == [] else [2 * x for x in set(all_pairs)]
        }

        score_choice["2 pairs"] = {
            "Dice":
            list(set(combo(all_pairs, 2))),
            "Score": [0] if len(all_pairs) < 2 else
            [2 * sum(x) for x in set(combo(all_pairs, 2))]
        }

        score_choice["3 pairs"] = {
            "Dice": list(combo(all_pairs, 3)),
            # 0/1 item so no set required, output is either [] or [x]
            # therefore below we can use double sum to give [0] or [sum()]
            "Score": [2 * sum([sum(x) for x in combo(all_pairs, 3)])]
        }

        # same process as above for 'all_pairs'
        all_triples = [tuple(str(x)) * (dice_count[x] // 3) for x in dice]
        all_triples = sorted(list(set(all_triples)))
        all_triples = [int(x) for tup in all_triples for x in tup]

        score_choice["3 of a kind"] = {
            "Dice":
            list(set(all_triples)),
            "Score":
            [0] if all_triples == [] else [3 * x for x in set(all_triples)]
        }

        all_quads = [x for x in dice_count if dice_count[x] > 3]

        score_choice["4 of a kind"] = {
            "Dice": all_quads,
            "Score": [4 * sum(all_quads)]
        }

        five_kind = [x for x in dice_count if dice_count[x] >= 5]

        score_choice["5 of a kind"] = {
            "Dice": five_kind,
            "Score": [5 * sum(five_kind)]
        }

        score_choice["Small straight"] = {
            "Dice": ([1, 2, 3, 4, 5] if all(x in dice
                                            for x in [1, 2, 3, 4, 5]) else []),
            "Score": ([15] if all(x in dice for x in [1, 2, 3, 4, 5]) else [0])
        }

        score_choice["Big straight"] = {
            "Dice": ([2, 3, 4, 5, 6] if all(x in dice
                                            for x in [2, 3, 4, 5, 6]) else []),
            "Score": ([20] if all(x in dice for x in [2, 3, 4, 5, 6]) else [0])
        }

        score_choice["Full straight"] = {
            "Dice": ([1, 2, 3, 4, 5, 6] if len(dice_count) == 6 else []),
            "Score": ([25] if len(dice_count) == 6 else [0])
        }

        if all_triples == [] or len(all_pairs) < 2:
            full_house = []
        else:
            full_house = list(
                combo(([x for x in all_triples] +
                       [y for y in all_pairs if y not in all_triples]), 2))

        score_choice["Full House (3+2)"] = {
            "Dice": full_house,
            "Score": [sum([3 * x[0] + 2 * x[1] for x in full_house])]
        }

        score_choice["Villa (3+3)"] = {
            "Dice": list(combo(all_triples, 2)),
            # list is [] or 1 item only
            # so below we can use double sum to give [0] or [sum()]
            "Score": [3 * sum([sum(x) for x in combo(all_triples, 2)])]
        }

        tower = list(
            combo(
                all_quads + len(all_quads) * [
                    x for x in set(all_pairs) if (x not in all_quads) or
                    (dice_count[x] == 6)
                ], 2))
        # latter case in 'or' allows case of Yatzy
        # len(all_quads) = 0 if no quads so ensure tower = []

        score_choice["Tower (4+2)"] = {
            "Dice": tower,
            "Score": [sum([4 * x[0] + 2 * x[1] for x in tower])]
        }

        score_choice["Chance"] = {"Dice": sorted(dice), "Score": [sum(dice)]}

        score_choice["MAXI YATZY"] = {
            "Dice": dice if len(dice_count) == 1 else [],
            "Score": [100] if len(dice_count) == 1 else [0]
        }

        print()
        print()
        print("Possible scores   = [Dice used]                    [Score]:-",
              "\n")

        for key in score_choice.keys():
            # adjustments ensure table is aligned visually for the user
            print(key.ljust(18), end="= ")
            print(str(score_choice[key]["Dice"]).ljust(30), end=" ")
            print(score_choice[key]["Score"])
        time.sleep(t)

        print()
        print()
        print("Make a selection, use this code:")
        print(
            "(...only the highest possible score per chosen row will be taken, of course!)"
        )
        print()
        for i in range(len(lower_codes)):
            print((lower_codes[i] + ": " + lower_rows[i]).ljust(20), end="\t")
            if ((i + 1) % 3 == 0) and (i != 0):
                print()
        print()
        print()
        print("(Note you can only choose a free option)")
        print()

        while True:
            code = input("Choose from code list (e.g. '4k'): ").lower()
            try:
                choice = lower_rows[lower_codes.index(code)]
                if pd.notna(scoresheet.loc[choice, player.n]):
                    print("Score already fixed, please choose again.")
                    print()
                else:
                    return choice, score_choice[choice]["Score"]
            except (ValueError, KeyError):
                print("Try again, type a code from the list above.")
                print()
    else:
        print("Unexpected 'section' error...")
Example no. 18
0
    def probability_ref(self):
        # Writes to file probability tables later used in binomial assessments
        viral_peptidome = open(self.par['file_annotation'], 'r')
        peptide_lib = []
        next(viral_peptidome)
        for line in viral_peptidome:
            items = line.split('\t')
            peptide_lib.append(str(items[0]))
        viral_peptidome.close()
        peptide_lib = peptide_lib[:-1]
        #peptide_lib.sort(key=int)

        binary_b = flex_array.sparse_aln_df(self.par['file_aln'])
        binary_b = binary_b.reindex(peptide_lib).fillna(0)
        binary_b = flex_array.array(binary_b).filter_aln(
            ref_seq=self.par['dir_ref_seq'])
        binary_b = pd.DataFrame(index=binary_b.index,
                                columns=binary_b.columns,
                                data=binary_b.values,
                                dtype=bool)

        virus_aln = {
            i: binary_b.loc[binary_b[i], i]
            for i in binary_b.columns
        }  #list of alignments to feed into filter
        dep_pep = self.dependent_peptides()
        virus_tot_filter = [
            flex_array.gen_ind_hits(virus_aln[i], dep_pep) for i in virus_aln
        ]
        #virus_aln = dict(zip(list(binary_b.columns), virus_aln))

        virus_sums = pd.Series(index=binary_b.columns,
                               data=[
                                   len(i) for i in virus_tot_filter
                               ])  #binary_b.apply(np.count_nonzero, axis=0)
        first_round_prob = pd.Series(index=binary_b.columns)
        viruses = list(binary_b.columns)
        for i in first_round_prob.index:
            print("Virus " + str(viruses.index(i)))
            first_round_prob[i] = virus_sums[i] / (
                len(peptide_lib) - (len(virus_aln[i]) - virus_sums[i]))
        first_round_prob.to_csv(self.par['dir_ref_seq'] +
                                "total_probabilities_20180524.csv",
                                header=False,
                                index=True)
        print("First probability file generated.")
        '''
		virus_shared = pd.DataFrame(index=viruses, columns=viruses)
		virus_unique = pd.DataFrame(index=viruses, columns=viruses)
		for i in viruses:
			for j in viruses:
				shared = virus_aln[i]*virus_aln[j]; shared.fillna(0.0, inplace=True);
				shared = shared[shared>0]#; shared = list(shared.index); shared = [str(i) for i in shared]
				#shared = ';'.join(shared)
				
				virus_shared.loc[i,j] = len(shared)#(flex_array.gen_ind_hits(shared, dep_pep))
				
		for i in virus_shared.columns:
			virus_unique[i] = virus_sums[i] - virus_shared[i]
			'''

        second_round_prob = pd.DataFrame(index=viruses, columns=viruses)
        third_round_prob = pd.DataFrame(index=viruses, columns=viruses)
        #virus_pairs = []
        #total = 139656
        #count = 0

        virus_pairs = list(combo(viruses, 2))

        def calc_pair(pair):
            i = pair[0]
            j = pair[1]
            d1 = {}
            d2 = {}
            i_index = set(virus_aln[i].index)
            j_index = set(virus_aln[j].index)
            shared_index = set(i_index).intersection(j_index)
            shared = virus_aln[i].loc[list(shared_index)]
            if len(shared) == 0:
                d1[(i, j)] = first_round_prob[j]
                d1[(j, i)] = first_round_prob[i]
                d2[(i, j)] = 0.0
                d2[(j, i)] = 0.0
            else:
                unique_j = j_index - shared_index
                unique_j = virus_aln[j].loc[unique_j]
                filter_unique_j = flex_array.gen_ind_hits(unique_j, dep_pep)
                d1[(i, j)] = len(filter_unique_j) / (
                    len(peptide_lib) - len(shared) -
                    (len(unique_j) - len(filter_unique_j)))
                unique_i = i_index - shared_index
                unique_i = virus_aln[i].loc[unique_i]
                filter_unique_i = flex_array.gen_ind_hits(unique_i, dep_pep)
                d1[(j, i)] = len(filter_unique_i) / (
                    len(peptide_lib) - len(shared) -
                    (len(unique_i) - len(filter_unique_i)))
                filter_shared = flex_array.gen_ind_hits(shared, dep_pep)
                d2[(i, j)] = len(filter_shared) / (
                    len(peptide_lib) - len(unique_i) - len(unique_j) -
                    (len(shared) - len(filter_shared)))
                d2[(j, i)] = d2[(i, j)]

            return d1, d2

        results = Parallel(n_jobs=-1, verbose=100000)(delayed(calc_pair)(pair)
                                                      for pair in virus_pairs)
        m1, m2 = zip(*results)

        #		for i in index:
        #			for j in range(index.index(i),len(second_round_prob.columns)):
        #				#pair = set([i, j])
        #				#if pair not in virus_pairs:
        #					# print progress
        #				j = second_round_prob.columns[j]
        #				count += 1
        #				print("Proportion of pairs evaluated: " + str(count/total))
        #				# add pair to list of sets
        #				#virus_pairs.append(pair)
        #				'''
        #				#do uniques
        #				shared = virus_aln[i] & virus_aln[j]; shared.fillna(False, inplace=True); shared = shared[shared];
        #				unique_j = virus_aln[j] ^ shared; unique_j.fillna(True, inplace=True); unique_j = unique_j[unique_j];
        #				filter_unique_j = flex_array.gen_ind_hits(unique_j, dep_pep)
        #				second_round_prob.loc[i,j] = len(filter_unique_j)/(len(peptide_lib)-len(shared)-(len(unique_j)-len(filter_unique_j)))
        #
        #				unique_i = virus_aln[i] ^ shared; unique_i.fillna(True, inplace=True); unique_i = unique_i[unique_i];
        #				filter_unique_i = flex_array.gen_ind_hits(unique_i, dep_pep)
        #				second_round_prob.loc[j,i] = len(filter_unique_i)/(len(peptide_lib)-len(shared)-(len(unique_i)-len(filter_unique_i)))
        #				# now do shared probabilities
        #				filter_shared = flex_array.gen_ind_hits(shared, dep_pep)
        #				third_round_prob.loc[i,j] = len(filter_shared)/(len(peptide_lib)-len(unique_i)-len(unique_j)-(len(shared)-len(filter_shared)))
        #				third_round_prob.loc[j,i] = third_round_prob.loc[i,j]
        #				'''
        #				# find shared
        #				i_index = set(virus_aln[i].index); j_index = set(virus_aln[j].index)
        #				shared_index = set(i_index).intersection(j_index); shared = virus_aln[i].loc[list(shared_index)]
        #				# set values if shared is 0
        #				if len(shared) == 0:
        #					second_round_prob.loc[i,j] = first_round_prob[j]
        #					second_round_prob.loc[j,i] = first_round_prob[i]
        #					third_round_prob.loc[i,j] = 0.0
        #					third_round_prob.loc[j,i] = 0.0
        #				else:
        #					# unique at j
        #					unique_j = j_index - shared_index; unique_j = virus_aln[j].loc[unique_j]
        #					filter_unique_j = flex_array.gen_ind_hits(unique_j, dep_pep)
        #					second_round_prob.loc[i,j] = len(filter_unique_j)/(len(peptide_lib)-len(shared)-(len(unique_j)-len(filter_unique_j)))
        #					# unique at i
        #					unique_i = i_index - shared_index; unique_i = virus_aln[i].loc[unique_i]
        #					filter_unique_i = flex_array.gen_ind_hits(unique_i, dep_pep)
        #					second_round_prob.loc[j,i] = len(filter_unique_i)/(len(peptide_lib)-len(shared)-(len(unique_i)-len(filter_unique_i)))
        #					# shared prob
        #					filter_shared = flex_array.gen_ind_hits(shared, dep_pep)
        #					third_round_prob.loc[i,j] = len(filter_shared)/(len(peptide_lib)-len(unique_i)-len(unique_j)-(len(shared)-len(filter_shared)))
        #					third_round_prob.loc[j,i] = third_round_prob.loc[i,j]
        #

        second_round_prob.to_csv(self.par['dir_ref_seq'] +
                                 "unique_probabilities_20180524.csv",
                                 header=True,
                                 index=True)
        print("Second probability file generated.")

        third_round_prob.to_csv(self.par['dir_ref_seq'] +
                                "shared_probabilities_20180524.csv",
                                header=True,
                                index=True)
        print("Third (and last) probability file generated.")
        '''
				a = binary_b[i]; b = binary_b[j]
				virus_intersections.loc[i,j] = np.dot(a,b)
		
		virus_unique = pd.DataFrame(index=viruses, columns=viruses)
		for i in virus_intersections.columns:
			virus_unique[i] = virus_sums[i] - virus_intersections[i]
		
		second_round_prob = pd.DataFrame(index=viruses, columns=viruses)
		for i in virus_intersections.index:
			for j in virus_intersections.columns:
				second_round_prob.loc[i,j] = virus_unique.loc[i,j]/(len(peptide_lib)-virus_intersections.loc[i,j])
		second_round_prob.to_csv(self.par['dir_ref_seq']+"unique_probabilities.csv", header=True, index=True)
		print("Second probability file generated.")
		
		third_round_prob = pd.DataFrame(index=viruses, columns=viruses)
		for i in virus_intersections.index:
			for j in virus_intersections.columns:
				third_round_prob.loc[i,j] = virus_intersections.loc[i,j]/(len(peptide_lib)-virus_unique.loc[i,j]-virus_unique.loc[j,i])
		third_round_prob.to_csv(self.par['dir_ref_seq']+"shared_probabilities.csv", header=True, index=True)
		print("Third (and last) probability file generated.")
		'''

        return None
Example no. 19
0
def permute(n, k=K):
  return chain.from_iterable(permutations(form) for form in set(combo(k, n)))
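A short equivalence check (my addition): chain, permutations and combo come from itertools, and K is defined elsewhere in the original module; for a K of distinct elements the expression above is equivalent to itertools.permutations(K, n).

from itertools import chain, permutations, combinations as combo

K = (1, 2, 3)  # hypothetical value; the real K is defined elsewhere
via_permute = sorted(chain.from_iterable(permutations(form) for form in set(combo(K, 2))))
assert via_permute == sorted(permutations(K, 2))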
Example no. 20
0
from itertools import combinations as combo
from functools import reduce

def findTriple():
  for triple in combo(range(1, 998), 3):
    if sum(triple) == 1000:
      if triple[0]**2 + triple[1]**2 == triple[2]**2: return reduce((lambda x,y: x*y), triple)
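Usage note (my addition): this is the classic Project Euler problem 9 search; the only Pythagorean triple summing to 1000 is (200, 375, 425), so the brute-force scan (slow, it walks tens of millions of combinations) returns their product.

print(findTriple())  # -> 31875000, since 200 + 375 + 425 == 1000 and 200**2 + 375**2 == 425**2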
Example no. 21
0
from itertools import combinations as combo
import enchant
from bs4 import BeautifulSoup as bs
import requests as req
from textwrap import fill


print('')
print('welcome to scrabble helper'.upper().center(65, '*'))
print('remember that you can\'t use abbreviations in the game'.upper().center(65, '*'))

while True:
    print('')
    word = ''.join(input('Letters you have in scrabble: ').strip().split(' '))
    d = enchant.Dict("en_US")
    a = [combo(word, i) for i in range(1, len(word) + 1)]
    b = list()
    v = 'aeiou'

    for f in range(len(a)):
        for g in list(a[f]):
            g = ''.join(g).strip()
            if g not in b:
                b.append(''.join(g))

    words = sorted([i for i in b if d.check(i)], key=len)
    print('\nword list\n'.upper().center(65, '*'))

    for word in words:
        word = word.ljust(15, ' ')
        word = word + '<'
	virus_aln = {i:virus_aln[i] for i in virus_sums.index}
	virus_xr = {i:virus_xr[i] for i in virus_sums.index}

	first_round_prob = pd.Series(index=virus_sums.index)
	viruses = list(virus_sums.index)
	for i in first_round_prob.index:
		print("Virus " + str(viruses.index(i)))
		# Numerator: number of filtered evidence peptides for each virus
		# Denominator terms: library - cross reactives to virus i - dependent evidence peptides (thrown out)
		#first_round_prob[i] = virus_sums[i]/(len(peptide_lib)-len(virus_xr[i])-(len(virus_aln[i])-virus_sums[i]))
		first_round_prob[i] = virus_sums[i]/(len(peptide_lib)-(len(virus_aln[i])-virus_sums[i]))
	first_round_prob.to_csv("total_probabilities_20181020.csv", header=False, index=True) #print to file
	print("First probability file generated.", flush=True)
	
	pair_time = timeit.default_timer()
	virus_pairs = list(combo(viruses, 2)) # list of all unique pairs of viruses under consideration (10 viruses for testing)
	
	print("Starting unique probability calculations.", flush=True)
	print("Progress: ", flush=True)
	# start parallel loop for calculating unique probabilities (see function)s
	results = Parallel(n_jobs=-1)(delayed(calc_pair)(pair, virus_aln, virus_xr, dep_pep, first_round_prob, peptide_lib, it_num, len(virus_pairs)) for pair,it_num  in zip(virus_pairs, list(range(1,len(virus_pairs)+1))))
	m1, m2 = zip(*results)
	del results
	m1 = {k:v for d in m1 for k, v in d.items()} # housekeeping for unpacking results
	#m2 = {k:v for d in m2 for k, v in d.items()}
	print("Done with calculations.", flush=True)
	time_end = timeit.default_timer()
	print('Time to finish pairs: '+str(time_end-pair_time))
	
	# operations to unpack the parallelized results into a viruses x viruses 2D matrix
	second_round_prob = pd.Series(m1)
Example no. 23
0
'''
Given a string S, print all possible size-k replacement
combinations in lexicographic sorted order, one per line.
'''

from itertools import combinations_with_replacement as combo

S, k = input().split()

for c in combo(sorted(S), int(k)):
    print("".join(c))
Example no. 24
0
        try:
            f = fight(spell, f)
        except AssertionError:
            return False
        except Win:
            return True
    return 0


#print (run_fight(["recharge", "shield", "drain", "poison", "missile"], sample_2))

from itertools import combinations_with_replacement as combo


def mana(spelllist):
    m = 0
    for spell in spelllist:
        m += spells[spell]
    return m


depth = 0
while True:
    depth = depth + 1
    spelllists = combo(spells, depth)
    for spelllist in spelllists:
        victory = run_fight(spelllist, sample_2)  # fight using this candidate spell sequence
        if victory == False:
            print("BAD")
            exit()
Example no. 25
0
def get_adresses(base, adds):
    yield base
    for i in range(1, len(adds) + 1):
        for cmb in combo(adds, i):
            yield base + sum(cmb)
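A usage sketch (my addition), assuming combinations is imported as combo: the generator yields the base address first, then the base plus the sum of every non-empty subset of adds.

from itertools import combinations as combo

print(list(get_adresses(10, [1, 2, 4])))  # -> [10, 11, 12, 14, 13, 15, 16, 17]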
Example no. 26
0
# Advent of Code 2020
# Day 9, part 1 & 2
# https://adventofcode.com/2020/day/9

from itertools import combinations as combo

data = [int(line.rstrip()) for line in list(open("data/input9.txt", "r"))]

# Part 1 solution
i = 26
target = None
while i < len(data):

    vals = data[i - 25:i]
    sumto = data[i]
    found = False

    for nums in combo(vals, 2):
        if sum(nums) == sumto and found != True:
            found = True
            continue

    if not found:
        target = sumto
        print("/ part 1: ", target)
        break
    i += 1

# Part 2 solution
found = False
for i in range(len(data)):
    for N in range(len(data)):
Example no. 27
0
#!/bin/python3
from itertools import combinations as combo
import sys
'''
My solution; brute force, so it only passes about 50% of the test cases.
'''

n, q = input().strip().split(' ')
n, q = [int(n), int(q)]
s = input().strip()
for a0 in range(q):
    left, right = input().strip().split(' ')
    left, right = [int(left), int(right)]
    string = s[left:right + 1]
    # substrings = ([string[i:j] for i,j in combo(range(len(string)+1),2)])
    substrings = []
    count = 0
    for i, j in combo(range(right - left + 2), 2):
        subs = string[i:j]
        if subs not in substrings:
            substrings.append(subs)
            count += 1
    print(count)
Example no. 28
0
    deeper = scheduleWeek((depth + 1), matchups[1:], used)

    # If the later pairings reached our goal, append the current and later lists
    if (deeper[0] == 5):
      return (5, [pairing] + deeper[1])

    # Otherwise, the current pairing won't work, so try again with the next
    else:
      map(used.remove, pairing[:2])
      return scheduleWeek(depth, matchups[1:], used)



# Create list of all matchups between teams
matchups = []
combos = combo(xrange(1, 11), 2)
matchups.extend(combos)
matchups = sorted((2 * matchups), key=lambda tup: tup[0])

# Simulate 18 weeks of matches
for week in (range(1,19)):
  print "Simulating week %d:" % (week)
  threads = []
  matchnum = 0

  # Generate 1 weeks worth of matches
  weekly = scheduleWeek(1, matchups, [])
  for pairing in weekly[1]:
    matchnum += 1
    matchups.remove(pairing)
Example no. 29
0
    def local_collinearity(self):
        """
        Computes several indicators of multicollinearity within a geographically
        weighted design matrix, including:
        
        local correlation coefficients (n, ((p**2) + p) / 2)
        local variance inflation factors (VIF) (n, p-1)
        local condition number (n, 1)
        local variance-decomposition proportions (n, p) 
        
        Returns four arrays with the order and dimensions listed above where n
        is the number of locations used as calibrations points and p is the
        nubmer of explanatory variables. Local correlation coefficient and local
        VIF are not calculated for constant term. 

        """
        x = self.X
        w = self.W
        nvar = x.shape[1]
        nrow = len(w)
        if self.model.constant:
            ncor = (((nvar - 1)**2 + (nvar - 1)) / 2) - (nvar - 1)
            jk = list(combo(range(1, nvar), 2))
        else:
            ncor = (((nvar)**2 + (nvar)) / 2) - nvar
            jk = list(combo(range(nvar), 2))
        corr_mat = np.ndarray((nrow, int(ncor)))
        if self.model.constant:
            vifs_mat = np.ndarray((nrow, nvar - 1))
        else:
            vifs_mat = np.ndarray((nrow, nvar))
        vdp_idx = np.ndarray((nrow, nvar))
        vdp_pi = np.ndarray((nrow, nvar, nvar))

        for i in range(nrow):
            wi = w[i]
            sw = np.sum(wi)
            wi = wi / sw
            tag = 0

            for j, k in jk:
                corr_mat[i, tag] = corr(np.cov(x[:, j], x[:, k],
                                               aweights=wi))[0][1]
                tag = tag + 1

            if self.model.constant:
                corr_mati = corr(np.cov(x[:, 1:].T, aweights=wi))
                vifs_mat[i, ] = np.diag(
                    np.linalg.solve(corr_mati, np.identity((nvar - 1))))

            else:
                corr_mati = corr(np.cov(x.T, aweights=wi))
                vifs_mat[i, ] = np.diag(
                    np.linalg.solve(corr_mati, np.identity((nvar))))

            xw = x * wi.reshape((nrow, 1))
            sxw = np.sqrt(np.sum(xw**2, axis=0))
            sxw = np.transpose(xw.T / sxw.reshape((nvar, 1)))
            svdx = np.linalg.svd(sxw)
            vdp_idx[i, ] = svdx[1][0] / svdx[1]
            phi = np.dot(svdx[2].T, np.diag(1 / svdx[1]))
            phi = np.transpose(phi**2)
            pi_ij = phi / np.sum(phi, axis=0)
            vdp_pi[i, :, :] = pi_ij

        local_CN = vdp_idx[:, nvar - 1].reshape((-1, 1))
        VDP = vdp_pi[:, nvar - 1, :]

        return corr_mat, vifs_mat, local_CN, VDP
Example no. 30
0
        return "<" + self.name + ">"

ballpic = pygame.image.load("ball.png").convert_alpha()


balls = [Ball('ball1', 300, 200, 2, 7),
         Ball('ball2', 300, 300, 4, 7),
         Ball('ball3', 400, 300, 9, 7),
         Ball('ball4', 300, 400, 5, 9),]

screen.fill(white)

while 1:
    for ball in balls:
        ball.speed = [s * -1 for s in ball.speed]
        ball.move()
        ball.boarders()
        screen.blit(ballpic, ball.rect)

    for b1, b2 in combo(balls, 2):
        b1.collide(b2)

    for i in range(1):
        if rand.randint(0, accelChance) == 0:
            for ball in balls:
                ball.speed[i] *= accelRate

    screen.blit(blank, blank.get_rect())
    pygame.display.flip()
    c.tick(frameRate)
Example no. 31
0
    viruses = list(binary_b.columns)
    for i in first_round_prob.index:
        print("Virus " + str(viruses.index(i)))
        # Numerator: number of filtered evidence peptides for each virus
        # Denominator terms: library - cross reactives to virus i - dependent evidence peptides (thrown out)
        first_round_prob[i] = virus_sums[i] / (
            len(peptide_lib) - len(virus_xr[i]) -
            (len(virus_aln[i]) - virus_sums[i]))
    first_round_prob.to_csv("total_probabilities_20181008.csv",
                            header=False,
                            index=True)  #print to file
    print("First probability file generated.", flush=True)

    pair_time = timeit.default_timer()
    virus_pairs = list(
        combo(viruses[10:20], 2)
    )  # list of all unique pairs of viruses under consideration (10 viruses for testing)

    print("Starting unique probability calculations.", flush=True)
    print("Progress: ", flush=True)
    # start parallel loop for calculating unique probabilities (see function)s
    results = Parallel(
        n_jobs=-1
    )(delayed(calc_pair)(pair, virus_aln, virus_xr, dep_pep, first_round_prob,
                         peptide_lib, it_num, len(virus_pairs))
      for pair, it_num in zip(virus_pairs, list(range(1,
                                                      len(virus_pairs) + 1))))
    m1, m2 = zip(*results)
    del results
    m1 = {k: v
          for d in m1
Example no. 32
0
    viruses = list(virus_sums.index)
    for i in first_round_prob.index:
        print("Virus " + str(viruses.index(i)))
        # Numerator: number of filtered evidence peptides for each virus
        # Denominator terms: library - cross reactives to virus i - dependent evidence peptides (thrown out)
        #first_round_prob[i] = virus_sums[i]/(len(peptide_lib)-len(virus_xr[i])-(len(virus_aln[i])-virus_sums[i]))
        first_round_prob[i] = virus_sums[i] / (
            len(peptide_lib) - (len(virus_aln[i]) - virus_sums[i]))
    first_round_prob.to_csv("total_probabilities_20181110.csv",
                            header=False,
                            index=True)  #print to file
    print("First probability file generated.", flush=True)

    pair_time = timeit.default_timer()
    virus_pairs = list(
        combo(viruses, 2)
    )  # list of all unique pairs of viruses under consideration (10 viruses for testing)

    print("Starting unique probability calculations.", flush=True)
    print("Progress: ", flush=True)
    # start parallel loop for calculating unique probabilities (see function)s
    results = Parallel(
        n_jobs=-1
    )(delayed(calc_pair)(pair, virus_aln, virus_xr, dep_pep, first_round_prob,
                         peptide_lib, it_num, len(virus_pairs))
      for pair, it_num in zip(virus_pairs, list(range(1,
                                                      len(virus_pairs) + 1))))
    m1, m2 = zip(*results)
    del results
    m1 = {k: v
          for d in m1
Example no. 33
0
from itertools import combinations as combo

d = {'a': 1, 'b': 2, 'c': 3, 'd': 4, 'e': 5}


def get_key_from_value(dic, value):
    return list(dic.keys())[list(dic.values()).index(value)]


ans = []
for i in combo(d.values(), 2):
    print(i)
    x = i[0]
    y = i[1]
    if x + y == 5:
        ans.append([get_key_from_value(d, x) + get_key_from_value(d, y)])

print(ans)
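For reference (my addition), the expected final output with d as defined above:

# print(ans) -> [['ad'], ['bc']]   (1 + 4 == 5 and 2 + 3 == 5)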