Example #1
def create_visml_from_sdata(s, distance='euclidean', dmatrix=False, allow_zero_weight=False):

    from pyvisml import VisML
    
    if not dmatrix:
        import scipy.spatial.distance
        dm = getattr(scipy.spatial.distance, distance)
    
        # Distance matrix M
        M = N.zeros((s.M.shape[0], s.M.shape[0]), dtype=N.float32)
        for i, j in comb(xrange(len(s)), 2):
            M[i][j] = dm(s.M[i], s.M[j])
    
        # Normed to [0,1]
        M /= M.max()
    else:
        M = s.M

    tree = VisML.create_empty_visml_tree(layout='elegant:100', fineArt='False')

    nodes = []
    for name in s.sample_ids:
        nodes.append(tree.add_node(name, '0', '0'))

    for i, j in comb(xrange(len(s)), 2):
        if M[i][j] or allow_zero_weight:
            tree.add_edge(nodes[i], nodes[j], weight=str(M[i][j]))

    return tree
Example #2
def get_all_comb(files_byreg):
    tolist = lambda iterobj: [elem for elem in iterobj]
    if isinstance(files_byreg[0], list):
        pairs = []
        for fnames in files_byreg:  # one list at a time
            pairs.extend(tolist(comb(fnames, 2)))
    else:
        pairs = tolist(comb(files_byreg, 2))
    return pairs
def rfa_init(atom_to_fact, task, atoms, actions):
    s_init = [atom_to_fact[x] for x in set(task.init) & set(atoms)]
    for f1, f2 in comb(s_init, 2):
        f1.add_conflict(f2)

    for a in actions:
        for f1, f2 in comb(a.add_eff, 2):
            f1.add_conflict(f2)
        for f1, f2 in comb(a.pre_del, 2):
            f1.add_conflict(f2)
Example #4
def _calculate_sig_connections(fn):
    # OLD METHOD, SUPERSEDED ON JULY 29 2014

    s = clustio.ParseNormal('auc_results/%s_results_reweight_RAW.txt' % fn)
    for i, j in comb(xrange(len(s)), 2):
        v = N.sqrt(s.M[i][j] * s.M[j][i])
        s.M[i][j] = s.M[j][i] = v
    results = [ float(x) for x in clustio.read_list('auc_results/%s_perm_test_4_500.txt' % fn) ]
    results.sort()
    t95 = results[int(0.95 * ITER_PERM)]
    for i, j in comb(xrange(len(s)), 2):
        if s.M[i][j] < t95:
            s.M[i][j] = s.M[j][i] = 0
    clustio.write_normal(s, 'sig_connections/%s_sig_connections_999.txt' % fn)
Example #5
def find_sets(cards):
    """ Prints the number of unique sets of cards and the number of cards in
        the maximum disjoint set and prints the sets contained in the disjoint set.
        -:param cards -- The string listing the cards
    """
    cards = process_cards(cards)
    subsets = []
    # finding subsets
    for subset in comb(cards[1:], 3):
        if is_set(subset):
            subsets.append(list(subset))
    print len(subsets)

    #finding max disjoint
    subsets = process_sets(subsets)
    max_disjoint = []
    for set in combine_recurse(subsets):
        if len(set) > len(max_disjoint):
            max_disjoint = set

    print len(max_disjoint), "\n"
    for card in max_disjoint:
        for i in xrange(3):
            print card[i]
        # to separate lines
        print " "
def main():
    path = "C:\Users\Jayant\Desktop\Tomato\IIM_internship"  # raw_input("Enter the path to save to data : ")
    sets = 38  # input("Enter the total sets : ")
    site_list = ["FORBES", "NYTimes", "BusinessInsider", "TheWeek", "Money.CNN", "Bloomberg", "REUTERS", "FORTUNE"]
    print "\n***************Starting the Data Download***********\n"
    crawl_bot(path, sets)
    print "\n***************Starting the processing part**********\n"
    for set_no in xrange(2, sets + 1):
        n = "a"
        hash_for = {}
        hash_back = {}
        nameSet_list = [" "]
        comb_str = ""
        f = open(path + r"\SETS\SET" + str(set_no) + ".txt", "r")
        names = [name.strip() for name in f.readlines()]
        f.close()
        for name in names:
            hash_for[name] = n
            hash_back[n] = name
            comb_str += n
            n = chr(ord(n) + 1)
        for site_no in xrange(len(site_list)):
            f = open(path + r"\SET" + str(set_no) + "\\" + site_list[site_no] + r"\links.txt", "r")
            total_links = len(f.readlines())
            f.close()
            count_list = []
            for r in xrange(1, len(comb_str[1:]) + 1):
                for com in comb(comb_str[1:], r):
                    links_list = []
                    count = 0
                    string = "a"
                    for i in com:
                        string += i
                    for link_no in xrange(1, total_links + 1):
                        f = open(
                            path + r"\SET" + str(set_no) + "\\" + site_list[site_no] + "\\" + str(link_no) + ".txt", "r"
                        )
                        data = f.read()
                        if validate(string, data, hash_back):
                            count += 1
                            f.seek(0)
                            links_list.append(f.next().strip())
                            f.seek(0)
                            # print f.next(),string
                        f.close()
                    count_list.append(count)
                    nameSet = ""
                    for i in string:
                        nameSet = nameSet + hash_back[i] + ","
                    with open(path + r"\SET" + str(set_no) + "\\" + site_list[site_no] + ".csv", "ab") as c:
                        f = csv.writer(c)
                        f.writerow([nameSet[:-1]] + links_list)
                    if site_no == 0:
                        nameSet_list.append(nameSet)
            with open(path + r"\SET" + str(set_no) + r"\SET" + str(set_no) + ".csv", "ab") as c:
                f = csv.writer(c)
                f.writerow([site_list[site_no]] + count_list)
        with open(path + r"\SET" + str(set_no) + r"\SET" + str(set_no) + ".csv", "ab") as c:
            f = csv.writer(c)
            f.writerow(nameSet_list)
Example #7
def search(limit):
    """search for the best a, b coefficients for a quadratic func prime gen"""
    coffs = {}
    for t in comb(range(-limit + 1, limit + 1), 2):
        coffs[t] = max([eval_func(quadratic(a, b)) for a, b in [t, t[::-1]]])

    return max(coffs, key=lambda x: coffs[x])
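quadratic and eval_func are defined elsewhere in the original project. A plausible sketch, under the assumption that quadratic(a, b) builds n*n + a*n + b and eval_func counts how many consecutive n = 0, 1, 2, ... yield primes:

def quadratic(a, b):
    # Assumed shape of the generator: f(n) = n^2 + a*n + b
    return lambda n: n * n + a * n + b

def is_prime(x):
    if x < 2:
        return False
    d = 2
    while d * d <= x:
        if x % d == 0:
            return False
        d += 1
    return True

def eval_func(f):
    # Count consecutive n starting at 0 for which f(n) is prime.
    n = 0
    while is_prime(f(n)):
        n += 1
    return n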
Example #8
File: uri1316.py  Project: ishiikurisu/PC
def draw():
    global M
    global L
    global N
    global C
    cc = range(N)
    sol = None

    for n in xrange(1, N):
        combs = comb(cc, n)
        for cb in combs:
            m = 0
            l = 0
            for it in cc:
                if it in cb:
                    m += C[it]
                else:
                    l += C[it]
            if m <= M and l <= L:
                sol = cb
                break
        if sol:
            break

    if sol:
        out = ''
        out += '%d' % (len(sol))
        for it in sol:
            out += ' %d' % (it + 1)
        print out
    else:
        print 'Impossible to distribute'
Example #9
File: dobble.py  Project: jokoon/eio
def gen_set(n, sym_per_card):
    syms = [a for a in range(n)]
    valid_card_set = []
    combs = comb(syms, sym_per_card)
    # print([a for a in comb(syms, sym_per_card)])
    # combs2 = comb(syms, sym_per_card)
    # valid_card_set.append(next(combs))
    print(valid_card_set)
    # print(len([a for a in comb(syms, sym_per_card)]))
    # combs = [a for a in comb(syms, sym_per_card)]
    # print(combs[:10])
    # intersections
    # stop = 50000
    for i,c in enumerate(combs):
        # print(i,c)
        # if i > 50000: break
        if is_valid(valid_card_set, c):
            # print(i,c)
            # print('gfg')

            valid_card_set.append(c)
            # sys.stdout.write(repr(c)); sys.stdout.flush()

    # print(valid_card_set)
    # print()
    return valid_card_set
Example #10
def SPDP(full, shorter):
    full.sort()
    if len(full) % 2 != 0:
        error('sizes do not match')
    n = len(full) / 2
    X = n * [0]
    suma = full[0] + full[2*n - 1]
    compShorter = (n + 1) * [0]
    order = 2*n * [0]
    for i in range(n):
        order[i] = (full[i], full[2*n - i - 1])
        order[2*n - i - 1] = (full[2*n - i - 1], full[i])

    for k in comb(range(2*n), n):
        combination = n * [0]
        for i in range(n):
            X[i] = order[k[i]][0]
            combination[i] = order[k[i]]

        compShorter[0] = X[0]
        for i in range(n - 1):
            compShorter[i + 1] = X[i + 1] - X[i]
        compShorter[n] = suma - X[n - 1]
        if compShorter == shorter:
            print X, 'with orderings', combination, 'with indexes', k
Example #11
def permutationTest2(a, b):
    ab = a + b
    Tobs = sum(a)
    under = 0
    for count, perm in enumerate(comb(ab, len(a)), 1):
        if sum(perm) <= Tobs:
            under += 1
    return under * 100.0 / count
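A small usage sketch (the sample data here are made up, not from the original project): permutationTest2 returns the percentage of relabelings of the pooled observations whose first-group sum does not exceed the observed sum(a).

from itertools import combinations as comb

a = [24, 43, 58, 67, 61]   # hypothetical treatment group
b = [42, 33, 19, 22, 28]   # hypothetical control group
# Enumerates all C(10, 5) = 252 relabelings and reports the percentage
# whose group sum is <= sum(a) = 253.
print(permutationTest2(a, b))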
Example #12
def anotherBFPDP(L,n) :
    maximum = max(L)    
    fullL=[0]+L
    
    for xList in comb(fullL,n) :
        if L==formDelta(xList) :
            return xList
    return []
def brute(n, k, b):
	avail = range(1,k+1)
	for seq in comb(avail, b):
		seq = list(seq)
		# print "seq:",seq
		if sum(seq) == n:
			return 1
	return -1
Example #14
def pack(L, items):

    for pair in comb(items, 2):
        if not sum(pair) - L:
            one = items.index(pair[0]) + 1
            items[one - 1] = None
            two = items.index(pair[1]) + 1
            return sorted([one, two])
def permutationTest(a, b):
    ab = a + b
    Tobs = statistic(ab, a)
    under = 0
    for count, perm in enumerate(comb(ab, len(a)), 1):
        if statistic(ab, perm) <= Tobs:
            under += 1
    return under * 100. / count
Example #16
    def substringsExists(self, list):
        for i in range(len(list) - 1, 1, -1):
            for c in comb(list, i):
                temp = self.L
                for val in c:
                    if temp is None or not temp.has_key(val):
                        return False
                    temp = temp[val]
        return True
Example #17
    def _gen_distance_matrix(self, data_matrix, distance, num_samples):
        """Generate a distance matrix so that we can use indices to specify distance rather than data vectors"""

        distance_matrix = numpy.zeros((num_samples, num_samples), dtype = float)

        for i, j in comb(xrange(num_samples), 2):
            distance_matrix[i][j] = distance(data_matrix[i], data_matrix[j])

        return distance_matrix + distance_matrix.T
def goldbach(n):
    listOfPrimeToN = []
    for num in iterator:
        if num >= n:
            break
        listOfPrimeToN.append(num)
    listToCheck = list(comb(listOfPrimeToN, 2))
    for elem in listToCheck:
        if elem[0] + elem[1] == n:
            return elem
Example #19
def calculate_overlaps(pathway_dict):

    overlaps = []
    pathways = pathway_dict.keys()

    for i, j in comb(xrange(len(pathways)), 2):
        pi, pj = set(pathway_dict[pathways[i]]), set(pathway_dict[pathways[j]])
        overlaps.append((len(pi), len(pj), len(pi & pj)))

    return list(set(overlaps))
Example #20
File: mi.py  Project: JakeMick/pycoevolve
def nmi(alignment,alphabet=aminobet,epsilon=1e-9,gap_cut=.1,cnsrv_cut=.95):
    """
    Returns the joint entropy normalized mutual information matrix.
    Alphabet refers to the aminoacids to consider.
    Epsilon is there so the 0*log(0) is approximately 0.
    gap_cut refers to the frequency of gaps tolerated at scorable positions.
        gap_cut = 0 means that only positions with no gaps will be scored.
    consrv_cut refers the maximum frequency of a single aminoacid at scorable
        positions. cnsrv_cut = .95 means that if a column has any single aminoacid
        more than 95% of the time, the position will be ignored
        This is because calculation of NMI becomes unreliable at low entropy.
    """
    alpha_size = len(alphabet)

    trans_align = transpose_alignment(alignment)
    position_size = len(trans_align)
    seq_size = float(len(trans_align[0]))

    combo_alpha = {}
    for ind,i in enumerate(alphabet):
        for jnd,j in enumerate(alphabet):
            combo_alpha[i+j] = (ind,jnd)

    mi_matrix = np.zeros((position_size,position_size),dtype=float)

    for two_pos in comb(range(position_size),2):
        pos1 = two_pos[0]
        pos2 = two_pos[1]
        col1 = trans_align[pos1]
        col2 = trans_align[pos2]
        if ((col1.count("-")/seq_size) > gap_cut) \
                or ((col2.count("-")/seq_size) > gap_cut):
            pass
        else:
            pmf = np.zeros((alpha_size,alpha_size),dtype=int)
            two_letter_counts = Counter([i[0] + i[1] for i \
                in zip(col1,col2)])
            for two_letter in combo_alpha:
                i,j = combo_alpha[two_letter]
                pmf[i,j] = two_letter_counts[two_letter]
            npmf = np.array(pmf,dtype=float) + epsilon
            npmf /= npmf.sum() 
            npmf += epsilon
            row_sum = npmf.sum(axis=1)
            col_sum = npmf.sum(axis=0)
            if np.any(row_sum > cnsrv_cut) \
                    or np.any(col_sum > cnsrv_cut):
                pass
            else:
                sum_ent = -(row_sum*np.log(row_sum)).sum() \
                        - (col_sum*np.log(col_sum)).sum()
                joint_ent = -(npmf*np.log(npmf)).sum()
                mi_matrix[pos1,pos2] = 2*(sum_ent \
                        - joint_ent)/joint_ent
    return mi_matrix + mi_matrix.T
Example #21
def set_halo(cords):
	"""Функция генерации ореола по координатам"""
	halo = []
	delta_comb = list(comb(range(-1, 2), 2))
	delta_comb += [(1, 1), (-1, -1)]
	for cord in cords:
		for delta in delta_comb:
			ads = adds(cord, delta)
			if ads != 0 and ads not in halo and ads not in cords:
				halo.append(ads)
	return filter(lambda x: 0 <= x[0] <= 9 and 0 <= x[1] <= 9, halo)
Example #22
def partition(s,w,n):
	for x in range(1,len(w)):
		for c in comb(w,x):
			if s == sum(c):
				if n > 1:
#					print(sorted(c), sum(c))
					return partition(s, w-set(c), n-1)
				else:
#					print(sorted(c), sum(c))
#					print(sorted(tuple(w-set(c))), sum(w-set(c)))
					return True
	return False
Example #23
File: detect.py  Project: Auzzy/pystick
def detect_ordering(section):
	ordering = {}
	for var1,var2 in comb(vars,2):
		order = var_order(var1,var2,section)
		if order:
			if var1 not in ordering:
				ordering[var1] = {}
			if comp_ops[order] not in ordering[var1]:
				ordering[var1][comp_ops[order]] = []
			ordering[var1][comp_ops[order]].append(var2)
	
	return ordering
Example #24
def solve(n):
	w = {1,2,3,7,11,13,17,19,23,31,37,41,43,47,53,59,61,67,71,73,79,83,89,97,101,103,107,109,113}
	for x in range(1, len(w)):
		for c in comb(w, x):
			ww = w - set(c)
			sumc = sum(c)
			if sum(ww) == sumc*n:
				if partition(sumc, ww, n-1):
					qe = 1
					for p in c:
						qe*=p
#					print(sorted(c), sumc)
					return qe
Example #25
def guess(ls,pattern):    
    m = {i:ls.index(i) for i in ls}
    pl = []
    cl = []
    e,w = pattern        
    if e != 0:
        for p in list(comb(ls,e)):
            ed = {}
            for i in p:                
                ed[i] = m[i]
            pl.append(ed)        
    if w != 0:
        for c in list(comb(ls,w)):
            wd = {}
            for i in c:                               
                wd[i] = range(step)
                # For digits flagged as "right digit, wrong position", rule out the digit's original position.
                # 9 is the exception: (9,) was never tested, so we cannot conclude that 9 is not in the first slot.
                if i != 9:
                    wd[i].remove(m[i]) 
            cl.extend(unwind(wd))
    return merge(pl,cl)
Example #26
    def _memo_chessboard_dists(self, hdim, vdim):

        M = {} #A dict is several times faster in random 4D access than a numpy array, though it takes more memory

        #FIXME: This function still does too much work.  Because of the abs function klmn = lkmn, etc.
        iter = [ (i, j) for i in xrange(vdim) for j in xrange(hdim) ]

        for (k,l), (m,n) in comb(iter, 2):
            val = max(abs(k - m), abs(l - n))
            M[(k,l,m,n)] = val
            M[(m,n,k,l)] = val

        for i, j in iter:
            M[(i,j,i,j)] = 0

        return M
Example #27
    def upd_similarity_matrix(self):
        """Generate a similarity matrix from each new clustering algorithm results"""

        datapoints = self.datapoints
        num_samples = len(datapoints)
        
        clustcount, totalcount = self.sim_matrix_clustcount, self.sim_matrix_totalcount

        for i, j in comb(xrange(num_samples), 2):
            i_id, j_id = datapoints[i].cluster_id, datapoints[j].cluster_id

            if not (i_id is None or j_id is None):
                if i_id == j_id:
                    clustcount[i][j] += 1
                
                totalcount[i][j] += 1
Example #28
def findpathwaysizes(fn1, fn2, pathway_dict, sizes, threshold):
    mat1 = clustio.ParseNormal('auc_results/%s_results_reweight_RAW.txt' % fn1)
    mat2 = clustio.ParseNormal('auc_results/%s_results_reweight_RAW.txt' % fn2)
    assert (mat1.gene_names == mat2.gene_names).all()
    mutualize(mat1)
    mutualize(mat2)
    M = N.abs(mat1.M - mat2.M)
    res = []
    nd = {}
    for i in xrange(len(sizes)):
        nd[sizes[i]] = i
    for i, j in comb(xrange(len(M)), 2):
        if M[i][j] >= threshold:
            res.append(list(sorted([ nd[len(pathway_dict[mat1.gene_names[i]])], nd[len(pathway_dict[mat1.gene_names[j]])] ])))
    #return list(set([ tuple(x) for x in res if (N.array(x) > 16).all() and (N.array(x) < 350).all() ]))
    return list(set([ tuple(x) for x in res ]))
Example #29
def calculate_sig_connections(fn, pathway_dict):

    s = clustio.ParseNormal('auc_results/%s_results_reweight_RAW.txt' % fn)
    mutualize(s)
    
    f  = open('auc_results/%s_perm_test.txt' % fn)
    pt = cp.load(f)
    f.close()

    for i, j in comb(xrange(len(s)), 2):
        p1 = set(pathway_dict[s.gene_names[i]])
        p2 = set(pathway_dict[s.gene_names[j]])
        thresh = pt[(len(p1), len(p2), len(p1 & p2))]
        if s.M[i][j] <= thresh:
            s.M[i][j] = s.M[j][i] = 0

    clustio.write_normal(s, 'sig_connections/%s_sig_connections_999.txt' % fn)
Example #30
def _calculate_sig_connections(fn, pathway_dict):

    s = clustio.ParseNormal('auc_results/%s_results_reweight_RAW.txt' % fn)
    mutualize(s)
    c = clustio.ParseNormal('auc_results/%s_perm_test.txt' % fn)
    
    psizes = dict([ (x, len(pathway_dict[x])) for x in pathway_dict ])
    cidx   = dict([ (int(c.gene_names[i]), i) for i in xrange(len(c.gene_names)) ])

    for i, j in comb(xrange(len(s)), 2):
        ilen = psizes[s.gene_names[i]]
        jlen = psizes[s.gene_names[j]]
        thresh = c.M[cidx[ilen]][cidx[jlen]]
        if s.M[i][j] <= thresh:
            s.M[i][j] = s.M[j][i] = 0

    clustio.write_normal(s, 'sig_connections/%s_sig_connections_999.txt' % fn)
Example #31
def measure_resistance_multichannel(
        threads,
        excitation_currents_A,
        threadnames_RES,
        threadnames_CURR,
        iv_characteristic,
        threadname_Temp="control_LakeShore350",
        # temperature_sensor='Sensor_1_K',
        # n_measurements=1,
        current_reversal_time=0.08,
        **kwargs):
    """conduct one 'full' measurement of resistance:
        arguments: dict conf
            threads = dict of threads running of the mainWindow class
            threadname_Temp  = name of the (LakeShore) Temperature thread
            threadnames_RES  = list of names of the (Keithley) Voltage measure threads
            threadnames_CURR  = list of names of the (Keithley) Current set threads
            n_measurements  = number of measurements (dual polarity) to be averaged over
                            default = 1 (no reason to do much more)
            excitation_currents_A = list of excitations currents for the measurement
        returns: dict data
            T_mean_K : dict of means of temperature readings
                    before and after measurement [K]
            T_std_K : dict of stds of temperature readings
                    before and after measurement [K]
            resistances, voltages, currents:
                dicts with corresponding values for all measurement channels
            timeseconds: pythons time.time()
            ReadableTime: Time in %Y-%m-%d %H:%M:%S
            SearchableTime: Time in %Y%m%d%H%M%S
    """
    # measured current reversal = 40ms.
    # reversal measured with a DMM 7510 of a 6221 Source (both Keithley)
    lengths = [
        len(threadnames_CURR),
        len(threadnames_RES),
        len(excitation_currents_A)
    ]
    for c in comb(lengths, 2):
        if c[0] != c[1]:
            raise AssertionError(
                "number of excitation currents, current sources and voltmeters does not coincide!"
            )
    data = dict()
    resistances = {
        key: dict(coeff=0, residuals=0, nonohmic=0)
        for key in threadnames_RES
    }
    voltages = {key: [] for key in threadnames_RES}
    currents = {key: [] for key in threadnames_CURR}

    with loops_off(threads):

        temp1 = threads[threadname_Temp][0].read_Temperatures()
        temps = {key: [val] for key, val in zip(temp1.keys(), temp1.values())}

        for ct, (name_curr, exc_curr, name_volt) in enumerate(
                zip(threadnames_CURR, excitation_currents_A, threadnames_RES)):
            threshold_residuals = 1e4
            # threshold_coefficients = 1e4

            threads[name_curr][0].enable()

            for current_base in iv_characteristic:
                for currentfactor in [-1, 1]:
                    current = exc_curr * currentfactor * current_base
                    # print(current)
                    currents[name_curr].append(current)
                    threads[name_curr][0].gettoset_Current_A(current)
                    threads[name_curr][0].setCurrent_A()
                    # wait for the current to be changed:
                    time.sleep(current_reversal_time)
                    voltage = threads[name_volt][0].read_Voltage()
                    voltages[name_volt].append(voltage)
            c, stats = polyfit(currents[name_curr],
                               voltages[name_volt],
                               deg=1,
                               full=True)
            resistances[name_volt]["coeff"] = c[1]
            resistances[name_volt]["residuals"] = stats[0][0]
            # c_wrong = polyfit(currents[name_curr], voltages[
            #                   name_volt], deg=4)
            # print(stats[0], c_wrong)

            if stats[0] > threshold_residuals:
                resistances[name_volt]["nonohmic"] = 1
            # if np.any(np.array([x > threshold_coefficients for x in stats[2:]])):
            #     resistances[name_volt]['nonohmic'] = 1

            threads[name_curr][0].disable()

        temp2 = threads[threadname_Temp][0].read_Temperatures()
        for key in temps:
            temps[key].append(temp2[key])

    data["T_mean_K"] = {key + "_mean": np.mean(temps[key]) for key in temps}
    data["T_std_K"] = {
        key + "_std": np.std(temps[key], ddof=1)
        for key in temps
    }

    data["resistances"] = {
        key.strip("control_"): value
        for key, value in zip(resistances.keys(), resistances.values())
    }
    data["voltages"] = {
        key.strip("control_"): value
        for key, value in zip(voltages.keys(), voltages.values())
    }
    data["currents"] = {
        key.strip("control_"): value
        for key, value in zip(currents.keys(), currents.values())
    }

    df = pd.DataFrame.from_dict(data)
    data["datafile"] = kwargs["datafile"]
    timedict = {
        "timeseconds": time.time(),
        "ReadableTime": convert_time(time.time()),
        "SearchableTime": convert_time_searchable(time.time()),
    }
    data.update(timedict)

    data["df"] = df
    # print(data)
    # for x in data: print(x)
    # df = pd.DataFrame.from_dict(data)
    return data
Example #32
from itertools import combinations as comb


N, S = map(int, input().split())
nums = list(map(int, input().split()))

answer = 0
size = 1
while size <= N:
    for c in comb(range(N), size):
        if sum(nums[i] for i in c) == S:
            answer += 1
    size += 1

print(answer)
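A quick check with made-up input (not part of the original submission): for N = 5, S = 0 and the numbers -7 -3 -2 5 8, the only non-empty subset summing to 0 is (-3, -2, 5), so the script prints 1.

from itertools import combinations as comb

nums, N, S = [-7, -3, -2, 5, 8], 5, 0   # hypothetical input
answer = sum(1 for size in range(1, N + 1)
             for c in comb(range(N), size)
             if sum(nums[i] for i in c) == S)
print(answer)  # 1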
Example #33
import sys
from itertools import combinations as comb
q = '1000000000 2000'
w = '476 650 1257 1824 2311 2740 2856 3024 3453 3768 5865 6233 6865 7591 8254 8542 9818 9904 10190 10695 12089 12912 13317 14389 14645 14886 15525 15633 16144 16598 18131 20375 20473 20531 21420 21804 22107 22445 23056 24864 25662 26041 26276 26291 26976 27083 27172 27875 28327 28469 28992 30145 30322 30404 30485 31295 32588 32989 33377 33710 34276 34356 35036 37693 37829 38530 38868 39070 40166 40656 41388 41723 41848 41883 42236 42264 43115 43911 45612 45818 46279 46593 47600 47777 48101 48276 48509 49456 51477 52629 52708 52778 53086 53096 53317 53923 54169 54666 54995 56264 56853 56888 57225 58706 58896 59693 59993 60789 61375 61534 61784 62586 63207 63542 63607 63969 65341 66338 66664 66696 67187 67399 68568 69454 70725 71313 71795 71900 71993 72002 72183 72357 72625 72843 72984 73252 73423 74350 74492 74688 74921 74929 76117 76416 76870 77500 77731 78514 78762 78949 79008 79097 79739 81298 82633 82678 82855 83718 84053 84267 84400 84542 85182 85935 86016 88076 88965 89369 89822 89865 90923 91212 92388 94562 94740 94820 95376 95725 95814 96177 96832 97302 97624 97787 97844 97974 98297 98958 101465 101690 101934 101964 102715 103249 103276 103882 104104 104231 104606 104888 105290 105440 105460 105958 106902 107226 107524 107621 107837 108669 109731 110716 111630 111815 111885 112286 113397 113400 113660 113719 114402 114627 115026 115317 116790 117021 117352 117359 119023 119220 119680 119997 120147 120167 120402 120815 120995 121086 121314 121603 121812 122394 123223 124256 125413 125761 126567 126933 126986 127304 127375 128144 129643 130445 130730 130940 131920 132091 132403 132429 132464 132782 132966 133521 133687 133878 134679 136357 137225 137286 137428 137846 138256 138630 138790 138941 139867 140865 142305 142342 143190 143474 145102 145146 146367 146464 146617 147042 147645 149196 149862 150120 151208 151719 151767 152691 154562 154772 154790 155338 155864 156277 156345 156496 158208 159148 159544 159835 160278 160314 160316 160473 161501 161910 162080 162111 162367 163049 163083 164187 164737 165131 165460 165700 165891 166122 166940 166982 167465 167804 168688 169447 169481 170446 170606 171390 171771 172588 172685 173112 173199 174033 174440 174497 175990 177985 178291 179502 182307 182531 182761 183024 183624 183751 184222 184481 184743 185703 187292 187381 188124 188209 188854 190356 190607 190723 192452 193370 194194 194902 196745 197867 199079 199173 199363 199418 199499 199710 199920 200388 200896 200947 201209 201680 201990 202562 203598 203931 206657 207479 208014 208547 208624 208853 208945 209095 209318 210599 210973 213497 213538 214232 215224 216932 217271 217932 219735 220332 220443 220912 221282 221326 221992 222137 222460 222474 223006 223559 224133 224181 224396 224694 225823 226492 226946 227874 228326 228603 229309 229522 229693 230025 230131 230334 230965 231199 231302 232171 232362 232411 233289 233803 234104 235164 235603 235663 235666 235780 236625 236701 238584 238587 238702 239019 240095 240113 240225 241343 241347 241787 242546 242771 243518 244013 244149 244367 244813 244958 245231 245935 246225 246256 246328 246680 247633 247912 248288 248779 249540 249616 250183 250331 250456 250918 250972 251134 251567 252048 252579 252623 253131 254697 254852 255174 255451 255621 255760 255887 256110 256201 256753 256787 256838 257471 258226 258658 259300 259346 259998 260273 260298 260786 261354 261701 261992 262297 262580 262852 262880 263370 263666 263677 264576 264710 264793 266365 267146 267780 268741 269503 270953 271833 272844 273483 273954 274731 
274947 275555 276234 276858 277355 277363 277421 277456 277552 277797 277947 277983 278133 278136 278309 278412 278568 278586 279817 279837 279997 280079 280201 280797 281484 282653 283111 283252 284127 284964 285521 285628 285816 286244 286757 287687 287864 287987 288440 289333 289717 289739 289981 290171 290794 290833 291420 291527 291719 291755 291824 292055 292126 292187 292528 292734 293294 293379 295493 296502 296629 296796 297622 298202 298881 299156 299658 300147 300901 301389 301654 302071 302150 302269 302681 303496 305458 306090 306378 307470 307669 307808 308579 309496 309529 309622 310067 310295 310687 311171 311848 312993 313109 313169 313736 313972 314063 314620 314961 315619 316283 316848 316920 317001 317263 318645 318686 318913 319200 320066 320265 321131 321568 322502 322605 323283 323990 324359 325549 325896 326198 326909 327161 328889 328965 329215 329496 329846 330014 330827 330927 331586 331788 332094 332762 333683 335290 336704 337459 337953 338011 338029 339028 339549 339953 340086 340182 340853 342078 342640 342773 343305 343990 344258 344570 344576 345875 346290 346530 346910 347005 347286 348172 348372 348639 349550 349760 349810 351521 352212 352508 352788 352995 353945 354559 355058 355219 355835 356436 356750 356829 356922 357307 357857 359587 360506 360714 361939 362402 362530 364120 364914 366013 366694 367011 368254 368896 368951 369486 370909 371558 372111 372579 375241 375695 375927 376147 376446 376468 378462 379252 379404 379631 379676 379832 380354 380663 381964 382415 382605 382635 383593 383680 385229 386541 386865 386996 387202 387528 388213 388579 388622 389084 389569 391610 392128 393100 393588 395584 396973 397433 397450 397570 397593 397613 397938 398433 398519 398642 398906 398909 399585 399800 399940 400207 401175 401177 401625 401692 402270 402681 403431 403508 403957 404883 407575 407797 408447 408981 409437 413211 413792 414901 415062 415083 415973 416767 416842 417530 417872 418424 418852 419117 419144 419955 420223 420572 420702 421998 422111 422230 423440 423859 424007 424096 425851 426168 426207 426221 426884 427449 427860 427991 428180 428235 428282 428412 429944 430779 431122 431692 431763 432118 432373 433372 434623 435071 435088 435149 435724 435737 436021 436441 439426 439537 439640 439949 440514 440603 440920 441460 442093 442425 442468 442717 443194 443606 443743 444202 444205 444971 445841 445849 446224 447521 448513 449237 449296 449484 449711 450715 450821 451658 452040 452299 453065 454633 455103 455724 455935 456692 456783 457058 457109 457290 457643 459074 459432 459449 459520 460536 461290 461745 461916 462287 463238 463297 463349 463564 463689 463762 464122 464277 464707 464971 465281 466348 467625 468289 469996 470101 470456 471080 471983 472438 472449 472751 473468 474013 475486 475730 475814 475837 476137 476837 476856 476928 476970 477161 477514 477589 478685 479026 479078 479174 479485 480598 481192 481792 481945 482122 482713 483601 483914 484176 484468 484765 485151 486870 487166 487264 487333 487506 487626 487674 487904 488608 489761 490145 490836 491132 491516 492249 492347 492789 492892 493203 493406 493605 494430 495209 495682 495999 496002 496222 496300 496615 497783 498267 498420 499270 499431 499999 500200 500986 502019 504791 505305 505988 506731 507034 507770 507919 508187 508308 509044 509282 509283 509648 510362 511284 511701 511996 512313 512362 512391 513168 513476 514064 514714 514774 515662 515684 516605 517539 517832 518368 518443 519440 519587 520907 521892 522635 522758 523532 524834 524953 
526220 526741 527327 528364 529349 529954 530010 530509 530589 531807 532374 532393 532452 532547 532980 533641 534201 534328 536055 536316 538220 539182 539644 540541 541773 542768 543169 543674 544224 545226 546673 547142 548008 548228 550172 551550 551691 553357 554685 554867 556167 556690 556762 556977 556983 557019 558633 558674 559594 560120 560132 560137 561945 562313 562326 562387 562687 563105 563709 563801 564366 564416 564658 564793 565209 565478 565543 565630 565661 565672 566262 566441 566483 566507 566898 567347 568583 569001 569020 569656 569878 570964 571066 571463 571909 572143 572712 573200 574536 574604 575949 576561 576869 577108 577175 577647 578094 578529 578778 578856 578907 579128 579415 580148 580150 580289 580409 580879 581179 581394 581593 582195 582816 582840 584055 584435 584533 585232 586126 587131 588204 588559 588688 588729 590231 590392 591415 591858 592053 592734 593469 593470 593474 594782 594967 596302 596801 596907 597398 597581 598833 599624 599755 602000 602784 603190 603685 603706 603817 604252 604289 604647 605101 605448 606845 607091 607365 607778 608144 608209 608843 609856 610856 611535 611929 612075 612502 612605 613006 613033 613813 614918 615499 615694 615921 616164 616937 617075 617164 617508 617940 618277 618490 618900 619227 620695 620910 621305 622554 622707 623449 623990 623999 624428 624885 624983 625135 625164 625431 625601 625831 626203 627137 628089 628543 629628 631313 632607 633090 633103 634261 635375 635707 635821 635914 636110 636181 636604 636796 638499 638602 638771 639149 639295 639768 640030 641018 641441 642463 645465 645609 645881 647504 648509 648615 649393 649519 649692 649864 650861 651102 651245 651383 651568 652534 652929 653476 653585 655471 655725 655839 655951 657244 657297 657762 657975 657988 658816 659227 660072 660407 660677 661810 661871 663737 664583 665097 665146 665671 665873 666320 666510 666614 667593 668112 668687 668905 668942 669618 670026 670038 670538 670607 671139 672148 672693 672926 674022 674130 676200 676907 677329 677619 677788 679049 679145 679163 679223 679565 680827 681454 681497 681504 681776 682105 683208 683465 683742 683874 685248 685794 686559 686638 686786 687328 687977 689356 689657 690254 690906 691044 691252 691849 692049 692145 692560 693220 693351 693435 693706 694073 694820 695014 695256 695663 695664 695979 696152 696263 696554 697280 699284 699338 699381 700056 700356 700697 701415 701525 702029 702309 702499 702736 702780 703106 703255 704365 704369 704989 705051 705528 705562 706124 707104 707166 707217 707668 708062 708497 709227 709621 710270 710301 710577 710738 712182 712577 713379 713587 713979 714876 714970 715249 716369 716802 717548 717695 717921 719038 719163 719847 720272 720826 720943 721075 721641 722182 722909 723429 723739 723862 723897 724207 724212 724637 725442 726404 727420 727710 727763 729359 729767 730231 730367 730480 731110 731127 731346 731431 732103 732219 733298 733493 733774 735183 735295 735394 735443 735962 737047 737186 737766 738134 738848 739536 739728 739996 740785 741347 741717 742276 742462 743266 744312 746471 747632 748257 749798 750851 751032 751464 752521 752838 753291 753303 753356 753680 754264 754428 754690 754768 755104 755782 755999 756372 757219 757588 758541 759410 759447 759833 761370 761945 763638 763985 764542 764595 765230 765439 766041 766587 767511 767985 769491 770265 770370 770814 771527 771762 772297 772394 772891 774534 776090 776697 777287 777623 777798 778042 778376 778531 779018 779289 779830 780892 780930 781228 
781420 781937 782220 782595 782875 783092 783351 783841 783877 784307 784360 784526 785390 785446 785459 785935 786982 787403 787494 787676 788346 788438 788590 789410 790126 790221 790836 790921 791988 792003 792331 792546 793056 793239 795044 795718 796341 796958 797186 797356 797492 797600 798258 799026 799264 799601 799838 799980 800228 800632 801628 801960 802376 802835 803064 803086 804201 804802 805686 805870 805985 807240 807502 808628 810055 810126 810259 810671 810942 812035 812321 813268 813424 813474 813745 815502 815624 815860 816169 816283 816500 818062 819233 819539 820442 820703 821465 822208 822784 823318 823978 824680 824769 825064 825257 825673 826032 826148 826781 826969 827003 828409 828432 829141 829202 829734 830606 830935 831067 831182 831501 832342 834031 834743 834845 834867 834955 836638 836963 837353 837716 838192 838773 838793 839533 840397 840937 841255 843007 843221 843349 843722 843769 844202 844708 845013 845715 845738 845793 846015 846356 846386 846772 847323 847509 848800 850183 850206 850595 850874 852598 853136 853848 854647 855713 856304 856846 858230 859150 860445 860459 860667 861373 861773 862101 862327 862418 863925 864277 864329 864800 865049 865896 866836 869013 869685 869744 870178 870408 870864 870965 870968 871135 872115 872386 872616 873125 873345 873400 874241 874275 874295 874679 875187 875322 875518 876376 876617 876689 878208 879111 880046 880711 881385 882328 882605 883845 884693 886139 886416 886878 887255 888713 889259 889278 889596 889972 890895 891065 891648 892339 892748 893136 893191 893795 894192 894394 894838 894956 895967 896916 897055 897597 897645 898399 899449 900155 900351 901136 901413 901444 901973 902001 902288 902368 902563 902643 902982 903240 903393 903580 903616 903633 903733 905849 906048 906087 906390 906655 906702 906975 907139 907342 908587 908624 909010 910698 911578 911648 911894 911992 911997 912728 912855 913192 913290 913699 914584 914810 914863 915016 915163 916706 917015 917253 918449 918989 919086 919692 920710 920833 921457 922032 922091 922420 922910 923133 923486 923685 923704 924282 924508 925296 926055 927133 927238 927708 927818 928474 928514 928528 929232 930409 930597 932678 932982 933584 934778 934935 935004 935402 935569 936072 936457 940841 941474 941486 942624 942641 942701 943250 943364 943818 944326 945007 945037 945548 946330 946711 946846 947338 947359 947888 948126 948842 949365 949452 950957 951034 952185 952324 952589 952686 953613 953864 954718 954902 957010 957221 957327 957543 957851 957911 958500 959023 959766 960622 962088 963117 963161 964098 964260 964340 965549 965715 965991 966699 967147 967293 968165 968841 969446 970393 970586 970784 971417 971575 972786 974059 974087 974197 974281 975101 976082 977068 978008 978217 978464 978644 979073 979681 980073 980161 981008 981239 982204 984590 984719 984912 985988 986018 986093 986827 987254 987386 988261 988518 988587 988763 989035 990000 990103 991008 991158 991202 991391 991976 992068 992278 992893 992999 993116 993671 993846 994166 995018 995160 995281 995548 995791 996239 996780 997022 997118 997460 997793 998211 998294 999795 999867'
x = list(map(int, q.split()))
n = list(map(int, w.split()))
max = x[0]
for i in range(x[1], 2, -1):
    a = list(comb(n, i))
    a.reverse()
    for j in a:
        if sum(j) <= max:
            print(len(j))
            u = ''
            for p in j:
                u += str((n.index(p))) + " "
                n[n.index(p)] = -1
            print(u)
            sys.exit()
Example #34
    #'envi_rxl': '/home/fpaolo/data/envi/raxla/*_{0}_?',
    'ice_rxl': '/home/fpaolo/data/ice/raxla/*_{0}_?_filt'
}

sect = ['01', '02', '03', '04']

if len(sys.argv) == 2:  # specifies wall time
    wtime = sys.argv[1]
elif len(sys.argv) == 3:  # specifies nodes and time
    nodes = sys.argv[1]
    wtime = sys.argv[2]
else:
    nodes = 50
    wtime = 25  # default time and nodes (1 task/proc)

comb2 = lambda f: [ff for ff in comb(f, 2)]
date = lambda s: re.findall(DATE, s)


def remove_duplicates(pairs):
    for i, (f1, f2) in enumerate(pairs):
        if (ASC in f1 and ASC in f2) or \
           (DES in f1 and DES in f2) or \
           date(f1) == date(f2):
            pairs[i] = 'x'
    return [ff for ff in pairs if ff != 'x']


def get_job_args(sats, sect):
    args = {}
    for sat, files in sats.items():
Example #35
from itertools import combinations as comb

T = int(input())

for tc in range(1, T + 1):
    N, K = map(int, input().split())
    numbers = list(map(int, input().split()))

    ans = 0
    for i in range(len(numbers)):
        for com in comb(numbers, i):
            if sum(com) == K:
                ans += 1

    print(f'#{tc} {ans}')
Example #36
from itertools import combinations as comb
inarr = list(map(int, input().split(',')))
given_sum = int(input())
inarr_com = comb(inarr, 4)
info_list = []
solu_list = []
for it in inarr_com:
    info_list.append((sum(it), it))
for it in info_list:
    if it[0] == given_sum:
        solu_list.append(it[1])
print(len(solu_list))
Example #37
First, consider each unique combination of top dice that sum to the
target.

For each of these, consider each possible number of remaining dice that
equal the minimum value of the top dice.

Partition into two groups: dice >= min value of top, and dice < this
value. Compute the number of arrangements of each of these groups, 
as well as the number of ways to distribute those two partitions 
over all dice.

Add it all up.
"""
result = 0
# unique sequences of top dice
for rolls in comb(range(1, sides + 1), n_top):
    if sum(rolls) != target_sum: continue
    lo = min(rolls)

    # group the low dice by how many of them equal lo
    for nlo in range(n_bot + 1):

        #partition of dice >= lo
        hi = rolls + tuple([lo] * (n_bot - nlo))
        nhi = unique(hi)

        #partition of dice < lo
        numlo = pickless(nlo, lo)

        #number of ways to arrange
        #these partitions among all dice
Example #38
     ["TDD", "{:.1%}".format(prob_trust_distrust_distrust), No_TDD],
     ["DDD", "{:.1%}".format(prob_distrust_distrust_distrust), No_DDD],
     ["Total", "{:.1%}".format(Total_per), Total_number]],
    headers=["Type", "Percent", "Number"],
    tablefmt='orgtbl')))

# Actual Distribution
# Adding weight to an edge
weight = nx.get_edge_attributes(g, 'w')
# Calculating triads
Triads = [edge for edge in nx.enumerate_all_cliques(g) if len(edge) == 3]
# Creating the triads list along with the edge weights
triads_list = list(
    map(
        lambda edge: list(map(lambda edge:
                              (edge, weight[edge]), comb(edge, 2))), Triads))
# Initializing the values
T_T_T = 0
T_T_D = 0
T_D_D = 0
D_D_D = 0
# Iterating through each triangle
for triad in triads_list:
    edge_1 = triad[0][1]
    edge_2 = triad[1][1]
    edge_3 = triad[2][1]
    # Based on trust and distrust values calculate the triad types
    if (edge_1 == 1 and edge_2 == 1 and edge_3 == 1):
        triad_type = "TTT"
        T_T_T = T_T_T + 1
    # Based on trust and distrust values calculate the triad types
from itertools import combinations_with_replacement as comb
num = [1,5,10,50]
n = int(input())
print(len(set(map(sum,(comb(num,n))))))
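For illustration (a worked check added here, assuming the same coin values): with n = 2 the multisets are (1,1), (1,5), (1,10), (1,50), (5,5), (5,10), (5,50), (10,10), (10,50), (50,50); all ten sums are distinct, so the script prints 10.

from itertools import combinations_with_replacement as comb

num = [1, 5, 10, 50]
# Sums for n = 2: 2, 6, 11, 51, 10, 15, 55, 20, 60, 100 -- ten distinct values.
print(len(set(map(sum, comb(num, 2)))))  # 10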
Example #40
def GetPowerset(s):
    #Get the powerset of a given set (all subsets incl. empty and full set).

    return list(chain(*map(lambda x: comb(s, x), range(0, len(s) + 1))))
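For illustration (added usage, assuming chain and comb are the itertools imports the snippet relies on): the powerset of a three-element list comes back as tuples ordered by size.

from itertools import chain, combinations as comb

print(list(chain(*map(lambda x: comb([1, 2, 3], x), range(0, 4)))))
# [(), (1,), (2,), (3,), (1, 2), (1, 3), (2, 3), (1, 2, 3)]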
Example #41
import sys
from itertools import combinations as comb

input = sys.stdin.readline

N, S = map(int, input().split())

A = list(map(int, input().split()))

count = 0
for i in range(2, len(A) + 1):
    for c in comb(A, i):
        if sum(c) == 0:
            count += 1

print(count)
Example #42
from itertools import combinations as comb

with open('AOC2017_2.data','r') as f:
    data = [[int(x) for x in row.split('\t')] for row in f.read().split('\n')]
    
total = 0
total_2 = 0
for row in data:
    total += max(row)-min(row)
    for c in comb(row,2):
        a,b = sorted(c)
        if b%a == 0:
            total_2 += b//a
print('part 1:', total)
print('part 2:', total_2)
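A sanity check on a small hand-made grid (added illustration; the real script reads AOC2017_2.data): in each row exactly one pair divides evenly, giving 8//2 + 9//3 + 6//3 = 9.

from itertools import combinations as comb

sample = [[5, 9, 2, 8], [9, 4, 7, 3], [3, 8, 6, 5]]
total_2 = 0
for row in sample:
    for c in comb(row, 2):
        a, b = sorted(c)
        if b % a == 0:
            total_2 += b // a
print(total_2)  # 9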
Example #43
    lamps = f.read().split()

# Clear and create execution folder
dir_name = "exec" + os.sep
shutil.rmtree(dir_name, True)
os.makedirs(dir_name)

count = 0
multival = [k for k in params if isinstance(params[k], list)]
multival = sorted(multival, key=len, reverse=True)  # Semi-arbitrary sort
param_space = [params[k] for k in multival]
N = np.prod(list(map(len, param_space)))
print("Number of executions:", N)

ms = 0
for i, param_vals in enumerate(comb(*param_space), 1):
    if 100. * i / N >= ms:
        if ms % 10 == 0:
            print(ms, end=' ')
        else:
            print('..', end=' ')
        sys.stdout.flush()
        ms += 5
    if i == N:
        print('')

    local_params = OrderedDict(zip(multival, param_vals))
    P = ChainMap(local_params, params)
    if "azimuth_angle" in multival \
        and P['elevation_angle'] == 90 \
        and params['azimuth_angle'].index(P['azimuth_angle']) != 0:
Example #44
File: HEADER.py  Project: GurEmre/zumzum
    def __init__(self,ROBN,codeBeginTime,showFrames,globalQ,record,Lx,Ly,cueRadius,visibleRaduis,\
        paramReductionMethod,PRMparameter,noise,localMinima,method):

        self.Etol = 4
        self.Lx = m2px(Lx)
        self.Ly = m2px(Ly)
        self.cueRadius = m2px(cueRadius)
        self.visibleRaduis = m2px(visibleRaduis)
        self.QRloc={'QR1':(self.Lx//2,0),'QR2':(self.Lx,self.Ly//4),\
            'QR3':(self.Lx,self.Ly//4*3),'QR4':(self.Lx//2,self.Ly),'QR5':(0,3*self.Ly//4),'QR6':(0,self.Ly//4)}
        '''self.localMinima: flag to determine if local minima will exist or not '''
        self.localMinima = localMinima
        self.ground = self.generateBackground(self.Lx, self.Ly, self.cueRadius,
                                              self.visibleRaduis)
        self.codeBeginTime = codeBeginTime
        self.sharedParams()
        self.showFrames = showFrames
        self.record = record
        self.vizFlag = record or showFrames
        self.fps = int(self.timeStep * 100 * 2)
        self.ROBN = ROBN
        self.collisionDist = m2px(0.05)
        self.swarm = [0] * self.ROBN
        self.wallNum = 4
        self.time = 0
        self.flagsR = np.zeros((self.ROBN, self.ROBN))
        self.counterR = np.zeros((self.ROBN, self.ROBN))
        self.collisionDelay = 1
        self.detectRad = 0.06
        self.robotRad = 0.06
        self.robotSenseRad = m2px(self.robotRad + self.detectRad)
        self.collisionDetectDist = self.robotSenseRad + self.collisionDist
        self.Wmax = 120 if not self.debug else 0
        self.paramReductionMethod = paramReductionMethod
        if self.debug: print('\t[+] Debugging mode: Wmax=0')
        self.log = 0
        videoRecordTime = ctime(TIME()).replace(':', '_')
        FPS = 5
        '''
        size = (self.Xlen, self.Ylen)
        won't work here -- the dimensions must be inverted, otherwise an empty video is saved
        '''
        size = (self.Ylen, self.Xlen)
        fourcc = cv.VideoWriter_fourcc(*'mp4v')
        if self.vizFlag:
            self.video = cv.VideoWriter(
                codeBeginTime + DirLocManage(returnchar=True) +
                videoRecordTime + '.mp4', fourcc, FPS, size, True)

        self.allRobotIndx = np.arange(0, self.ROBN)
        self.globalQ = globalQ
        self.allnodes = np.array(list(comb(self.allRobotIndx, 2)))
        self.colliders = []
        self.all_poses = []
        self.allRobotQRs = np.array(
            list(product(self.allRobotIndx, np.arange(0, len(self.QRloc)))))
        self.QRpos_ar = np.array(list(self.QRloc.values()))
        self.NASfunction1 = np.vectorize(lambda x: x.groundSensorValue1)
        self.NASfunction2 = np.vectorize(lambda x: x.groundSensorValue2)
        '''for local and global NAS '''
        if self.localMinima:
            self.NASGfunction = np.vectorize(lambda x: x.groundSensorValueG)
            self.NASLfunction = np.vectorize(lambda x: x.groundSensorValueL)

        if self.showFrames:
            ''' positioning window to make it easier to watch '''
            cv.namedWindow('background')
            cv.moveWindow('background', 1000, 0)
        self.noise = noise
        # self.sigma={"angle":int(180*0.25),"length":int(self.maxlen//4*0.25)}
        noise_ratio = self.noise / 180
        self.sigma = {
            "angle": int(180 * noise_ratio),
            "length": int(self.maxlen // 4 * noise_ratio * 0)
        }

        self.PRMparameter = PRMparameter
        self.method = method
def process_graph_data(data):
    
    # Create an empty graph
    g=nx.Graph()
    
    # Iterate over data of csv
    for index, row in data.iterrows():
        # Set reviewer, reviewee and trust values
        reviewer=row['reviewer']
        reviewee=row['reviewee']
        trust=row['trust']
        
        # add nodes and edge between nodes in graph
        g.add_node(reviewer)
        g.add_node(reviewee)
        g.add_edge(reviewer,reviewee,weight=trust)
       
    # Number of nodes
    count_nodes=g.number_of_nodes()
    print("\nNodes in the network: ",count_nodes)
  
    # 1. Number of edges in the network
    count_edges=g.number_of_edges()
    print("Edges in the network: ",count_edges)

    
    # 2. Number of self-loops
    count_self_loops=0
    for n in g.nodes_with_selfloops():
        count_self_loops=count_self_loops+1
    print("Self-loops: ",count_self_loops)
  
    # 3. Number of edges used to identify triads (referred to as TotEdges) [ It is 1. – 2. ]
    count_totedges=count_edges-count_self_loops
    print("Edges used - TotEdges: ", count_totedges)
    
    # 4. Number of positive (trust) edges (ignore self-loops) and
    # 5. Number of negative (distrust) edges (ignore self-loops)
    count_pos_edges=0
    count_neg_edges=0
    for reviewer,reviewee,trust in g.edges(data=True):
        if trust['weight'] == 1:
            count_pos_edges=count_pos_edges+1
        if trust['weight'] == -1:
            count_neg_edges=count_neg_edges+1
    print("Trust edges: ",count_pos_edges)
    print("Distrust edges: ",count_neg_edges)
    
    
    # 6. Probability p that an edge will be positive: (number of positive edges) / TotEdges
    p_pos_edge=count_pos_edges/count_totedges
    print("Probability p (an edge will be positive): ",round(p_pos_edge,2))
    
    # 7. Probability that an edge will be negative: 1 – p
    p_neg_edge=1 - p_pos_edge
    print("Probability 1 - p (an edge will be negative): ",round(p_neg_edge,2))
    
    # Count total number of triangles
    triangles = nx.triangles(g) 
    count_triangles = sum(triangles.values())/3     # When computing triangles for the entire graph each triangle is counted three times
    print("Numer of Triangles: ",count_triangles)
    
    # 8. Expected distribution of triad types ( based on p and 1 – p applied to the number of triangles in the
    # graph). Show number and percent.
    # a. Trust-Trust-Trust
    # b. Trust-Trust-Distrust
    # c. Trust- Distrust -Distrust
    # d. Distrust- Distrust- Distrust
    # e. Total
    print("\nExpected Distribution:")
    print("Type\tpercent\tnumber")
    
    TTT_percent= round((p_pos_edge ** 3) * 100,1)
    TTT_num= round((count_triangles * TTT_percent)/100,1)
    print("TTT\t" + str(TTT_percent) + "\t" + str(TTT_num))
    
    TTD_percent= round((p_pos_edge ** 2 * p_neg_edge * 3) * 100,1)
    TTD_num= round((count_triangles * TTD_percent)/100,1)
    print("TTD\t" + str(TTD_percent) + "\t" + str(TTD_num))
    
    TDD_percent= round((p_pos_edge * (p_neg_edge ** 2) * 3) * 100,1)
    TDD_num= round((count_triangles * TDD_percent)/100,1)
    print("TDD\t" + str(TDD_percent) + "\t" + str(TDD_num))
    
    
    DDD_percent= round((p_neg_edge ** 3) * 100,1)
    DDD_num= round((count_triangles * DDD_percent)/100,1)
    print("DDD\t" + str(DDD_percent) + "\t" + str(DDD_num))
    
    print("Total\t" + str(100) + "\t" + str(count_triangles))
    
    # 9. Actual distribution of triad types. Show number and percent.
    # a. Trust-Trust-Trust
    # b. Trust-Trust-Distrust
    # c. Trust- Distrust -Distrust
    # d. Distrust- Distrust- Distrust
    # e. Total
   
    print('\nTriads that are found:')
    weight = nx.get_edge_attributes(g, 'weight')
    # Get triangles
    Triads = [edge for edge in nx.enumerate_all_cliques(g) if len(edge) == 3]
    #Get triads list with each combination of nodes and respective weight of edge
    triads_list = list(map(lambda edge: list(map(lambda edge: (edge, weight[edge]), comb(edge, 2))), Triads))
    count_TTT=0
    count_TTD=0
    count_TDD=0
    count_DDD=0
    # Loop through all triangles
    for triad in triads_list:
        e1_weight=triad[0][1]
        e2_weight=triad[1][1]
        e3_weight=triad[2][1]
    
        # Check edge weights, determine the type of triad and calculate the total triads of each type
        if(e1_weight==1 and e2_weight==1 and e3_weight==1):
            triad_type="TTT"
            count_TTT = count_TTT +1
        if(e1_weight==1 and e2_weight==1 and e3_weight==-1) or (e1_weight==-1 and e2_weight==1 and e3_weight==1) or (e1_weight==1 and e2_weight==-1 and e3_weight==1):
            triad_type="TTD"
            count_TTD=count_TTD+1
        if(e1_weight==-1 and e2_weight==-1 and e3_weight==1) or (e1_weight==1 and e2_weight==-1 and e3_weight==-1) or (e1_weight==-1 and e2_weight==1 and e3_weight==-1):
            triad_type="TDD"
            count_TDD =count_TDD + 1
        if(e1_weight==-1 and e2_weight==-1 and e3_weight==-1):
            triad_type="DDD"
            count_DDD=count_DDD+1
        print(triad_type + "\t" + str(triad[0]) + "\t" + str(triad[1]) + "\t" + str(triad[2]))
    
    # Calculate percentage of each triad  type
    TTT_percent=round((count_TTT *100)/count_triangles,1)
    TTD_percent=round((count_TTD *100)/count_triangles,1)
    TDD_percent=round((count_TDD *100)/count_triangles,1)
    DDD_percent=round((count_DDD *100)/count_triangles,1)
    print("\nActual Distribution:")
    print("Type\tpercent\tnumber")
    print("TTT\t" + str(TTT_percent) + "\t" + str(count_TTT))
    print("TTD\t" + str(TTD_percent) + "\t" + str(count_TTD))
    print("TDD\t" + str(TDD_percent) + "\t" + str(count_TDD))
    print("DDD\t" + str(DDD_percent) + "\t" + str(count_DDD))
    print("Total\t" + str(100) + "\t" + str(count_triangles))
from itertools import combinations_with_replacement as comb

#s, k = input().split()
#for i in comb(sorted(s), int(k)):
#    print(''.join(i))

#method 2: using join is faster than the approach here
#s, k = input().split()
#for el in comb (sorted(s),int(k)):
#    print(*el, sep='')

#method 3
#s, k = input().split()
#print(*[''.join(p) for p in comb(sorted(s), int(k))], sep='\n')

#method 4
#s = input().split()
#string, number = sorted(s[0]), int(s[1])
#print(*list(map(''.join, comb(string, number))), sep='\n')

#method 4.1
s, k = input().split()
print(*map(''.join, comb(sorted(s), int(k))), sep='\n')
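With a hypothetical input of "HACK 2" (not part of the snippet), sorted('HACK') is 'ACHK' and the program prints the ten length-2 multisets, one per line:

from itertools import combinations_with_replacement as comb

s, k = 'HACK', '2'   # hypothetical sample input
print(*map(''.join, comb(sorted(s), int(k))), sep='\n')
# AA AC AH AK CC CH CK HH HK KK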
Example #47
                     5,
                     initializationMode='k-means||',
                     maxIterations=50,
                     runs=10,
                     epsilon=10e-6)  # 5 clusters; adjustable
print("Final centers: ", model.clusterCenters)
print("Total Cost: " + str(model.computeCost(LiveVectors)))
#pca = PCA(n_components=2)
#xx=pca.fit_transform(data_KNN)
#print(xx)
prediction = model.predict(LiveVectors)  # returned as an RDD
labels = prediction.collect()
print(labels)
K = 10  # 5 variables -> 10 pairwise plots
plt.figure(figsize=(10, 10))
comb_list = np.array(list(comb(range(5), 2)))  # all pairwise combinations
#%%===== plotting =====#
for i in range(1, K + 1):
    ax = plt.subplot(4, 3, i)
    x = comb_list[i - 1, 0]
    y = comb_list[i - 1, 1]
    ax.scatter(Plotxx[:, x], Plotxx[:, y], c=labels)
    ax.scatter(np.array(model.clusterCenters)[:, x],
               np.array(model.clusterCenters)[:, y],
               c='red',
               marker='*',
               s=25)
    for a, b, j in zip(Plotxx[:, x], Plotxx[:, y], range(data.count())):
        ax.text(a,
                b + 0.0005,
                date_label[j],
def main(m: int, k: int, premade_group_count: int, player_data):
    """
    Строит наиболее оптимальные распределения элементов по группам.

    Сначала расчитываются appr_gr_count первых групп, таким образом чтобы
    средний рейтинг в полученных группах был наиболее близок к оптимальному.
    Затем лексикографически из оставшихся элементов строятся все возможные
    комбинации и дописываются к ранее полученным.
    :param m: число всех элементов
    :param k: число групп
    :param premade_group_count: количество групп для предварительного рассчета
    :param player_data: словарь или список с информацией о элементах
    """
    if type(player_data) == dict:
        player_dict = player_data
    else:
        player_dict = dict()
        for i in range(len(player_data)):
            player_dict[i + 1] = {
                'name': player_data[i][0],
                'rating': player_data[i][1],
                'assoc': player_data[i][2]
            }

    def calc_average_group_rating():
        """Вычисляет средний рейтинг групп."""
        result = 0
        for v in player_dict.values():
            result += v['rating']
        return result / k

    average_group_rating = calc_average_group_rating()
    assoc_count = dict()
    for v in player_dict.values():
        association = v['assoc']
        if assoc_count.get(association, False):
            assoc_count[association] += 1
        else:
            assoc_count[association] = 1

    n = int(m / k)
    free_values = list(range(1, m + 1))
    premade_groups = []

    def is_new_group_better_rating(n_group, b_group):
        s_ng = sum((player_dict[key]['rating'] for key in n_group))
        s_bg = sum((player_dict[key]['rating'] for key in b_group))
        if abs(s_ng - average_group_rating) <= abs(s_bg -
                                                   average_group_rating):
            return True
        else:
            return False

    def is_association_count_normal(group, threshold):
        current_assoc_dict = dict()
        for num in group:
            a = player_dict[num]['assoc']
            if current_assoc_dict.get(a, False):
                current_assoc_dict[a] += 1
            else:
                current_assoc_dict[a] = 1
        for k, v in current_assoc_dict.items():
            if threshold[k] < v:
                return False
        return True

    for _ in range(premade_group_count):

        threshold_assoc = dict()  # limit of members from one association in a single group
        for k, v in assoc_count.items():
            threshold_assoc[k] = ceil(v / n)
        comb_list = list(comb(free_values, n))

        # check that associations are evenly distributed within the chosen group
        while True:
            current_group = comb_list.pop(0)
            if is_association_count_normal(current_group, threshold_assoc):
                best_group = current_group
                break

        # compare each group's total rating for closeness to the average
        # and check its association distribution
        for gr in comb_list:
            if is_new_group_better_rating(
                    gr, best_group) and is_association_count_normal(
                        gr, threshold_assoc):
                best_group = gr
        best_group = list(best_group)
        #  remove the group's elements from the overall list
        #  decrement the association's representative count
        for element in best_group:
            free_values.remove(element)
            assoc_count[player_dict[element]['assoc']] -= 1

        premade_groups.append(best_group)

    combinations = comb_alg(len(free_values), int(len(free_values) / n),
                            free_values)
    for combination in combinations:
        for premade_group in reversed(premade_groups):
            combination.insert(0, premade_group)

    return combinations
Example #49
                    a[nx][ny] == 0 or a[nx][ny] == '*'):
                ch[nx][ny] = 1
                q.append([nx, ny, cnt + 1])
                if a[nx][ny] == 0:
                    tmp_ans = cnt + 1

    for i in range(n):
        for j in range(n):
            if a[i][j] == 0 and ch[i][j] == 0:
                return 987654321
    return tmp_ans


for i in range(n):
    for j in range(n):
        if a[i][j] == 2:
            v_idx.append([i, j])
ans = 987654321
for c in comb(v_idx, len(v_idx) - m):
    for cc in c:
        a[cc[0]][cc[1]] = '*'
    tmp = bfs_check()
    if tmp < ans:
        ans = tmp
    for cc in c:
        a[cc[0]][cc[1]] = 2
if ans == 987654321:
    print(-1)
else:
    print(ans)
Example #50

weights = nx.get_edge_attributes(G, 'weight')


# In[3]:


triads = [x for x in nx.enumerate_all_cliques(G) if len(x) == 3]


# In[4]:


triads_and_weights = list(
    map(lambda x: list(map(lambda x: (x, weights[x]), comb(x, 2))), triads))


# In[5]:


pprint(triads_and_weights[:20])  # ABBREVIATED


# In[58]:


print("NUMBER OF SELF LOOPS:", self_loop_count)
print("NUMBER OF TOTNODES:", pos_count+neg_count-self_loop_count)
print("NUMBER OF TRUST EDGES:", pos_count)
print("NUMBER OF DISTRUST EDGES:", neg_count)
Example #51
#NOTE have not cleaned up this file yet.. T_T...
import time
from primes import *
from itertools import combinations as comb

START = time.time()

for dig in comb('12345', 2):
    a, b, c = 10**int(dig[0]), 10**int(dig[1]), 1
    for i in xrange(0, 10):
        ai = i * a
        for j in xrange(0, 10):
            bj = j * b
            for k in xrange(0, 10):
                ck = k
                count = 0
                multiplier = 111111 - a - b - c
                for val in xrange(0, 10):
                    if m_r(val * multiplier + ai + bj + ck):
                        count += 1
                if count == 8:
                    print ai + bj + ck, count

print "Time Taken:", time.time() - START