Example #1
0
def ini_placements(W, req, D):
    """Refine candidate placements for a partial-digest style search.

    Parameters (inferred from usage -- confirm against callers):
        W:   candidate placement points.
        req: required/confirmed points (kept sorted; mutated in place).
        D:   sorted multiset of pairwise distances.

    Returns the (possibly refined) pair ``(W, req)``.

    BUG FIX: the original ended with ``return W, req`` followed by an
    unreachable ``for`` loop over D (dead code that could never run); the
    dead loop has been removed, along with the ``D1``/``D2`` counters that
    only the dead code used.  Behavior is unchanged.
    """
    # If W is already no larger than the point count n implied by
    # |D| = n(n-1)/2, there is nothing left to prune.
    if (len(W) <= (1 + sqrt(len(D) * 8 + 1)) / 2):
        return W, req
    D3 = mset(distances(req))
    UD = unused_distances(D3, D)
    k = max(UD)
    # D[len(D) - 1] is the largest distance; a point at k or at (max - k)
    # would explain the largest unused distance.
    if (index(W, k) > -1 and index(W, D[len(D) - 1] - k) > -1):
        return W, req
    elif (index(W, k) > -1):
        req.append(k)
        req.sort()
        return ini_placements(possible_placements(req, D), req, D)
    elif (index(W, D[len(D) - 1] - k) > -1):
        req.append(D[len(D) - 1] - k)
        req.sort()
        return ini_placements(possible_placements(req, D), req, D)
    return W, req
Example #2
0
def get_extra_nt(singular, plural):
    """ Returns either n or t that has to be in plural."""
    extra = mset(plural)
    extra.subtract(mset(singular))
    # A positive leftover count of 'н' means the plural adds it; otherwise 'т'.
    return 'н' if extra['н'] > 0 else 'т'
Example #3
0
def common_words(title, save_title, with_spelling, remove_stopwords):
    """Plot histograms of the shared-word ratio for duplicate vs. non-duplicate question pairs.

    Reads the module-level ``q1_arr``/``q2_arr``/``labels`` arrays and saves
    the resulting figure under ``../Visualization/<save_title>``.
    """
    same = np.zeros((nbr_duplicates, 1))
    different = np.zeros((nbr_nonduplicates, 1))
    same_index = different_index = 0
    all_is_stop_count = 0
    for idx in range(len(q1_arr)):
        q1 = str_to_array(q1_arr[idx])
        q2 = str_to_array(q2_arr[idx])
        if remove_stopwords:
            q1 = remove_stop(q1)
            q2 = remove_stop(q2)

        # Multiset intersection gives the common words (with multiplicity).
        shared = list((mset(q1) & mset(q2)).elements())
        if not q1 and not q2:
            # Every word was a stopword -- skip this pair entirely.
            all_is_stop_count += 1
            continue

        ratio = len(shared) / max(len(q1), len(q2))
        if labels[idx] == 0:
            different[different_index, 0] = ratio
            different_index += 1
        else:
            same[same_index, 0] = ratio
            same_index += 1
    bins = np.linspace(0, 1, 100)
    print(all_is_stop_count)
    plt.title(title)
    plt.hist(same, bins, alpha=0.5, label="is_duplicate=1")
    plt.hist(different, bins, alpha=0.5, label="is_duplicate=0")
    plt.legend(loc='upper right')
    plt.savefig('../Visualization/' + save_title)
    plt.show()
Example #4
0
def problem5():
	"""Project Euler #5: product of the primes (with multiplicity) covering 0..20."""
	accumulated = mset()
	for value in range(21):
		# Add only the prime factors this value needs beyond what is pooled.
		needed = mset(hf.primeFactorization(value)) - accumulated
		accumulated += needed
	return reduce(lambda a, b: a * b, list(accumulated.elements()))
Example #5
0
def problem5():
    """Project Euler #5: accumulate the maximal prime multiplicities of 0..20."""
    pool = mset()
    for n in range(21):
        pool += (mset(hf.primeFactorization(n)) - pool)
    product = 1
    # Multiply every pooled prime factor (with multiplicity) together.
    for f in pool.elements():
        product *= f
    return product
Example #6
0
def clist_manager(Scenario_Outcomes, Scenarios, Uncertain_Parameters, Parameters, cuts, i, sets, use_sets):
	"""Build scenario groupings for each cut combination in ``i`` and collect
	the non-anticipativity (NAC) relations they induce.

	Parameters (inferred from usage -- confirm against callers):
		Scenario_Outcomes, Scenarios, Parameters: problem data passed through
			to Combine_Group / Subset_MST.
		Uncertain_Parameters: objects exposing .Name and .Realization_Type.
		cuts: list of realization labels; each v in ``i`` removes some of them.
		i: iterable of cut combinations (None entries are skipped).
		sets: precomputed groupings keyed by cut combination (used when
			``use_sets`` is true).
		use_sets: if False, groupings are rebuilt from singletons.

	Returns (mst, new_set_hold): the accumulated NAC set and a dict mapping
	each cut combination (as a tuple) to its deep-copied groupings.
	"""
	new_set_hold = {}
	mst = set()
	### Combine groups
	for v in i:
		if v != None: 
			if not use_sets:
				# Start from singleton groups, one per scenario.
				new_sets = [[j,] for j in Scenarios]
				Remaining_Realizations = copy.deepcopy(cuts)
				
				### This builds sets from the ground up, not ideal 
				for c in v:
					Remaining_Realizations.remove(c)
					
					## Convert to Group Combine Form (1_2_3, 1_1_2, 1_0_1)
					RR = copy.deepcopy(Remaining_Realizations)
					for up in Uncertain_Parameters:
						ll = 0
						if up.Realization_Type == 'gradual':
							# Tag gradual realizations with a stage-range
							# suffix (name_ll_ll+1); ll counts matches so far.
							for k in Remaining_Realizations:
								if k == str(up.Name):
									idx = RR.index(k)
									RR[idx] = RR[idx] + '_' + str(ll) + '_' + str(ll + 1)
									ll += 1	
				
					new_sets = Combine_Group(new_sets,Scenario_Outcomes,RR, Parameters)
			else:
				Remaining_Realizations = copy.deepcopy(cuts)
				
				for c in v:
					Remaining_Realizations.remove(c)
				
				# Same gradual-realization relabeling as in the branch above.
				RR = copy.deepcopy(Remaining_Realizations)
				for up in Uncertain_Parameters:
						ll = 0
						if up.Realization_Type == 'gradual':
							for k in Remaining_Realizations:
								if k == str(up.Name):
									idx = RR.index(k)
									RR[idx] = RR[idx] + '_' + str(ll) + '_' + str(ll + 1)
									ll += 1	
				
				# Reuse a precomputed grouping whose key shares all but one
				# cut with v (multiset-intersection size == len(v) - 1).
				for sl in sets:
					
					if len(list((mset(v) & mset(sl)).elements())) == len(v) - 1:
						## Convert this
						new_sets = Combine_Group(sets[sl],Scenario_Outcomes,RR, Parameters)
						break
					
				# NOTE(review): if no grouping matched above, ``new_sets`` may
				# be unbound (or stale from a previous v) when used below --
				# this probe only prints; it does not recover.
				if 'new_sets' not in locals():
					print(mset(v))
				
			new_set_hold[tuple(v)] = copy.deepcopy(new_sets)
			# Fold the NAC relations implied by each grouping into mst.
			for ss in new_sets:
				NAC_add = Subset_MST(ss,Scenario_Outcomes,mst)
				mst = mst.union(set(NAC_add))
				
	return mst,new_set_hold
def findMaxSizeWords(wordlist, letters):
    """Return the longest words buildable from ``letters`` (with multiplicity) and their length."""
    best, best_len = [], 0
    available = mset(letters)
    for word in wordlist:
        # A word is buildable iff the multiset intersection covers every letter.
        usable = sum((mset(word) & available).values())
        if usable == len(word) and len(word) >= best_len:
            if len(word) > best_len:
                best_len = len(word)
                best = [word]
            else:
                best.append(word)
    return best, best_len
Example #8
0
def find_words(letters):
    """Return sorted dictionary words spellable from ``letters`` that contain the center letter."""
    matches = []
    available_bits = bitify(letters)
    available_counts = mset(letters)
    center = letters[4]  # the mandatory (center) letter
    for word in hash:
        if center not in word:
            continue
        # Cheap bitmask prefilter before the exact multiset-containment test.
        if hash[word] & available_bits != hash[word]:
            continue
        if mset(word) & available_counts == mset(word):
            matches.append(word)
    return sorted(matches)
Example #9
0
def killdup(A, B):
    """Split A and B into (A-only, B-only, shared) element lists, each sorted descending."""
    count_a = mset(A)
    count_b = mset(B)
    shared = count_a & count_b
    # Remove the shared portion from each side; elements() skips zero counts.
    count_a.subtract(shared)
    count_b.subtract(shared)
    return [
        sorted(count_a.elements(), reverse=True),
        sorted(count_b.elements(), reverse=True),
        sorted(shared.elements(), reverse=True),
    ]
Example #10
0
def killdup(A, B):
    """Partition two iterables into exclusive-to-A, exclusive-to-B and common
    elements; each of the three returned lists is sorted descending."""
    multi_a, multi_b = mset(A), mset(B)
    common = multi_a & multi_b

    def _desc(counter):
        # Materialize the counter's elements, largest first.
        out = list(counter.elements())
        out.sort(reverse=True)
        return out

    multi_a.subtract(common)
    multi_b.subtract(common)
    return [_desc(multi_a), _desc(multi_b), _desc(common)]
    def NERPass(self, text, id):
        """Run the Java-side POS/NER tagger over ``text`` and bucket the
        lemmatized tokens by coarse tag class.

        Parameters:
            text: raw tweet text (lower-cased before tagging).
            id:   record id, used only when logging slang/emoticon tokens.

        Returns a dict with keys (each present only if tagged):
            'U'           -- Counter of lemmas for the 'U' tag class.
            '#', 'V', 'T' -- one Counter per matched tag class.
            'N'           -- union Counter over all noun-like tag classes.

        Side effect: writes this tweet's slang/emoticon tokens to
        ``self.slang_writer``.
        """
        Preprocessed = defaultdict()
        # Py4J call into the JVM tagger; returns a map of tag -> token string.
        java_object = DynammicClustering.gateway.entry_point.getStack(
            text.lower())  # return {A: "adjective list",N:"nouns list....}
        keysToMatch = {'#', 'V', 'T'}
        nounKeys = {'N', '^', 'Z', 'M', 'S'}
        removeables = {'!', '~', 'G', 'E', '#'}
        slangs = []
        if 'U' in java_object:
            #lematization does better preprocessing for more matches
            Preprocessed['U'] = mset(
                self.lmtz.lemmatize(word, 'v')
                for word in re.split(" ", java_object['U']))
        #match { '#', 'V', 'T'}
        for key in keysToMatch:
            if (key in java_object):
                Preprocessed[key] = mset(
                    self.lmtz.lemmatize(word, 'v')
                    for word in re.split(" ", java_object[key]))
        sett = None
        #match Nouns (all kinds in here)
        for key in nounKeys:
            if (key in java_object):
                if sett is None:
                    sett = mset(
                        self.lmtz.lemmatize(word, 'v')
                        for word in re.split(" ", java_object[key]))
                else:
                    # Counter union: keep the higher count per lemma.
                    sett = sett | mset(
                        self.lmtz.lemmatize(word, 'v')
                        for word in re.split(" ", java_object[key]))

        if sett != None:
            Preprocessed['N'] = sett
        sett = None
        #slangs and emotican in the tweets
        for key in removeables:
            if (key in java_object):
                if sett is None:
                    sett = set(
                        self.lmtz.lemmatize(word, 'v')
                        for word in re.split(" ", java_object[key]))
                else:
                    sett = sett | set(
                        self.lmtz.lemmatize(word, 'v')
                        for word in re.split(" ", java_object[key]))

        if sett != None:
            self.slang_writer.writerow({'id': id, 'slangs': list(sett)})

        return Preprocessed
    def get_target_reservation_uniform(self, reservations):
        """Pick the reservation whose replica OSDs least overlap the
        most-recently-used OSD list.

        Grants FCFS among reservations with *no* replica OSD in
        ``self.mru_osd``; otherwise tracks the reservation with the smallest
        multiset overlap (ties broken by how recently the overlapping OSDs
        were used, then FCFS).

        BUG FIXES vs. the original:
        - the else-branch compared the overlap of the *stored alternate*
          against itself (it used ``target_reservation_alt`` instead of the
          current ``reservations[i]``), so no later reservation could win;
        - the tie-break loop reused the outer index ``i`` (clobbering it) and
          assigned a single element, ``intersection[i]``, where the overlap
          *list* was required.
        """
        target_reservation = None
        target_reservation_alt = None
        target_reservation_alt_min = None

        for i in range(len(reservations)):
            replica_ids = [osd.id for osd in reservations[i].pg.replica_osd]
            if len(set(replica_ids) & set(self.mru_osd)) == 0:
                # No replica OSD was recently used: grant this one (FCFS).
                target_reservation = reservations[i]
                break

            elif target_reservation_alt is None:
                # First overlapping reservation becomes the initial alternate.
                target_reservation_alt = reservations[i]
                target_reservation_alt_min = list(
                    (mset(self.mru_osd) & mset(replica_ids)).elements())

            else:
                # Overlap of the *current* reservation with the MRU list.
                intersection = list(
                    (mset(self.mru_osd) & mset(replica_ids)).elements())
                if len(intersection) < len(target_reservation_alt_min):
                    target_reservation_alt_min = intersection
                    target_reservation_alt = reservations[i]
                elif len(intersection) == len(target_reservation_alt_min):
                    if intersection == target_reservation_alt_min:
                        # Identical overlap: keep the earlier one (FCFS).
                        break
                    else:
                        # Prefer the overlap whose OSDs were used less
                        # recently (earlier position in mru_osd).
                        for j in range(len(intersection)):
                            if self.mru_osd.index(
                                    intersection[j]) < self.mru_osd.index(
                                        target_reservation_alt_min[j]):
                                target_reservation_alt_min = intersection
                                target_reservation_alt = reservations[i]
                                break

        if target_reservation is None:
            target_reservation = target_reservation_alt

        return target_reservation
def solution(str1, str2):
    """Jaccard similarity (scaled by 65536) over alphabetic-only 2-grams."""
    def _bigrams(text):
        # Upper-cased two-char slices; any slice with a non-letter is dropped.
        return [
            text[j:j + 2].upper()
            for j in range(len(text) - 1)
            if not re.findall('[^a-zA-Z]+', text[j:j + 2])
        ]

    grams1 = _bigrams(str1)
    grams2 = _bigrams(str2)

    # Both empty is defined as a perfect score.
    if not grams1 and not grams2:
        return 65536

    counts1, counts2 = mset(grams1), mset(grams2)
    inter = sum((counts1 & counts2).values())
    union = sum((counts1 | counts2).values())
    return int(inter / union * 65536)
def NERPass(text):
    """Tag ``text`` via the Java gateway and return lemma Counters per tag key."""
    Preprocessed = defaultdict()
    tagged = gateway.entry_point.getStack(
        text.lower())  # return {A: "adjective list",N:"nouns list....}

    def _lemma_counts(raw):
        # Lemmatize (verb mode) each space-separated token and count them.
        return mset(lmtz.lemmatize(tok, 'v') for tok in re.split(" ", raw))

    if 'U' in tagged:
        Preprocessed['U'] = _lemma_counts(tagged['U'])
    for tag in {'N', '^', 'Z', 'M', '#', 'V', 'T'}:
        if tag in tagged:
            Preprocessed[tag] = _lemma_counts(tagged[tag])
    return Preprocessed
Example #15
0
def compare_genotype(ref_vcf, eval_vcf):
    """Count how many of the two genotype alleles match between a reference
    and an evaluation VCF record.

    Both records are dicts with 'REF', 'ALT' (comma-separated alternates)
    and 'GT' (a '/' or '|' separated pair of allele indices).  Returns 0, 1
    or 2: the size of the multiset intersection of the two allele pairs.
    (Python 2 module -- note the print statement below.)
    """
    ref_alt = ref_vcf["ALT"].split(",")
    ref_gt = re.split("[/|]", ref_vcf['GT'])
    ref_alt.insert(0,ref_vcf['REF'])
    #use the GT indices as array indices as VCF intends - Ref = index 0, all alts = indices 1..n
    ref_genotype = [ref_alt[int(ref_gt[0])],ref_alt[int(ref_gt[1])]]
    eval_alt = eval_vcf["ALT"].split(",")
    eval_gt = re.split("[/|]", eval_vcf['GT'])
    eval_alt.insert(0, eval_vcf['REF'])
    eval_genotype = [eval_alt[int(eval_gt[0])],eval_alt[int(eval_gt[1])]]
    # Multiset intersection handles het/hom correctly (e.g. A/A vs A/T -> 1).
    shared_allele_counts = mset(ref_genotype) & mset(eval_genotype)
    number_of_alleles_matched = sum(shared_allele_counts.values())
    # Log any genotype that is not a full (2-allele) match, for inspection.
    if number_of_alleles_matched < 2:
        print ref_genotype, eval_genotype, shared_allele_counts, number_of_alleles_matched
    return number_of_alleles_matched 
Example #16
0
def getConcepts(input_file):
    """Count AMR concepts in ``input_file``.

    Each '(...)' line is parsed as a relation triple; the two concept names
    are counted once per node index.  Indices are cached per block and the
    cache resets on blank lines.
    """
    concepts = mset()  # Counter of concept -> occurrence count
    seen_indices = {}

    with codecs.open(input_file, 'r', 'utf-8') as handle:
        for raw in handle:
            raw = raw.strip()

            # A blank line ends the current AMR block: forget its indices.
            if raw == '' and seen_indices:
                seen_indices = {}
                continue

            if not (raw.startswith('(') and raw.endswith(')')):
                continue

            inner = raw[1:-1]  # strip the surrounding parentheses
            # Drop the "index / " prefixes, leaving just the concept names.
            triple = re.sub(r'[0-9A-Za-z\-]+ \/ ', '', inner)

            try:
                first, _, second = triple.split(', ')
                idx1, idx2 = re.findall(r'([0-9A-Za-z]+) \/', inner)
            except ValueError:
                continue

            if idx1 not in seen_indices:
                seen_indices[idx1] = 1
                concepts[first.lower()] += 1

            if idx2 not in seen_indices:
                seen_indices[idx2] = 1
                concepts[second.lower()] += 1

    return concepts
Example #17
0
def gcd(number1, number2):
	"""Return the greatest common divisor of two integers.

	The GCD is the product of the prime factors the two numbers share
	(with multiplicity), i.e. the multiset intersection of their
	factorizations.

	BUG FIX: the original's final ``return`` was indented with 8 spaces
	inside an otherwise tab-indented body -- a TabError on Python 3, and
	visually ambiguous (it read as being inside the loop).  The body now
	uses tabs consistently, and the accumulator no longer shadows the
	function's own name.
	"""
	factor1 = factorize(number1)
	factor2 = factorize(number2)
	# Multiset intersection keeps each shared prime with its minimal count.
	commonfactors = list((mset(factor1) & mset(factor2)).elements())
	result = 1
	for e in commonfactors:
		result *= e
	return result
Example #18
0
def getConcepts(input_file):
    """Build a Counter of AMR concept frequencies from ``input_file``.

    Concept mentions are de-duplicated per node index within a block; an
    empty line starts a new block.
    """
    concepts = mset()
    cached_indices = {}

    def _count(index, concept):
        # Count each node index at most once per block.
        if index not in cached_indices:
            cached_indices[index] = 1
            concepts[concept.lower()] += 1

    with codecs.open(input_file, 'r', 'utf-8') as infile:
        for line in infile:
            line = line.strip()

            if line == '' and cached_indices:
                cached_indices = {}
                continue

            if line.startswith('(') and line.endswith(')'):
                line = line[1:-1]  # remove parentheses
                triple = re.sub(r'[0-9A-Za-z\-]+ \/ ', '', line)

                try:
                    c1, _, c2 = triple.split(', ')
                    idx1, idx2 = re.findall(r'([0-9A-Za-z]+) \/', line)
                except ValueError:
                    continue

                _count(idx1, c1)
                _count(idx2, c2)

    return concepts
Example #19
0
    def p_keepdrop_expr(self, p):
        """dice_expr : dice_expr KEEPHIGHEST expression
                     | dice_expr KEEPLOWEST expression
                     | dice_expr DROPHIGHEST expression
                     | dice_expr DROPLOWEST expression
                     | dice_expr KEEPHIGHEST
                     | dice_expr KEEPLOWEST
                     | dice_expr DROPHIGHEST
                     | dice_expr DROPLOWEST"""
        # PLY grammar action (the docstring above IS the grammar -- do not
        # edit it): apply a keep/drop modifier to a roll list.
        rollList = p[1]
        op = p[2]
        if len(p) > 3:
            keepDrop = self._sumDiceRolls(p[3])
        else:
            # default to 1 if no right arg was given
            keepDrop = 1

        # filter dice that have already been dropped
        validRolls = [r for r in rollList.rolls if not r.dropped]

        # if it's a drop op, invert the number into a keep count
        if op.startswith('d'):
            opType = 'drop'
            keepDrop = len(validRolls) - keepDrop
        else:
            opType = 'keep'

        if len(validRolls) < keepDrop:
            raise InvalidOperandsException(u'attempted to {} {} dice when only {} were rolled'.format(opType,
                                                                                                      keepDrop,
                                                                                                      len(validRolls)))

        # kh / dl both keep the highest rolls; kl / dh keep the lowest.
        if op == 'kh' or op == 'dl':
            keptRolls = heapq.nlargest(keepDrop, validRolls)
        elif op == 'kl' or op == 'dh':
            keptRolls = heapq.nsmallest(keepDrop, validRolls)
        else:
            raise NotImplementedError(u"operator '{}' is not implemented (also, this should be impossible?)")

        # determine which rolls were dropped, and mark them as such
        # (multiset difference keeps duplicate roll values straight)
        dropped = list((mset(validRolls) - mset(keptRolls)).elements())
        for drop in dropped:
            index = rollList.rolls.index(drop)
            rollList.rolls[index].dropped = True

        p[0] = rollList
Example #20
0
def solution(str1, str2):
    """Similarity of two strings via multiset Jaccard over 2-letter shingles, scaled to 65536."""
    def _shingles(text):
        pairs = [text[j:j + 2].lower() for j in range(len(text) - 1)]
        # Keep only pairs that are fully alphabetic after filter_text.
        return [p for p in pairs if len(filter_text(p)) == 2]

    left = _shingles(str1)
    right = _shingles(str2)

    overlap = sum((mset(left) & mset(right)).values())
    total = len(left) + len(right) - overlap

    # Empty union is defined as a perfect match.
    if total == 0:
        return 65536

    return int((overlap / total) * 65536)
def insert_rule(lhs, rhs):
    """Canonicalize a hypergraph grammar production and record it.

    lhs: comma-separated outer-vertex names (or a string containing 'S' for
         the root case).
    rhs: iterable of edges -- a string "a,b,c" for a nonterminal hyperedge,
         or a tuple of vertex ids for a terminal edge.

    Outer vertices are renamed via ``num_to_word``; inner vertices get fresh
    numeric names on first sighting.  The canonical rule string is counted
    in the module-global ``production_rules`` and the raw form appended to
    ``uncompressed_rule``.  Returns the index of the appended rule.
    """
    nodes = lhs.split(",")
    outer_verts = {}
    inner_verts = {}
    i = 0
    if 'S' in lhs:
        outer_verts['S'] = 'S'
    else:
        for x in nodes:
            outer_verts[x] = num_to_word(i)
            i += 1
    # Number of outer vertices in this cluster (0 for the root 'S' case).
    total_in_cluser = i

    rhs_list = []
    i = 1
    for x in rhs:
        if isinstance(x, str):
            # hyperedge
            rhs_l = []
            for y in x.split(","):
                if y in outer_verts:
                    y = outer_verts[y]
                elif y in inner_verts:
                    y = inner_verts[y]
                else:
                    # First sighting of an inner vertex: assign next number.
                    inner_verts[y] = str(i)
                    y = str(i)
                    i += 1
                rhs_l.append(y)
            # ":N" marks a nonterminal edge in the canonical form.
            rhs_list.append("("+",".join(sorted(rhs_l))+":N)")
        else:
            rhs_l = []
            for y in x:  # x is a terminal edge (tuple)
                y = str(y)
                if y in outer_verts:
                    y = outer_verts[y]
                elif y in inner_verts:
                    y = inner_verts[y]
                else:
                    inner_verts[y] = str(i)
                    y = str(i)
                    i += 1
                rhs_l.append(y)
            # ":T" marks a terminal edge in the canonical form.
            rhs_list.append("("+",".join(sorted(rhs_l))+":T)")
    lhs_str = ",".join(sorted(outer_verts.values()))
    rhs_str = "".join(sorted(rhs_list))
    #print lhs_str + " => " + rhs_str

    # production_rules maps lhs_str -> Counter of canonical rhs strings.
    if lhs_str in production_rules:
        production_rules[lhs_str][rhs_str] += 1
    else:
        rhs_mset = mset()
        rhs_mset[rhs_str] += 1
        production_rules[lhs_str] = rhs_mset
    uncompressed_rule.append((lhs, lhs_str, (total_in_cluser, rhs_str)))
    return len(uncompressed_rule) - 1
Example #22
0
def is_multisyllabic(word):
    """ Returns true if word has 3 or more vowels, otherwise returns false.
        For example Zora is 2-syllabic (False), Nebojsa is 3-syllabic (True).
    """
    counts = mset(word)
    # Total up every vowel occurrence in the word.
    vowel_total = sum(counts[v] for v in defs.VOWELS)
    return vowel_total >= 3
Example #23
0
    def getHint(self, secret, guess):
        """Bulls-and-cows hint for ``guess`` against ``secret``.

        :type secret: str
        :type guess: str
        :rtype: str -- formatted "<bulls>A<cows>B"
        """
        bulls = 0
        secret_rest = []
        guess_rest = []
        # Exact positional matches are bulls; the rest are cow candidates.
        for s_ch, g_ch in zip(secret, guess):
            if s_ch == g_ch:
                bulls += 1
            else:
                secret_rest.append(s_ch)
                guess_rest.append(g_ch)
        # Cows = multiset overlap of the leftover digits.
        cows = sum((mset(secret_rest) & mset(guess_rest)).values())
        return "%dA%dB" % (bulls, cows)
Example #24
0
def rbo(ysess, gsess, topk, RankByG=False, multi=False):
    """Rank-biased overlap of a predicted ranking against a gold ranking.

    ysess:   predicted session (list of normalized urls).
    gsess:   gold session (list of dicts with url under key 'l').
    topk:    depth to evaluate (clamped so the best score is 1).
    RankByG: restrict ysess to urls appearing in gsess, in gold order.
    multi:   treat prefixes as multisets (count duplicates) instead of sets.

    BUG FIX: the multiset branch computed ``len(sum(...values()))`` --
    ``len()`` of an int -- which always raised TypeError and silently fell
    into a bare-except fallback that counted a *different* quantity
    (cross-counts rather than the multiset intersection).  It now sums the
    intersection counts directly; the broken try/except is gone.
    """
    wt = 0.8
    # newly added to make sure the best score is 1
    topk = min(topk, len(gsess) - 1)
    gsess = [url_normalize(t['l']) for t in gsess]
    if RankByG:
        ysess = [t for t in gsess if t in ysess]    # don't care about the non-overlapping qls
    ret = 0.
    for i in xrange(topk + 1):
        if multi:
            # Multiset intersection size of the two depth-(i+1) prefixes.
            capsize = sum((mset(ysess[:i + 1]) & mset(gsess[:i + 1])).values())
        else:
            capsize = len(set(ysess[:i + 1]).intersection(set(gsess[:i + 1])))
        ret += capsize * math.pow(wt, i) / (i + 1)
    # Normalizer: the score a perfect ranking would attain at this depth.
    nf = (1 - math.pow(wt, topk + 1)) / (1 - wt)
    return ret / nf
Example #25
0
    def dice_expr(self, p):
        # Parser action (sly-style: rules live in decorators, so no docstring
        # is added here): apply a keep/drop modifier (kh/kl/dh/dl) to a
        # parsed roll list and return it with dropped dice marked.
        rollList = p.dice_expr
        op = p[1]
        if 'expr' in p._namemap:
            keepDrop = self._sumDiceRolls(p.expr)
        else:
            # default to 1 if no right arg was given
            keepDrop = 1

        # filter dice that have already been dropped
        validRolls = [r for r in rollList.rolls if not r.dropped]

        # if it's a drop op, invert the number into a keep count
        if op.startswith('d'):
            opType = 'drop'
            keepDrop = len(validRolls) - keepDrop
        else:
            opType = 'keep'

        if len(validRolls) < keepDrop:
            raise InvalidOperandsException(
                'attempted to {} {} dice when only {} were rolled'.format(
                    opType, keepDrop, len(validRolls)))

        # kh / dl both keep the highest rolls; kl / dh keep the lowest.
        if op == 'kh' or op == 'dl':
            keptRolls = heapq.nlargest(keepDrop, validRolls)
        elif op == 'kl' or op == 'dh':
            keptRolls = heapq.nsmallest(keepDrop, validRolls)
        else:
            raise NotImplementedError(
                "operator '{}' is not implemented (also, this should be impossible?)"
                .format(op))

        # determine which rolls were dropped, and mark them as such
        # (multiset difference preserves duplicate roll values correctly)
        dropped = list((mset(validRolls) - mset(keptRolls)).elements())
        for drop in dropped:
            index = rollList.rolls.index(drop)
            rollList.rolls[index].dropped = True

        return rollList
Example #26
0
def solution(str1, str2):
    """Scaled (x65536) multiset-Jaccard similarity over valid 2-char shingles."""
    def _pairs(text):
        lowered = text.lower()
        # Keep only the slices that pass the clean_text validity check.
        return [
            lowered[j:j + 2]
            for j in range(len(lowered) - 1)
            if clean_text(lowered[j:j + 2])
        ]

    left = _pairs(str1)
    right = _pairs(str2)
    if not left and not right:
        return 65536

    overlap = sum((mset(left) & mset(right)).values())
    union_size = len(left) + len(right) - overlap
    return int((overlap / union_size) * 65536)
Example #27
0
def problem12():
    """Project Euler #12: first triangular number with more than 500 divisors."""
    triangle = 1
    step = 2
    while True:
        # Divisor count = product of (exponent + 1) over the factorization.
        divisors = 1
        for _, exponent in mset(hf.primeFactorization(triangle)).most_common():
            divisors *= exponent + 1
        if divisors > 500:
            return triangle
        # Next triangular number: T(k+1) = T(k) + (k+1).
        triangle += step
        step += 1
Example #28
0
def problem12():
    """Find the first triangle number whose divisor count exceeds 500 (PE #12)."""
    n, i = 1, 2
    while True:
        # Prime-exponent multiset of n; divisor count multiplies (e + 1).
        exponents = mset(hf.primeFactorization(n)).values()
        number = 1
        for e in exponents:
            number *= e + 1
        if number > 500:
            return n
        n += i
        i += 1
Example #29
0
def calculate_str_similarity(s1, s2):
    """Percent similarity of two strings via digram multiset intersection.

    A stand-in for PHP's similar_text (old GeneWeaver used it to pick the
    right microarray platform):

        sim(s1, s2) = 2 * |digrams(s1) & digrams(s2)|
                      / (|digrams(s1)| + |digrams(s2)|)

    arguments
        s1: string #1
        s2: string #2

    returns
        a float indicating the percent similarity between two strings
    """
    digrams1 = make_digrams(s1)
    digrams2 = make_digrams(s2)
    # Size of the multiset intersection of the two digram collections.
    shared = sum((mset(digrams1) & mset(digrams2)).values())
    return (2 * shared) / float(len(digrams1) + len(digrams2))
def getEWcnt(text, dic="1111", flag=0):
    """Count emotion-lexicon word hits in ``text``.

    dic:  4-bit mask (binary string) selecting lexicon families for the
          returned feature list: bit 3 = Bing Liu, bit 2 = FrameNet,
          bit 1 = MPQA, bit 0 = WordNet affect.
    flag: nonzero means ``text`` is raw bytes and must be decoded as UTF-8.

    Returns a list of per-lexicon hit counts in a fixed order, restricted to
    the selected families.

    BUG FIX: the MPQA branch appended ``mpqa_neg`` twice while ``mpqa_pos``
    was computed but never emitted; the second append now uses ``mpqa_pos``,
    mirroring the bl_neg/bl_pos pattern.
    """
    dic = int(dic, 2)
    if int(flag) == 0:
        text_word = word_tokenize(text)
    else:
        text_word = word_tokenize(text.decode("utf8"))
    text_word = [w.lower() for w in text_word]
    text_cnt = mset(text_word)

    # Multiset intersection with each lexicon counter = number of hits.
    EWcnt = {}
    EWcnt["bl_neg"] = sum((text_cnt & bl_neg_cnt).values())
    EWcnt["bl_pos"] = sum((text_cnt & bl_pos_cnt).values())
    EWcnt["fmn_emo"] = sum((text_cnt & fmn_emo_cnt).values())
    EWcnt["mpqa_neg"] = sum((text_cnt & mpqa_neg_cnt).values())
    EWcnt["mpqa_pos"] = sum((text_cnt & mpqa_pos_cnt).values())
    EWcnt["wn_anger"] = sum((text_cnt & wn_anger_cnt).values())
    EWcnt["wn_disgust"] = sum((text_cnt & wn_disgust_cnt).values())
    EWcnt["wn_fear"] = sum((text_cnt & wn_fear_cnt).values())
    EWcnt["wn_joy"] = sum((text_cnt & wn_joy_cnt).values())
    EWcnt["wn_sadness"] = sum((text_cnt & wn_sadness_cnt).values())

    EW = []
    if (dic & 8) > 0:
        EW.append(EWcnt["bl_neg"])
        EW.append(EWcnt["bl_pos"])
    if (dic & 4) > 0:
        EW.append(EWcnt["fmn_emo"])
    if (dic & 2) > 0:
        EW.append(EWcnt["mpqa_neg"])
        EW.append(EWcnt["mpqa_pos"])  # was mpqa_neg twice (bug)
    if (dic & 1) > 0:
        EW.append(EWcnt["wn_anger"])
        EW.append(EWcnt["wn_disgust"])
        EW.append(EWcnt["wn_fear"])
        EW.append(EWcnt["wn_joy"])
        EW.append(EWcnt["wn_sadness"])

    return EW
Example #31
0
def solution(n, stages):
    """Order stages 1..n by failure rate (stuck / reached) using the module-level ``compare``."""
    counts = mset(stages)
    f_rate = []
    for stage_no in range(1, n + 1):
        stuck = counts[stage_no]  # players currently on this stage
        # Players who already cleared this stage (are on a later one).
        passed = sum(cnt for st, cnt in counts.items() if st > stage_no)
        reached = stuck + passed
        rate = stuck / reached if reached != 0 else 0
        f_rate.append([stage_no, rate])
    return [x[0] for x in sorted(f_rate, key=functools.cmp_to_key(compare))]
Example #32
0
def getEWcnt(text, dic='1111', flag=0):
    """Return the selected emotion-lexicon hit counts for ``text``.

    dic selects lexicon families via a 4-bit binary string
    (Bing Liu / FrameNet / MPQA / WordNet affect, high bit first);
    flag nonzero means ``text`` must first be decoded from UTF-8 bytes.

    BUG FIX: the MPQA branch previously appended the 'mpqa_neg' count twice
    and never emitted 'mpqa_pos' (which was computed above); the second
    append is now 'mpqa_pos', matching the bl_neg/bl_pos pattern.
    """
    dic = int(dic, 2)
    if int(flag) == 0:
        text_word = word_tokenize(text)
    else:
        text_word = word_tokenize(text.decode('utf8'))
    text_word = [w.lower() for w in text_word]
    text_cnt = mset(text_word)

    # Hit count per lexicon = size of the multiset intersection.
    EWcnt = {}
    EWcnt['bl_neg'] = sum((text_cnt & bl_neg_cnt).values())
    EWcnt['bl_pos'] = sum((text_cnt & bl_pos_cnt).values())
    EWcnt['fmn_emo'] = sum((text_cnt & fmn_emo_cnt).values())
    EWcnt['mpqa_neg'] = sum((text_cnt & mpqa_neg_cnt).values())
    EWcnt['mpqa_pos'] = sum((text_cnt & mpqa_pos_cnt).values())
    EWcnt['wn_anger'] = sum((text_cnt & wn_anger_cnt).values())
    EWcnt['wn_disgust'] = sum((text_cnt & wn_disgust_cnt).values())
    EWcnt['wn_fear'] = sum((text_cnt & wn_fear_cnt).values())
    EWcnt['wn_joy'] = sum((text_cnt & wn_joy_cnt).values())
    EWcnt['wn_sadness'] = sum((text_cnt & wn_sadness_cnt).values())

    EW = []
    if (dic & 8) > 0:
        EW.append(EWcnt['bl_neg'])
        EW.append(EWcnt['bl_pos'])
    if (dic & 4) > 0:
        EW.append(EWcnt['fmn_emo'])
    if (dic & 2) > 0:
        EW.append(EWcnt['mpqa_neg'])
        EW.append(EWcnt['mpqa_pos'])  # was mpqa_neg twice (bug)
    if (dic & 1) > 0:
        EW.append(EWcnt['wn_anger'])
        EW.append(EWcnt['wn_disgust'])
        EW.append(EWcnt['wn_fear'])
        EW.append(EWcnt['wn_joy'])
        EW.append(EWcnt['wn_sadness'])

    return EW
Example #33
0
def solution(N, stages):
    """Rank stages 1..N by failure rate using the module-level ``kcomp`` comparator.

    Failure rate of a stage = players stuck on it / players who reached it.

    BUG FIX: the original stored the Counter in ``stage`` and then
    immediately clobbered it with the loop variable of the same name, so
    ``stage.keys()`` raised AttributeError on the first iteration.  The
    Counter now lives in its own variable.
    """
    counter = mset(stages)
    failureRateLst = []
    for stage in range(0, N):
        tryer = 0
        finisher = 0
        for key in counter.keys():
            if key - 1 == stage:
                # Players whose current stage is this one (stuck here).
                tryer += counter[key]
            elif key - 1 > stage:
                # Players who already cleared this stage.
                finisher += counter[key]
        if (tryer + finisher) != 0:
            failureRateLst.append([stage + 1, tryer / (tryer + finisher)])
        else:
            failureRateLst.append([stage + 1, 0])

    print(failureRateLst)
    answer = [
        x[0] for x in sorted(failureRateLst, key=functools.cmp_to_key(kcomp))
    ]
    return answer
# Emotion-lexicon loading: read each word list, strip trailing newlines, and
# build a Counter of its words for later multiset-intersection hit counting.
import sys
import os
import operator
from collections import Counter as mset
from nltk.tokenize import word_tokenize

dicDir = "./Dictionaries"
blDIR = os.sep.join([dicDir, "Bingliu"])
fmnDIR = os.sep.join([dicDir, "Framenet"])
mpqaDIR = os.sep.join([dicDir, "MPQA"])
wnDIR = os.sep.join([dicDir, "Wordnet"])

# NOTE(review): each doubling pass below multiplies every count by 2, so
# after Max_ch - 1 = 6 passes every lexicon word has count 2**6 = 64 --
# presumably so intersections are capped by the text's counts instead of the
# lexicon's; confirm intent.
Max_ch = 7
bl_neg_list = open(os.sep.join([blDIR, "list_negative-words.txt"])).readlines()
bl_neg_list = [word.rstrip("\n") for word in bl_neg_list]
bl_neg_cnt = mset(bl_neg_list)
for i in range(1, Max_ch):
    bl_neg_cnt += bl_neg_cnt

bl_pos_list = open(os.sep.join([blDIR, "list_positive-words.txt"])).readlines()
bl_pos_list = [word.rstrip("\n") for word in bl_pos_list]
bl_pos_cnt = mset(bl_pos_list)
for i in range(1, Max_ch):
    bl_pos_cnt += bl_pos_cnt


fmn_emo_list = open(os.sep.join([fmnDIR, "list_emotions.txt"])).readlines()
fmn_emo_list = [word.rstrip("\n") for word in fmn_emo_list]
fmn_emo_cnt = mset(fmn_emo_list)
for i in range(1, Max_ch):
    fmn_emo_cnt += fmn_emo_cnt
Example #35
0
File: 5.py Project: k-natt/public
def smallest_divisible(min_, max_):
	"""Smallest number divisible by every integer in [min_, max_]: the LCM via max prime multiplicities."""
	prime_pool = mset()
	for n in range(min_, max_ + 1):
		# Add only the factors n needs beyond what is already pooled.
		prime_pool += mset(factors(n)) - prime_pool
	return reduce(mul, prime_pool.elements(), 1)
Example #36
0
def new_elements(a, b):
    """Return the multiset symmetric difference of a and b as a list."""
    combined = mset(a) + mset(b)
    # Removing the intersection from the sum leaves the non-shared elements.
    combined.subtract(mset(a) & mset(b))
    return list(combined.elements())
Example #37
0
def has_extra_nt(singular, plural):
    """ Returns true if non-nominative forms add n or t. """
    added = mset(plural) - mset(singular)
    return bool(added['н'] or added['т'])
Example #38
0
# Emotion-lexicon loading (variant of the block above): build a Counter per
# word list for multiset-intersection hit counting.
import sys
import os
import operator
from collections import Counter as mset
from nltk.tokenize import word_tokenize

dicDir = "./Dictionaries"
blDIR = os.sep.join([dicDir, "Bingliu"])
fmnDIR = os.sep.join([dicDir, "Framenet"])
mpqaDIR = os.sep.join([dicDir, "MPQA"])
wnDIR = os.sep.join([dicDir, "Wordnet"])

# NOTE(review): the doubling loops scale every count to 2**(Max_ch - 1) = 64;
# presumably a per-word cap so intersections are bounded by the text side --
# confirm intent.
Max_ch = 7
bl_neg_list = open(os.sep.join([blDIR, "list_negative-words.txt"])).readlines()
bl_neg_list = [word.rstrip('\n') for word in bl_neg_list]
bl_neg_cnt = mset(bl_neg_list)
for i in range(1, Max_ch):
    bl_neg_cnt += bl_neg_cnt

bl_pos_list = open(os.sep.join([blDIR, "list_positive-words.txt"])).readlines()
bl_pos_list = [word.rstrip('\n') for word in bl_pos_list]
bl_pos_cnt = mset(bl_pos_list)
for i in range(1, Max_ch):
    bl_pos_cnt += bl_pos_cnt

fmn_emo_list = open(os.sep.join([fmnDIR, "list_emotions.txt"])).readlines()
fmn_emo_list = [word.rstrip('\n') for word in fmn_emo_list]
fmn_emo_cnt = mset(fmn_emo_list)
for i in range(1, Max_ch):
    fmn_emo_cnt += fmn_emo_cnt
Example #39
0
# Read two whitespace-separated integer lists from stdin and print the
# sorted values common to both.
from collections import Counter as mset

a = [int(i) for i in input().split()]
b = [int(i) for i in input().split()]

# NOTE(review): list(Counter & Counter) yields each common *key* once --
# intersection multiplicities are deliberately(?) ignored here.
inters = list(mset(a) & mset(b))
inters.sort()
print(*inters, sep=' ')
Example #40
0
def find_common_in_lists(list_1, list_2):
    """Return the multiset intersection of two lists as a list of elements.

    BUG FIX: the original carried an unreachable fragment of a factorization
    routine after the ``return`` (it referenced undefined names ``number``
    and ``smallestFactor``); the dead code has been removed.  Behavior is
    unchanged.
    """
    intersection = mset(list_1) & mset(list_2)
    return list(intersection.elements())
        
        
        
# Python 2 script: compute the GCD of two numbers as the product of their
# shared prime factors (py2 input() evaluates the typed expression).
number1 = input('Give a number')

number2 = input('Give another number')

factor1 = factorize(number1)

factor2 = factorize(number2)

# Multiset intersection keeps each common prime at its minimal multiplicity.
commonfactors =  list((mset(factor1) & mset(factor2)).elements())

gcd = 1
# the following loop takes each element in common factors
# and makes gcd = gcd * e
# the short form of which is gcd *= e
for e in commonfactors: 
  gcd *= e
  
print "The GCD of the two numbers is: " + str(gcd)
Example #42
0
    # NOTE(review): fragment of a larger scraper loop -- ``responses``,
    # ``lisF``, ``CountOfIngr``, ``BestOut`` and ``url1`` are defined in the
    # enclosing scope, which is not visible in this chunk.
    lis = responses.split("{'title': ")

    for Recipe in lis:
        one = Recipe.split(": '")

        one[0] = one[0][0:-8]  # title
        one[1] = one[1][0:-16]  # link
        one[2] = one[2][0:-14]  # ingredients
        one.pop()  # drop the picture field

        Ingr = one[2].split(", ")  # Ingr - now, lis - main

        Ingr.sort()
        lisF.sort()

        if list(
            (mset(lisF) & mset(Ingr)).elements()
        ) == Ingr:  # every ingredient of this recipe is available; keep it if it needs the fewest ingredients so far

            if len(Ingr) < CountOfIngr:
                #print(one)
                #print(len(Ingr))
                BestOut = one
                CountOfIngr = len(Ingr)
    url = url1
#for qwe in BestOut:
#    print(qwe)
print(
    "Самый простой рецепт по запросу - {}. \nОн содержит следующие ингредиенты: {} \nСсылка: {}"
    .format(BestOut[0], BestOut[2], BestOut[1]))
Example #43
0
# Smallest multiple of 1..20 (Project Euler #5): keep the highest
# multiplicity of each prime across the factorizations (Counter union),
# then multiply everything out.  Trial division based on PE Problem 3.
#
# BUG FIXES: `range` was rebound to a list, shadowing the builtin;
# `time.clock` was removed in Python 3.8 (replaced by perf_counter); and
# `num = num/i` drifted into floats (now integer floor division).
from collections import Counter as mset
from functools import reduce
from time import perf_counter

start = perf_counter()
factors = mset()  # highest multiplicity of each prime seen so far
nums = list(range(1, 20 + 1))  # renamed: no longer shadows builtin range()

for num in nums:
    i = 1
    tempFactors = mset()
    while (i < num):
        i += 1
        if (num % i == 0):  # Factor of our number
            while (num % i == 0):  # Multiple factors
                tempFactors[i] += 1
                num //= i  # integer division keeps num an int
    factors = factors | tempFactors  # Union: keep the max multiplicity

factors = list(factors.elements())
output = reduce(lambda x, y: x * y, factors)
print(factors)
print(output)
end = perf_counter()
print('Computation Time:', end - start, 'seconds')