    def score(self, text):
        total_score = 0
        prev_word = None
        for current_word in text:
            current_score = 0
            if current_word in self.pos_features:
                current_score += math.log2(self.pos_features[current_word])

            if current_word in self.neg_features:
                current_score -= math.log2(self.neg_features[current_word])

            if prev_word is not None:
                if prev_word in self.inc_features:
                    current_score *= 1.5
                elif prev_word in self.dec_features:
                    current_score /= 1.5
                elif prev_word in self.inv_features:
                    current_score *= -1.0
            prev_word = current_word
            total_score += current_score

        return total_score
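# A minimal standalone sketch of the scoring rule above on a token list, using
# hypothetical feature dictionaries (the names mirror the attributes of the class,
# but the weights and word lists here are made up for illustration).
import math

pos_features = {"good": 4.0}   # log2(4) = 2 added for positive words
neg_features = {"bad": 4.0}    # log2(4) = 2 subtracted for negative words
inc_features = {"very"}        # previous word boosts the current score by 1.5x
dec_features = {"slightly"}    # previous word dampens the current score by 1.5x
inv_features = {"not"}         # previous word flips the sign

def score_tokens(tokens):
    total, prev = 0.0, None
    for word in tokens:
        current = 0.0
        if word in pos_features:
            current += math.log2(pos_features[word])
        if word in neg_features:
            current -= math.log2(neg_features[word])
        if prev in inc_features:
            current *= 1.5
        elif prev in dec_features:
            current /= 1.5
        elif prev in inv_features:
            current *= -1.0
        prev = word
        total += current
    return total

print(score_tokens("very good".split()))  # 3.0 (2.0 boosted by 1.5)
print(score_tokens("not good".split()))   # -2.0 (sign flipped)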
def NaiveBayes(class_list, variables_counter, path):
    test = pd.read_csv(path, sep='\t', names=['num', 'class', 'desc'], header=None)
    final_list = []
    desc_list = test['desc'].tolist()
    test_class = test['class']
    total_train_files = sum(class_count.values())
    for line in desc_list:
        class_prob.clear()

        if type(line) is not str:
            continue
        record = line.split()
        for word in record:
            for classes in class_list:
                class_counter = complete_dict[classes]
                class_desc_overall_count = sum(class_counter.values())
                class_word_unique_count = len(variables_counter)
                word_count = class_counter.get(word, 0)
                # Add-one smoothed log-likelihood of the word under this class.
                prob_word_in_class = math.log2(word_count + 1) - math.log2(class_desc_overall_count + class_word_unique_count)
                class_prob[classes] = class_prob.get(classes, 0) + prob_word_in_class
        # Add the log prior for every class, not just the last one left in the loop variable.
        for classes in class_list:
            class_prob[classes] = class_prob.get(classes, 0) + math.log2(class_count[classes] / total_train_files)
        final_list.append(max(class_prob, key=class_prob.get))
    accuracy(final_list, test)
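# The per-word term above is add-one (Laplace) smoothing computed in log space,
# i.e. log2((count + 1) / (class_total + vocabulary_size)). A small hedged check
# of that arithmetic with made-up counts:
import math

class_counter = {"cheap": 3, "deal": 2, "now": 3}   # invented counts for one class
class_total = sum(class_counter.values())           # 8 tokens in this class
vocab_size = 6                                      # distinct words across all classes

def smoothed_logprob(word):
    count = class_counter.get(word, 0)
    return math.log2(count + 1) - math.log2(class_total + vocab_size)

print(smoothed_logprob("cheap"))   # log2(4/14) ≈ -1.807
print(smoothed_logprob("unseen"))  # log2(1/14) ≈ -3.807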
Example #3
File: hmm.py  Project: acapello/PLN-2015
    def tag(self, sent):
        """Returns the most probable tagging for a sentence.

        sent -- the sentence.
        """
        sent = list(sent)
        hmm = self.hmm
        tagset = hmm.tagset()
        self._pi = pi = {0: {(START,) * (hmm.n - 1): (log2(1.0), [])}}

        for k, w in enumerate(sent):
            pi[k + 1] = {}
            tag_out_probs = [(t, hmm.out_prob(w, t)) for t in tagset]
            for t, out_p in [(t, p) for t, p in tag_out_probs if p > 0.0]:
                for prev_tags, (log_p, tag_sent) in pi[k].items():
                    trans_p = hmm.trans_prob(t, prev_tags)
                    if trans_p > 0.0:
                        ts = (prev_tags + (t,))[1:]
                        new_lp = log_p + log2(trans_p) + log2(out_p)
                        if ts not in pi[k + 1] or new_lp > pi[k + 1][ts][0]:
                            pi[k + 1][ts] = (new_lp, tag_sent + [t])

        max_lp = M_INF
        res = None
        for prev_tags, (log_p, tagging) in pi[len(sent)].items():
            trans_p = hmm.trans_prob(STOP, prev_tags)
            if trans_p > 0:
                new_lp = log_p + log2(trans_p)
                if new_lp > max_lp:
                    max_lp = new_lp
                    res = tagging

        return res
Example #4
File: fattal.py  Project: tatsy/hydra
def fattal(img, beta=0.90, normalize=True):
    Lori = hydra.core.lum(img)
    L = np.log(Lori + 1.0e-6)

    h, w = L.shape
    levels = int(round(math.log2(min(h, w))) - math.log2(32))

    E = calcedge(L)
    alph = 0.1 * np.average(E)

    Phi = attenuatemap(L, alph, beta, 0, levels)

    G = gradient(L, method='forward')
    G[:,:,0] = np.multiply(G[:,:,0], Phi)
    G[:,:,1] = np.multiply(G[:,:,1], Phi)

    divG = hydra.core.remove_specials(divergence(G, method='backward'))

    Ld = np.exp(poisson_solver(divG))
    if normalize:
        Ld = Ld / hydra.core.max_quart(Ld, 0.99995)
        Ld = np.maximum(Ld, 0.0)
        Ld = np.minimum(Ld, 1.0)

    ret = np.zeros(img.shape)
    for c in range(3):
        ret[:,:,c] = img[:,:,c] / Lori * Ld

    ret = hydra.core.remove_specials(ret)
    ret = np.maximum(ret, 0.0)
    ret = np.minimum(ret, 1.0)

    return ret
    def log_probability( self, sequence, transitions_weight = None, outputs_weight = 1 ):
        """
        Returns the log-probability of the given symbol sequence. If the
        sequence is labelled, then returns the joint log-probability of the
        symbol, state sequence. Otherwise, uses the forward algorithm to find
        the log-probability over all label sequences.

        :return: the log-probability of the sequence
        :rtype: float
        :param sequence: the sequence of symbols which must contain the TEXT
            property, and optionally the TAG property
        :type sequence:  Token
        """
        if transitions_weight is None:
            transitions_weight = 1

        sequence = self._transform( sequence )

        T = len( sequence )
        EPSILON = ''
        channelModel = 0
        sourceModel = 0

        if T > 0 and sequence[ 0 ][ _TAG ] is not None:
            last_state = sequence[ 0 ][ _TAG ]

            if last_state != EPSILON:
                if transitions_weight:
                    sourceModel += transitions_weight * self._priors.logprob( last_state )

            else:
                if transitions_weight:
                    sourceModel += transitions_weight * math.log2( globalModelParameters.EpsilonTransition )

            channelModel += outputs_weight * self._output_logprob( last_state, sequence[ 0 ][ _TEXT ] )

            for t in range( 1, T ):
                state = sequence[ t ][ _TAG ]

                if last_state != EPSILON:
                    if state != EPSILON:
                        if transitions_weight:
                            sourceModel += transitions_weight * self._transitions[ last_state ].logprob( state )
                    else:
                        if transitions_weight:
                            sourceModel += transitions_weight * math.log2( globalModelParameters.EpsilonTransition )
                else:
                    # check if last_state is epsilon; if so then transition with probability of Epsilon
                    if transitions_weight:
                        sourceModel += transitions_weight * math.log2( globalModelParameters.EpsilonTransition )

                channelModel += outputs_weight * self._output_logprob( state, sequence[ t ][ _TEXT ] )

                last_state = state

            # FIXME changed exponentiation
            return { 'HMMtotal':  (sourceModel + channelModel),
                     'HMMchannel':  channelModel,
                     'HMMsource':  sourceModel,
                     'sequence': sequence}
def main():
    hashlist = []
    diglist = []
    transactionlistobj = open(sys.argv[1])
    transactionlist = transactionlistobj.read()
    splittransactionlist = str.splitlines(transactionlist)


    nextpow2 = pow(2,math.ceil(math.log2(len(splittransactionlist))))
    for i in range(len(splittransactionlist),nextpow2):
        splittransactionlist.append("null")  # pad the list out to the next power of two


    for i in range(0,len(splittransactionlist)):
        hashlist.append(hashlib.sha256(splittransactionlist[i].encode('utf-8'))) #hashing initial list
        diglist.append(hashlist[i].hexdigest()) #generating initial digested hash


    for i in range(0,math.ceil(math.log2(nextpow2))): #one pass per level of the tree
        # Note: the inner loop always runs over half the original list, so later
        # passes redo some pairs from stale entries, but the low indices that feed
        # the root are always recomputed from fresh values, so diglist[0] is correct.
        for j in range(0,int(len(diglist)/2)): #combine adjacent pairs of digests
            sha256 = hashlib.sha256(diglist[2*j].encode('utf-8')+diglist[2*j+1].encode('utf-8'))
            diglist[j] = sha256.hexdigest()


    return diglist[0]
def calc_posprob(sentence,file1,file2):
    prob_p =math.log2(pos_count_sep/(pos_count_sep+neg_count_sep))

    voca=open(file1).read()
    vocab=voca.split()
    with open(file1) as f:
        vocab_len= sum(1 for _ in f)

    pos_word=open(file2).read()
    pos_words=pos_word.split()
    #with open(file1) as f:
    #    total_pos= sum(1 for _ in f)
    total_pos=sum_words(file2)

    for word in sentence:
        if word in vocab:
            if word in pos_words:
                index= pos_words.index(word)
                count= int(pos_words[index+1])
                prob_1 = math.log2((count + 1) / (total_pos + vocab_len))  # add-one smoothing
                prob_p= prob_p+prob_1
        else:
            prob_1 = math.log2(1/(total_pos+vocab_len))
            prob_p= prob_p+prob_1
    return prob_p
Example #8
def write_axi_csr(filename, registers, module_name="AXI_CSR", register_width=32):

	# check that register width is a power of 2
	assert int(log2(register_width)) == log2(register_width), "register_width must be power of 2"
	register_byte_width = register_width // 8
	log2_register_byte_width = int(log2(register_byte_width))

	# figure out some constants
	ceil_log2_num_regs = ceil( log2(max(reg.addr for reg in registers) + 1 ) )
	axi_addr_width = ceil_log2_num_regs + log2_register_byte_width
	num_regs = 2**(ceil_log2_num_regs)

	# maximum register label width for some alignment niceties
	justification_width = max(len(reg.label) for reg in registers if reg.mode == "read" or reg.mode == "write" )

	with open(filename, "w") as FID:
		FID.write(
			t.render(
				MODULE_NAME=module_name,
				registers=registers,
				JUSTIFICATION_WIDTH=justification_width,
				NUM_REGS=num_regs,
				AXI_ADDR_WIDTH=axi_addr_width,
				REGISTER_WIDTH=register_width,
				REGISTER_BYTE_WIDTH=register_byte_width,
				LOG2_REGISTER_BYTE_WIDTH=log2_register_byte_width
				)
			)
Example #9
def run_simulation(num_blocks_per_set, num_words_per_block, cache_size,
                   replacement_policy, num_addr_bits, word_addrs):

    num_blocks = cache_size // num_words_per_block
    num_sets = num_blocks // num_blocks_per_set

    # Ensure that the number of bits used to represent each address is always
    # large enough to represent the largest address
    num_addr_bits = max(num_addr_bits, int(math.log2(max(word_addrs))) + 1)

    num_offset_bits = int(math.log2(num_words_per_block))
    num_index_bits = int(math.log2(num_sets))
    num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits

    refs = get_addr_refs(
        word_addrs, num_addr_bits,
        num_offset_bits, num_index_bits, num_tag_bits)

    cache, ref_statuses = read_refs_into_cache(
        num_sets, num_blocks_per_set, num_index_bits,
        num_words_per_block, replacement_policy, refs)

    print()
    display_addr_refs(refs, ref_statuses)
    print()
    display_cache(cache)
    print()
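# Hedged worked example of the bit-width arithmetic above, with made-up cache
# parameters: a 16-word cache, 2 words per block, 2 blocks per set.
import math

cache_size, num_words_per_block, num_blocks_per_set = 16, 2, 2
num_addr_bits = 8

num_blocks = cache_size // num_words_per_block          # 8
num_sets = num_blocks // num_blocks_per_set             # 4
num_offset_bits = int(math.log2(num_words_per_block))   # 1
num_index_bits = int(math.log2(num_sets))               # 2
num_tag_bits = num_addr_bits - num_index_bits - num_offset_bits
print(num_offset_bits, num_index_bits, num_tag_bits)    # 1 2 5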
def test_EquationBC_mixedpoisson_matfree_fieldsplit(eq_type, mat_type, porder):

    # Mixed poisson with EquationBCs
    # matfree with fieldsplit pc

    solver_parameters = {'mat_type': mat_type,
                         'ksp_type': 'gmres',
                         'ksp_atol': 1e-09,
                         'ksp_rtol': 1e-09,
                         'ksp_max_it': 200000,
                         'ksp_divtol': 1e8,
                         'pc_type': 'fieldsplit',
                         'pc_fieldsplit_type': 'schur',
                         'pc_fieldsplit_schur_fact_type': 'full',
                         'fieldsplit_0_ksp_type': 'gmres',
                         'fieldsplit_0_ksp_rtol': 1.e-12,
                         'fieldsplit_0_pc_type': 'python',
                         'fieldsplit_0_pc_python_type': 'firedrake.AssembledPC',
                         'fieldsplit_0_assembled_pc_type': 'asm',
                         'fieldsplit_1_ksp_type': 'gmres',
                         'fieldsplit_1_ksp_rtol': 1.e-12,
                         'fieldsplit_1_pc_type': 'none'}
    err = []

    if eq_type == "linear":
        for i, mesh_num in enumerate([8, 16]):
            err.append(linear_poisson_mixed(solver_parameters, mesh_num, porder))
    elif eq_type == "nonlinear":
        for i, mesh_num in enumerate([8, 16]):
            err.append(nonlinear_poisson_mixed(solver_parameters, mesh_num, porder))

    assert(abs(math.log2(err[0][0]) - math.log2(err[1][0]) - (porder+1)) < 0.03)
Example #11
    def test_tag2(self):
        tagset = {"D", "N", "V"}
        trans = {
            ("<s>", "<s>"): {"D": 1.0},
            ("<s>", "D"): {"N": 1.0},
            ("D", "N"): {"V": 0.8, "N": 0.2},
            ("N", "N"): {"V": 1.0},
            ("N", "V"): {"</s>": 1.0},
        }
        out = {"D": {"the": 1.0}, "N": {"dog": 0.4, "barks": 0.6}, "V": {"dog": 0.1, "barks": 0.9}}
        hmm = HMM(3, tagset, trans, out)
        tagger = ViterbiTagger(hmm)

        x = "the dog barks".split()
        y = tagger.tag(x)

        pi = {
            0: {("<s>", "<s>"): (log2(1.0), [])},
            1: {("<s>", "D"): (log2(1.0), ["D"])},
            2: {("D", "N"): (log2(0.4), ["D", "N"])},
            3: {
                ("N", "V"): (log2(0.8 * 0.4 * 0.9), ["D", "N", "V"]),
                ("N", "N"): (log2(0.2 * 0.4 * 0.6), ["D", "N", "N"]),
            },
        }
        self.assertEqualPi(tagger._pi, pi)

        self.assertEqual(y, "D N V".split())
Example #12
def ndcg(run, qrels, detailed = False):
    """
    Computes NDCG using the formula
    DCG_p = rel_1 + \sum_{i = 2}^p( rel_i / log_2(i) )
    Where p is the number of entries of the given run for
    a certain topic, and rel_i is the relevance score of the
    document at rank i.
    """
    details = {}
    avg = 0
    for topicId, entryList in run.entries.items():
        relevancesByRank = qrels.getRelevanceScores( topicId, [doc for (doc, _, _) in entryList] )
        sumdcg = relevancesByRank[0] + sum( [ relScore / math.log2(rank)
                                              for rank, relScore in enumerate(relevancesByRank[1:], start=2)] )
        #sumdcg = sum( [ (2**relScore - 1) / math.log2(rank+1)
        #                for rank, relScore in enumerate(relevancesByRank, start=1)] )
        relevancesByRank.sort(reverse = True) # sort the relevance list descending order
        sumIdcg = relevancesByRank[0] + sum( [ relScore / math.log2(rank)
                                               for rank, relScore in enumerate(relevancesByRank[1:], start=2)] )
        #sumIdcg = sum( [ (2**relScore - 1) / math.log2(rank+1)
        #                   for rank, relScore in enumerate(relevancesByRank, start=1)] )
        if sumIdcg == 0: print(topicId, relevancesByRank)
        details[topicId] = sumdcg / sumIdcg
        avg += sumdcg / sumIdcg
    numtopics = qrels.getNTopics()
    return avg / numtopics if not detailed else (avg / numtopics, details)
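# A small hedged check of the DCG formula from the docstring, with invented
# relevance scores; the dcg() helper here is illustrative, not part of the module.
import math

def dcg(relevances_by_rank):
    # First entry counts as-is, the rest are discounted by log2(rank), as above.
    return relevances_by_rank[0] + sum(
        rel / math.log2(rank)
        for rank, rel in enumerate(relevances_by_rank[1:], start=2))

rels = [3, 2, 0, 1]                 # relevance by rank: DCG = 3 + 2 + 0 + 0.5 = 5.5
ideal = sorted(rels, reverse=True)  # IDCG = 3 + 2 + 1/log2(3) ≈ 5.631
print(dcg(rels) / dcg(ideal))       # NDCG ≈ 0.977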
Example #13
def clog(pagerank):
    vector = list(sorted(pagerank, reverse=True))
    k = [math.log2(i) for i in range(1, len(vector) + 1)]
    y = [math.log2(i) for i in vector]
    A = np.vstack([k, np.ones(len(k))]).T
    m, c = np.linalg.lstsq(A, y)[0]
    return m
Example #14
	def __init__(self, Loader, Encoder, Parser, freq_threshold, vectors, candidates):
	
		print("Initializing MDL Learner for this round (loading data).")
		
		#Initialize
		self.language = Encoder.language
		self.Encoder = Encoder
		self.Loader = Loader
		self.Parser = Parser
		self.freq_threshold = freq_threshold
		self.tabu_start = False
		
		#Get fixed units costs per representation type
		self.type_cost = -math.log2(float(1.0/3.0))
		
		number_of_words = len(list(self.Encoder.word_dict.keys()))
		self.lex_cost = -math.log2(float(1.0/number_of_words))
		
		number_of_pos = len(list(self.Encoder.pos_dict.keys()))
		self.pos_cost = -math.log2(float(1.0/number_of_pos))
		
		number_of_domains = len(list(set(self.Encoder.domain_dict.values())))
		self.domain_cost = -math.log2(float(1.0/number_of_domains))
		
		#Load candidate constructions to use as grammar
		self.vectors = vectors
	
		#Reformat candidate to be equal length for numba
		self.candidates = self.Parser.format_grammar(candidates)
def test_EquationBC_mixedpoisson_matrix_fieldsplit(eq_type, mat_type, porder):

    # Mixed poisson with EquationBCs
    # aij with fieldsplit pc

    solver_parameters = {"mat_type": mat_type,
                         "ksp_type": "gmres",
                         "ksp_rtol": 1.e-10,
                         "ksp_atol": 1.e-10,
                         "ksp_max_it": 500000,
                         "pc_type": "fieldsplit",
                         "pc_fieldsplit_type": "schur",
                         "pc_fieldsplit_schur_fact_type": "full",
                         "fieldsplit_0_ksp_type": "gmres",
                         "fieldsplit_0_pc_type": "asm",
                         "fieldsplit_0_ksp_rtol": 1.e-12,
                         "fieldsplit_1_ksp_type": "gmres",
                         "fieldsplit_1_ksp_rtol": 1.e-12,
                         "fieldsplit_1_pc_type": "none"}
    err = []

    if eq_type == "linear":
        for i, mesh_num in enumerate([8, 16]):
            err.append(linear_poisson_mixed(solver_parameters, mesh_num, porder))
    elif eq_type == "nonlinear":
        for i, mesh_num in enumerate([8, 16]):
            err.append(nonlinear_poisson_mixed(solver_parameters, mesh_num, porder))

    assert(abs(math.log2(err[0][0]) - math.log2(err[1][0]) - (porder+1)) < 0.03)
def test_EquationBC_poisson_matfree(eq_type, mat_type, porder, with_bbc):

    # Test standard poisson with EquationBCs
    # matfree

    solver_parameters = {'mat_type': mat_type,
                         'ksp_type': 'gmres',
                         'ksp_atol': 1e-10,
                         'ksp_rtol': 1e-10,
                         'ksp_max_it': 200000,
                         'ksp_divtol': 1e8}
    err = []

    if with_bbc:
        if eq_type == "linear":
            for mesh_num in [8, 16]:
                err.append(linear_poisson_bbc(solver_parameters, mesh_num, porder))
        elif eq_type == "nonlinear":
            for mesh_num in [8, 16]:
                err.append(nonlinear_poisson_bbc(solver_parameters, mesh_num, porder))
    else:
        if eq_type == "linear":
            for mesh_num in [8, 16]:
                err.append(linear_poisson(solver_parameters, mesh_num, porder))
        elif eq_type == "nonlinear":
            for mesh_num in [8, 16]:
                err.append(nonlinear_poisson(solver_parameters, mesh_num, porder))

    assert(abs(math.log2(err[0]) - math.log2(err[1]) - (porder+1)) < 0.01)
    def test_effective_window_size(self):
        log_window_sizes = [math.log2(z) for z in self.window_sizes]
        plot = PointPlot()
        plot.new_plot("Effective Window Size", rows=1, num_curves=self.num_estimations+1)

        avg_err_bayes = self.get_errors(self.num_estimations)

        for i in range (0,len(self.window_sizes)):
            for k in range (0, self.num_estimations+1):
                self.print_values(k, self.window_sizes[i], avg_err_bayes[i][k], avg_err_bayes[i][0])

        for k in range(0, len(avg_err_bayes[0])): # which is numestimations+1
            k_array = avg_err_bayes[:,k]

            log_k_array =  [math.log2(y) for y in k_array]
            if k == 0:
                plot.add_data_to_plot(log_k_array,log_window_sizes,label = "Naive ("+str(k)+" Shifts)")
            else:
                plot.add_data_to_plot(log_k_array, log_window_sizes, label=str(k)+" Shifts")

        """naive = avg_err_bayes[0]
        avg_err_naive = [naive]* len(avg_err_bayes)


        plot.add_to_plot()"""

        plot.create_legend()
        plot.save_plot("effective_window_size_plot")
Example #18
File: forms.py  Project: czlee/tabbycat
    def save(self):
        presets = list(all_presets())
        t = self.instance

        # Identify + apply selected preset
        selected_index = self.cleaned_data["preset_rules"]
        selected_preset = next(p for p in presets if p.name == selected_index)
        selected_preferences = get_preferences_data(selected_preset, t)
        for preference in selected_preferences:
            t.preferences[preference['key']] = preference['new_value']

        # Apply public info presets
        do_public = self.cleaned_data["public_info"]
        public_preset = next((p for p in presets if p.name == do_public), False)
        if public_preset:
            public_preferences = get_preferences_data(public_preset, t)
            for preference in public_preferences:
                t.preferences[preference['key']] = preference['new_value']

        # Apply the credits
        if self.cleaned_data['tournament_staff'] != self.fields['tournament_staff'].initial:
            t.preferences["public_features__tournament_staff"] = self.cleaned_data["tournament_staff"]

        # Create break rounds (need to do so after we know teams-per-room)
        open_break = BreakCategory.objects.filter(tournament=t, is_general=True).first()
        # Check there aren't already break rounds (i.e. when importing demos)
        if open_break and not t.break_rounds().exists():
            if t.pref('teams_in_debate') == 'bp':
                num_break_rounds = math.ceil(math.log2(open_break.break_size / 2))
            else:
                num_break_rounds = math.ceil(math.log2(open_break.break_size))
            auto_make_break_rounds(t, num_break_rounds, open_break)
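# The preset above sizes the knockout bracket with ceil(log2(...)): a hedged sketch
# of that arithmetic (break_rounds_needed is a hypothetical helper, not part of the
# form). For BP the break size is halved first, presumably because BP rooms start
# with four teams and two advance, so the bracket is one level shorter.
import math

def break_rounds_needed(break_size, bp=False):
    if bp:
        return math.ceil(math.log2(break_size / 2))
    return math.ceil(math.log2(break_size))

print(break_rounds_needed(16))           # 4 rounds for a 16-team two-a-side break
print(break_rounds_needed(16, bp=True))  # 3 rounds in BP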
def computeHash(inputFile):
    # Initialize a list for storing each transaction from the file
    try:
        transactionsList = open(inputFile, 'rt').read().split('\n')
    except FileNotFoundError:
        print("The file cannot be found. Please enter a valid name.")
        return

    # If there's a newline character at the end, account for it
    if len(transactionsList[len(transactionsList) - 1]) == 0:
        transactionsList = transactionsList[:len(transactionsList) - 1]

    nextLogOfTwo = math.log2(len(transactionsList))

    # If the number of transactions in the list is not a power of 2, then append the string 'null' into it until it is
    if not nextLogOfTwo.is_integer():
        # Find what the next log of two is
        nextLogOfTwo = math.ceil(math.log2(len(transactionsList)))
        targetNumOfList = int(math.pow(2, nextLogOfTwo))

        # And append 'null'
        for i in range(0, targetNumOfList - len(transactionsList), 1):
            transactionsList.append('null')
    else:
        nextLogOfTwo = int(nextLogOfTwo)
   
    # Encode each of the items in transactionsList to their corresponding representations in bytes
    for indexOfTrans in range(0, len(transactionsList), 1):
        transactionsList[indexOfTrans] = bytes(transactionsList[indexOfTrans], 'utf-8')
 
    hashes = []
    currLevelHash = list(transactionsList)
    nextLevelHash = []

    for j in range(0, len(currLevelHash), 1):
        hashOfEachElem = hashlib.sha256()
        hashOfEachElem.update(currLevelHash[j])

        nextLevelHash.append(hashOfEachElem)
    currLevelHash = nextLevelHash

    # Now start hashing and concatenating each pair of elements up till nextLogOfTwo
    for i in range(0, nextLogOfTwo, 1):
        nextLevelHash = []
        for j in range(0, len(currLevelHash) - 1, 2):
            hashOfFirstElem = currLevelHash[j].hexdigest()
            hashOfSecondElem = currLevelHash[j+1].hexdigest()

            bothElemsConcatenated = hashOfFirstElem + hashOfSecondElem
            hashOfBothElems = hashlib.sha256()
            hashOfBothElems.update(bytes(bothElemsConcatenated, 'utf-8'))

            nextLevelHash.append(hashOfBothElems)
        currLevelHash = nextLevelHash

    # Set hashes to be equal to currLevelHash
    hashes = currLevelHash

    # And return the hexdigest of the root hash
    return hashes[0].hexdigest()
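# A compact hedged sketch of the same pairwise-hashing idea on an in-memory
# list (no file I/O); merkle_root() is illustrative and not part of the script above.
import hashlib
import math

def merkle_root(transactions):
    # Pad with "null" up to the next power of two, as above.
    target = 2 ** math.ceil(math.log2(len(transactions)))
    padded = list(transactions) + ["null"] * (target - len(transactions))

    level = [hashlib.sha256(t.encode('utf-8')).hexdigest() for t in padded]
    while len(level) > 1:
        # Hash the concatenation of each adjacent pair of hex digests.
        level = [hashlib.sha256((level[i] + level[i + 1]).encode('utf-8')).hexdigest()
                 for i in range(0, len(level), 2)]
    return level[0]

print(merkle_root(["a", "b", "c"]))  # root over the padded list ["a", "b", "c", "null"]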
Example #20
File: instruction.py  Project: mgard/epater
def immediateToBytecode(imm):
    """
    The immediate operand rotate field is a 4 bit unsigned integer which specifies a shift
    operation on the 8 bit immediate value. This value is zero extended to 32 bits, and then
    subject to a rotate right by twice the value in the rotate field. (ARM datasheet, 4.5.3)
    :param imm:
    :return:
    """
    if imm == 0:
        return 0, 0, False
    if imm < 0:
        val, rot, _ = immediateToBytecode(~imm)
        return val, rot, True

    mostSignificantOne = int(math.log2(imm))
    leastSignificantOne = int(math.log2((1 + (imm ^ (imm - 1))) >> 1))
    if mostSignificantOne < 8:
        # Are we already able to fit the value in the immediate field?
        return imm & 0xFF, 0, False
    elif mostSignificantOne - leastSignificantOne < 8:
        # Does it fit in 8 bits?
        # If so, we want to put the MSB to the utmost left possible in 8 bits
        # Remember that we can only do an EVEN number of right rotations
        if mostSignificantOne % 2 == 0:
            val = imm >> (mostSignificantOne - 6)
            rot = (32 - (mostSignificantOne - 6)) // 2
        else:
            val = imm >> (mostSignificantOne - 7)
            rot = (32 - (mostSignificantOne - 7)) // 2
        return val & 0xFF, rot, False
    else:
        # It is impossible to generate the requested value
        return None
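# Hedged round-trip check for the positive, non-inverted case: rotating the
# returned 8-bit value right by twice the rotate field should reconstruct the
# immediate. The ror32() helper is illustrative, not part of the module.
def ror32(value, amount):
    # 32-bit rotate right, as the ARM barrel shifter applies it.
    amount %= 32
    return ((value >> amount) | (value << (32 - amount))) & 0xFFFFFFFF

val, rot, inverted = immediateToBytecode(0x3F0)
assert not inverted
assert ror32(val, 2 * rot) == 0x3F0  # 0xFC rotated right by 30 bits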
Example #21
File: entropy.py  Project: Futrell/cliqs
def surprisals(counts):
    if isinstance(counts, Counter):
        counts = counts.items()
    counts = list(counts)
    A = log2(sum(c for _, c in counts))
    for x, count in counts:
        yield x, -log2(count) + A
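# Usage sketch: each surprisal is -log2(count) + log2(total) = -log2(p),
# so with counts {a: 3, b: 1} the outcomes cost about 0.415 and 2 bits.
from collections import Counter

for outcome, bits in surprisals(Counter({"a": 3, "b": 1})):
    print(outcome, round(bits, 3))  # a 0.415, then b 2.0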
Example #22
def filter_mapping_bias(genomeA, genomeB):
    '''Takes two dicts of hybrid data mapped on each genome and
    returns a single dict of genomeA data with the biased genes
    removed. GenomeA is ideally the genome with the best assembly.'''

    unbiased = {}
    genesA = list(genomeA.keys())
    genesB = list(genomeB.keys())
    genes = list(set(genesA).intersection(genesB))
    genes.sort()

    for gene in genes:
        
        expAA = genomeA[gene][0]
        expAB = genomeA[gene][1]
        expBA = genomeB[gene][0]
        expBB = genomeB[gene][1]

        # Minimum cutoff of 20 reads per gene, half or more replicates significant
        if genomeA[gene][2] + genomeA[gene][3] > 20 and genomeA[gene][4] >= 0.5:

            valueA = log2(expAA/expAB)
            valueB = log2(expBA/expBB)

            # Cutoff for mapping bias (could play around with this)
            if abs(valueA-valueB) < 1.5:
                unbiased[gene] = valueA

            else: 
                unbiased[gene] = "NA"

        else: 
            unbiased[gene] = "NA"

    return unbiased
Example #23
def getCount(i):


    number = i
    count = 0
    while True:
        if number in computed:
            return computed[number]

        if number % 2 == 0:
            lognumber = math.log2(number)
            intlognumber = int(lognumber)
            if lognumber == intlognumber:
                count += int(lognumber) + 1
                break
            else:
                divide = math.pow(2, int(lognumber))
                while number % divide != 0:
                    divide /= 2
                number = (number / divide)*3+1
                count += int(math.log2(divide))+1
        else:
            number = number * 3 + 1
            count += 1
    computed[i] = count
    return count
Example #24
def compute_mutual_info(N00, N01, N11, N10):

    if N11 <= 0:
        return 0.

    if N10 <= 0:
        return 0.

    N0x = N01 + N00
    Nx0 = N10 + N00
    N1x = N10 + N11
    Nx1 = N01 + N11
    N = N00 + N01 + N11 + N10

    #print(N00, N01, N10, N11)

    w1 = N11 / N
    w2 = N01 / N
    w3 = N10 / N
    w4 = N00 / N

    # Each non-empty cell contributes w * log2(N * cell / (row_total * col_total));
    # empty cells contribute 0, which also avoids log2(0) / division by zero.
    score = 0.0
    if N11 > 0:
        score += w1 * math.log2((N * N11) / (N1x * Nx1))
    if N01 > 0:
        score += w2 * math.log2((N * N01) / (N0x * Nx1))
    if N10 > 0:
        score += w3 * math.log2((N * N10) / (N1x * Nx0))
    if N00 > 0:
        score += w4 * math.log2((N * N00) / (N0x * Nx0))
    return score
Example #25
    def __create_tournament_tree(self):
        '''
        Creates list for every rounds. Connects every list item between other
        items, that connections makes tournament tree.

        @return: list of interconnected list items
        '''
        tournament_rounds = []
        # create lists for every round
        for i in range(int(math.log2(self.competitors_count))):
            round_list = self._init_round_list(i)
            tournament_rounds.append(round_list)
        # make interconnections between rounds - tournament tree
        for i in range(int(math.log2(self.competitors_count - 1))):
            if len(tournament_rounds[- 1 - i]) > 1:
                for j in range(len(tournament_rounds[- 1 - i]) // 2):
                    k = (2 * j)
                    tournament_rounds[- 1 - i][k].next_round = \
                        tournament_rounds[- 1 - i - 1][j]
                    tournament_rounds[- 1 - i][k + 1].next_round = \
                        tournament_rounds[- 1 - i - 1][j]
                    tournament_rounds[- 1 - i - 1][j].previous_match1 = \
                        tournament_rounds[- 1 - i][k]
                    tournament_rounds[- 1 - i - 1][j].previous_match2 = \
                        tournament_rounds[- 1 - i][k + 1]
        # set current round variable to index for the first round
        self.__current_round = len(tournament_rounds) - 1
        # return all rounds
        return tournament_rounds
Example #26
File: 4413_2.py  Project: Se7ge/csc
def qsort_based_counter(a, b, x):
    len_x = len(x)
    result = [0] * len_x
    checked = {}
    len_a = len(a)
    len_b = len(b)
    if not len_a:
        return result
    qsort(a, 0, len_a, math.floor(math.log2(len_a)))
    qsort(b, 0, len_b, math.floor(math.log2(len_b)))
    print(a)
    print(b)
    # a = sorted(a)
    # b = sorted(b)
    for i in range(0, len(x)):
        if x[i] < a[0]:
            continue
        if x[i] in checked:
            result[i] = result[checked[x[i]]]
        else:
            a_idx = bisect.bisect_right(a, x[i])
            b_idx = bisect.bisect_left(b, x[i])
            result[i] = a_idx - b_idx
        checked[x[i]] = i
    return result
def GetSampleLincData(sample, linc_exp):
	""" 
	Get the data for each linc for that sample. Get log2 fold change for FPKM compared to average and median of all samples for the linc.

	Args:
		sample = Sample from the input file name.
		linc_exp = Name of file containing the expression data for each linc in every sample.

	Returns:
		linc_dict = Dict containing signal of every SE position for the sample {(chr, (start, stop)): ((linc_id, linc_name), signal)}
	"""

	# Dict to hold data.
	linc_dict = {}

	with open(linc_exp) as f:

		# Get the sample index.
		header = f.readline().strip()
		sample_idx = GetSampleIdx(header, sample)

		for line in f:
			line = line.strip().split("\t")
			data = [float(x) for x in line[5:]]  # Convert all data to floats.
			linc_med = log2(float(median(data)))  # Get log2 median of list.
			linc_avg = log2(float(mean(data)))  # Get log2 average of the list.
			linc_val = log2(float(line[sample_idx]))  # Get log2 of the linc FPKM for the sample.
			linc_med_FC = linc_val - linc_med
			linc_avg_FC = linc_val - linc_avg

			# Grab data and add to the dict.
			chrom, start, stop, linc_id, linc_name  = line[0], int(line[1]), int(line[2]), line[3], line[4]
			linc_dict[(chrom, (start, stop))] = ((linc_id, linc_name), (linc_med_FC, linc_avg_FC))

	return linc_dict
Example #28
    def I(self, term, cluster):
        n = len(self.docVector)
        n00 = n10 = n11 = n01 = 0

        for id in self.docVector:
            if self.docCluster[id] == cluster:
                if term in self.docVector[id].dict.keys():
                    n11 += 1
                else:
                    n01 += 1
            else:
                if term in self.docVector[id].dict.keys():
                    n10 += 1
                else:
                    n00 += 1
        n1_ = n10 + n11
        n_1 = n01 + n11
        n0_ = n00 + n01
        n_0 = n00 + n10
        # #print('cluster : '+cluster.__str__())
        # #print('n00 = ',n00)
        # #print('n01 = ', n01)
        # #print('n10 = ',n10)
        # #print('n11 = ', n11)
        a1 =  n11 / n * log2(n * n11 / (n1_ * n_1)) if n11 != 0 else 0
        a2 = n01 / n * log2(n * n01 / (n0_ * n_1)) if n01 != 0 else 0
        a3 = n10 / n * log2(n * n10 / (n1_ * n_0)) if n10 != 0 else 0
        a4 = n00 / n * log2(n * n00 / (n0_ * n_0)) if n00 != 0 else 0
        return a1 + a2 + a3 + a4
Example #29
def log_value(x):
    import math
    import numpy as np
    if type(x) == list or type(x) == np.ndarray:
        return [math.log2(x_i) for x_i in x if x_i != 0]
    else:
        return math.log2(x)
def channelModel( candidate_object ):
    partitionProbData = [ ]

    for partition in candidate_object[ 'partitions' ]:


        partitionProbData.append( HMMmodel.log_probability( partition,
                                                        transitions_weight = globalModelParameters.TransitionWeight ) )


    partitionProbData.sort(key=lambda arg: arg['HMMtotal'],reverse=True)

    TopPartitionsProbData=partitionProbData[:globalModelParameters.NUM_PARTITIONS]



    candidate_object.pop( 'partitions' )

    candidate_object[ 'totalProb' ] = 0

    candidate_object[ 'channelProb' ] = round( math.log2(sum(map(lambda arg: 2**arg[ 'HMMtotal' ],TopPartitionsProbData),0)) ,
    3 )
    candidate_object[ 'langProb' ] = 0

    candidate_object[ 'HMMchannel' ] = round( math.log2(sum(map(lambda arg: 2**arg[ 'HMMchannel' ],TopPartitionsProbData),0)) ,
                                              3 )

    candidate_object[ 'HMMsource' ] = round( math.log2(sum(map(lambda arg: 2**arg[ 'HMMsource' ],TopPartitionsProbData),0)) , 3 )

    candidate_object[ 'maxPartition' ] = TopPartitionsProbData[0]['sequence']

    candidate_object['topPartitionsDict'] = TopPartitionsProbData

    return candidate_object
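# Note: the log2(sum(2**...)) pattern above can underflow to 0 when every
# per-partition log-probability is strongly negative. A hedged, numerically
# safer base-2 log-sum-exp (the standard trick, not what this module does):
import math

def log2sumexp2(log2_values):
    # log2(sum(2**x)) computed around the maximum term to avoid underflow.
    m = max(log2_values)
    if m == float("-inf"):
        return m
    return m + math.log2(sum(2 ** (x - m) for x in log2_values))

print(log2sumexp2([-1100.0, -1102.0]))  # ≈ -1099.678; naive 2**-1100 underflows to 0.0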
Example #31
File: loh.py  Project: pzweuj/DNApipeline
    def lohhla(self):
        sampleID = self.sample
        pairID = self.pair
        resultsDir = self.output

        tmpDir = resultsDir + "/tempFile/LOH_" + sampleID
        mkdir(tmpDir)

        # Extract the HLA typing results
        self.extractHLAResults()
        # Estimate tumor purity and ploidy
        self.TempPurityPloidy()

        # Source of the HLA fasta file:
        # https://github.com/jason-weirather/hla-polysolver/blob/master/data/abc_complete.fasta
        HLAloc = "/home/bioinfo/ubuntu/software/LOHHLA"
        HLAfasta = "/home/bioinfo/ubuntu/software/LOHHLA/data/abc_complete.fasta"
        HLAexon = "/home/bioinfo/ubuntu/software/LOHHLA/data/hla.dat"
        cmd = """
            Rscript {HLAloc}/LOHHLAscript.R \\
                --patientId {sampleID} \\
                --outputDir {tmpDir} \\
                --normalBAMfile {resultsDir}/bam/{pairID}.bam \\
                --tumorBAMfile {resultsDir}/bam/{sampleID}.bam \\
                --BAMDir {resultsDir}/bam \\
                --hlaPath {tmpDir}/{sampleID}.hlas \\
                --HLAfastaLoc {HLAfasta} \\
                --CopyNumLoc {tmpDir}/{sampleID}.solutions.txt \\
                --mappingStep TRUE \\
                --minCoverageFilter 10 \\
                --fishingStep TRUE \\
                --cleanUp FALSE \\
                --gatkDir /home/bioinfo/ubuntu/software/picard \\
                --novoDir /home/bioinfo/ubuntu/software/novocraft \\
                --LOHHLA_loc {HLAloc} \\
                --HLAexonLoc {HLAexon}
        """.format(HLAloc=HLAloc,
                   sampleID=sampleID,
                   tmpDir=tmpDir,
                   resultsDir=resultsDir,
                   pairID=pairID,
                   HLAfasta=HLAfasta,
                   HLAexon=HLAexon)
        print(cmd)
        os.system(cmd)

        # results
        f_list = os.listdir(tmpDir)
        lohfile = "-"
        for f in f_list:
            if "DNA.HLAlossPrediction_" in f:
                lohfile = f
        if lohfile == "-":
            print("No result file found")
            exit()
        else:
            lohf = open(tmpDir + "/" + lohfile, "r")
            loh_results = open(tmpDir + "/" + sampleID + ".loh.txt", "w")
            loh_results.write("HLAType\tHLACopyNumWithBAFBin\tpval\tLOHstat\n")
            for line in lohf:
                if not line.startswith("message"):
                    lines = line.split("\t")
                    HLAtype = lines[1][0:5]
                    if lines[0].startswith("homozygous_alleles"):
                        HLAtype2copyNumWithBAFBin = "-"
                        pVal = "-"
                        LOHstat = "FALSE"
                    else:
                        HLAtype2copyNumWithBAFBin = lines[28]
                        pVal = lines[33]
                        if pVal == "NA":
                            LOHstat = "-"
                        # See https://github.com/pyc1216/hlaloh-pipeline/blob/master/scripts/get_result.py for how the reported result is derived
                        elif float(pVal) < 0.01 and float(
                                HLAtype2copyNumWithBAFBin) < math.log2(0.5):
                            LOHstat = "TRUE"
                        else:
                            LOHstat = "FALSE"
                    loh_results.write(HLAtype + "\t" +
                                      HLAtype2copyNumWithBAFBin + "\t" + pVal +
                                      "\t" + LOHstat + "\n")
            loh_results.close()
            lohf.close()
        shutil.copy(tmpDir + "/" + sampleID + ".loh.txt",
                    resultsDir + "/LOH/" + sampleID + ".loh.txt")
        print("LOH analysis finished")
Example #32
def information_gain(features, attribute_index, targets):
    """
    TODO: Implement me!

    Information gain is how a decision tree makes decisions on how to create
    split points in the tree. Information gain is measured in terms of entropy.
    The goal of a decision tree is to decrease entropy at each split point as much as
    possible. This function should work perfectly or your decision tree will not work
    properly.

    Information gain is a central concept in many machine learning algorithms. In
    decision trees, it captures how effective splitting the tree on a specific attribute
    will be for the goal of classifying the training data correctly. Consider
    data points S and an attribute A. S is split into two data points given binary A:

        S(A == 0) and S(A == 1)

    Together, the two subsets make up S. If A was an attribute perfectly correlated with
    the class of each data point in S, then all points in a given subset will have the
    same class. Clearly, in this case, we want something that captures that A is a good
    attribute to use in the decision tree. This something is information gain. Formally:

        IG(S,A) = H(S) - H(S|A)

    where H is information entropy. Recall that entropy captures how orderly or chaotic
    a system is. A system that is very chaotic will evenly distribute probabilities to
    all outcomes (e.g. 50% chance of class 0, 50% chance of class 1). Machine learning
    algorithms work to decrease entropy, as that is the only way to make predictions
    that are accurate on testing data. Formally, H is defined as:

        H(S) = sum_{c in (classes in S)} -p(c) * log_2 p(c)

    To elaborate: for each class in S, you compute its prior probability p(c):

        (# of elements of class c in S) / (total # of elements in S)

    Then you compute the term for this class:

        -p(c) * log_2 p(c)

    Then compute the sum across all classes. The final number is the entropy. To gain
    more intution about entropy, consider the following - what does H(S) = 0 tell you
    about S?

    Information gain is an extension of entropy. The equation for information gain
    involves comparing the entropy of the set and the entropy of the set when conditioned
    on selecting for a single attribute (e.g. S(A == 0)).

    For more details: https://en.wikipedia.org/wiki/ID3_algorithm#The_ID3_metrics

    Args:
        features (np.array): numpy array containing features for each example.
        attribute_index (int): which column of features to take when computing the
            information gain
        targets (np.array): numpy array containing labels corresponding to each example.

    Output:
        information_gain (float): information gain if the features were split on the
            attribute_index.
    """
    data = list(features[:, attribute_index])
    label = {}
    totalyesno = targets.shape[0]
    attributecount = {}
    aveInfoEntropy = 0

    for i in range(len(data)):
        if str(data[i]) in label:
            label[str(data[i])].append(i)
        else:
            label[str(data[i])] = [i]

    total_yes = 0
    total_no = 0

    for i in targets:
        if i == 0:
            total_yes += 1
        else:
            total_no += 1

    yesfrac = (total_yes / totalyesno)
    nofrac = (total_no / totalyesno)

    if yesfrac == 0 or nofrac == 0:
        entropyS = 0
    else:
        entropyS = -((yesfrac) * log2(yesfrac)) - ((nofrac) * log2(nofrac))

    entropyA = {}
    fractionA = {}

    for key in label:
        no = 0
        yes = 0

        attributecount[key] = len(label[key])

        for each in label[key]:
            if targets[each] == 0:
                no += 1
            else:
                yes += 1

        frac = [(yes) / (yes + no), (no) / (yes + no)]

        if frac[0] == 0 or frac[1] == 0:
            entro = 0
        else:
            entro = -(frac[0] * (log2(frac[0]))) - (frac[1] * (log2(frac[1])))

        entropyA[key] = entro
        fractionA[key] = frac

    for key in label:
        aveInfoEntropy += (attributecount[key] / totalyesno) * entropyA[key]

    infoGain = entropyS - aveInfoEntropy
    """print("entropy {}".format(entropyA))
    print("fraction [1, 0] format: {}".format(fractionA))
    print("ave info entropy = {}".format(aveInfoEntropy))
    print(attributecount)
    print("entropyS = {}".format(entropyS))
    print(type(infoGain))
    """
    return infoGain
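# Hedged usage sketch on a tiny made-up dataset: an attribute that mirrors the
# label recovers the full 1 bit of label entropy, an uninformative one gains nothing.
import numpy as np

features = np.array([[0, 0],
                     [0, 1],
                     [1, 0],
                     [1, 1]])
targets = np.array([0, 0, 1, 1])

print(information_gain(features, 0, targets))  # 1.0 (attribute 0 mirrors the label)
print(information_gain(features, 1, targets))  # 0.0 (attribute 1 is uninformative)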
Example #33
    def _get_prediction(self, data, theta):
        """Make prediction on data based on each theta.

        Args:
            data (numpy.ndarray): 2-D array, NxD, N data points, each with D dimension
            theta (list[numpy.ndarray]): list of 1-D array, parameters sets for variational form

        Returns:
            Union(numpy.ndarray or [numpy.ndarray], numpy.ndarray or [numpy.ndarray]):
                list of NxK array, list of Nx1 array
        """
        circuits = []

        num_theta_sets = len(theta) // self._var_form.num_parameters
        theta_sets = np.split(theta, num_theta_sets)

        def _build_parameterized_circuits():
            var_form_support = isinstance(self._var_form, QuantumCircuit) \
                or self._var_form.support_parameterized_circuit
            feat_map_support = isinstance(self._feature_map, QuantumCircuit) \
                or self._feature_map.support_parameterized_circuit

            if var_form_support and feat_map_support and self._parameterized_circuits is None:
                parameterized_circuits = self.construct_circuit(
                    self._feature_map_params,
                    self._var_form_params,
                    measurement=not self._quantum_instance.is_statevector)
                self._parameterized_circuits = \
                    self._quantum_instance.transpile(parameterized_circuits)[0]

        _build_parameterized_circuits()
        for thet in theta_sets:
            for datum in data:
                if self._parameterized_circuits is not None:
                    curr_params = dict(zip(self._feature_map_params, datum))
                    curr_params.update(dict(zip(self._var_form_params, thet)))
                    circuit = self._parameterized_circuits.assign_parameters(
                        curr_params)
                else:
                    circuit = self.construct_circuit(
                        datum,
                        thet,
                        measurement=not self._quantum_instance.is_statevector)
                circuits.append(circuit)

        results = self._quantum_instance.execute(
            circuits, had_transpiled=self._parameterized_circuits is not None)

        circuit_id = 0
        predicted_probs = []
        predicted_labels = []
        for _ in theta_sets:
            counts = []
            for _ in data:
                if self._quantum_instance.is_statevector:
                    temp = results.get_statevector(circuit_id)
                    outcome_vector = (temp * temp.conj()).real
                    # convert outcome_vector to outcome_dict, where key
                    # is a basis state and value is the count.
                    # Note: the count can be scaled linearly, i.e.,
                    # it does not have to be an integer.
                    outcome_dict = {}
                    bitstr_size = int(math.log2(len(outcome_vector)))
                    for i, _ in enumerate(outcome_vector):
                        bitstr_i = format(i, '0' + str(bitstr_size) + 'b')
                        outcome_dict[bitstr_i] = outcome_vector[i]
                else:
                    outcome_dict = results.get_counts(circuit_id)

                counts.append(outcome_dict)
                circuit_id += 1

            probs = return_probabilities(counts, self._num_classes)
            predicted_probs.append(probs)
            predicted_labels.append(np.argmax(probs, axis=1))

        if len(predicted_probs) == 1:
            predicted_probs = predicted_probs[0]
        if len(predicted_labels) == 1:
            predicted_labels = predicted_labels[0]

        return predicted_probs, predicted_labels
Example #34
if __name__ == "__main__":
    while True:
        try:
            # int a, cin >> a
            a = int(input())

        except:
            break
# Math-related
math.ceil(x)  # ceiling
math.floor(x)  # floor
math.factorial(x)  # factorial
math.fabs(x)  # absolute value
math.fsum(arr)  # like sum() but more precise (avoids floating-point issues)
math.gcd(x, y)  # greatest common divisor
math.exp(x)  # e^x
math.log(x, base)
math.log2(x)  # base 2
math.log10(x)  # base 10
math.sqrt(x)
math.pow(x, y)  # slightly more precise (returns float)
math.sin(x)
# cos tan asin acos atan atan2 (radians)
# sinh cosh tanh acosh asinh atanh
math.hypot(x, y)  # Euclidean norm
math.degrees(x)  # convert x from radians to degrees
math.radians(x)  # convert x from degrees to radians
math.gamma(x)  # gamma function of x
math.pi  # constant
math.e  # constant
math.inf
Example #35
def get_axial_shape(x):
    "Simple heuristic to suggest axial_shape given max_seq_len (2 factors)"
    return (2**math.ceil(math.log2(x**0.5)), 2**math.floor(math.log2(x**0.5)))
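# Quick usage check (note the two factors need not multiply back to the original
# length when it is not a power of two):
print(get_axial_shape(1024))  # (32, 32)
print(get_axial_shape(512))   # (32, 16)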
Example #36
                current_dict["R_unknown_images"] = len(
                    images_list_unknown) / len(images_list)
                current_dict["N_classes"] = len(uni_all)
                current_dict["N_known_classes"] = len(uni_known)
                current_dict["N_unknown_classes"] = len(uni_unknown)

                N_images = len(images_list)
                current_count = {w: 0 for w in uni_all}
                for A in images_list_all:
                    w = A.split("/")[-2]
                    current_count[w] += 1
                H = 0
                for c in current_count.values():
                    if c > 0:
                        p = c / N_images
                        H = H - (p * log2(p))
                current_dict["entropy"] = H
                current_dict["ranking"] = 2**H
                entropy_list.append((H, counter_storage))

                data_set = list_data_class(
                    image_list=images_list,
                    transform_supervised=image_transform_supervised)
                data_loader = DataLoader(dataset=data_set,
                                         batch_size=batch_size,
                                         shuffle=False,
                                         num_workers=n_cpu)
                N_data = data_set.__len__()

                for batch_id, x_supervised in enumerate(data_loader, 0):
                    assert batch_id == 0
Example #37
File: UVa1590.py  Project: sjsakib/psbook
"""
from math import log2


def to_decimal(ip):
    return sum([int(b) << ((3 - i) * 8) for i, b in enumerate(ip.split('.'))])


def to_ip(n):
    return '.'.join([str(n >> (i * 8) & 255) for i in range(3, -1, -1)])


while True:
    try:
        n = int(input())
    except EOFError:
        break

    first = to_decimal(input())
    diff = 0

    for i in range(n - 1):
        c = to_decimal(input())
        diff |= (first ^ c)
    try:
        mask = ((1 << int(log2(diff) + 1)) - 1) ^ -1
    except ValueError:
        mask = ~diff
    print(to_ip(first & mask))
    print(to_ip(mask))
Example #38
File: div_game.py  Project: AkiOhtani/ALGO
from queue import deque
from math import log2

MAX_NUM = 10**16

N = int(input())

primenum = []

for i in range(2, int(log2(N)) + 1):
    for n in primenum:
        if i % n == 0:
            break
    else:
        primenum.append(i)

kouho_num = set()

for n in primenum:
    for i in range(1, int(log2(MAX_NUM)) + 1):
        if n**i <= N:
            kouho_num.add(n**i)
        else:
            break

queue = deque([(N, 0, kouho_num)])

res = 0

while queue:
    divnum, countnum, pnum = queue.popleft()
Example #39
    def __init__(self, hres=800, vres=600, with_csi_interpreter=True):
        self.enable = Signal(reset=1)
        self.vtg_sink = vtg_sink = stream.Endpoint(video_timing_layout)
        self.uart_sink = uart_sink = stream.Endpoint([("data", 8)])
        self.source = source = stream.Endpoint(video_data_layout)

        # # #

        csi_width = 8 if with_csi_interpreter else 0

        # Font Mem.
        # ---------
        os.system(
            "wget https://github.com/enjoy-digital/litex/files/6076336/ter-u16b.txt"
        )  # FIXME: Store Font in LiteX?
        os.system("mv ter-u16b.txt ter-u16b.bdf")
        font = import_bdf_font("ter-u16b.bdf")
        font_width = 8
        font_heigth = 16
        font_mem = Memory(width=8, depth=4096, init=font)
        font_rdport = font_mem.get_port(has_re=True)
        self.specials += font_mem, font_rdport

        # Terminal Mem.
        # -------------
        term_colums = 128  # 80 rounded to next power of two.
        term_lines = math.floor(vres / font_heigth)
        term_depth = term_colums * term_lines
        term_init = [ord(c) for c in [" "] * term_colums * term_lines]
        term_mem = Memory(width=font_width + csi_width,
                          depth=term_depth,
                          init=term_init)
        term_wrport = term_mem.get_port(write_capable=True)
        term_rdport = term_mem.get_port(has_re=True)
        self.specials += term_mem, term_wrport, term_rdport

        # UART Terminal Fill.
        # -------------------

        # Optional CSI Interpreter.
        if with_csi_interpreter:
            self.submodules.csi_interpreter = CSIInterpreter()
            self.comb += uart_sink.connect(self.csi_interpreter.sink)
            uart_sink = self.csi_interpreter.source
            self.comb += term_wrport.dat_w[font_width:].eq(
                self.csi_interpreter.color)

        self.submodules.uart_fifo = stream.SyncFIFO([("data", 8)], 8)
        self.comb += uart_sink.connect(self.uart_fifo.sink)
        uart_sink = self.uart_fifo.source

        # UART Reception and Terminal Fill.
        x_term = term_wrport.adr[:7]
        y_term = term_wrport.adr[7:]
        y_term_rollover = Signal()
        self.submodules.uart_fsm = uart_fsm = FSM(reset_state="RESET")
        uart_fsm.act("RESET", NextValue(x_term, 0), NextValue(y_term, 0),
                     NextState("CLEAR-XY"))
        uart_fsm.act(
            "CLEAR-XY", term_wrport.we.eq(1),
            term_wrport.dat_w[:font_width].eq(ord(" ")),
            NextValue(x_term, x_term + 1),
            If(
                x_term == (term_colums - 1), NextValue(x_term, 0),
                NextValue(y_term, y_term + 1),
                If(y_term == (term_lines - 1), NextValue(y_term, 0),
                   NextState("IDLE"))))
        uart_fsm.act(
            "IDLE",
            If(
                uart_sink.valid,
                If(
                    uart_sink.data == ord("\n"),
                    uart_sink.ready.eq(1),  # Ack sink.
                    NextState("INCR-Y")).Elif(
                        uart_sink.data == ord("\r"),
                        uart_sink.ready.eq(1),  # Ack sink.
                        NextState("RST-X")).Else(NextState("WRITE"))))
        uart_fsm.act("WRITE", uart_sink.ready.eq(1), term_wrport.we.eq(1),
                     term_wrport.dat_w[:font_width].eq(uart_sink.data),
                     NextState("INCR-X"))
        uart_fsm.act("RST-X", NextValue(x_term, 0), NextState("CLEAR-X"))
        uart_fsm.act(
            "INCR-X", NextValue(x_term, x_term + 1), NextState("IDLE"),
            If(x_term == (80 - 1), NextValue(x_term, 0), NextState("INCR-Y")))
        uart_fsm.act("RST-Y", NextValue(y_term, 0), NextState("CLEAR-X"))
        uart_fsm.act(
            "INCR-Y", NextValue(y_term, y_term + 1), NextState("CLEAR-X"),
            If(y_term == (term_lines - 1), NextValue(y_term_rollover, 1),
               NextState("RST-Y")))
        uart_fsm.act(
            "CLEAR-X", NextValue(x_term, x_term + 1), term_wrport.we.eq(1),
            term_wrport.dat_w[:font_width].eq(ord(" ")),
            If(x_term == (term_colums - 1), NextValue(x_term, 0),
               NextState("IDLE")))

        # Video Generation.
        # -----------------
        ce = (vtg_sink.valid & vtg_sink.ready)

        # Timing delay line.
        latency = 2
        timing_bufs = [
            stream.Buffer(video_timing_layout) for i in range(latency)
        ]
        self.comb += vtg_sink.connect(timing_bufs[0].sink)
        for i in range(len(timing_bufs) - 1):
            self.comb += timing_bufs[i].source.connect(timing_bufs[i + 1].sink)
        self.comb += timing_bufs[-1].source.connect(
            source, keep={"valid", "ready", "last", "de", "hsync", "vsync"})
        self.submodules += timing_bufs

        # Compute X/Y position.
        x = vtg_sink.hcount[int(math.log2(font_width)):]
        y = vtg_sink.vcount[int(math.log2(font_heigth)):]
        y_rollover = Signal(8)
        self.comb += [
            If(~y_term_rollover, y_rollover.eq(y)).Else(
                # FIXME: Use Modulo.
                If((y + y_term + 1) >= term_lines,
                   y_rollover.eq(y + y_term + 1 - term_lines)).Else(
                       y_rollover.eq(y + y_term + 1)), )
        ]

        # Get character from Terminal Mem.
        term_dat_r = Signal(font_width)
        self.comb += term_rdport.re.eq(ce)
        self.comb += term_rdport.adr.eq(x + y_rollover * term_colums)
        self.comb += [
            term_dat_r.eq(term_rdport.dat_r[:font_width]),
            If(
                (x >= 80) | (y >= term_lines),
                term_dat_r.eq(ord(" ")),  # Out of range, generate space.
            )
        ]

        # Translate character to video data through Font Mem.
        self.comb += font_rdport.re.eq(ce)
        self.comb += font_rdport.adr.eq(term_dat_r * font_heigth +
                                        timing_bufs[0].source.vcount[:4])
        bit = Signal()
        cases = {}
        for i in range(font_width):
            cases[i] = [bit.eq(font_rdport.dat_r[font_width - 1 - i])]
        self.comb += Case(
            timing_bufs[1].source.hcount[:int(math.log2(font_width))], cases)
        # FIXME: Add Palette.
        self.comb += [
            If(
                bit,
                Case(
                    term_rdport.dat_r[font_width:], {
                        0: [Cat(source.r, source.g, source.b).eq(0xffffff)],
                        1: [Cat(source.r, source.g, source.b).eq(0x34e289)],
                    })).Else(Cat(source.r, source.g, source.b).eq(0x000000), )
        ]
def calculate_entropy(p):
    if (p==0 or p==1):
        return 0.0
    else:
        return -1*(p*math.log2(p) + (1-p)*math.log2(1-p))
Example #41
    def test_code_length_of_two_letters_is_computed_correctly(self):
        self.assertEqual(Arima().compress([5, 7]),
                         int(math.ceil(-math.log2(math.pow(1 / 256, 2)))))
Example #42
    def __init__(self, bottom_up, in_features, out_channels, norm="", top_block=None, fuse_type="sum",reduction=16):
        # bottom_up = a backbone providing {"res2": [56*56*256], "res3": [28*28*512], "res4": [14*14*1024], "res5": [7*7*2048]}
        # in_features = ["res2", "res3", "res4", "res5"]
        # out_channels = 256
        super(DesFPN, self).__init__()
        # Assert that the given bottom_up is a Backbone instance
        assert isinstance(bottom_up, Backbone)

        # Feature map strides and channels from the bottom up network (e.g. ResNet)
        # Use the output shapes of bottom_up as the FPN's input shapes
        input_shapes = bottom_up.output_shape()
        # f in ["res2", "res3", "res4", "res5"], 获取res块的步长 in_strides = [stride2, stride3, ...]
        in_strides = [input_shapes[f].stride for f in in_features]
        # 获取["res2", "res3", "res4", "res5"]各自的channel = [256, 512, 1025, 2048]
        in_channels = [input_shapes[f].channels for f in in_features]
        # aug_lateral_conv = channel count of the topmost level
        # aug_lateral_conv = in_channels[-1]
        # Validate the strides
        _assert_strides_are_log2_contiguous(in_strides)
        # Lateral conv layers
        lateral_convs = []
        # Output conv layers
        output_convs = []
        across_convs = []
        senets_convs = []
        use_bias = norm == ""
        for idx, in_channel in enumerate(in_channels):
            lateral_norm = get_norm(norm, out_channels)
            output_norm = get_norm(norm, out_channels)
            across_norm = get_norm(norm, out_channels)
            # Lateral conv layer
            lateral_conv = Conv2d(
                in_channel, out_channels, kernel_size=1, bias=use_bias, norm=lateral_norm
            )
            across_conv = Conv2d(
                in_channel, out_channels, kernel_size=1, bias=use_bias, norm=across_norm
            )
            senet_conv = SELayer(in_channel, out_channels)
            # Output conv layer
            output_conv = Conv2d(
                out_channels,
                out_channels,
                kernel_size=3,
                stride=1,
                padding=1,
                bias=use_bias,
                norm=output_norm,
            )
            # Initialize the conv layer weights
            weight_init.c2_xavier_fill(lateral_conv)
            weight_init.c2_xavier_fill(output_conv)
            weight_init.c2_xavier_fill(across_conv)
            stage = int(math.log2(in_strides[idx]))

            # Register the conv layers as named modules on the model
            self.add_module("fpn_lateral{}".format(stage), lateral_conv)
            self.add_module("fpn_output{}".format(stage), output_conv)
            self.add_module("fpn_across{}".format(stage), across_conv)
            # Also keep the conv layers in lists for use in the forward pass
            lateral_convs.append(lateral_conv)
            output_convs.append(output_conv)
            across_convs.append(across_conv)
            senets_convs.append(senet_conv)
        # senet
        self.conv1x1 = nn.Conv2d(2048, 256, kernel_size=1, bias=False)
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
                nn.Linear(256, 256 // reduction, bias=False),
                nn.ReLU(inplace=True),
                nn.Linear(256 // reduction, 256, bias=False),
                nn.Sigmoid()
            )
        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        # Reverse the lateral convs into [conv(2048->256), ..., conv(256->256)]
        self.lateral_convs = lateral_convs[::-1]
        # Reverse the output convs: conv(256->256), ...
        self.output_convs = output_convs[::-1]
        self.across_convs = across_convs[::-1]
        self.senets_convs = senets_convs[::-1]
        # top_block = None
        self.top_block = top_block
        # self.in_features = ["res2", "res3", "res4", "res5"]
        self.in_features = in_features
        # self.bottom_up = Backbone
        self.bottom_up = bottom_up
        # Return feature names are "p<stage>", like ["p2", "p3", ..., "p6"]
        self._out_feature_strides = {"p{}".format(int(math.log2(s))): s for s in in_strides}
        # top block output feature maps.
        if self.top_block is not None:
            for s in range(stage, stage + self.top_block.num_levels):
                self._out_feature_strides["p{}".format(s + 1)] = 2 ** (s + 1)
        # self._out_feature_strides.keys() = ["p2", "p3", ... "p6"]
        self._out_features = list(self._out_feature_strides.keys())
        self._out_feature_channels = {k: out_channels for k in self._out_features}
        # Stride of the topmost level
        self._size_divisibility = in_strides[-1]
        assert fuse_type in {"avg", "sum"}
        self._fuse_type = fuse_type
Example #43
 def test_code_length_of_one_letter_is_computed_correctly(self):
     self.assertEqual(Arima().compress([5]),
                      int(math.ceil(-math.log2(1 / 256))))
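The expected code lengths in the Arima tests (examples #42 and #43) reduce to whole bytes under a uniform 256-symbol model; the arithmetic itself, independent of the Arima class, is:

import math

# Each letter costs -log2(1/256) = 8 bits, so k letters cost 8 * k bits.
assert int(math.ceil(-math.log2(1 / 256))) == 8
assert int(math.ceil(-math.log2(math.pow(1 / 256, 2)))) == 16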
def query(data, table, left, right):
    number_of_elements = right - left + 1
    end_query = math.floor(math.log2(number_of_elements))
    return min(
        data[table[left][end_query]],
        data[table[left + number_of_elements - (1 << end_query)][end_query]])
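query above looks up precomputed argmin indices; a minimal sketch of how such a sparse table could be built (the name build_sparse_table and the layout table[i][j] = index of the minimum of data[i : i + 2**j] are assumptions chosen to match how query indexes the table):

import math

def build_sparse_table(data):
    # table[i][j] holds the index of the minimum of data[i : i + 2**j]
    n = len(data)
    levels = math.floor(math.log2(n)) + 1
    table = [[i] + [0] * (levels - 1) for i in range(n)]
    for j in range(1, levels):
        span = 1 << j
        for i in range(n - span + 1):
            left_idx = table[i][j - 1]
            right_idx = table[i + (span >> 1)][j - 1]
            table[i][j] = left_idx if data[left_idx] <= data[right_idx] else right_idx
    return table

# e.g. data = [3, 1, 4, 1, 5]; query(data, build_sparse_table(data), 1, 4) -> 1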
Example #45
File: src.py  Project: dailydaniel/univer
 def __init__(self, array):
     self.array = [i for i in array]
     self.n = len(array)
     self.m = 1 + int(math.log2(self.n))
     self.stats = stat(array, self.m)
Example #46
    def extractE(self, wf, nodelist, MList, rf=True, annotation=None, window=1, inc=None, aa=True):
        """
        Regenerates the signal from each of the nodes and finds max standardized E.
        Args:
        1. wf - WaveletFunctions with a homebrew wavelet tree (list of ndarray nodes)
        2. nodelist - will reconstruct signal and run detections on each of these nodes separately
        3. MList - passed here to allow multiple Ms to be tested from one reconstruction
        4. rf - bandpass to species freq range?
        5. annotation - for calculating noise properties during training
        6-7. window, inc - window / increment length, seconds
        8. aa - antialias, True/False
        Return: ndarrays of MxTxN energies, for each of M values, T windows and N nodes
        """

        if inc is None:
            inc = window
            resol = window
        else:
            resol = (math.gcd(int(100 * window), int(100 * inc))) / 100
        if annotation is not None:
            annotation = np.array(annotation)

        duration = len(wf.tree[0])

        # Window, increment, and resolution are converted to true seconds based on the tree samplerate
        samplerate = wf.treefs
        win_sr = math.ceil(window * samplerate)
        inc_sr = math.ceil(inc * samplerate)
        resol_sr = math.ceil(resol * samplerate)

        # number of windows of length inc
        nw = int(np.ceil(duration / inc_sr))

        nodenum = 0
        maxE = np.zeros((len(MList), nw, len(nodelist)))
        for node in nodelist:
            useWCenergies = False
            # Option 1: use wavelet coef energies directly
            if useWCenergies:
                # how many samples went into one WC?
                samples_wc = 2**math.floor(math.log2(node+1))
                duration = int(duration/samples_wc)
                # put WC from test node(s) on the new tree
                C = wf.tree[node][0::2]
            # Option 2: reconstruct from the WCs, as before
            else:
                samples_wc = 1
                C = wf.reconstructWP2(node, antialias=aa, antialiasFilter=True)

            # Sanity check for all zero case
            if not any(C):
                continue

            if len(C) > duration:
                C = C[:duration]

            C = np.abs(C)
            N = len(C)

            # Compute threshold using mean & sd from non-call sections
            if annotation is not None:
                noiseSamples = np.repeat(annotation == 0, resol_sr/samples_wc)
                noiseSamples = noiseSamples[:len(C)]
            else:
                print("Warning: no annotations detected in file")
                noiseSamples = np.full(len(C), True)
            meanC = np.mean(np.log(C[noiseSamples]))
            stdC = np.std(np.log(C[noiseSamples]))

            # Compute the energy curve (a la Jinnai et al. 2012)
            # using different M values, for a single node.
            for indexM in range(len(MList)):
                # Compute the number of samples in a window -- species specific
                # Convert M to number of WCs
                M = int(MList[indexM] * win_sr/samples_wc)
                E = ce.EnergyCurve(C, M)
                # for each sliding window, find largest E
                start = 0
                for j in range(nw):
                    end = min(N, int(start + win_sr/samples_wc))
                    # NOTE: here we determine the statistic (mean/max...) for detecting calls
                    maxE[indexM, j, nodenum] = (np.log(np.mean(E[start:end])) - meanC) / stdC
                    start += int(inc_sr/samples_wc)
            nodenum += 1

        C = None
        E = None
        del C
        del E
        gc.collect()
        return maxE
Example #47
from math import log2

DSIZE = 32  # data size
ASIZE = 32  # address size
RAM_DEPTH = 128  # RAM size in words
NUM_REGS = 32  # number of registers
REG_ASIZE = int(log2(NUM_REGS))  # register address size

ALU_FUN_SIZE = 4

OPCODE_SIZE = 6
OPCODE_RANGE = (32, 26)
FUNCT_SIZE = 6
FUNCT_RANGE = (6, 0)
IMMEDIATE_SIZE = 16
IMMEDIATE_RANGE = (16, 0)

RS_RANGE = (26, 21)
RT_RANGE = (21, 16)
RD_RANGE = (16, 11)

JUMP_IMM_RANGE = (26, 0)
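The *_RANGE tuples above read as (upper, lower) bit bounds; a hypothetical helper showing how such a range could slice a field out of a 32-bit instruction word (extract_field and the half-open [lower, upper) interpretation are assumptions, not part of the original constants file):

def extract_field(word, field_range):
    # field_range is (upper, lower); take bits [lower, upper) of word.
    upper, lower = field_range
    return (word >> lower) & ((1 << (upper - lower)) - 1)

# 0x012A4020 is `add $t0, $t1, $t2`, an R-type instruction: opcode 0, funct 0b100000.
assert extract_field(0x012A4020, OPCODE_RANGE) == 0
assert extract_field(0x012A4020, FUNCT_RANGE) == 0b100000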
Example #48
def branch_point_differences(n, mode):
    out_dict = {}
    syllables_dict = {}
    for nest, birds_list in meta_nest_dict.items():
        nest_dict = {}
        nest_syllable_dict = {}
        pupil_IDs = birds_list[:-1]
        tutor_ID = birds_list[-1]
        for pupil_ID in pupil_IDs:
            fp1 = directory + prefix + tutor_ID + '.csv'
            fp2 = directory + prefix + pupil_ID + '.csv'
            distrib_1 = entropy.branchpoints(fp1, [2, n + 1])[n]
            distrib_2 = entropy.branchpoints(fp2, [2, n + 1])[n]
            bird1_branchpoints = []
            bird2_branchpoints = []
            for branchpoint_1 in distrib_1.keys():
                bird1_branchpoints.append(branchpoint_1)
            for branchpoint_2 in distrib_2.keys():
                bird2_branchpoints.append(branchpoint_2)
            branchpoints_to_analyze = [
                value for value in bird1_branchpoints
                if value in bird2_branchpoints
            ]
            branchpoints_dict = {}
            for branchpoint in branchpoints_to_analyze:
                differences_dict = {}
                if branchpoint in distrib_1:
                    count1 = distrib_1[branchpoint]['count']
                else:
                    count1 = 0
                if branchpoint in distrib_2:
                    count2 = distrib_2[branchpoint]['count']
                else:
                    count2 = 0
                transitions_to_analyze = list(
                    distrib_1[branchpoint]['transitions'].keys()) + list(
                        distrib_2[branchpoint]['transitions'].keys())
                for transition in transitions_to_analyze:
                    if transition not in distrib_1[branchpoint][
                            'transitions'].keys():
                        bird1_value = 0.00000001
                    else:
                        bird1_value = distrib_1[branchpoint]['transitions'][
                            transition]
                    if transition not in distrib_2[branchpoint][
                            'transitions'].keys():
                        bird2_value = 0.00000001
                    else:
                        bird2_value = distrib_2[branchpoint]['transitions'][
                            transition]
                    if mode == 'euclidean':
                        difference = abs(bird1_value - bird2_value)
                    if mode == 'dkl':
                        difference = bird1_value * math.log2(
                            bird1_value / bird2_value)
                    if mode == 'log':
                        difference = abs(
                            math.log2(bird1_value) - math.log2(bird2_value))
                    differences_dict[transition] = difference
                divergence = sum(differences_dict.values())
                branchpoints_dict[branchpoint] = {
                    'tutor_count': count1,
                    'pupil_count': count2,
                    'divergence': divergence
                }
            divergences = []
            counts = []
            for branchpoint, subdict in branchpoints_dict.items():
                divergences.append(subdict['divergence'])
            shared_branchpoints = len(branchpoints_dict.keys())
        out_dict[nest] = nest_dict
        syllables_dict[nest] = nest_syllable_dict
    matrix_version = []
    syllables_matrix_version = []
    for nest, nestdict in out_dict.items():
        for bird, birdresult in nestdict.items():
            matrix_version.append(birdresult)
    for nest, nestdict in syllables_dict.items():
        for bird, birddict in nestdict.items():
            for syl, syldict in birddict.items():
                syllables_matrix_version.append(
                    [nest, bird, syl, syldict['divergence']])
    with open("./output/bird_divergence.csv", 'w') as output_file:
        writer = csv.writer(output_file)
        for row in matrix_version:
            writer.writerow([row])
    with open("./output/syllable_divergence.csv", 'w') as output_file:
        writer = csv.writer(output_file)
        writer.writerow(['Nest', 'BirdID', 'Syllable', 'Divergence'])
        for row in syllables_matrix_version:
            writer.writerow(row)
    return [matrix_version, syllables_dict]
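For clarity, the per-branchpoint divergence computed in the innermost loop above boils down to the following standalone sketch (transition_divergence is a hypothetical name; the 1e-8 floor mirrors the bird?_value fallback above, and the set union here visits each shared transition once, where the concatenated key lists above would visit it twice):

import math

def transition_divergence(p_tutor, p_pupil, mode):
    # p_tutor / p_pupil map transition -> probability; eps stands in for
    # transitions that one of the birds never produced.
    eps = 0.00000001
    total = 0.0
    for t in set(p_tutor) | set(p_pupil):
        a = p_tutor.get(t, eps)
        b = p_pupil.get(t, eps)
        if mode == 'euclidean':
            total += abs(a - b)
        elif mode == 'dkl':
            total += a * math.log2(a / b)
        elif mode == 'log':
            total += abs(math.log2(a) - math.log2(b))
    return total

# Identical distributions diverge by 0 under every mode:
assert transition_divergence({'ab': 0.5, 'ac': 0.5}, {'ab': 0.5, 'ac': 0.5}, 'dkl') == 0.0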
Example #49
    def __init__(self,
                 n,
                 uni=None,
                 iso=None,
                 phys_dim=2,
                 dangle=False,
                 site_ind_id="k{}",
                 site_tag_id="I{}",
                 **tn_opts):

        # short-circuit for copying MERA
        if isinstance(n, MERA):
            super().__init__(n)
            for ep in MERA._EXTRA_PROPS:
                setattr(self, ep, getattr(n, ep))
            return

        self._site_ind_id = site_ind_id
        self._site_tag_id = site_tag_id
        self.cyclic = True

        if not is_power_of_2(n):
            raise ValueError("``n`` should be a power of 2.")

        nlayers = round(log2(n))

        if isinstance(uni, np.ndarray):
            uni = (uni, )

        if isinstance(iso, np.ndarray):
            iso = (iso, )

        unis = itertools.cycle(uni)
        isos = itertools.cycle(iso)

        def gen_mera_tensors():
            u_ind_id = site_ind_id

            for i in range(nlayers):

                # index id connecting to layer below
                l_ind_id = u_ind_id
                # index id connecting the unitaries to the isometries
                m_ind_id = rand_uuid() + "_{}"
                # index id connecting to layer above
                u_ind_id = rand_uuid() + "_{}"

                # number of tensor sites in this layer
                eff_n = n // 2**i

                for j in range(0, eff_n, 2):

                    # generate the unitary:
                    #  ul | | ur
                    #     UNI
                    #  ll | | lr
                    #     j j+1
                    ll, lr = map(l_ind_id.format, (j, (j + 1) % eff_n))
                    ul, ur = map(m_ind_id.format, (j, (j + 1) % eff_n))
                    inds = (ll, lr, ul, ur)

                    tags = {"_UNI", "_LAYER{}".format(i)}
                    if i == 0:
                        tags.add(site_tag_id.format(j))
                        tags.add(site_tag_id.format(j + 1))

                    yield Tensor(next(unis), inds, tags=tags)

                    # generate the isometry (offset by one effective site):
                    #      | ui
                    #     ISO
                    #  ll | | lr
                    #   j+1 j+2
                    ll, lr = map(m_ind_id.format, (j + 1, (j + 2) % eff_n))
                    ui = u_ind_id.format(j // 2)
                    inds = (ll, lr, ui)
                    tags = {"_ISO", "_LAYER{}".format(i)}

                    if i < nlayers - 1 or dangle:
                        yield Tensor(next(isos), inds, tags)
                    else:
                        # don't leave dangling index at top
                        yield Tensor(
                            np.eye(phys_dim, dtype=next(isos).dtype) / 2**0.5,
                            inds[:-1], tags)

        super().__init__(gen_mera_tensors(),
                         check_collisions=False,
                         structure=site_tag_id)

        # tag the MERA with the 'causal-cone' of each site
        for i in range(nlayers):
            for j in range(n):
                # get isometries in the same layer
                for t in self.select_neighbors(j):
                    if f'_LAYER{i}' in t.tags:
                        t.add_tag(f'I{j}')

                # get unitaries in layer above
                for t in self.select_neighbors(j):
                    if f'_LAYER{i + 1}' in t.tags:
                        t.add_tag(f'I{j}')
Example #50
def next_power_of_2(n: int) -> int:
    return 1 << ceil(log2(n))
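One caveat with the float-based version above: for very large n, log2 can round down to an integer and the result lands one power too low (e.g. next_power_of_2(2**60 + 1) returns 2**60). An integer-only sketch with the same "smallest power of two >= n" semantics for n >= 1:

def next_power_of_2_exact(n: int) -> int:
    # Smallest power of two >= n (n >= 1), using integer bit tricks only.
    return 1 << (n - 1).bit_length()

assert next_power_of_2_exact(1) == 1
assert next_power_of_2_exact(8) == 8
assert next_power_of_2_exact(9) == 16
assert next_power_of_2_exact(2**60 + 1) == 2**61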
Example #51
def bits(n: int, max: Optional[int] = None) -> Sequence[int]:
    width = math.ceil(math.log2(max)) if max is not None else 0
    return list(map(int, bin(n)[2:].zfill(width)))
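A quick check of the zero-padding behaviour of bits above (assumes the function and its imports, math / Optional / Sequence, are in scope):

assert bits(5) == [1, 0, 1]                 # no cap: just the binary digits of n
assert bits(5, max=16) == [0, 1, 0, 1]      # ceil(log2(16)) == 4, so padded to four digits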
Example #52
File: signal.py  Project: bencoombs/lung
def retune_alpha(alpha, new_frequency, starting_frequency=1):
    return 2**(math.log2(alpha) * starting_frequency / new_frequency)
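Since 2**(log2(alpha) * r) == alpha**r, retune_alpha simply raises alpha to the ratio starting_frequency / new_frequency; a quick check assuming the function above and math are in scope:

assert abs(retune_alpha(0.25, 2) - 0.5) < 1e-12            # 0.25 ** (1/2)
assert abs(retune_alpha(0.5, 4, 2) - 0.5 ** 0.5) < 1e-12   # 0.5 ** (2/4)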
Example #53
def ent_cal(data,val,f):
	cnt=0
	yes=0
	if(f==0):#greater or equal
		for i in range(9):
			if(data[i][0]>=val):
				cnt+=1
				if(data[i][3]=='Y'):
					yes+=1
	else:#less or equal
		for i in range(9):
			if(data[i][0]<=val):
				cnt+=1
				if(data[i][3]=='Y'):
					yes+=1
	print("yes=",end='')
	print(yes)
	print("cnt=",end='')
	print(cnt)
	
	if(cnt==0):
		py0=1
		pn0=1
	else:
		py0=yes/cnt
		pn0=1-py0
	
	if(yes==0):
		py0=1

	if(cnt==yes):
		pn0=1

	print("py0=",end='')
	print(py0)
	print("pn0=",end='')
	print(pn0)
	
	ent0=(py0*math.log2(py0)+pn0*math.log2(pn0))*(-1)
	

	print("ent0=",end='')
	print(ent0)
	
	cnt1=9-cnt
	yes1=6-yes
	if(cnt1==0):
		py1=1
		pn1=1
	else:
		py1=yes1/cnt1
		pn1=1-py1
	
	if(yes1==0):
		py1=1

	if(cnt1==yes1):
		pn1=1
	
	print()
	print()
	print("py1=",end='')
	print(py1)
	print("pn1=",end='')
	print(pn1)

	ent1=(py1*math.log2(py1)+pn1*math.log2(pn1))*(-1)
	
	print("ent1=",end='')
	print(ent1)
	x=cnt/10
	print("x=",end='')
	print(x)
	
	ent=(x)*ent0+(1-x)*ent1
	
	print()
	print("ent=",end='')
	print(ent)
	print("-----------------------------------------")
	print()
	
	return ent
# Returns e**x - 1
x = int(input())
print(math.expm1(x))

# Returns the logarithm of x to the given base (the default base is e).
x = int(input())
base = 10
print(math.log(x, base))

# Returns the natural logarithm of 1 + x
x = int(input())
print(math.log1p(x))

# Returns the base-2 logarithm of x
x = int(input())
print(math.log2(x))

# Returns the base-10 logarithm of x
x = int(input())
print(math.log10(x))

# Raises x to the power y
x = int(input())
y = int(input())
print(math.pow(x, y))

# Returns the square root of x
x = int(input())
print(math.sqrt(x))

# Returns the arc cosine of x
Example #55
import numpy as np
import math
import random

NUM_PICTURES = 20
NUM_ITERATIONS = math.ceil(NUM_PICTURES * NUM_PICTURES *
                           math.log2(NUM_PICTURES))

rank = [[[] for col in range(NUM_PICTURES)] for row in range(NUM_PICTURES)]
images = np.random.rand(NUM_PICTURES)


def compare(targetIndex, index1, index2):
    if (math.fabs(images[targetIndex] - images[index1]) <=
            math.fabs(images[targetIndex] - images[index2])):
        winner = index1
        loser = index2
    else:
        winner = index2
        loser = index1

    print("{}: {} is closer than {}".format(images[targetIndex],
                                            images[winner], images[loser]))
    return (winner, loser)


print(images)

for i in range(NUM_ITERATIONS):
    target = i % NUM_PICTURES
    index1 = np.random.randint(NUM_PICTURES)
Example #56
def plog(x):
    return (-1)*math.log2(x)
Example #57
def get_entropy_from_dict(char_dict):
    res = sum(-i * math.log2(i) for i in char_dict.values())
    return res
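A quick usage check for get_entropy_from_dict above, with char_dict holding probabilities that sum to 1 (assumes the function and math are in scope):

assert get_entropy_from_dict({'a': 0.5, 'b': 0.5}) == 1.0
assert abs(get_entropy_from_dict({'a': 0.25, 'b': 0.25, 'c': 0.5}) - 1.5) < 1e-12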
Example #58
        prog.h(input_qubit[3])  # number=20

    # circuit end

    return prog


if __name__ == '__main__':
    key = "00000"
    f = lambda rep: str(int(rep == key))
    prog = make_circuit(5, f)
    backend = BasicAer.get_backend('statevector_simulator')
    sample_shot = 7924

    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    info = {
        np.binary_repr(i, qubits): round(
            (info[i] * (info[i].conjugate())).real, 3)
        for i in range(2**qubits)
    }
    backend = FakeVigo()
    circuit1 = transpile(prog, backend, optimization_level=2)

    writefile = open("../data/startQiskit_Class1876.csv", "w")
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()
Example #59
def infoMeasure(n, p):
    return -math.log2(prob(n, p))
Example #60
File: c10.py  Project: finngaida/detectron2
    def forward(self, images, features, gt_instances=None):
        assert not self.training

        features = [features[f] for f in self.in_features]
        objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features)

        # TODO: is this needed?
        # objectness_logits_pred = [t.sigmoid() for t in objectness_logits_pred]

        assert isinstance(images, ImageList)
        if self.tensor_mode:
            im_info = images.image_sizes
        else:
            im_info = torch.Tensor([[im_sz[0], im_sz[1],
                                     torch.Tensor([1.0])]
                                    for im_sz in images.image_sizes
                                    ]).to(images.tensor.device)
        assert isinstance(im_info, torch.Tensor)

        rpn_rois_list = []
        rpn_roi_probs_list = []
        for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip(
                objectness_logits_pred,
                anchor_deltas_pred,
                iter(self.anchor_generator.cell_anchors),
                self.anchor_generator.strides,
        ):
            scores = scores.detach()
            bbox_deltas = bbox_deltas.detach()

            rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals(
                scores,
                bbox_deltas,
                im_info,
                cell_anchors_tensor,
                spatial_scale=1.0 / feat_stride,
                pre_nms_topN=self.pre_nms_topk[self.training],
                post_nms_topN=self.post_nms_topk[self.training],
                nms_thresh=self.nms_thresh,
                min_size=self.min_box_side_len,
                # correct_transform_coords=True,  # deprecated argument
                angle_bound_on=True,  # Default
                angle_bound_lo=-180,
                angle_bound_hi=180,
                clip_angle_thresh=1.0,  # Default
                legacy_plus_one=False,
            )
            rpn_rois_list.append(rpn_rois)
            rpn_roi_probs_list.append(rpn_roi_probs)

        # For FPN in D2, in RPN all proposals from different levels are concated
        # together, ranked and picked by top post_nms_topk. Then in ROIPooler
        # it calculates level_assignments and calls the RoIAlign from
        # the corresponding level.

        if len(objectness_logits_pred) == 1:
            rpn_rois = rpn_rois_list[0]
            rpn_roi_probs = rpn_roi_probs_list[0]
        else:
            assert len(rpn_rois_list) == len(rpn_roi_probs_list)
            rpn_post_nms_topN = self.post_nms_topk[self.training]

            device = rpn_rois_list[0].device
            input_list = [
                to_device(x, "cpu")
                for x in (rpn_rois_list + rpn_roi_probs_list)
            ]

            # TODO remove this after confirming rpn_max_level/rpn_min_level
            # is not needed in CollectRpnProposals.
            feature_strides = list(self.anchor_generator.strides)
            rpn_min_level = int(math.log2(feature_strides[0]))
            rpn_max_level = int(math.log2(feature_strides[-1]))
            assert (
                rpn_max_level - rpn_min_level +
                1) == len(rpn_rois_list
                          ), "CollectRpnProposals requires continuous levels"

            rpn_rois = torch.ops._caffe2.CollectRpnProposals(
                input_list,
                # NOTE: in the current implementation, rpn_max_level and rpn_min_level
                # are not needed; only the difference between the two matters and it
                # can be inferred from the number of inputs. Keep them for now for
                # consistency.
                rpn_max_level=2 + len(rpn_rois_list) - 1,
                rpn_min_level=2,
                rpn_post_nms_topN=rpn_post_nms_topN,
            )
            rpn_rois = to_device(rpn_rois, device)
            rpn_roi_probs = []

        proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs,
                                        self.tensor_mode)
        return proposals, {}