def __init__(self, filename, _type, key=None):
    """Record the target file's metadata and attach helper objects.

    :param filename: path of the file to operate on
    :param _type: file type tag, stored as ``self.type``
    :param key: optional key for the operation; defaults to None
    """
    self.key = key
    self.type = _type
    self.filename = filename
    # One helper per supported operation.
    self.read = read.Read()
    self.write = write.Write()
    self.add = add.Add()
def decrypt():
    """Prompt for an encoded file, RSA-decrypt its payload, and print it.

    Prompts and output labels are in Portuguese, matching the rest of the app.
    """
    print(":::informe o arquivo que pretende decodificar:::")
    source_path = receber_parametro()
    extracted = read.Read(source_path)
    payload = extracted.dado_traduzido
    private_key = extracted.pv_traduzido
    rsa = principal.RSA()
    decoded = rsa.decrypt(payload, private_key)
    print(f"DADO DECODIFICADO.................: {''.join(decoded)}")
async def main():
    """Run a batch writer and a batch reader concurrently against `filename`.

    `filename` is resolved from the enclosing scope — presumably a module-level
    constant; confirm against the rest of the file.
    """
    file_writer = write.Write(filename=filename)
    file_reader = read.Read(filename=filename)
    # Deferred data factory; only referenced by the disabled single-write path.
    local_data = lambda: data.Data.GenerateData()
    # await file_writer.WriteData(local_data)
    H = await asyncio.gather(
        file_writer.BatchWrite(5),
        file_reader.BatchRead(5),
    )
def main():
    """Run the HELIOS RT computation end to end.

    Pipeline: read inputs -> prepare GPU arrays -> core GPU computation ->
    copy results back -> write output files.
    """
    # Instantiate the collaborating helper objects.
    input_reader = read.Read()
    store = quant.Store()
    engine = comp.Compute()
    output_writer = write.Write()
    plot_helper = rt_plot.Plot()

    # Read input files and do preliminary calculations.
    input_reader.read_input(store)
    input_reader.read_opac(store)
    store.dimensions()
    input_reader.read_star(store)
    hsfunc.gaussian_weights(store)
    hsfunc.spec_heat_cap(store)
    hsfunc.planet_param(store)
    hsfunc.initial_temp(store, input_reader)

    # Get ready for GPU computations.
    store.convert_input_list_to_array()
    store.create_zero_arrays()
    store.copy_host_to_device()
    store.allocate_on_device()

    # Conduct the GPU core computations.
    engine.construct_planck_table(store)
    engine.construct_grid(store)
    engine.construct_capital_table(store)
    engine.init_spectral_flux(store)
    engine.iteration_loop(store, output_writer, plot_helper)
    engine.calculate_mean_opacities(store)
    engine.calculate_transmission(store)

    # Copy everything back to host and write to files.
    store.copy_device_to_host()
    output_writer.write_info(store, input_reader)
    output_writer.write_tp(store)
    output_writer.write_column_mass(store)
    output_writer.write_integrated_flux(store)
    output_writer.write_downward_spectral_flux(store)
    output_writer.write_upward_spectral_flux(store)
    output_writer.write_planck_interface(store)
    output_writer.write_planck_center(store)
    output_writer.write_opacities(store)
    output_writer.write_transmission(store)
    output_writer.write_mean_extinction(store)

    # Print the success message - yay!
    hsfunc.success_message()
def fn_buscarReemplazar(self):
    """Case-insensitive find/replace over the active tab's text editor.

    Prompts for a search pattern and a replacement, rewrites the text, and
    reports the number of replacements in a message box (Spanish UI strings).
    """
    import read
    import re

    prompter = read.Read()
    find = prompter.buscar("Buscar:")
    replace = prompter.buscar("Reemplazar:")
    editor = self.tabWidget.currentWidget().findChild(QtWidgets.QTextEdit, "textEdit")
    content = editor.toPlainText()
    # NOTE(review): `find` is interpreted as a regular expression, not a
    # literal string — special characters in user input change the match.
    incidences = re.findall(find, content, flags=re.IGNORECASE)
    cont = re.sub(find, replace, content, flags=re.IGNORECASE) + '\n'
    # NOTE(review): text is read from the current tab's editor but written to
    # self.textEdit — confirm these refer to the same widget.
    self.textEdit.setPlainText(cont)
    self.msgBox = QtWidgets.QMessageBox()
    self.msgBox.setText(f"Se han reemplazado {str(len(incidences))} incidencias.")
    self.msgBox.setIcon(QtWidgets.QMessageBox.Information)
    self.msgBox.exec()
def main():
    """Train a small neural network until every output error is below
    `errorRate`, then run an interactive try-out loop on user input."""
    errorRate = 0.00001
    allNodes = read.Read()
    weight = weightPath.CreateWeightPath(allNodes[0])
    print(allNodes[0])
    # Initial forward pass (True flag presumably marks the first run — confirm).
    allNodes[0], result, difference = tryNetwork.Try(allNodes[0], weight, True)
    delta = cloneNode.CloneNodeValue(weight, 0)  # first delta must be 0
    delta, nodeS = calculate.CalculateDelta(allNodes[0], weight, result, delta)
    print("NewNodes=", allNodes[0])
    print("Results=", result)
    print("Differences=", difference)
    end = False
    iteration = 0
    firstTime = time.time()
    # Epoch loop: keep training until an epoch where all differences are small.
    while not end:
        best = True
        for nodeWeight in allNodes:
            iteration += 1
            weight = train.Train(weight, delta)
            nodeWeight, result, difference = tryNetwork.Try(nodeWeight, weight, False)
            delta, nodeS = calculate.CalculateDelta(nodeWeight, weight, result, delta)
            for i in range(len(difference)):
                if difference[i] >= errorRate:
                    best = False
        # NOTE(review): convergence check placed after the full epoch; the
        # collapsed original is ambiguous about whether it sat inside the
        # sample loop — confirm against the original layout.
        if best:
            end = True
            print("\n\nTraining Successed!!!\n")
            print("Iteration=", iteration)
            print("Time=", round(float(time.time() - firstTime), 4), "\n")
            break
    end = ""
    # inputLen excludes the bias node; outputLen is currently unused.
    inputLen = len(allNodes[0][0]) - 1
    outputLen = len(allNodes[0][-1])
    end = input(
        "Please Press Enter For Trying(leave for=after 'e' press enter)")
    # Interactive loop: reuses the last `nodeWeight` from training as a
    # scratch sample buffer.
    while end != 'e':
        for i in range(inputLen):
            nodeWeight[0][i] = float(input("Please Enter Input"))
        nodeWeight, result, difference = tryNetwork.Try(nodeWeight, weight, False)
        print("Results=", result)
        end = input(
            "Please Press Enter For Trying(leave for=after 'e' press enter)")
def parse_bam(bam, segmentID):
    """Stream a BAM file through sambamba and collect Read/Segment records.

    Populates the module-level `reads` and `segments` dicts. Alignments that
    are unmapped, below `opts_mapq`, or below `opts_pid` contribute no segment.
    """
    sys.stderr.write(time.strftime("%c") + " Busy with parsing bam file...\n")
    command = opts_sambamba + ' view -t ' + str(opts_threads) + ' ' + bam
    # Use a distinct handle name so the `bam` path parameter is not shadowed.
    with os.popen(command) as sam_stream:
        for raw_line in sam_stream:
            fields = raw_line.rstrip().split("\t")
            cigar = parse_cigar(fields[5])
            name = fields[0]
            try:
                read = reads[name]
            except KeyError:
                # Total read length = stored sequence plus hard-clipped bases.
                read = r.Read(name, (len(fields[9]) + sum(cigar['H'])))
                reads[name] = read
            # Skip unmapped alignments (flag 0x4) and low mapping quality.
            if int(fields[1]) & 4 or int(fields[4]) < opts_mapq:
                continue
            segment = s.Segment(segmentID, name, fields[1], fields[2], fields[3], fields[4], len(fields[9]))
            segment.parseCigar(cigar)
            # Discard segments below the percent-identity threshold.
            if float(segment.pid) < opts_pid:
                continue
            read.addSegment(segment)
            segments[segmentID] = segment
            segmentID += 1
import read
import os

def tower(twr):
    # Print each disk of a single tower, one per line.
    for i in twr:
        print(i)

def show():
    # Clear the terminal and render all three towers with 1-based labels.
    os.system('clear')
    for i,j in enumerate(twrs):
        tower(j)
        print('Tower', i + 1)
        print()

# `global` at module level is a no-op; kept as in the original.
global limit
limit = read.Read(int, 'Limit')
twrs = [[], [], []]
# Fill the first tower with disks 1..limit.
for i in range(1, limit + 1):
    twrs[0].append(i)
# NOTE(review): `win` aliases twrs[0], which mutates as disks move — this
# looks like it was meant to be a copy of the goal ordering; confirm.
win = twrs[0]
show()
while win not in twrs[1:]:
    # Towers are entered 1-based; convert to 0-based indices.
    twr_s = read.Read(int, 'Tower number to select') - 1
    if not (-1 < twr_s < 3):
        read.error('Enter number in (1,2,3)')
        continue
    # NOTE(review): excerpt ends here; the rest of the move logic is not shown.
    twr_d = read.Read(int, 'Destination tower number') - 1
import read

# Read two numbers and print their sum along with the inputs.
num1 = read.Read(float, 'first number')
num2 = read.Read(float, 'second number')  # fixed prompt typo: "seconnd"
result = num1 + num2
print('Sum of {} & {} is {}'.format(
    num1, num2, result))  # printing result along with inputs
import read

# Check whether an input integer is a Fibonacci number.
num = read.Read(int, 'Number')
if num < 0:
    # Negative numbers are never Fibonacci.
    read.error('Invalid Number')
elif num < 2:
    # 0 and 1 are Fibonacci numbers.
    print(num, 'is a Fibonacci number')
else:
    # Walk the sequence only until it reaches or passes num — O(log num)
    # steps instead of the original fixed `range(num)` loop, same output.
    seed1, seed2 = 0, 1
    while seed1 + seed2 < num:
        seed1, seed2 = seed2, seed1 + seed2
    if seed1 + seed2 == num:
        print(num, 'is a Fibonacci number')
    else:
        print(num, 'is not a Fibonacci number')
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 3 20:30:29 2020

@author: ZHOU_YuZHAO
"""
from tkinter import *
import numpy as np  # This module is used for matrix operation
import csv

import matplotlib
# Select the Tk backend BEFORE pyplot is imported. The original called
# matplotlib.use('TkAgg') at the end of this section, after
# `import matplotlib.pyplot`, which is too late to reliably take effect.
matplotlib.use('TkAgg')
import matplotlib.pyplot
import read  # Using an agent to read raster file

readFile = read.Read()
root = Tk()
G, T, P = 0, 0, 0
# Using the function to get raster values from the data files.
geology = readFile.readFromFile("best.geology")
transport = readFile.readFromFile("best.mway")
population = readFile.readFromFile("best.pop")
m = None  # This is a flag, control whether highlight
enable = False
shape = geology.shape
import read

# Read an integer limit, square it, and print the result.
limit = read.Read(int, 'Limit')
print(read.Pow(limit, 2))
import settings
import calculate
import results
import CommandErrorHandler
import info
import read
import DataWriter
import DataSender

# Discord guild (server) this bot reports into.
guild_id = 740596566632562760
# NOTE(review): `commands` (discord.ext.commands) and `discord` are not
# imported in this excerpt — presumably imported earlier in the file; confirm.
bot = commands.Bot(command_prefix='+')

@bot.event
async def on_ready():
    # Runs once the gateway connection is ready: set presence, log identity,
    # and register one cog per feature module.
    await bot.change_presence(activity=discord.Game('Made by Shep and Peter!'))
    print(f'Logged in as: {bot.user.name}')
    print(f'With ID: {bot.user.id}')
    bot.add_cog(settings.Settings(bot))
    bot.add_cog(calculate.Calculate(bot))
    bot.add_cog(results.Results(bot))
    #bot.add_cog(CommandErrorHandler.CommandErrorHandler(bot))
    bot.add_cog(info.Info(bot))
    bot.add_cog(read.Read(bot))
    bot.add_cog(DataSender.DataSender(bot, guild_id))
    bot.add_cog(DataWriter.DataWriter(bot))

# The token lives in a local text file, kept out of source control.
with open('work_bot_token.txt', 'r') as f:
    bot.run(f.read().strip())
import read

# Read an integer and report whether it is prime.
number = read.Read(int, 'Number')
if read.prime(number):  # read.prime returns a bool
    print('Is a Prime Number')
else:
    print('Not a Prime Number')
def compressByBundle(self, input_name, compressed_name, intermediate_name=None): ''' Read a sorted SAM file and compress in segments determined by clusters of reads :param filename: :return: ''' # If coverage is 0 for at least this many bases end of a potential gene overlapRadius = 50 spliced_index = [] bundles = [] first = True bundle_id = 0 read_id = 0 diff_strand_unpaired_id = 0 num_diff_strand_unpaired = len(self.diff_strand_unpaired) firstR = None with open(input_name, 'r') as filehandle: id = 0 start_id = 0 for line in filehandle: # Check if header line if line[0] == '@': continue row = line.strip().split('\t') if row[2] == '*': # HISAT includes unmapped reads at the end of the file; we just skip them continue if not row[2] in self.chromosomes[0]: print('Error! Chromosome ' + str(row[2]) + ' not found!') exit() # Starting position of this read start = self.aligned.chromOffsets[row[2]] + int(row[3]) if self.aligned.gene_bounds and start > (self.aligned.gene_bounds[-1] + overlapRadius): # Compress most recent bundle self.aligned.finalizeExons() self.aligned.finalizeUnmatched() self.aligned.finalize_cross_bundle_reads() #if self.aligned.gene_bounds[0] < 100480943 and self.aligned.gene_bounds[1] > 100478955: # print(bundle_id) # print(self.aligned.gene_bounds) # print(self.aligned.exons) # print(self.aligned.gene_bounds[0] - self.aligned.chromOffsets['X']) # print(self.aligned.gene_bounds[1] - self.aligned.chromOffsets['X']) # exit() bundle_id += 1 start_id = id bundles.append(self.aligned.exons) # Write to intermediate file if intermediate_name: if first: # If it's the first bundle, write the header as well with open(intermediate_name, 'w') as f1: read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id) else: with open(intermediate_name, 'a') as f1: read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id) junctions, maxReadLen = self.aligned.computeBuckets() 
self.sortedJuncs = sorted(junctions.keys()) # Compress bundle to temporary file if first: mode = 'wb' else: mode = 'ab' with open('temp.bin', mode) as f: l = self.compressBundle(junctions, maxReadLen, f) spliced_index.append(l) # Start new bundle self.aligned.resetBundle() self.aligned.exons.add(start) first = False # Process read if row[5] == '*': # HISAT occasionally prints * as the cigar string when it is identical to its mate #print('No cigar string') #print(row[0]) #exit() exons = None else: exons = self.parseCigar(row[5], int(row[3])) # find XS (strand) and NH values strand = None NH = 1 for r in row[11 : len(row)]: if r[0:5] == 'XS:A:' or r[0:5] == 'XS:a:': strand = r[5] elif r[0:3] == 'NH:': NH = int(r[5:]) flags = int(row[1]) if flags & 4: # Read is unmapped continue r = read.Read(row[2], int(row[3]), exons, strand, NH) #r.name = row[0] if row[6] == '*' or (flags & 8): paired = False elif diff_strand_unpaired_id < num_diff_strand_unpaired and id == self.diff_strand_unpaired[diff_strand_unpaired_id]: #if not row[6] == '*': # print('\t'.join(row)) paired = False diff_strand_unpaired_id += 1 else: paired = True r.bundle = bundle_id r.pairOffset = int(row[7]) if row[6] == '=': r.pairChrom = row[2] else: r.pairChrom = row[6] self.aligned.processRead(row[0], r, paired) id += 1 # Compress final cluster self.aligned.finalizeExons() self.aligned.finalizeUnmatched() self.aligned.finalize_cross_bundle_reads() bundle_id += 1 bundles.append(self.aligned.exons) # Write to intermediate file if intermediate_name: if first: # If it's the first bundle, write the header as well with open(intermediate_name, 'w') as f1: read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, True, False, read_id) first = False else: with open(intermediate_name, 'a') as f1: read_id = self.aligned.writeSAM(f1, self.aligned.unpaired, self.aligned.paired, False, False, read_id) junctions, maxReadLen = self.aligned.computeBuckets() self.sortedJuncs = sorted(junctions.keys()) 
# Compress bundle to temporary file if first: mode = 'wb' else: mode = 'ab' with open('temp.bin', mode) as f: l = self.compressBundle(junctions, maxReadLen, f) spliced_index.append(l) leftovers = 0 for k,v in self.aligned.cross_bundle_reads.items(): #if len(v) > 0: # print(k) # print(v) # exit() leftovers += len(v) print('%d cross-bundle reads unmatched' % leftovers) bundle_lens = [c[-1]-c[0] for c in bundles] print('Minimum bundle length: %d' % min(bundle_lens)) print('Maximum bundle length: %d' % max(bundle_lens)) print('Average bundle length: %d'% (sum(bundle_lens) / len(bundle_lens))) # Write index information and append spliced and unspliced files with open(compressed_name, 'wb') as f: s = binaryIO.writeChroms(self.chromosomes) s += binaryIO.writeClusters(bundles) s += binaryIO.writeList(spliced_index) f.write(s) # Compress bundle-spanning buckets self.compressCrossBundle(self.aligned.cross_bundle_buckets, self.aligned.max_cross_bundle_read_len, bundle_id, f) # Move contents of temporary file to output file with open('temp.bin', 'rb') as f2: f.write(f2.read()) os.remove('temp.bin')
import read

# Read an integer limit, cube it, and print the result.
limit = read.Read(int, 'limit')
print(read.Pow(limit, 3))
import read

# Print the Fibonacci number at a 1-based position.
pos = read.Read(int, 'Position')
# Positions 1 and 2 index directly; for larger positions the desired value
# is the last element of the sequence returned by read.fib(pos).
index = pos - 1 if pos < 3 else -1
print(read.fib(pos)[index])
import read

# Report whether the input string contains any duplicate characters.
# (Renamed the original `str` variable so the builtin is not shadowed, and
# track seen characters in a set for O(1) membership instead of a list scan.)
text = read.Read(str, 'String Input')
seen = set()
for char in text:
    if char in seen:
        print(text, 'contains duplicates')
        exit()
    seen.add(char)
print(text, "doesn't contain any duplicates")
import read

# Print the code point of a single input character; ord() raises TypeError
# when the input is not exactly one character long.
try:
    character = read.Read(str, 'character')
    print(ord(character))
except TypeError:
    read.error('Input should be 1 character long')
import read

# Print every prime in the inclusive range [lower, upper].
lower = read.Read(int, 'Lower Limit')
upper = read.Read(int, 'Upper Limit')
for candidate in range(lower, upper + 1):
    if read.prime(candidate):  # read.prime returns True for primes
        print(candidate)
import read

# Report which vowels are missing from the input string.
# (Renamed the original `str` variable so the builtin is not shadowed.)
text = read.Read(str).lower()
vowels = ['a', 'e', 'i', 'o', 'u']
# Vowels absent from the input, collected via read.inList as before.
vowels_not = [v for v in vowels if not read.inList(v, text)]
if not vowels_not:  # empty list means every vowel appeared
    print('All vowels are present')
else:
    print(','.join(vowels_not), 'are not present')  # comma-joined list
import read

# Print the input string with duplicate characters removed, keeping
# first-occurrence order.
# (Renamed the original `str` variable so the builtin is not shadowed;
# dict.fromkeys preserves insertion order, matching the original loop.)
text = read.Read(str)
unique = ''.join(dict.fromkeys(text))
print(unique)
import read

# Case-insensitive palindrome check.
string = read.Read(str, 'String')
reversed_form = string[::-1]
if string.lower() == reversed_form.lower():
    print(string, 'is Palindrome')
else:
    print(string, 'is not Palindrome')
import read
import math  # for math.pi

# Compute a circle's area and circumference from its radius,
# rounded to 2 decimal places.
radius = read.Read(float, 'Radius')
area = round(math.pi * radius ** 2, 2)
perimeter = round(2 * math.pi * radius, 2)
print('Area of the circle is', area)
print('Perimeter of the circle is', perimeter)
import read

# Remove the n-th occurrence of a given item from a list.
lst = read.ReadList()
print(lst)

# Item to remove; must exist in the list.
key = read.Read(str, 'item to remove')
if not read.inList(key, lst):
    read.error(key + ' is not in the list')
    exit()

# Which occurrence to drop; the list must contain at least that many.
pos = read.Read(int, 'occurrence number')
if lst.count(key) < pos:
    read.error('{} does not have {} occurrences'.format(key, pos))
    exit()

seen = 0
for index, item in enumerate(lst):
    if item == key:
        seen += 1
        if seen == pos:  # found the requested occurrence
            lst.pop(index)
            break
print(lst)
import read

# Linear search: report whether `key` appears in the list.
lst = read.ReadList()
key = read.Read(str, 'Key to Search')
for element in lst:
    if element == key:
        print(key, 'is in the list')
        exit()
print(key, 'is not in the list')
import add
import read
from Gauss import gauss
import cloneNode as clone

# Build the normal-equation system for a least-squares regression from the
# input/output samples returned by read.Read().
array=[]
output=[]
inp=[]
inputs,outputs=read.Read()
# NOTE(review): `min` here shadows the builtin min().
min=len(inputs)
if len(inputs)>len(inputs[0]):
    min=len(inputs[0])
# Coefficient matrix: pairwise dot products of input rows (via add.MulAdd).
for i in range(len(inputs)):
    array.append([])
    for j in range(min):
        array[i].append(round(add.MulAdd(inputs[i],inputs[j]),4))
# Right-hand sides: dot products of each output vector with the input rows.
for i in range(len(outputs)):
    output.append([])
    for j in range(len(inputs)):
        output[i].append(round(add.MulAdd(outputs[i],inputs[j]),4))
# First cell holds the sample count (intercept term).
array[0][0]=float(len(inputs[0]))
# NOTE(review): nesting of the loops below reconstructed from a collapsed
# line — confirm against the original layout.
for o in range(len(output)):
    print("\nRegression",o+1," Complate!!")
    for i in range(len(array)):
        tempString=""
        for j in range(len(array[i])):
            # NOTE(review): `is` compares object identity, not value — this
            # should almost certainly be `j != len(array[i]) - 1`; the excerpt
            # ends here, so the branch body is not visible.
            if not j is len(array[i])-1:
import download
import json
import re
import os
import config
import time
import query
import multiprocessing
from multiprocessing import Pool
import read

if __name__ == '__main__':
    # Startup banner (Chinese: "The program is running...").
    print('程序正在运行。。。')
    multiprocessing.freeze_support()
    reader = read.Read()
    url_list = reader.read_txt()
    queryer = query.Query()
    # Disabled loop: queries whether each link is already registered
    # (the Chinese print reads "checking whether this link is registered").
    # for domain_obj in url_list:
    #     print('正在查询该链接是否已注册: '+domain_obj['url'])
    #     queryer.query_regist(domain_obj)

    # Split the URL list into chunks of roughly len(url_list)/num entries
    # for the worker pool.
    num = 10
    zhu_num = int(len(url_list) / int(num))
    all_list = [
        url_list[i:i + zhu_num] for i in range(0, len(url_list), zhu_num)
    ]
    item_list = []
    # NOTE(review): excerpt ends mid-loop; the remaining body (presumably
    # building pool tasks from each chunk) is not shown.
    for each in all_list:
        tuple_each = (each, all_list.index(each))
import read

# Compute simple interest: SI = principal x time x rate / 100.
amount = read.Read(float, 'Principle Amount')
duration = read.Read(float, 'Time')
interest_rate = read.Read(float, 'Rate of Interest')
interest = (amount * duration * interest_rate) / 100
print('\nSimple interest for principle amount {}, for {} time, at an interest rate of {} is {}'.format(amount, duration, interest_rate, interest))
import read

# Print the Fibonacci sequence up to the given limit, space-separated,
# ending with a newline (matches the original trailing-space output).
limit = read.Read(int, 'Limit')
line = ''.join('{} '.format(value) for value in read.fib(limit))
print(line)