def _populateMappings(self, mappings: dict):
    """Rebuild ``self.mappings`` from a raw dict of mapping entries.

    Keys of *mappings* are stringified integer ids; each value is a dict
    with a required "id" field and optional "description"/"tags" fields.
    Entry 0 is always present as a default ``Mapping(0, 0)``.
    """
    self.mappings = {0: Mapping(0, 0)}
    for raw_key in mappings:
        entry = mappings[raw_key]
        key = int(raw_key)
        mapping = Mapping(key, int(entry["id"]))
        # Optional metadata fields are copied through only when present.
        if "description" in entry:
            mapping.description = entry["description"]
        if "tags" in entry:
            mapping.tags = entry["tags"]
        self.mappings[key] = mapping
def __init__(self, mappingFile):
    """Initialise the cache, optionally pre-loading it from *mappingFile*.

    Each line of the mapping file is expected to hold four whitespace
    separated fields: key, value, extra, TTL (TTL is an integer).
    The file is assumed well-formed (not the purpose of this exercise).
    """
    # Parsed Mapping records, in file order.
    self._maps = []
    # ttlTimer entries recording when each mapping's TTL started.
    self._ttlStart = []
    # Working cache of mappings (starts as a copy of self._maps).
    self._cache = []
    if mappingFile != "":
        # FIX: use a context manager so the handle is closed even if a
        # line fails to parse (the original leaked it on exception).
        # Also avoid shadowing the builtins `file` and `map`.
        with open(mappingFile, "r") as mapping_fh:
            for line in mapping_fh:
                fields = line.split()
                # The last field is the TTL - always an int, so convert it.
                if len(fields) == 4:
                    record = Mapping(fields[0], fields[1], fields[2],
                                     int(fields[3]))
                    self._maps.append(record)
                    self._cache.append(record)
                    self._ttlStart.append(ttlTimer(record, default_timer()))
    # NOTE(review): original single-line formatting is ambiguous; this is
    # assumed to run unconditionally - confirm. time.clock() was removed
    # in Python 3.8; replace with time.perf_counter() on migration.
    self._time = time.clock()
def __init__(self):
    """Reset pose state and build the initial map (id 1)."""
    # Heading and odometry accumulators all start at zero.
    self.curr_angle = 0
    self.past_distance = 0
    self.x, self.y = 0, 0
    # Mapping helper owns the occupancy map.
    self.mp = Mapping()
    self.mp.create_map(1)
def __init__(self, tetra_fn, image_fn):
    """Store the image filename, build the tetra Mapping, and cache the
    background/transformation results computed from the image."""
    self.image_fn = image_fn
    self.mapping = Mapping(tetra_fn)
    # Review note kept from the original: this computes the background of
    # the image, not of the tetra image - same formulas, different input.
    bg_results = self.set_background_and_transformation()
    (self.background, self.threshold, self.pts_number, self.dstG,
     self.ptsG, self.im_mean20_correct, self.n_images, self.hdim,
     self.Gauss) = bg_results
def mapping():
    """Run the full mapping pipeline; argv[2] toggles test mode."""
    test_mode = str_to_bool(sys.argv[2])
    mapper = Mapping(test_mode)
    # Pipeline stages, executed in order.
    for stage in (mapper.draw_lines, mapper.grouping,
                  mapper.output_result, mapper.print_scans):
        stage()
def __init__(self, mission_number, db, image):
    """Initialise mission state: mapping, vehicle, map grid and path."""
    self.db = db
    self.img = image
    # The mapping must exist before its target can be read below.
    self.mapping = Mapping(mission_number, self.db)
    self.radius = 100
    self.vehicle = Car(60, 20)
    self.__local_target = self.mapping.target
    self.__map = MapInfo(800, 600, distance=15)
    # Path starts at the origin pose (x, y, heading).
    self.__path = [(0, 0, 0)]
def heatMaps(self, aps, config):
    """Render power/SINR heat maps for *aps* and dump both to CSV."""
    mapper = Mapping()
    powers, sinrs = mapper.mapPowerSinr(aps, config)
    self.heatMapPower(powers)
    self.heatMapSinr(sinrs)
    # Persist the raw grids alongside the rendered maps.
    pd.DataFrame(powers).to_csv("./csv/power.csv")
    pd.DataFrame(sinrs).to_csv("./csv/sinr.csv")
def test(self):
    """Build a fixed fixture: two tasks, one bus, two processors."""
    # Task(tau, name, period, offset, bcet, wcet)
    t1 = Task(1, 't1', 4, 0, 2, 2)
    t2 = Task(2, 't2', 6, 0, 1, 1)
    self.tasks.extend((t1, t2))
    # TODO: assign the remaining task fields as well
    # Bus/Platform(pid, name, sch[, ...])
    bus = Bus(1, 'bus', 'FP', 2)
    p1 = Platform(2, 'p1', 'RM')
    p2 = Platform(3, 'p2', 'RM')
    self.platforms.extend((bus, p1, p2))
    # Pin each task to its processor: Mapping(task, platform)
    self.mappings.extend((Mapping(t1, p1), Mapping(t2, p2)))
    # Dep(origin, dest, data)
    self.deps.append(Dep(t1, t2, 2))
def run(self):
    """Probe worker performance, then run the mapping algorithm once."""
    mon = Monitoring()
    # FIX: bind the module under a distinct name so the float timestamp
    # does not shadow the `time` module (original: `time = time.time()`).
    import time as time_module
    start_time = time_module.time()
    mon.probeWPerformance()
    mappingInstance = Mapping(mon)
    # mappingInstance.start_mem_pin(start_time)
    # FIX: `algorithm` was commented out but still referenced below,
    # which raised NameError at runtime. Restore the documented default.
    algorithm = "alg"  # "vanila" or "alg"
    mappingInstance.start(algorithm, start_time)
def setUp(self):
    """Setup function TestTypes for class Mapping"""
    obj = Mapping(bitstream_frames, mapping_type, bits_per_symbol)
    self.MappingObj = obj
    # Mirror the object's attributes onto the test case for easy access.
    self.bitstream_frames = obj.bitstream_frames
    self.bits_per_symbol = obj.bits_per_symbol
    self.frame_size = obj.frame_size
    self.mapped_info = obj.mapped_info
    self.mapping_type = obj.mapping_type
    self.rx_bitstream_frames = obj.rx_bitstream_frames
def searchInServer(port, key, type, sock, cache, startMsg):
    # Query the DNS-like server at 127.0.0.1:<port> for (key, type) over
    # UDP, cache every mapping found in the reply, and return the last
    # Mapping parsed. Python 2 code (print statements, str sockets).
    #
    # Wire format (as this code demonstrates): request is
    # "<startMsg>|<key> <type>"; the reply may carry up to two
    # newline-separated answers, each of the form "<text>@<mapping-string>".
    dest_ip = '127.0.0.1'
    dest_port = int(port)
    # I split the message because we need to hold the key and type but
    # doing an iterative function (asking for the domain)
    message = startMsg + "|" + key + " " + type
    sock.sendto(message, (dest_ip, dest_port))
    # Receive the answer to the query (blocking; no timeout is set here).
    data, sender_info = sock.recvfrom(2048)
    # Messages end at '@'; after it there is a string representing the map.
    queries = data.split("\n")
    splitted = queries[0].split("@")
    print "Message: ", splitted[0], " from: ", sender_info
    foundMap = Mapping.fromString(splitted[1])
    # Cache the first mapping, stamped with the current timer value.
    cache.addMapping(foundMap, default_timer())
    # Some replies carry a second answer; parse and cache it too.
    # Note: when present, this second mapping is the one returned.
    if (len(queries) > 1):
        splitted = queries[1].split("@")
        print "Message: ", splitted[0], " from: ", sender_info
        foundMap = Mapping.fromString(splitted[1])
        # Cache the second mapping as well.
        cache.addMapping(foundMap, default_timer())
    return foundMap
def clasificacion_odisea(idtournament):
    """Fetch the standings of tournament *idtournament* from the mordrek
    API, colour-code them, and return them as a JSON string.

    Only handles GET requests; any other method implicitly returns None
    (preserved from the original).
    """
    if request.method == "GET":
        # FIX: removed the dead `compStandings` local - it was built and
        # then immediately superseded by the full `req` payload below.
        req = {
            "compStandings": {
                "id": "compStandings",
                "idmap": {
                    "idcompetition": idtournament,
                },
                "filters": {
                    "team_name": "[PI]",
                    "active": 1,
                },
                "ordercol": "sorting",
                "order": "desc",
                "limit": 256,
                "from": 0
            }
        }
        # NOTE(review): this interpolates the Python dict repr directly
        # into the URL without URL-encoding; it evidently works against
        # this API, but consider `params=` / urlencode - confirm.
        req = requests.get(
            f'https://www.mordrek.com:666/api/v1/queries?req={req}')
        req = req.json()
        req = req["response"]["compStandings"]["result"]["rows"]
        # Each row is positional; indices below map API columns to fields.
        lista_datos = [{
            "position": indice + 1,
            "coach_name": fila[27],
            "team_name": fila[22],
            "race_name": Mapping.ids_to_razas([fila[21]])[0],
            "wins": fila[6],
            "draws": fila[7],
            "losses": fila[8],
            "ranking": fila[2],
            "td": fila[9],
            "cas": fila[12],
            "idteam": fila[20]
        } for indice, fila in enumerate(req)]
        # Annotate qualification status in place.
        colorear_clasificacion(lista_datos)
        return json.dumps(lista_datos)
def __init__(self, DEBUG=False):
    """Constructor: instantiate every stage of the simulated link chain."""
    if DEBUG:
        print('Running Main...')
    # Source message.
    self.message_obj = Message(input_info=input_info, n_frames=n_frames)
    # Bits -> symbols.
    self.mapping_obj = Mapping(bitstream_frames=bitstream_frames,
                               mapping_type=mapping_type,
                               bits_per_symbol=bits_per_symbol)
    # Symbols -> waveform.
    self.modulator_obj = Modulator(mapped_info=mapped_info,
                                   modulation_type=modulation_type)
    # Front ends and channel.
    self.transmitter_obj = Transmitter(
        transmitter_config=transmitter_config, tx_data=tx_data,
        bypass=bypass)
    self.channel_obj = Channel(tx_data_in=tx_data_in, raytrace=raytrace)
    self.receiver_obj = Receiver(receiver_config=receiver_config,
                                 rx_data=rx_data, bypass=bypass)
    # Post-processing helpers.
    self.merit_functions_obj = MeritFunctions()
    self.global_obj = Global()
def start(self):
    # Drive the whole analysis pipeline: resolve output dirs, validate
    # inputs, extract sample info, then launch each analysis stage whose
    # code appears in self.analy_array. Python 2 code (print statements,
    # iteritems). Heavy side effects: mutates self.args, self.jobs,
    # self.orders and module-level config; prints progress throughout.
    # temp
    advance_dirs = {
        'Merged_vcf': '{analydir}/Advance/{newjob}/Merged_vcf',
        'ACMG': '{analydir}/Advance/{newjob}/ACMG',
        'FilterSV': '{analydir}/Advance/{newjob}/FilterSV',
        'FilterCNV': '{analydir}/Advance/{newjob}/FilterCNV',
        'Noncoding': '{analydir}/Advance/{newjob}/Noncoding',
        'ModelF': '{analydir}/Advance/{newjob}/ModelF',
        'Share': '{analydir}/Advance/{newjob}/Share',
        'Denovo': '{analydir}/Advance/{newjob}/Denovo',
        'Linkage': '{analydir}/Advance/{newjob}/Linkage',
        'ROH': '{analydir}/Advance/{newjob}/ROH',
        'Network': '{analydir}/Advance/{newjob}/Network',
        'Pathway': '{analydir}/Advance/{newjob}/Pathway',
        'PPI': '{analydir}/Advance/{newjob}/PPI',
        'HLA': '{analydir}/Advance/{newjob}/HLA',
        'SiteAS': '{analydir}/Advance/{newjob}/SiteAS',
        'GeneAS': '{analydir}/Advance/{newjob}/GeneAS',
        'IntegrateResult': '{analydir}/Advance/{newjob}/IntegrateResult',
        'Disease': '{analydir}/Advance/{newjob}/Disease',
        'BriefResults': '{analydir}/Advance/{newjob}/BriefResults',
    }
    # Expand the {analydir}/{newjob} placeholders and publish each
    # directory path into self.args under its key.
    for k, v in advance_dirs.iteritems():
        self.args.update({k: v.format(**self.args)})
    # print self.args['SiteAS']
    # exit()
    # print self.analy_array
    print 'hello, {}'.format(self.username)

    # Require rawdata or not: derive qc/mapping status from the startpoint.
    qc_status = utils.get_status('qc', self.startpoint,
                                 config.ANALYSIS_POINTS)
    mapping_status = utils.get_status('bwa_mem', self.startpoint,
                                      config.ANALYSIS_POINTS)
    print 'qc status:', qc_status
    print 'mapping status:', mapping_status

    # Map analysis codes to a flag dict (which stages are enabled).
    ANALY_DICT = utils.get_analysis_dict(self.analy_array,
                                         config.ANALYSIS_CODE)
    self.args.update({'ANALY_DICT': ANALY_DICT})
    # print ANALY_DICT.keys();exit()

    # Resolve which software to use for each stage.
    softwares = utils.get_softwares(self.analy_array,
                                    self.args['ANALY_DICT'], self.args,
                                    self.seqstrag)
    # pprint(softwares);exit()
    self.args.update({'softwares': softwares})

    # check inputs
    self.queues = utils.check_queues(self.queues, self.username)
    self.args.update({'queues': self.queues})

    # use sentieon specific queues if needed; fall back to the normal
    # queues when the configured sentieon queues are unusable.
    if 'sentieon' in softwares.values():
        print 'add sentieon_queues'
        sentieon_queues = self.queues
        if config.CONFIG.has_option('resource', 'sentieon_queues'):
            sentieon_queues = config.CONFIG.get(
                'resource', 'sentieon_queues').split(',')
            sentieon_queues = utils.check_queues(sentieon_queues,
                                                 self.username)
            if not sentieon_queues:
                sentieon_queues = self.queues
        self.args.update({'sentieon_queues': sentieon_queues})
    # print self.args['sentieon_queues'];exit()
    # print sentieon_queues;exit()

    # Validate the requested analyses against strategy and inputs.
    utils.check_analy_array(self.seqstrag, self.analy_array,
                            config.ANALYSIS_CODE)
    utils.check_files(self.pn, self.samp_info, self.samp_list)
    newTR = utils.check_target_region(config.CONFIG, self.seqstrag,
                                      self.refgenome, self.rawTR)
    self.args.update({'TR': newTR})

    print 'analysis items:'
    for analysis_code in self.analy_array:
        print utils.color_text(
            '{:4} {}'.format(analysis_code,
                             config.ANALYSIS_CODE[analysis_code][0]),
            'yellow')

    # Analysis start point: validate, suggest a close match on typo, and
    # abort with the list of valid startpoints otherwise.
    if self.startpoint:
        if self.startpoint in config.ANALYSIS_POINTS:
            print 'start point: {}'.format(
                utils.color_text(self.startpoint))
        else:
            print '[error] invalid startpoint: {}'.format(
                utils.color_text(self.startpoint))
            print 'maybe you want to choose: {}'.format(
                utils.color_text(
                    process.extractOne(self.startpoint,
                                       config.ANALYSIS_POINTS.keys())[0],
                    'cyan'))
            print 'available startpoints are as follows:\n {}'.format(
                ' '.join(config.ANALYSIS_POINTS.keys()))
            exit(1)

    # Codes above 6.1 are "advance" analyses (affects sample extraction).
    is_advance = max(self.analy_array) > 6.1

    project = utils.Project(self.analydir, self.samp_info,
                            self.samp_info_done, self.samp_list,
                            self.qc_list, qc_status, mapping_status,
                            is_advance)

    # Extract sample_info
    print 'extract sample informations...'
    fenqi, tissue, disease_name, sample_infos, sample_infos_all, sample_done = project.get_sample_infos(
        self.samp_list, self.samp_info, self.samp_info_done, is_advance)

    # Look up disease ids in the bundled DisGeNet database.
    database = '{}/project/DisGeNet.json'.format(
        config.CONFIG.get('software', 'soft_dir'))
    disease_ids = utils.get_disease_id(disease_name, database)
    self.args.update({
        'disease_name': disease_name,
        'disease_ids': disease_ids,
    })

    # Samples not yet finished are the ones the pipeline must process.
    sample_infos_waiting = {
        sampleid: infos
        for sampleid, infos in sample_infos.iteritems()
        if sampleid not in sample_done
    }
    self.args.update({'sample_infos_waiting': sample_infos_waiting})
    # print sample_infos_waiting
    # exit()

    # print 'fenqi:', fenqi
    # print 'tissue:', tissue
    # exit()

    sample_lists = project.get_sample_lists
    # print sample_lists
    # print sample_infos.keys()
    # print sample_infos_all.keys()
    # for sample in sample_infos:
    #     print sample, sample_infos[sample]['familyid']
    # exit()

    # Refresh the QC list when mapping has not started yet.
    if mapping_status == 'waiting':
        sample_lists = project.update_qc_list()

    print '  report number: {}'.format(utils.color_text(fenqi))
    if disease_name:
        print '  disease name: {}'.format(utils.color_text(disease_name))
        print '  disease id: {}'.format(utils.color_text(disease_ids))
    if tissue:
        print '  tissue: {}'.format(utils.color_text(tissue))
    print '  samples ({}): {}'.format(
        len(sample_infos), utils.color_text(sample_infos.keys()))
    if sample_done:
        print '  samples done({}): {}'.format(
            len(sample_done), utils.color_text(sample_done))

    # Update qc_list and extract sample_list
    # print 'update qc_list...'
    # print json.dumps(sample_lists, indent=2)

    # set memory according seqstrag: WGS runs get larger per-stage memory.
    print 'set analysis memory...'
    if self.seqstrag == 'WGS':
        print 'upate memory for WGS...'
        for analysis, memory in config.ANALYSIS_MEM_WGS.items():
            if analysis in config.ANALYSIS_POINTS:
                config.ANALYSIS_POINTS[analysis][0] = memory
    # exit()

    # ===========================================================
    # ===========================================================
    print '>>> pipeline start...'

    mutation_soft, sv_soft, cnv_soft, denovo_soft = [
        softwares[each] for each in ('mutation', 'sv', 'cnv', 'denovo')
    ]
    print '  mutation_soft:{}, sv_soft:{}, cnv_soft:{}, denovo_soft:{}'.format(
        mutation_soft, sv_soft, cnv_soft, denovo_soft)

    # Each stage below is gated by its ANALY_DICT flag; stage objects
    # share self.args/self.jobs/self.orders and register jobs on .start().
    # QC
    if ANALY_DICT['quality_control'] and qc_status == 'waiting':
        utils.print_color('> QC', 'white')
        QC(self.args, self.jobs, self.orders, sample_lists, config).start()

    # Mapping
    if ANALY_DICT['mapping']:
        utils.print_color('> Mapping', 'white')
        Mapping(self.args, self.jobs, self.orders, sample_lists,
                sample_infos, config, qc_status, mapping_status).start()

    # Mutation
    if ANALY_DICT['snpindel_call']:
        utils.print_color('> Mutation', 'white')
        Mutation(self.args, self.jobs, self.orders, sample_lists,
                 sample_infos, config).start()

    # SV
    if ANALY_DICT['sv_call']:
        utils.print_color('> SV', 'white')
        SV(self.args, self.jobs, self.orders, sample_infos, config).start()

    # CNV
    if ANALY_DICT['cnv_call']:
        utils.print_color('> CNV', 'white')
        CNV(self.args, self.jobs, self.orders, sample_infos,
            config).start()

    # FilterDB
    if ANALY_DICT['filter']:
        utils.print_color('> FilterDB', 'white')
        FilterDB(self.args, self.jobs, self.orders, mutation_soft, sv_soft,
                 cnv_soft, sample_infos, config, disease_name, tissue,
                 ANALY_DICT).start()

    # ModelF
    if ANALY_DICT['filter_model']:
        utils.print_color('> Model', 'white')
        FilterModel(self.args, self.jobs, self.orders, mutation_soft,
                    sv_soft, cnv_soft, sample_infos, config).start()

    # Denovo
    if ANALY_DICT['denovo']:
        utils.print_color('> Denovo', 'white')
        Denovo(self.args, self.jobs, self.orders, mutation_soft, sv_soft,
               cnv_soft, denovo_soft, sample_infos, config,
               ANALY_DICT).start()

    # Linkage
    if ANALY_DICT['linkage']:
        utils.print_color('> Linkage', 'white')
        Linkage(self.args, self.jobs, self.orders, mutation_soft, sv_soft,
                cnv_soft, denovo_soft, sample_infos_all, config,
                ANALY_DICT).start()

    # IntegrateResult: runs when any variant-producing stage is enabled.
    if any(ANALY_DICT[analysis]
           for analysis in ['filter', 'filter_model', 'denovo',
                            'phenolyzer']):
        utils.print_color('> IntegrateResult', 'white')
        IntegrateResult(self.args, self.jobs, self.orders, config).start()

    # ROH
    if ANALY_DICT['roh']:
        utils.print_color('> ROH', 'white')
        ROH(self.args, self.jobs, self.orders, sample_infos, mutation_soft,
            config).start()

    # OTHER: container for the small auxiliary analyses below.
    other = Other(self.args, self.jobs, self.orders, config, disease_name)

    # IBD: only meaningful with at least two samples still waiting.
    if any(ANALY_DICT[each]
           for each in ['filter_model', 'linkage', 'denovo'
                        ]) and len(sample_infos_waiting) > 1:
        utils.print_color('> IBD', 'white')
        other.ibd()

    # Network
    if ANALY_DICT['phenolyzer']:
        utils.print_color('> Phenolyzer', 'white')
        other.phenolyzer()

    # Pathway
    if ANALY_DICT['pathway']:
        utils.print_color('> Pathway', 'white')
        other.pathway()

    # PPI
    if ANALY_DICT['ppi']:
        utils.print_color('> PPI', 'white')
        other.ppi()

    # SiteAS
    if ANALY_DICT['site_association']:
        utils.print_color('> SiteAS', 'white')
        Association(self.args, self.jobs, self.orders,
                    config).site_association()

    # GeneAS
    if ANALY_DICT['gene_association']:
        utils.print_color('> GeneAS', 'white')
        Association(self.args, self.jobs, self.orders,
                    config).gene_association()

    # HLA
    if ANALY_DICT['hla']:
        utils.print_color('> HLA', 'white')
        HLA(self.args, self.jobs, self.orders, sample_lists, sample_infos,
            config, qc_status).start()

    # result and report: always generated.
    utils.print_color('> Result', 'white')
    Result(self.args, self.jobs, self.orders, config).start()

    utils.print_color('> Report', 'white')
    Report(self.args, self.jobs, self.orders, config).start()

    # job summary: how many registered jobs are still waiting.
    print 'lenght of jobs waiting/total: {}/{}'.format(
        len([job for job in self.jobs if job.get('status') == 'waiting']),
        len(self.jobs))

    utils.write_job(self.analydir, self.newjob, self.jobs, self.orders)

    print '{:-^80}'.format(' all done ')
# Popup to select input station_data.sql file root = tk.Tk() root.withdraw() # log_file_name = "small.down" log_file_name = filedialog.askopenfilename() if not log_file_name: print("Error: must select an asdo.log file") exit(1) print("Reading data from {}".format(log_file_name)) log_file = open(log_file_name, 'r') gps_previous = GpsPoint('', 0, 0, 0, 0) gps_point = GpsPoint('', 0, 0, 0, 0) mapping = Mapping(log_file_name) # ip_addr = "10.177.156.21" # ip_addr = "10.182.144.21" # ip_addr = "10.181.72.21" # ip_addr = "10.176.36.21" ip_addr = None marker_count = [0] map_count = 1 i_line = 0 header = Header() # navigation = Navigation(0, 0, 0, '', 0) # navigation.setGpsPoint(gps_map, gps_point, gps_previous) nav_msg = Navigation()
from Reader import Reader
from Mapping import Mapping
from Compare import Compare
from Writer import Writer

# gold standard
filename_wiktionary = "files/wiki_output_with_pgmc_forms.csv"
# test languages
filename_results = "files/results.csv"
# validation languages
filename_extend = "files/results_kroonen_northeuralex.csv"

if __name__ == '__main__':
    # NOTE(review): these rebindings shadow the imported class names with
    # instances; later code (including the continuation past this view)
    # appears to rely on these names, so they are left unchanged.
    Reader = Reader()
    Mapping = Mapping()
    Compare = Compare()
    Writer = Writer()
    # reads files
    results = Reader.read_csv(filename_results)
    wiki = Reader.read_csv(filename_wiktionary)
    validation_set = Reader.read_csv(filename_extend)
    # maps wiki to test set
    all_words, wiki_words, res_words, id_lst = Mapping.maps(wiki, results)
    print("all words common between both lists:", len(all_words))
    print("wiktionary cognate sets", len(wiki_words))
    print("results cognate sets", len(res_words))
    # compares data and outputs results
def colorear_clasificacion(equipos) -> list:
    """Annotate each team dict in *equipos* with a "status" field and
    return the teams re-ordered: super-qualified, qualified, then
    non-qualified.

    Assumes *equipos* is already sorted best-first (as produced by
    clasificacion_odisea). Mutates the dicts in place.
    """
    tamaño_top = 14
    tamaño_top_stunty = 1
    equipos_maximos_raza = 2
    equipos_super_clasificados = []
    equipos_clasificados = []
    equipos_no_clasificados = []
    indice = 0
    # Super-qualified: the leading run of teams with ranking >= 75.
    while indice < len(equipos):
        equipo = equipos[indice]
        if float(equipo["ranking"]) >= 75.00:
            equipo.update({"status": "super_clasificado"})
            equipos_super_clasificados.append(equipo)
            indice += 1
        else:
            break
    # Qualified: fill the top, skipping coaches already in, teams with
    # more than 40 games played, and races already represented twice.
    while indice < len(equipos) and len(equipos_clasificados) < tamaño_top:
        equipo = equipos[indice]
        equipos_misma_raza = 0
        repetido = False
        for mejor_equipo in equipos_clasificados:
            if mejor_equipo["coach_name"] == equipo["coach_name"] or int(
                    equipo["wins"]) + int(equipo["draws"]) + int(
                        equipo["losses"]) > 40:
                repetido = True
                break
            if mejor_equipo["race_name"] == equipo["race_name"]:
                equipos_misma_raza += 1
                if equipos_misma_raza >= equipos_maximos_raza:
                    break
        if equipos_misma_raza < 2 and not repetido:
            equipo.update({"status": "clasificado"})
            equipos_clasificados.append(equipo)
        else:
            # FIX: dict.update() returns None; the original did
            # `equipo = equipo.update(...)` and appended None here,
            # losing the team and breaking the stunty removal below.
            equipo.update({"status": "no_clasificado"})
            equipos_no_clasificados.append(equipo)
        indice += 1
    # Collect all stunty teams.
    # NOTE(review): razas_to_ids returns ids while "race_name" holds the
    # value produced by Mapping.ids_to_razas upstream - confirm these
    # actually compare equal.
    ids_razas_stunty = Mapping.razas_to_ids(["Goblins", "Halflings", "Ogros"])
    equipos_stunty = list(
        filter(lambda equipo: equipo["race_name"] in ids_razas_stunty,
               equipos))
    # Drop stunty teams that already qualified in the green interval.
    ids_equipos_clasificados = [
        equipo["idteam"] for equipo in equipos_clasificados
    ]
    equipos_stunty = list(
        filter(
            lambda equipo: equipo["idteam"] not in ids_equipos_clasificados,
            equipos_stunty))
    # Drop stunty teams whose coach already has a qualified team.
    entrenadores_clasificados = [
        equipo["coach_name"] for equipo in equipos_clasificados
    ]
    equipos_stunty = list(
        filter(
            lambda equipo: equipo["coach_name"] not in
            entrenadores_clasificados, equipos_stunty))
    # If any stunty team remains, award the reserved stunty slot(s).
    if equipos_stunty:
        # FIX: the original used pop(index) with an increasing index over
        # a shrinking list, which mis-indexes for any quota > 1; pop(0)
        # always takes the best remaining stunty team. min() also guards
        # against having fewer stunty teams than reserved slots.
        for _ in range(min(tamaño_top_stunty, len(equipos_stunty))):
            top_stunty = equipos_stunty.pop(0)
            # It made the top, so it is green.
            top_stunty.update({"status": "clasificado"})
            equipos_clasificados.append(top_stunty)
            # If it was in the blue group, move it out - it is green now.
            if top_stunty in equipos_no_clasificados:
                equipos_no_clasificados.remove(top_stunty)
    # Concatenate all the groups in display order.
    equipos_super_clasificados.extend(equipos_clasificados)
    equipos_super_clasificados.extend(equipos_no_clasificados)
    return equipos_super_clasificados
Created on Jun 2019

@author: carlos

Meant for mapping, but now with class implementation.
"""
import os

from ImageCollection import ImageCollection
from Mapping import Mapping
import numpy as np
import matplotlib.pyplot as plt

# Tetra-speck reference image used to build the channel mapping.
tetra_fn = os.path.normpath('N:/tnw/BN/CMJ/Shared/Margreet/20181203_HJ_training/mapping/rough_ave.tif')
#tetra_fn='/home/carlos/PycharmProjects/margreet_code_review/rough_ave.tif'
#image_fn ='/home/carlos/PycharmProjects/margreet_code_review/hel4.pma'
tmp = Mapping(tetra_fn)

# Hard-coded dataset switch: `if 1` selects the HJC movie dataset; flip
# to the else-branch for the GattaQuant .sifx dataset.
if 1:
    root = os.path.normpath("N:/tnw/BN/CMJ/Shared/Margreet/#3.10_0.1mg-ml_streptavidin_50pM_HJC_G_movies")
    name = 'hel4.pma'
else:
    root = os.path.normpath("N:/tnw/BN/CMJ/Shared/Margreet/181218 - First single-molecule sample (GattaQuant)/RawData")
    name = 'Spooled files.sifx'
image_fn = os.path.join(root, name)

# better: give image directory, and search for specific name or extension
imc = ImageCollection(tetra_fn, image_fn)
img = imc.get_image(1)
#tmp._tf2_matrix
def main():
    # UDP DNS-like server loop. Python 2 code (print statements).
    # Reads its mapping file into a TTL-aware cache, then serves queries
    # forever. argv[1] == "1" makes this instance a resolver (it will
    # iterate from the root server down); otherwise it answers only from
    # its own cache/mapping file. Never returns.
    #
    # In the cache we will check if this query has a result.
    #   cache  - our cache
    #   key    - the key whose DNS record we want
    #   type   - the DNS query type (A or NS in this exercise)
    cacheHandler = CacheHandler(mapFile)
    # Argv command line arguments.
    # The first parameter to the server is whether the server is a resolver.
    resolver = argv[1]
    # The second parameter is the root server's port (e.g. from
    # 127.0.0.2:11111 style configuration).
    rootPort = argv[2]
    s = socket(AF_INET, SOCK_DGRAM)
    # The socket to our server.
    s.bind((source_ip, source_port))
    while True:
        # Update cache TTLs before answering each request, so expired
        # entries are dropped.
        if (cacheHandler != None):
            cacheHandler.updateTTL(default_timer())
        #cacheHandler.printCache()
        data, sender_info = s.recvfrom(2048)
        # Log the query; resolvers receive "query@..." while plain
        # servers receive "log|key type" framed requests.
        if resolver == "1":
            print "Message: ", data.split("@")[0], " from: ", sender_info
            splitted = data.split()
        else:
            print "Message: ", data.split("|")[0], " from: ", sender_info
            splitted = data.split("|")[1].split()
        # Defaults if the request is malformed.
        key = ""
        typeQ = ""
        msg = "Please send key and the type for example 'com A'"
        if len(splitted) == 2:
            # Where to send sub-queries: start at the root server.
            serverToSearch = '127.0.0.1'
            portToSearch = rootPort
            # The requested key and type (kept for the final answer text).
            requestedKey = splitted[0]
            requestedType = splitted[1]
            # Working key/type, rewritten on each iterative sub-query.
            key = requestedKey
            typeQ = requestedType
            # First try to answer straight from the cache/mapping file.
            mapSearch = cacheHandler.search(key, typeQ)
            # Resolver mode: walk the name iteratively when not cached.
            if resolver == "1":
                if mapSearch != None:
                    msg = "The answer for " + mapSearch.getKey() +\
                        " is " + mapSearch.getValue() + "@" + Mapping.stringConvert(mapSearch)
                # Did not find an answer in the cache.
                else:
                    # Split the domain by dots and iterate from the TLD
                    # (the end, of course) towards the full name.
                    splitAddress = key.split('.')
                    for i in range(len(splitAddress) - 1, -1, -1):
                        if i == len(splitAddress) - 1:
                            key = splitAddress[i]
                        else:
                            # Grow the key, e.g. "com" -> "ac.com".
                            key = splitAddress[i] + "." + key
                        # Search the cache for this partial name first.
                        mapSearch = cacheHandler.search(key, typeQ)
                        if mapSearch != None:
                            serverToSearch = '127.0.0.1'
                            msg = "The answer for " + key + " " \
                                + typeQ + " is " + mapSearch.getValue() + "@" + mapSearch.stringConvert()
                        else:
                            # Not cached: ask the current server and cache
                            # whatever it returns.
                            # Intermediate labels of an A query resolve
                            # the next nameserver (NS) to ask.
                            if requestedType == "A" and i != 0:
                                mapServer = searchInServer(
                                    portToSearch, key, "NS", s,
                                    cacheHandler, data.split("@")[0])
                                portToSearch = mapServer.getPort()
                            # The last label resolves the A record itself.
                            if i == 0 and requestedType == "A":
                                mapServer = searchInServer(
                                    portToSearch, key, "A", s,
                                    cacheHandler, data.split("@")[0])
                                portToSearch = mapServer.getPort()
                                msg = "The answer for " + requestedKey + " is " + mapServer.getValue() + "@" \
                                    + mapServer.stringConvert()
                            if requestedType == "NS":
                                mapServer = searchInServer(
                                    portToSearch, key, "NS", s,
                                    cacheHandler, data.split("@")[0])
                                cacheHandler.addMapping(
                                    mapServer, default_timer())
            # Not a resolver - answer only from its own mapping and cache.
            else:
                if (mapSearch != None):
                    msg = "The answer for " + requestedKey + " is " + mapSearch.getValue() + "@" \
                        + mapSearch.stringConvert()
                    if (requestedType == "NS"):
                        # Now we do an A query on the returned name server
                        # and append that answer too, if known.
                        key = mapSearch.getValue()
                        typeQ = "A"
                        mapSearch = cacheHandler.search(key, typeQ)
                        if mapSearch != None:
                            msg2 = "\n" + "The answer for " + key + " is " + mapSearch.getValue(
                            )
                            msg3 = "@" + mapSearch.stringConvert()
                            msg = "".join((msg, msg2))
                            msg = "".join((msg, msg3))
                else:
                    # Answer not in the cache or mapping file; this server
                    # is not a resolver, so it cannot recurse.
                    msg = "Did not found answer to this query"
        s.sendto(msg, sender_info)