def clientHandler(dataSocket, addr):
    while True:
        recved = dataSocket.recv(BUFLEN)
        # recv() returns an empty bytes object when the peer closes the connection
        if not recved:
            print(f'client {addr} closed the connection')
            break
        # the received data is bytes and must be decoded to a string
        info = recved.decode()
        common.common(addr, info)
        dataSocket.send(f'hhh {info}'.encode())
    dataSocket.close()
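# A minimal sketch of the accept loop that could drive clientHandler above.
# BUFLEN, the port, and the thread-per-client choice are assumptions for
# illustration; the original server code is not shown in this snippet.
import socket
import threading

BUFLEN = 1024

def serve(host='0.0.0.0', port=8000):
    listenSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listenSocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listenSocket.bind((host, port))
    listenSocket.listen(8)
    while True:
        # accept() blocks until a client connects; hand the connection
        # off to clientHandler on its own thread
        dataSocket, addr = listenSocket.accept()
        threading.Thread(target=clientHandler, args=(dataSocket, addr),
                         daemon=True).start()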
def wight_time(self, p_from_time, p_to_time, p_seconds=1):
    init = common()
    p_to_time = init.str_to_time(p_to_time)
    p_from_time = init.str_to_time(p_from_time)
    temp_minus_time = p_to_time - p_from_time
    # elapsed minutes scaled by the per-minute weight p_seconds
    w_time = (temp_minus_time.total_seconds() / 60) * p_seconds
    return w_time
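# The same weighted-minutes arithmetic as wight_time, shown with the standard
# datetime module only. common.str_to_time and its timestamp format are not
# shown in this snippet, so the "%Y-%m-%d %H:%M:%S" format here is an assumption.
from datetime import datetime

fmt = '%Y-%m-%d %H:%M:%S'
start = datetime.strptime('2020-01-09 14:00:00', fmt)
end = datetime.strptime('2020-01-09 14:45:00', fmt)
# 45 elapsed minutes * weight of 2 per minute = 90.0
print(((end - start).total_seconds() / 60) * 2)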
def set_values(self, curdir, configxml, dbpath):
    self.curdir = curdir
    self.configxml = configxml
    self.dbpath = dbpath
    self.lib = common.common()
    self.templates = templates.templates()
    self.fields_to_show = configxml.find('fields_to_show').text.split(',')
def method(self):
    c = common()
    stockListPath = constants.stockListPath
    c.mkdir(stockListPath)
    stockListUrl = constants.stockListUrl
    html = c.getHtml(stockListUrl)
    dataList = []
    rest = re.findall(r'\[{.*}\]', html, flags=0)
    for res in rest:
        res = res.replace("\"", "").replace("f12:", "").replace("f14:", "").replace("[", ""). \
            replace("]", "").replace("{", "").replace("}", "").split(",")
        dataList = dataList + res
    # keep using the instance; rebinding c to the class (`c = common`) was a bug
    stockListName = c.getCurrentTime()
    f = open(stockListPath + stockListName, 'w+', encoding='utf-8', newline="")
    writer = csv.writer(f)
    stockListTitle = constants.stockListTitle
    writer.writerow(stockListTitle)
    for idx in range(0, len(dataList) - 1, 2):
        writer.writerow((dataList[idx], dataList[idx + 1]))
    f.close()
def __init__(self, parent=None):
    QtCore.QThread.__init__(self, parent)
    self.exiting = False
    self.lib = common.common()
    self.templates = templates.templates()
    self.query_db_getall = query_db_getall.queryDbGetAll()
    self.query_db_getdetail = query_db_getdetail.queryDbGetDetail()
def updateStockList():
    s = stockList()
    s.method()
    filePath = sorted(os.listdir(constants.stockListPath), reverse=True)
    c = common()
    stockDict = c.readCSV(constants.stockListPath + filePath[0])
    insert = []
    update = []
    if len(filePath) == 1:
        for key in stockDict.keys():
            insert.append([key, stockDict[key]])
    else:
        stockDictOld = c.readCSV(constants.stockListPath + filePath[1])
        # codes present only in the newest list are inserts
        for key in stockDict.keys() - stockDictOld.keys():
            insert.append([key, stockDict[key]])
        # codes present in both lists whose name changed are updates
        for key in stockDict.keys() & stockDictOld.keys():
            if stockDict[key] != stockDictOld[key]:
                update.append([stockDict[key], key])
    return insert, update
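# updateStockList relies on dict views supporting set algebra; a minimal,
# self-contained demonstration of the same diff logic on toy data:
new = {'600000': 'Bank A', '000001': 'Bank B renamed', '300001': 'New Co'}
old = {'600000': 'Bank A', '000001': 'Bank B'}

print(new.keys() - old.keys())  # {'300001'} -> rows to insert
print({k for k in new.keys() & old.keys() if new[k] != old[k]})  # {'000001'} -> rows to update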
def set_values(self, parent, mainframe, curdir, configxml, dbpath):
    self.mainframe = mainframe
    self.parent = parent
    self.curdir = curdir
    self.configxml = configxml
    self.dbpath = dbpath
    self.lib = common.common()
def __init__(self, client_name):
    # client name identifying which files the processes will run on
    # (trailing slash as in the sibling classes; without it the paths below
    # lose their separator)
    self.client_name = client_name + '/'
    self.config = configparser.ConfigParser()
    self.config.sections()
    if os.path.isfile(str('../Datasets/' + self.client_name) + 'config.ini'):
        with open(str('../Datasets/' + self.client_name) + 'config.ini') as config_parser_fp:
            self.config.read_file(config_parser_fp)
    self.database_path = "../Datasets/" + str(self.client_name) + "database/input_data/"
    self.sql_db = sql.create_engine('sqlite:///' + self.database_path + "db.sql")
    self.models_path = "../Datasets/" + str(self.client_name) + "database/models/"
    self.precision_weight = float(self.config['EVALUATION']['precision_weight'])
    self.recall_weight = float(self.config['EVALUATION']['recall_weight'])
    self.time_weight = float(self.config['EVALUATION']['time_weight'])
    self.rmse_weight = float(self.config['EVALUATION']['rmse_weight'])
    self.mae_weight = float(self.config['EVALUATION']['mae_weight'])
    self.common_functions = common(self.client_name)
    self.evaluation = evaluation(self.client_name)
def main(self, params):
    """
    Initial grouping to download, parse and filter the individual
    experiments.

    Returns: None

    Output: Raw counts for the experiment in a HiC adjacency matrix
    saved to the tmp_dir
    """
    from common import common
    from fastq2adjacency import fastq2adjacency

    (genome, dataset, sra_id, library, enzyme_name, resolution,
     tmp_dir, data_dir, expt, same_fastq, windows1, windows2) = params[0:12]

    print("Got Params")
    print(sra_id, library, resolution, time.time())

    f2a = fastq2adjacency()
    f2a.set_params(genome, dataset, sra_id, library, enzyme_name, resolution,
                   tmp_dir, data_dir, expt, same_fastq, windows1, windows2)

    print("Set Params")

    cf = common()
    in_files = cf.getFastqFiles(sra_id, data_dir)

    for window in (1, 2):
        f2a.mapWindows(window)
    f2a.parseGenomeSeq()
    f2a.parseMaps()
    f2a.mergeMaps()
    f2a.filterReads(conservative=True)

    # It is at this point that the resolution is used.
    f2a.load_hic_read_data()
    f2a.save_hic_split_data()
    chroms = f2a.get_chromosomes()
    for chrom in chroms:
        f2a.generate_tads(chrom)
    f2a.normalise_hic_data()
    f2a.save_hic_data()
def st_token(st, terms, min_len, grp):
    if grp:
        for length, end_set in filter_gst(
                terms, 2,
                filter_mcs(
                    filter_sort_length(
                        filter_length(min_len, common(st))))):
            if len(end_set) == 0:
                continue
            yield (length, end_set)
    else:
        for length, end_set in filter_mcs(
                filter_sort_length(
                    filter_length(min_len, common(st)))):
            if len(end_set) == 0:
                continue
            yield (length, end_set)
def __init__(self, parent=None):
    '''Performance issues:
        - showing progress bar increases scanning time
        - folders are scanned twice
        - dicts have different methods, some are faster but block the gui.
        - signaling progress also slows down things
    '''
    QtCore.QThread.__init__(self, parent)
    self.exiting = False
    self.lib = common.common()
    self.templates = templates.templates()
    self.get_data = get_metadata.getMetadata()
def set_values(self, w, configxml, curdir):
    self.w = w
    self.curdir = curdir
    self.page_title = self.w.title()
    self.configxml = configxml
    self.fft_folder = self.configxml.find('fft_folder').text
    # this is going to break when having multiple locations
    self.soundsz_folder = self.configxml.find('locations').getiterator('location')[0].text
    self.doc = self.w.documentElement()
    self.lib = common.common()
    self.cur_folder = ''
    self.audio_tag = ''
    self.playlist = ''
    self.curtrack = 0
def __init__(self, client_name):
    # client name identifying which files the processes will run on
    self.client_name = client_name + '/'
    # configuration file
    config = configparser.ConfigParser()
    config.sections()
    #config.read('config.ini', encoding="utf8")
    if os.path.isfile(str('../Datasets/' + self.client_name) + 'config.ini'):
        with open(str('../Datasets/' + self.client_name) + 'config.ini') as config_parser_fp:
            config.read_file(config_parser_fp)
    self.database_path = "../Datasets/" + str(self.client_name) + "database/input_data/"
    self.explicit_rating_threshold_for_test = float(
        config['SAMPLING']['explicit_rating_threshold_for_test'])
    self.common_functions = common(self.client_name)
def run(self, file_ids, metadata):
    """
    Main run function for processing ChIP-seq FastQ data. Pipeline aligns
    the FASTQ files to the genome using BWA. MACS 2 is then used for peak
    calling.

    Parameters
    ----------
    file_ids : list
        List of file locations
    metadata : list

    Returns
    -------
    outputfiles : list
        List of locations for the output bam, bed and tsv files
    """

    # TODO - Handle multiple file and background files
    genome_fa = file_ids[0]
    file_loc = file_ids[1]
    file_bgd_loc = file_ids[2]

    cf = common()

    # Align the sample and background FASTQ files with BWA
    bwa = tool.bwaAlignerTool(self.configuration)
    out_bam, out_bam_meta = bwa.run((genome_fa, file_loc), ())
    out_bgd_bam, out_bgd_bam_meta = bwa.run((genome_fa, file_bgd_loc), ())

    # TODO - Multiple files need merging into a single bam file

    # Filter the bams
    b3f = tool.biobambam(self.configuration)
    b3f_file_out = b3f.run((out_bam[0],), ())
    b3f_file_bgd_out = b3f.run((out_bgd_bam[0],), ())

    # MACS2 to call peaks
    macs2 = tool.macs2(self.configuration)
    peak_bed, summits_bed, narrowPeak, broadPeak, gappedPeak = macs2.run(
        (b3f_file_out, b3f_file_bgd_out), ())

    return (b3f_file_out, b3f_file_bgd_out, peak_bed, summits_bed,
            narrowPeak, broadPeak, gappedPeak)
def __init__(self, client_name):
    # client name identifying which files the processes will run on
    self.client_name = client_name + '/'
    # configuration file
    self.config = configparser.ConfigParser()
    self.config.sections()
    #config.read('config.ini', encoding="utf8")
    if os.path.isfile(str('../Datasets/' + self.client_name) + 'config.ini'):
        with open(str('../Datasets/' + self.client_name) + 'config.ini') as config_parser_fp:
            self.config.read_file(config_parser_fp)
    # general configuration
    self.models_path = "../Datasets/" + str(self.client_name) + "database/models/"
    self.database_path = "../Datasets/" + str(self.client_name) + "database/input_data/"
    self.sql_db = sql.create_engine('sqlite:///' + self.database_path + "db.sql",
                                    encoding='utf-8')
    self.sql_db.raw_connection().connection.text_factory = str
    self.common_functions = common(self.client_name)
def __init__(self, parent, track_src, tracks, dbpath):
    QtGui.QDialog.__init__(self, parent)
    self.lib = common.common()
    self.track_src = track_src
    self.tracks = tracks
    self.conn = sqlite3.connect(dbpath)
    c = self.conn.cursor()
    c.row_factory = sqlite3.Row
    self.setup_ui()
    self.label_track_src.setText(track_src)
    self.label_track_wrapper.setText(os.path.dirname(track_src))
    self.insert_existing_playlists(c)
    self.exec_()
def __init__(self, client_name):
    # client name identifying which files the processes will run on
    self.client_name = client_name + '/'
    # configuration file
    self.config = configparser.ConfigParser()
    self.config.sections()
    #config.read('config.ini', encoding="utf8")
    if os.path.isfile(str('../Datasets/' + self.client_name) + 'config.ini'):
        with open(str('../Datasets/' + self.client_name) + 'config.ini') as config_parser_fp:
            self.config.read_file(config_parser_fp)
    # general configuration
    self.models_path = "../Datasets/" + str(self.client_name) + "database/models/"
    self.database_path = "../Datasets/" + str(self.client_name) + "database/input_data/"
    self.common_functions = common(self.client_name)
    self.evaluation = evaluation(self.client_name)
def method(self, codeList, startTime, currentTime):
    c = common()
    # create the output directory
    mkpath = constants.stockInfoPath + currentTime + '/'
    c.mkdir(mkpath)
    for code in codeList:
        if startTime == '':
            urlTime = constants.stockInfoTimeEnd + currentTime
        else:
            urlTime = constants.stockInfoTimeStart + startTime + constants.stockInfoTimeEnd + currentTime
        url = constants.stockInfoUrlStart + ("0" if code.startswith('6') else "1") + code + urlTime
        url = url + constants.stockInfoUrlEnd
        filePath = mkpath + code + '.csv'
        threading.Thread(target=c.downloadFileSem, args=(url, filePath)).start()
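# common.downloadFileSem is not shown here; its "Sem" suffix suggests a
# semaphore-throttled download. A minimal sketch of that pattern, assuming
# plain urllib and a cap of 10 concurrent downloads (both assumptions):
import threading
import urllib.request

_download_slots = threading.Semaphore(10)

def downloadFileSem(url, filePath):
    # hold a slot so at most 10 threads hit the server at once
    with _download_slots:
        urllib.request.urlretrieve(url, filePath)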
def main(self, data_dir, expt, genome_fa):
    """
    Main loop
    """
    cf = common()

    local_files = data_dir + expt["project_id"]

    # Obtain the FastQ files
    run_ids = []
    run_fastq_files = {}
    for run_id in expt["run_ids"]:
        run_ids.append(run_id)
        if "local" not in expt:
            in_files = cf.getFastqFiles(expt["project_id"], data_dir, run_id)
        else:
            in_files = [f for f in os.listdir(local_files) if re.match(run_id, f)]
        run_fastq_files[run_id] = in_files

    # Run BWA
    paired = 0
    for run_id in expt["run_ids"]:
        if len(run_fastq_files[run_id]) > 1:
            paired = 1
            for i in range(1, len(run_fastq_files[run_id]) + 1):
                cf.bwa_align_reads(genome_fa["unzipped"], data_dir,
                                   expt["project_id"], run_id + "_" + str(i))
        else:
            cf.bwa_align_reads(genome_fa["unzipped"], data_dir,
                               expt["project_id"], run_id)

    self.inps_peak_calling(data_dir, expt["project_id"], expt["run_ids"])
def method(self, code):
    url = constants.companyInfoUrl % code
    c = common()
    html = c.fakerHead(url)
    soup = BeautifulSoup(html, "lxml")
    soup = soup.findAll('table', attrs={"class": {"table_bg001 border_box limit_sale table_details"}})
    companyTemp = [code]
    for val in soup:
        temp = val.findAll('td')
        cnt = 0
        for tt in temp:
            t = tt.text
            t = t.replace('-', '').replace('\r', '').replace('\n', '')
            if cnt != 13:
                t = t.replace(' ', '')
            # the table interleaves label and value cells; keep the odd (value) cells
            if cnt % 2 == 1:
                companyTemp.append(t)
            cnt = cnt + 1
    print('Fetched listed-company profile data for stock code ' + code)
    return companyTemp
def set_values(self, curdir, configxml, dbpath):
    self.curdir = curdir
    self.configxml = configxml
    self.dbpath = dbpath
    self.lib = common.common()
    self.templates = templates.templates()
def download_playlist(self):
    """
    Downloads a playlist of songs given the URL
    """
    cm = common()
    try:
        os.chdir("Playlists")
    except FileNotFoundError:
        os.mkdir("Playlists")
        os.chdir("Playlists")
    print()
    Console().print(
        Columns([
            Panel(
                f"\n [bold red]MAKE SURE YOUR PLAYLIST IS PUBLIC[/bold red]\n [bold red]YOU CAN MAKE IT PRIVATE AFTER DOWNLOADING[/bold red] \n"
            )
        ]))
    plLink = input("Enter your YouTube playlist URL: ")
    plName = input("Give a name to your playlist: ")
    try:
        os.chdir(plName)
    except FileNotFoundError:
        os.mkdir(plName)
        os.chdir(plName)
    if "https://www" in plLink:
        plLink = plLink.replace("https://www", "https://music")
    start_time = time.time()
    try:
        plLinks = self.get_playlist_url(plLink)
    except Exception as e:
        print(
            f"Something went wrong. Maybe check your URL. Here's the reason from the compiler: {e}"
        )
        print("Exiting the program")
        return
    end_time = time.time()
    print("\nTime taken to fetch the URLs from Youtube: %.2f secs" %
          (end_time - start_time))
    data = 0.0
    print("\nCalculating total download size...\n")
    for i in plLinks:
        try:
            data += pafy.new(i).getbestaudio().get_filesize()
        except Exception:
            continue
    data = int((data / 1048576) * 100) / 100
    Console().print(
        Columns([Panel(f"\nDownload size: [green]{data} MB[/green]\n")]))
    print()
    print("\nWould you like an mp3 or flac conversion?\n")
    Console().rule(
        "[bold]**** Here's a quick comparison of both codecs ****[bold]",
        style="black",
        align="center")
    print("\n")
    table = Table(show_header=True, header_style="bold cyan")
    table.add_column("Available Codec")
    table.add_column("Bit-rate")
    table.add_column("File Size")
    table.add_column("Remarks")
    table.add_row("mp3", "320kbps (default)", "~7.3MB for 3min song",
                  "Standard codec with normal experience")
    table.add_row()
    table.add_row(
        "flac", "usually >800kbps (1713kbps while testing, 5x of mp3)",
        "~39MB for 3min song",
        "Takes a fair amount of disk space but gives an amazing experience")
    Console().print(table)
    Console().rule(
        "\n[bold]Note: this step [red]does not use internet[/red] [bold]\n",
        style="black",
        align="center")
    print('\nIf you are confused about what to select, select mp3 (default)')
    z = input("\tEnter\n\t1/flac/f - flac\n\tany key - mp3 : ")
    total_songs = len(plLinks)
    for i in plLinks:
        if sys.platform == 'win32' or os.name == 'nt':
            os.system("cls")
        elif sys.platform == 'linux' or os.name == 'posix':
            os.system("clear")
        Console().print("[bold][green]Downloaded songs:[/green][/bold]")
        Console().print(
            Columns([
                Panel(''.join(
                    list(''.join(iz + '\n' * (N % 3 == 2)
                                 for N, iz in enumerate(
                                     [ii + " " for ii in user.split()])))) +
                      "\n[b][green]Downloaded[/green][/b]",
                      expand=True) for user in os.listdir()
            ]))
        try:
            cm.download_song(i, "", '', z)
        except Exception:
            continue
        time.sleep(1)
    downloaded_songs = len(os.listdir())
    if total_songs - downloaded_songs != 0:
        print(
            f"\n{total_songs - downloaded_songs}/{total_songs} songs were not downloaded due to some error"
        )
    print("\n\n")
    Console().print(
        Columns([
            Panel(
                f"\n Your playlist is downloaded in \"[bold]/musicDL downloads/Playlists/{plName}[/bold]\" folder on desktop \n"
            )
        ]))
    print("\n\n")
    op = input("Would you like to open the playlist? (Y/N) ")
    if op.lower() == "y":
        if sys.platform == 'win32' or os.name == 'nt':
            os.startfile(".")
        elif sys.platform == 'linux' or os.name == 'posix':
            subprocess.call(['xdg-open', '.'])
    else:
        return
def __init__(self, client_name):
    # client name identifying which files the processes will run on
    self.client_name = client_name + '/'
    # configuration file
    config = configparser.ConfigParser()
    config.sections()
    #config.read('config.ini', encoding="utf8")
    if os.path.isfile(str('../Datasets/' + self.client_name) + 'config.ini'):
        with open(str('../Datasets/' + self.client_name) + 'config.ini') as config_parser_fp:
            config.read_file(config_parser_fp)
    # configuration variables
    self.verbose_switch = config['DEFAULT'].getboolean('verbose_switch')
    self.min_interactions_explicit = int(config['CLEANING']['min_interactions_explicit'])
    self.recommendations_per_user = int(config['RECOMMEND']['recommendations_per_user'])
    self.number_of_folds = int(config['SAMPLING']['number_of_folds'])
    self.test_size_conf = float(config['SAMPLING']['test_size'])
    self.maximum_interactions_evaluation = int(
        config['SAMPLING']['maximum_interactions_evaluation'])
    self.statistical_significance = config['RESULTS'].getboolean('statistical_significance')
    self.number_of_2_fold_samples = int(config['RESULTS']['number_of_2_fold_samples'])
    self.min_population_constraint = int(config['RESULTS']['min_population_constraint'])
    self.data_path = "../Datasets/" + str(self.client_name) + "data/"
    self.data_path_backup = "../Datasets/" + str(self.client_name) + "data_backup/"
    self.database_path = "../Datasets/" + str(self.client_name) + "database/input_data/"
    self.models_path = "../Datasets/" + str(self.client_name) + "database/models/"
    self.valid_data_directories = [
        "implicit", "explicit", "explicit_review", "user_content", "item_content"
    ]
    self.valid_data_conf_names = [
        "implicit_conf.json", "explicit_conf.json", "explicit_review_conf.json",
        "user_content_conf.json", "item_content_conf.json"
    ]
    self.sql_db = sql.create_engine('sqlite:///' + self.database_path + "db.sql",
                                    encoding='utf-8')
    self.sql_db.raw_connection().connection.text_factory = str
    self.surprise_models = [
        "surprise_svd", "surprise_SVDpp", "surprise_NMF",
        "surprise_NormalPredictor", "surprise_BaselineOnly",
        "surprise_KNNBasic", "surprise_KNNWithMeans",
        "surprise_KNNWithZScore", "surprise_KNNBaseline",
        "surprise_SlopeOne", "surprise_CoClustering"
    ]
    # the same model set is run for plain explicit and explicit-review data
    self.explicit_models_to_run = list(self.surprise_models)
    self.explicit_review_models_to_run = list(self.surprise_models)
    self.common_functions = common(self.client_name)
    self.training = trainAlgorithm(self.client_name)
    self.gridSearch = gridSearch(self.client_name)
    self.hybrid = hybrid(self.client_name)
def download_singles(self):
    """
    Downloads songs based on youtube search. Takes a string as an input.
    """
    cm = common()
    try:
        os.chdir("singles")
    except FileNotFoundError:
        os.mkdir("singles")
        os.chdir("singles")
    Console().print(
        Columns([
            Panel(
                "\ntip:\n [bold white]* give the name of the song and the artist for better search results\n * you could paste the youtube video url itself if you're looking for a specific song.[/bold white]\n"
            )
        ]))
    s = input("\nEnter the song name: ")
    print(f"\nLoading search results for {s}...\n")
    s = s.replace(" ", "+")
    # Get top 7 video URLs
    video_url = cm.get_url(s)
    # check for an empty result before looping (the original checked inside
    # the loop, where it could never fire)
    if len(video_url) == 0:
        print(
            "\nThere were no results :(\nmaybe try checking the spelling of the song\n"
        )
        quit()
    j = 1
    names = []
    for i in video_url:
        try:
            t = pafy.new(i)
            names.append(f"{j} - {t.title} ({t.duration})")
            j += 1
        except Exception:
            j += 1
            continue
    picker = Picker(
        names,
        "Select your choice using arrow keys or press q to quit",
        indicator=" => ")
    picker.register_custom_handler(ord('q'), lambda picker: exit())
    picker.register_custom_handler(ord('Q'), lambda picker: exit())
    op, c = picker.start()
    Console().print(
        Columns([
            Panel(
                f"\nDownload size: [green]{int((pafy.new(video_url[c]).getbestaudio().get_filesize() / 1048576) * 100) / 100} MB[/green]\n"
            )
        ]))
    print()
    print("\nWould you like an mp3 or flac conversion?\n")
    Console().rule(
        "[bold]**** Here's a quick comparison of both codecs ****[bold]",
        style="black",
        align="center")
    print("\n")
    table = Table(show_header=True, header_style="bold cyan")
    table.add_column("Available Codec")
    table.add_column("Bit-rate")
    table.add_column("File Size")
    table.add_column("Remarks")
    table.add_row("mp3", "320kbps (default)", "~7.3MB for 3min song",
                  "Standard codec with normal experience")
    table.add_row()
    table.add_row(
        "flac", "usually >800kbps (1713kbps while testing, 5x of mp3)",
        "~39MB for 3min song",
        "Takes a fair amount of disk space but gives an amazing experience")
    Console().print(table)
    Console().rule(
        "\n[bold]Note: this step [red]does not use internet[/red] [bold]\n",
        style="black",
        align="center")
    print('\nIf you are confused about what to select, select mp3 (default)')
    z = input("\tEnter\n\t1/flac/f - flac\n\tany key - mp3 : ")
    cm.download_song(video_url[c], '', '', z)
    print("\n\n")
    Console().print(
        Columns([
            Panel(
                f"\n Your song is downloaded in \"[bold cyan]/musicDL downloads/singles[/bold cyan]\" folder on desktop \n"
            )
        ]))
    print("\n\n")
    time.sleep(3)
    picker = Picker(
        ["Open the song directory", "Open the song itself"],
        "Select your choice using arrow keys or press q to quit",
        indicator=" => ")
    picker.register_custom_handler(ord('q'), lambda picker: 'qq')
    picker.register_custom_handler(ord('Q'), lambda picker: 'QQ')
    _, op = picker.start()
    if op == 0:
        if sys.platform == 'win32' or os.name == 'nt':
            os.startfile(".")
        elif sys.platform == 'linux' or os.name == 'posix':
            subprocess.call(['xdg-open', '.'])
    elif op == 1:
        # the song downloaded above corresponds to video_url[c]
        # (the original indexed video_url[c - 1] here, inconsistently)
        file = pafy.new(video_url[c]).title
        a, t = get_artist_title(file)
        if file + ".mp3" in os.listdir():
            if sys.platform == 'win32' or os.name == 'nt':
                os.startfile(file + ".mp3")
            elif sys.platform == 'linux' or os.name == 'posix':
                subprocess.call(['xdg-open', file + ".mp3"])
        elif t + " - " + a + ".mp3" in os.listdir():
            if sys.platform == 'win32' or os.name == 'nt':
                os.startfile(t + " - " + a + ".mp3")
            elif sys.platform == 'linux' or os.name == 'posix':
                subprocess.call(['xdg-open', t + " - " + a + ".mp3"])
        else:
            files = glob.glob("./*")
            song = max(files, key=os.path.getctime)
            if sys.platform == 'win32' or os.name == 'nt':
                os.startfile(song)
            elif sys.platform == 'linux' or os.name == 'posix':
                subprocess.call(['xdg-open', song])
    else:
        return
def getResponse(DeviceInfo: dict = None, Device: str = None, ip_port: str = None,
                kafka_topic: str = None, tag_lists: str = None):
    init = common()
    init.my_conn('172.17.1.34', 3306, 'root', 'dt01@', 'fems')
    try:
        sleep(0.5)
        maker = DeviceInfo[Device][3]
        REG_INFO, tagList = readDeviceInfo(Device, maker)
        client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        client.settimeout(1.0)
        client.connect((DeviceInfo[Device][0], int(DeviceInfo[Device][1])))
        kafka_client = realtime(ip_port[0], ip_port[1])
        result = list()
        ndata = 0
        for key, slaveInfo in tagList.items():
            Device_ID = key.split("_")[0]  # DeviceUse
            tag_type = slaveInfo[1]
            if tag_type == '1':
                if maker == 'LS':
                    sPkt = sendPkt(slaveInfo[0], DeviceInfo[Device][2], '04')
                elif maker == 'MI':
                    sPkt = sendPkt(slaveInfo[0], DeviceInfo[Device][2], '03')
                elif maker == 'VI':
                    sPkt = sendPkt(slaveInfo[0], DeviceInfo[Device][2], '04')
                elif maker == 'ECO':
                    sPkt = sendPkt(slaveInfo[0], DeviceInfo[Device][2], '04', maker)
                else:
                    raise Exception('[run:getResponse] Invalid Maker : ' + str(maker))
            # the original `tag_type == '2' or '3'` was always true
            elif tag_type in ('2', '3'):
                if maker == 'JB':
                    sPkt = sendPkt(slaveInfo[0], DeviceInfo[Device][2], '03')
                elif maker == 'EL':
                    sPkt = sendPkt(slaveInfo[0], DeviceInfo[Device][2], '03')
                else:
                    raise Exception('[run:getResponse] Invalid Maker : ' + str(maker))
            else:
                raise Exception('[run:getResponse] Invalid type : ' + str(tag_type))
            print('[' + maker + '] [' + Device_ID + '] Send (',
                  str(len(sPkt)).rjust(3, ' '), ') : ', Global.byte2Hex(sPkt))
            try:
                client.send(sPkt)
                sleep(0.5)
                rPkt = client.recv(MAX_BUF)
                pkt = pkt_split(Global.byte2Hex(rPkt))
                time = str(datetime.datetime.now())
                if rPkt is not None:
                    if maker == 'JB':
                        df = JBmake_data(Device_ID, pkt, time, tag_type, slaveInfo[2])
                    elif maker == 'EL':
                        df = ELmake_data(Device_ID, pkt, time, tag_type, slaveInfo[2])
                    elif maker == 'ECO' or maker == 'MI':
                        df = ECOmake_data(key, pkt, time, slaveInfo[0][1], slaveInfo[0][2],
                                          tag_type, slaveInfo[2], slaveInfo[3])
                    elif maker == 'LS':
                        df = LSmake_data(key, pkt, time, slaveInfo[0][1], slaveInfo[0][2],
                                         tag_type, slaveInfo[2], slaveInfo[3])
                    elif maker == 'VI':
                        df = VImake_data(key, pkt, time, slaveInfo[0][1], slaveInfo[0][2],
                                         tag_type, slaveInfo[2], slaveInfo[3])
                    print(df)
                    # match rows against tag_master
                    temp_df = []
                    for row in df:
                        value = row.split("||")
                        tag_id = value[0]
                        if tag_id in tag_lists:
                            temp_df.append(row)
                        else:
                            print('unknown tag : ', tag_id)
                    df = temp_df
                    # send only the matched rows to Kafka
                    if maker == 'ECO':
                        if len(pkt) == 9 or len(pkt) == 14:
                            kafka_client.sendtokafka(kafka_topic, df)
                        else:
                            pass
                    else:
                        kafka_client.sendtokafka(kafka_topic, df)
            except Exception as ex:
                try:
                    error_time = str(datetime.datetime.now())
                    insert_id = str(uuid.uuid1())
                    error_value = {
                        'uuid': insert_id,
                        'pro_id': 'mm_00_collect.py',
                        'time': error_time,
                        'Device_ID': Device_ID,
                        'error_name': str(ex)
                    }
                    error_sql = init.my_select('SP_GET_SQL', ('sql_06',))
                    error_sql = error_sql[0][0]
                    init.my_bulk_insert(error_sql, [list(error_value.values())])
                except Exception:
                    print('log insert error')
            sleep(0.5)
    except Exception as Ex:
        # on failure, insert the error record into MariaDB
        error_time = str(datetime.datetime.now())
        insert_id = str(uuid.uuid1())
        error_value = {
            'uuid': insert_id,
            'pro_id': 'mm_00_collect.py',
            'time': error_time,
            'Device_ID': Device,
            'error_name': str(Ex)
        }
        error_sql = init.my_select('SP_GET_SQL', ('sql_06',))
        error_sql = error_sql[0][0]
        init.my_bulk_insert(error_sql, [list(error_value.values())])
    finally:
        client.close()
checker = "\Users\RJ\fakepath\Checker.py" common = "\Users\RJ\fakepath\Common.py" jacquard = "\Users\RJ\fakepath\Jacquard.py" import checker import common import jacquard #Reading Input File file = sc.textFile("/Users/Rohit/Desktop/data.txt") #Filters Invalid Entries Out validEntries = file.map(lambda x: checker.checker(x)).filter(lambda x: x) #Splits Valid Entries and the mapper emits (Movie, User) to get list of users splitEntries = validEntries.map(lambda x: (x.split("\t")[1], x.split("\t")[0])).reduceByKey(lambda x,y: x + "," + y) #Cartesian Product of userlist to get combinations joinuser = splitEntries.cartesian(splitEntries) #Mapping Length of Users and Common Users for a set of Movies commonusers = joinuser.map(lambda x: (common.common(x[0][0], x[0][1], x[1][0], x[1][1]))).filter(lambda x: x) #Using Jacquaard to find correlation between two movies eq = commonusers.map(lambda ((m1,m2),l,n1,n2): ((m1, m2), jacquard.Jac(l,n1,n2))).filter(lambda x: x) #Taking Top 100 eq.sortBy(lambda ((x,y), z): z, False).take(100)
for i in range(PROMPT_LIMIT):
    speak("I am listening")
    print("I am listening")
    #engine.say("I am listening")
    #engine.runAndWait()
    keyword = main().recognize_speech_from_mic(recognizer, microphone)
    if keyword["transcription"]:
        break
    if not keyword["success"]:
        break
    print("I didn't catch that. What did you say?")

print(keyword["transcription"].lower())
if "facebook" in keyword["transcription"].lower():
    facebook().start(driver)
elif "new tab" in keyword["transcription"].lower():
    common().opentab(driver)
elif "profile" in keyword["transcription"].lower():
    facebook().profile(driver)
elif "go back" in keyword["transcription"].lower():
    common().back(driver)
elif "go forward" in keyword["transcription"].lower():
    common().forward(driver)
elif "logout" in keyword["transcription"].lower():
    facebook().logout(driver)
elif "message" in keyword["transcription"].lower():
    facebook().message(driver)
elif "scroll down" in keyword["transcription"].lower():
    common().scrolldown(driver)
elif "scroll up" in keyword["transcription"].lower():
    common().scrollup(driver)
elif "play" in keyword["transcription"].lower(
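# The if/elif chain above (the snippet is truncated mid-branch) can be
# collapsed into a table-driven dispatch; a minimal sketch reusing the same
# facebook/common helpers and preserving the visible branch order:
HANDLERS = [
    ("facebook", lambda d: facebook().start(d)),
    ("new tab", lambda d: common().opentab(d)),
    ("profile", lambda d: facebook().profile(d)),
    ("go back", lambda d: common().back(d)),
    ("go forward", lambda d: common().forward(d)),
    ("logout", lambda d: facebook().logout(d)),
    ("message", lambda d: facebook().message(d)),
    ("scroll down", lambda d: common().scrolldown(d)),
    ("scroll up", lambda d: common().scrollup(d)),
]

def dispatch(transcription, driver):
    # run the first handler whose keyword appears in the transcription
    text = transcription.lower()
    for phrase, handler in HANDLERS:
        if phrase in text:
            handler(driver)
            return True
    return False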
def get_ecodata(self, p_from_date, p_to_date, interval):
    init = common()
    init.my_database_conn()
    conn = pymongo.MongoClient('172.17.1.34', 17001)
    tmp = interval
    db = 'FEMS_ECO_15M_DATA'
    BigDataCollector = conn[db]
    Cache = BigDataCollector['in_data']
    virtual_min_value = {}
    tag_list = []
    result_data = []
    result_summary = {}
    cnt = 0

    # flow-meter calculation
    rgx = re.compile('TOT-CORR-USAGE', re.IGNORECASE)
    print('summary')
    for row in Cache.find({
            'tag': rgx,
            'time': {'$gte': p_from_date, '$lt': p_to_date}
    }).sort([("time", pymongo.ASCENDING)]):
        if row['tag'] not in result_summary:
            result_summary[row['tag']] = {
                'tag': row['tag'],
                'min': row['val'],
                'value': row['val'],
                'acc_value': row['val'],
                'from_date': p_from_date,
                'to_date': p_to_date,
                'kind': row['kind'],
                'factory_id': row['factory_id']
            }
            tag_list.append(row['tag'])
        else:
            # min
            result_summary[row['tag']]['min'] = self.min_return(
                result_summary[row['tag']]['min'], row['val'])
            # max
            result_summary[row['tag']]['acc_value'] = self.max_return(
                result_summary[row['tag']]['acc_value'], row['val'])

    format_date = p_to_date[:10].replace('-', '')
    tmp_date = p_to_date[11:17].replace(':', '')
    format_date = format_date + tmp_date

    # power-meter (ECO) calculation
    rgx0 = re.compile('_Total Active Energy.*', re.IGNORECASE)
    for row in Cache.find({
            'tag': rgx0,
            'time': {'$gte': p_from_date, '$lt': p_to_date}
    }).sort([("time", pymongo.ASCENDING)]):
        if row['tag'] not in result_summary:
            result_summary[row['tag']] = {
                'tag': row['tag'],
                'min': row['val'],
                'value': row['val'],
                'acc_value': row['val'],
                'from_date': p_from_date,
                'to_date': p_to_date,
                'kind': row['kind'],
                'factory_id': row['factory_id']
            }
            tag_list.append(row['tag'])
        else:
            # min
            result_summary[row['tag']]['min'] = self.min_return(
                result_summary[row['tag']]['min'], row['val'])
            # max
            result_summary[row['tag']]['acc_value'] = self.max_return(
                result_summary[row['tag']]['acc_value'], row['val'])

    try:
        cx_con = cx_Oracle.connect(init.string)
        cur = cx_con.cursor()
        for var in tag_list:
            code = 'E00002'
            if result_summary[var]['kind'] == '1':
                code = 'E00001'
            elif result_summary[var]['kind'] == '3':
                code = 'E00003'
            else:
                pass
            result_summary[var]['value'] = float(
                result_summary[var]['acc_value']) - float(result_summary[var]['min'])
            val = float(result_summary[var]['value'])
            query = """select FN_ENG_CONVERSION('MJ', '{0}', '{1}', '{2}') AS "MJ",
                              FN_ENG_CONVERSION('TOE', '{0}', '{1}', '{2}') AS "TOE",
                              FN_ENG_CONVERSION('GHG', '{0}', '{1}', '{2}') AS "TCO2",
                              FN_COST_CONVERSION('COST', '{0}', '{1}', '{2}', '{2}') AS "COST"
                       FROM DUAL""".format(code, format_date, val)
            cur.execute(query)
            res = cur.fetchall()
            result_summary[var]['tj'] = res[0][0]
            result_summary[var]['toe'] = res[0][1]
            result_summary[var]['tco2'] = res[0][2]
            result_summary[var]['cost'] = res[0][3]
            result_summary[var]['lod_tmzon'] = 'L'
        cur.close()
        cx_con.close()
    except Exception:
        result = "error : "

    # drop the split High/Low accumulator tags from the tag list
    for extra in ('E00018_Total Active Energy High', 'E00018_Total Active Energy Low',
                  'E00019_Total Active Energy High', 'E00019_Total Active Energy Low'):
        try:
            tag_list.remove(extra)
        except ValueError:
            pass

    if len(result_summary) == 0:
        pass
    else:
        exec_log = BigDataCollector['exec_log']
        exec_log.delete_many({})
        exec_log.insert_one({'cur_date': p_to_date})
        Cache.remove({'time': {'$gte': p_from_date, '$lte': p_to_date}})
    return result_summary, tag_list
# GPU helpers
def change_gpu(model, train_data_variable, train_label_variable):
    model.to_gpu()
    train_data_variable.to_gpu()
    train_label_variable.to_gpu()
    return model, train_data_variable, train_label_variable


def change_cpu(model, train_data_variable, train_label_variable):
    model.to_cpu()
    train_data_variable.to_cpu()
    train_label_variable.to_cpu()
    return model, train_data_variable, train_label_variable


inst_common = common.common()
x, y = read_csv(inst_common.TEACHER_DATA_NAME)

# split into training data and test data
train_data, test_data, train_label, test_label = model_selection.train_test_split(
    x, y, shuffle=True)

model = nn.NN(inst_common.MID_LAYER_NUM, inst_common.OUTPUT_NUM)
optimizer = optimizers.Adam()
optimizer.setup(model)

train_data_variable = Variable(train_data.astype(np.float32))
train_label_variable = Variable(train_label.astype(np.int32))

if inst_common.IS_GPU:
    model, train_data_variable, train_label_variable = change_gpu(
        model, train_data_variable, train_label_variable)
from common import common
import variables
import TestData

common = common()


def test_login_via_admin():
    common.login(variables.objectRepo['admin_user'],
                 variables.objectRepo['admin_password'],
                 admin=True)
    common.logout()


def test_verify_create_user():
    common.login(variables.objectRepo['admin_user'],
                 variables.objectRepo['admin_password'],
                 admin=True)
    common.addUser(TestData.new_user["User_Id"], TestData.new_user["Password"],
                   TestData.new_user["First_Name"], TestData.new_user["Last_Name"])
    common.logout()


def test_verify_login_via_user():
    common.login(variables.objectRepo['admin_user'],
                 variables.objectRepo['admin_password'],
                 admin=True)
    common.addUser(TestData.new_user["User_Id"], TestData.new_user["Password"],
                   TestData.new_user["First_Name"], TestData.new_user["Last_Name"])
    common.logout()
    # presumably the step implied by the test name, missing from the original:
    # sign in as the user just created
    common.login(TestData.new_user["User_Id"], TestData.new_user["Password"])
    common.logout()
def get_exdata(self, p_from_date, p_to_date, sp_parm, interval):
    #p_from_date = '2020-01-09 14:00:00'
    #p_to_date = '2020-01-09 15:00:00'
    init = common()
    init.my_database_conn()

    if sp_parm == '2':
        var = '5'
    elif sp_parm == '3':
        var = '2'

    from_date = p_from_date[:10]
    from_date = from_date + ' 06:00:00'

    col_info = init.my_table_select('SP_GET_SUM_INTERVAL', 'MongoCon001', var)
    colNm = col_info[4]

    # connect to the storage-server MongoDB
    mongo_addr = init.my_table_select('SP_GET_SAVE_INFO', 'MongoCon001', '')
    mongo_ip = mongo_addr[1]
    mongo_port = int(mongo_addr[2])
    con = pymongo.MongoClient(mongo_ip, mongo_port)
    db = con["SEAH_FEMS"]
    col = db[colNm]

    tag_list = []
    result_summary = {}
    print(colNm)
    print(p_from_date, p_to_date)
    for row in col.find({
            'from_date': {'$gte': p_from_date, '$lt': p_to_date}
    }).sort([("from_date", pymongo.ASCENDING)]):
        if row['tag'] not in result_summary:
            result_summary[row['tag']] = {
                'tag': row['tag'],
                'value': row['acc_value'],
                'min': row['acc_value'],
                'max': row['acc_value'],
                'from_date': p_from_date,
                'to_date': p_to_date,
                'kind': row['kind'],
                'factory_id': row['factory_id']
            }
            tag_list.append(row['tag'])
        else:
            # min
            result_summary[row['tag']]['min'] = self.min_return(
                result_summary[row['tag']]['min'], row['acc_value'])
            # max
            result_summary[row['tag']]['max'] = self.max_return(
                result_summary[row['tag']]['max'], row['acc_value'])

    format_date = p_to_date[:10].replace('-', '')
    tmp_date = p_to_date[11:17].replace(':', '')
    format_date = format_date + tmp_date

    try:
        cx_con = cx_Oracle.connect(init.string)
        cur = cx_con.cursor()
        for var in tag_list:
            code = 'E00002'
            if result_summary[var]['kind'] == '1':
                code = 'E00001'
            elif result_summary[var]['kind'] == '3':
                code = 'E00003'
            else:
                pass
            result_summary[var]['value'] = float(
                result_summary[var]['max']) - float(result_summary[var]['min'])
            result_summary[var]['acc_value'] = result_summary[var]['max']
            val = float(result_summary[var]['value'])
            query = """select FN_ENG_CONVERSION('MJ', '{0}', '{1}', '{2}') AS "MJ",
                              FN_ENG_CONVERSION('TOE', '{0}', '{1}', '{2}') AS "TOE",
                              FN_ENG_CONVERSION('GHG', '{0}', '{1}', '{2}') AS "TCO2",
                              FN_COST_CONVERSION('COST', '{0}', '{1}', '{2}', '{2}') AS "COST"
                       FROM DUAL""".format(code, format_date, val)
            cur.execute(query)
            res = cur.fetchall()
            result_summary[var]['tj'] = res[0][0]
            result_summary[var]['toe'] = res[0][1]
            result_summary[var]['tco2'] = res[0][2]
            result_summary[var]['cost'] = res[0][3]
        cur.close()
        cx_con.close()
    except Exception:
        result = "error : "

    #tag_list.remove('E00022_Total Active Energy')
    print(result_summary)
    if len(result_summary) == 0:
        pass
    else:
        lconn = pymongo.MongoClient('172.17.1.34', 17001)
        db = 'FEMS_ECO_15M_DATA'
        BigDataCollector = lconn[db]
        if sp_parm == '2':
            exec_log = BigDataCollector['h_exec_log']
        elif sp_parm == '3':
            exec_log = BigDataCollector['m_exec_log']
        exec_log.delete_many({})
        exec_log.insert_one({'cur_date': p_to_date})
    return result_summary, tag_list
# NOTE: the original function header was truncated; the name below is a
# reconstruction for illustration
def collection_name_for(p_time, p_collection_name, p_chk_split):
    # tag, collection_list(db, collection name, yyyymmdd),
    tmp_day, tmp_time = p_time.split(' ')
    yyyy, mm, dd = tmp_day.split('-')
    if p_chk_split == "yyyy":
        return yyyy
    elif p_chk_split == "mm":
        return "{0}_{1}{2}".format(p_collection_name, yyyy, mm)
    elif p_chk_split == "dd":
        return "{0}_{1}{2}{3}".format(p_collection_name, yyyy, mm, dd)
    else:
        return None


if __name__ == "__main__":
    init = common()
    init.my_database_conn()

    # Kafka connection info
    DB = ""
    COLLECTION = ""
    kafka_infos = init.my_table_select('SP_GET_CONNECT_INFO', 'KafkaCon001', '')
    kafka_ip = kafka_infos[1]
    kafka_port = kafka_infos[2]
    kafka_topic = target_col
    consumer = kafka_connection(kafka_ip, kafka_port, kafka_topic)
    if consumer is False:
        exit()
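# Quick sanity check of the date-based collection-name splitting above
# (collection_name_for is the reconstructed name from the previous snippet;
# the collection name 'in_data' is illustrative only):
print(collection_name_for('2020-01-09 14:00:00', 'in_data', 'mm'))  # in_data_202001
print(collection_name_for('2020-01-09 14:00:00', 'in_data', 'dd'))  # in_data_20200109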
# sra_id, library, enzyme_name
more_params = [[
    genome, dataset, line[0], line[1], line[2], resolution, tmp_dir,
    data_dir, expt_name, False, windows1, windows2
] for resolution in resolutions]
less_params = [
    genome, dataset, line[0], line[1], line[2], 1000, tmp_dir, data_dir,
    expt_name, False, windows1, windows2
]

more_loading_list += more_params
# less_params is a single parameter list, so wrap it to keep one entry per run
# (the original `+= less_params` flattened it into individual values)
less_loading_list += [less_params]

print(more_loading_list)

cf = common()

# Get the assembly
genome_fa = cf.getGenomeFile(data_dir, species, assembly)

hic = process_hic()

# Downloads the FastQ files and then maps them to the genome.
for params in less_loading_list:
    hic.main(params)

# Generates the final adjacency matrix for the given resolutions
hic.merge_adjacency_data(more_loading_list)

# This merges the final set of HDF5 files into a single file ready for the
# REST API
hic.merge_hdf5_files(genome, dataset, resolutions, data_dir)
checker = "\Users\RJ\fakepath\Checker.py" common = "\Users\RJ\fakepath\Common.py" jacquard = "\Users\RJ\fakepath\Jacquard.py" import checker import common import jacquard #Reading Input File file = sc.textFile("/Users/Rohit/Desktop/data.txt") #Filters Invalid Entries Out validEntries = file.map(lambda x: checker.checker(x)).filter(lambda x: x) #Splits Valid Entries and the mapper emits (Movie, User) to get list of users splitEntries = validEntries.map(lambda x: (x.split("\t")[1], x.split("\t")[0]) ).reduceByKey(lambda x, y: x + "," + y) #Cartesian Product of userlist to get combinations joinuser = splitEntries.cartesian(splitEntries) #Mapping Length of Users and Common Users for a set of Movies commonusers = joinuser.map(lambda x: (common.common(x[0][0], x[0][1], x[1][ 0], x[1][1]))).filter(lambda x: x) #Using Jacquaard to find correlation between two movies eq = commonusers.map(lambda ((m1, m2), l, n1, n2): ((m1, m2), jacquard.Jac(l, n1, n2))).filter(lambda x: x) #Taking Top 100 eq.sortBy(lambda ((x, y), z): z, False).take(100)
def coreOperation(ans):
    # create the model directory
    modelPath = constants.modelPath
    c = common()
    c.mkdir(modelPath)

    # first LSTM layer
    time_step1 = 15
    scope_name1 = modelPath + constants.scope_name + '_1'
    file_name1 = modelPath + constants.file_name.replace('.', '_1.')
    maxval1, minval1, train1 = normData(ans)
    data1 = processByLSTM(train1, scope_name1, time_step1, file_name1)

    # second LSTM layer
    time_step2 = 30
    scope_name2 = modelPath + constants.scope_name + '_2'
    file_name2 = modelPath + constants.file_name.replace('.', '_2.')
    res2 = np.array(data1)[:, -1, :]
    res2 = np.squeeze(res2)
    train2 = []
    for i in range(len(res2)):
        train2.append((res2[i] * (maxval1 - minval1) + minval1))
    maxval2, minval2, train2 = normData(train2)
    data2 = processByLSTM(train2, scope_name2, time_step2, file_name2)

    # third LSTM layer
    time_step3 = 90
    scope_name3 = modelPath + constants.scope_name + '_3'
    file_name3 = modelPath + constants.file_name.replace('.', '_3.')
    res3 = np.array(data2)[:, -1, :]
    res3 = np.squeeze(res3)
    train3 = []
    for i in range(len(res3)):
        train3.append((res3[i] * (maxval2 - minval2) + minval2))
    maxval3, minval3, train3 = normData(train3)
    data3 = processByLSTM(train3, scope_name3, time_step3, file_name3)

    # combine the layers and compute the sums of squared errors
    res2 = res2[time_step2 + time_step3 + 2:]
    res2 = np.reshape(res2, (-1))
    res2 = res2 * (maxval1 - minval1) + minval1
    res3 = res3[time_step3 + 1:]
    res3 = np.reshape(res3, (-1))
    res3 = (res3 * (maxval2 - minval2) + minval2)
    pred = np.array(data3)[:, -1, :]
    pred = np.squeeze(pred)
    pred = pred * (maxval3 - minval3) + minval3

    plt.figure(1)
    plt.plot(ans[time_step1 + time_step2 + time_step3 + 3:], 'r')
    plt.plot(res2, 'b')
    plt.figure(2)
    plt.plot(ans[time_step1 + time_step2 + time_step3 + 3:], 'r')
    plt.plot(res3, 'g')
    plt.figure(3)
    plt.plot(ans[time_step1 + time_step2 + time_step3 + 3:], 'r')
    plt.plot(pred, 'brown')
    plt.show()

    error1 = computeError(ans[time_step1 + time_step2 + time_step3 + 3:], res2)
    error2 = computeError(res2, res3)
    error3 = computeError(res3, pred)
    error1 = computerProduct(error1, error1)
    error2 = computerProduct(error2, error2)
    error3 = computerProduct(error3, error3)
    return error1, error2, error3
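# normData is not shown above, but the inverse transform used there
# (x * (maxval - minval) + minval) implies plain min-max scaling; a minimal
# round-trip sketch of that assumption:
import numpy as np

vals = np.array([3.0, 7.0, 11.0])
minval, maxval = vals.min(), vals.max()
norm = (vals - minval) / (maxval - minval)    # -> [0.0, 0.5, 1.0]
restored = norm * (maxval - minval) + minval  # -> [3.0, 7.0, 11.0]
assert np.allclose(restored, vals)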
def get_data(self, p_from_date, p_to_date, interval):
    init = common()
    conn = pymongo.MongoClient('172.17.1.34', 17001)
    interval = 'FEMS_' + interval + '_DATA'
    BigDataCollector = conn[interval]
    Cache = BigDataCollector['in_data']
    virtual_min_value = {}
    tag_list = []
    result_data = []
    result_summary = {}
    cnt = 0
    print('summary')
    for row in Cache.find({
            'time': {'$gte': p_from_date, '$lte': p_to_date}
    }).sort([("time", pymongo.ASCENDING)]):
        if row['tag'] not in result_summary:
            result_summary[row['tag']] = {
                'tag': row['tag'],
                'min': row['val'],
                'max': row['val'],
                'sum': float(row['val']),
                'ave': row['val'],
                'cnt': 1,
                'from_date': p_from_date,
                'to_date': p_to_date
            }
            tag_list.append(row['tag'])
        else:
            # min
            result_summary[row['tag']]['min'] = self.min_return(
                result_summary[row['tag']]['min'], row['val'])
            # max
            result_summary[row['tag']]['max'] = self.max_return(
                result_summary[row['tag']]['max'], row['val'])
            # sum
            result_summary[row['tag']]['sum'] = self.sum_return(
                result_summary[row['tag']]['sum'], row['val'])
            result_summary[row['tag']]['cnt'] += 1
            #w_time = self.wight_time(virtual_min_value[row['tag']]['time'], row['time'])
            #w_val = float(virtual_min_value[row['tag']]['val']) * w_time
            #virtual_min_value[row['tag']]['w_val'] = w_val

    for value in tag_list:
        result_summary[value]['ave'] = round(
            result_summary[value]['sum'] / result_summary[value]['cnt'], 3)

    if len(result_summary) == 0:
        pass
    else:
        exec_log = BigDataCollector['exec_log']
        exec_log.delete_many({})
        exec_log.insert_one({'cur_date': p_to_date})
        Cache.remove({'time': {'$gte': p_from_date, '$lte': p_to_date}})
    return result_summary, tag_list
def __init__(self, parent=None):
    QtCore.QThread.__init__(self, parent)
    self.exiting = False
    self.lib = common.common()
    self.templates = templates.templates()
def check(test):
    A, staff_sol = test
    student_sol = common(A)
    return staff_sol == student_sol
def __init__(self, ui):
    self.table = ui.ext_tableView_extResultList
    self.cm = common(ui)
def __init__(self):
    self.ytd = common(spo=True)