def exportAction(self, event):
    """Export everything in the table to a file."""
    lastDir = ""
    try:
        # load the last used directory
        # this will probably change as we will use a base64 encoded json as the complete config?
        lastDir = self.callbacks.loadExtensionSetting("lastDir")
    except:
        # if there is not a last used directory in the settings, continue
        pass
    from Utils import saveFileDialog, writeFile
    selectedFile, usedDirectory = saveFileDialog(parent=self.panel,
                                                 startingDir=lastDir,
                                                 title="Export Issues",
                                                 extension="json")
    if selectedFile is not None:
        # write to the file
        writeFile(selectedFile.getAbsolutePath(), self.tableIssue.exportIssues())
    if usedDirectory is not None:
        # overwrite the last used directory
        self.callbacks.saveExtensionSetting("lastDir", usedDirectory)
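
# A minimal Jython/Swing sketch of the saveFileDialog helper the export action
# relies on. This is an assumption, not the actual Utils implementation: it only
# mirrors the (selectedFile, usedDirectory) return shape used above, and the
# extension parameter is ignored here for brevity.
from javax.swing import JFileChooser
from java.io import File

def saveFileDialog(parent=None, startingDir="", title="Save", extension="json"):
    chooser = JFileChooser()
    chooser.setDialogTitle(title)
    if startingDir:
        chooser.setCurrentDirectory(File(startingDir))
    if chooser.showSaveDialog(parent) == JFileChooser.APPROVE_OPTION:
        # return the chosen java.io.File and the directory that was browsed to
        return chooser.getSelectedFile(), chooser.getCurrentDirectory().getAbsolutePath()
    return None, None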
def randWeightInitial(Num, FilePath):
    # Remove any previously saved weight file before regenerating it.
    if os.path.exists(FilePath):
        os.remove(FilePath)
    W = dict()
    for i in range(Num):
        # W[k] is a k x k diagonal matrix with random integer entries in [1, 9].
        W[i + 1] = np.mat(np.diag(np.random.randint(1, 10, i + 1)))
    writeFile(W, FilePath)
    return W
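
# randWeightInitial persists a dict of numpy matrices, so the writeFile/loadFile
# pair it relies on presumably serializes arbitrary Python objects. Below is a
# minimal pickle-based sketch of that (object, path) calling convention; the
# actual Utils helpers may serialize differently.
import pickle

def writeFile(obj, file_path):
    with open(file_path, 'wb') as f:
        pickle.dump(obj, f)

def loadFile(file_path):
    with open(file_path, 'rb') as f:
        return pickle.load(f)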
def Operation(self, file_path=None):
    if self.Encoder:
        if file_path is not None and os.path.exists(file_path):
            self.ASCII_Dict = loadFile(file_path)
        ASCII_List = self._ASCIIEncoder(self.PW)
        HEX_STR = ''
        for ASCII_Num in ASCII_List:
            HEX_STR += self._HexEncoder(ASCII_Num) + '-'
        DES_STR = self._DESEncoder(HEX_STR)
        DES_STR = base64.b64encode(DES_STR).decode()
        while True:
            random_id = random.randint(1000, 9999)
            if str(random_id) not in self.ASCII_Dict.keys():
                random_id = str(random_id)
                break
        self.ASCII_Dict[random_id] = DES_STR[:-self.K]
        writeFile(self.ASCII_Dict, file_path)
        ASCII_STR = DES_STR[-self.K:]
        X = np.array([self._ASCIIEncoder(ASCII_STR)])
        Y = self._MatrixEncoder(X, self.W[self.K])
        return self._GenerateGDMM(Y, random_id)
    else:
        '''Do Something...'''
        X = self._MatrixDecoder(self.Y, self.W[self.K].I)
        X = X.tolist()[0]
        X = [int(i) for i in X]
        ASCII_STR = self._ASCIIDecoder(X)
        ASCII_Dict = loadFile(file_path)
        DES_STR = ASCII_Dict[self.ID] + ASCII_STR
        DES_STR = base64.b64decode(DES_STR)
        HEX_STR = self._DESDecoder(DES_STR)
        HEX_STR_List = HEX_STR.split('-')[:-1]
        ASCII_List = []
        for hex_str in HEX_STR_List:
            ASCII_List.append(self._HexDecoder(hex_str))
        return self._ASCIIDecoder(ASCII_List)
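
# Toy illustration of the split used by the encoder branch above: the base64
# ciphertext is divided into a stored prefix (saved in ASCII_Dict under
# random_id) and a K-character tail that is matrix-encoded and carried
# separately; the decoder branch re-joins the two halves. Values are made up.
K = 4
des_str = "QUJDREVGR0g="       # placeholder base64 string
stored_prefix = des_str[:-K]    # what gets written to ASCII_Dict[random_id]
carried_tail = des_str[-K:]     # what gets ASCII/matrix encoded
assert stored_prefix + carried_tail == des_str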
def main(gffPath, gplexPath, nalPath, minCov=0.5, maxDist=0):
    """Generate a GFF file of genes that overlap at least one gplex and at least one non-alignment.

    :param gffPath: path to the GFF3-formatted gene annotation file
    :param gplexPath: path to the GFF3-formatted gplex file
    :param nalPath: path to the GFF3-formatted non-alignment file
    :param minCov: minimum overlap required of non-aligned region (default=0.5)
    :param maxDist: max number of base pairs separating a gplex and gene (default=0)
    :return: filters the three inputted files for entries that overlap each other into separate files
    """
    import os
    from Utils import load, writeFile

    print('\nGenerating gene overlaps...')
    print('Loading files...')
    gplexData = load(gplexPath)[2]
    nalsData = load(nalPath)[2]
    tempGeneData = load(gffPath)
    headers = tempGeneData[0] + tempGeneData[1]
    geneData = []
    for line in tempGeneData[2]:
        if line[2] == 'gene':
            geneData.append(line)

    # Finds overlaps for each ORF
    print('Calculating overlaps...')
    genes = []
    nals = []
    gplexes = []
    l = len(geneData)
    for i, gene in enumerate(geneData):
        print('\tCalculating ' + str(i + 1) + ' of ' + str(l) + '...')
        tempNals = []
        tempGplexes = []
        sumCov = 0
        gStart = int(gene[3])
        gEnd = int(gene[4])
        # Iterates over each non-alignment
        for nline in nalsData:
            if nline[0] == gene[0]:
                start = max(gStart, int(nline[3]))
                end = min(gEnd, int(nline[4]))
                if end > start:
                    tempNals.append(nline)
                    sumCov += (end - start)
        # Iterates over each gplex; a gplex is included if its distance to the ORF doesn't exceed 'maxDist'
        for gplex in gplexData:
            if gplex[0] == gene[0]:
                start = max(gStart, int(gplex[3]))
                end = min(gEnd, int(gplex[4]))
                if (start - end) <= int(maxDist):
                    tempGplexes.append(gplex)
        # If coverage is at least 'minCov' and there exists at least one gplex, add to data
        if (sumCov / (gEnd - gStart) > float(minCov)) and (len(tempGplexes) > 0):
            genes.append(gene)
            nals.extend(tempNals)
            gplexes.extend(tempGplexes)

    # Write everything
    print('Writing to output files...')
    output = os.path.dirname(gffPath) + '/overlaps/'
    writeFile(output + 'genes.gff', headers, genes)
    writeFile(output + 'nals.gff', headers, nals)
    writeFile(output + 'gplexes.gff', headers, gplexes)
    print('Finished writing output to ' + output + '\nFinished!')
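
# Hypothetical invocation of main(): the file names are placeholders and the
# thresholds simply illustrate the parameters documented in the docstring.
if __name__ == '__main__':
    main(gffPath='annotation.gff3',
         gplexPath='gplexes.gff3',
         nalPath='non_alignments.gff3',
         minCov=0.5,    # require more than 50% of the gene covered by non-aligned regions
         maxDist=100)   # allow a gplex up to 100 bp away from the gene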
def getCorr():
    """
    Run a correlation analysis for the vibration parameter selected by the user.
    :return: JSON response with the correlation values and variable names, sorted by absolute correlation
    """
    vibName = request.args.get("vibName")
    progress_data['getCorr'] = 0  # reset the progress bar to zero
    # Check whether a correlation analysis has already been done for the selected vibration parameter
    if vibName + "_FP_col" in os.listdir("Savedfile") and vibName + "_FP_corr" in os.listdir("Savedfile"):
        vib_FP_highCorr_FP = loadFile('Savedfile/' + vibName + '_FP_corr')
        vib_FP_highCol_FP = loadFile('Savedfile/' + vibName + '_FP_col')
        progress_data['getCorr'] = float(100)  # jump the progress bar straight to 100%
    else:
        repeat = []
        corr_list = []  # initialize the correlation list
        col_list = []   # initialize the variable-name list
        zeroCor_191207 = loadFile("Savedfile/zeroCor_191207")
        vibFolder = baseURL + findKey(VIBParamsTrain, vibName)[0]  # locate the folder containing the parameter to analyse
        vibData, vibTime = readData(vibFolder, vibName)  # read the vibration data and the corresponding timestamps
        tempFPParams = FPParamsTrain
        # First pass: count the total number of flight parameters
        FpNumber = 0
        for file in FPParamsTrain.keys():
            FpNumber += len(FPParamsTrain[file])
        i = 1  # initialize the counter
        for file in tempFPParams.keys():
            df = pd.read_table(baseURL + file, sep='\t')
            time_i = timeProcess(df['TIME'].tolist())
            for FPParam in tempFPParams[file]:
                if FPParam in zeroCor_191207:  # the variable is listed in zeroCor_191207
                    print(FPParam + " Break!")  # report and skip this variable
                elif "Accel" in FPParam:  # variables containing "Accel" are accelerometer data
                    print(FPParam + " Break!")  # report and skip this variable
                elif FPParam in repeat:  # the variable is a duplicate that has already been analysed
                    print(FPParam + " Break!")  # report and skip this variable
                else:
                    files = findKey(FPParamsTrain, FPParam)  # if the flight parameter is duplicated across files, it is concatenated
                    if len(files) > 1:
                        repeat.append(FPParam)  # record the duplicated flight parameter as processed
                    jCol = df[FPParam].fillna(0).tolist()  # fillna(inplace=True) returns None, so fill without inplace
                    jCol_matched = matchData(vibTime, time_i, jCol)  # align the flight-parameter data with the vibration time axis
                    corrJ = scs.pearsonr(jCol_matched, vibData)[0]  # correlation between this flight parameter and the vibration response
                    corr_list.append(corrJ)  # add the computed correlation to corr_list
                    col_list.append(FPParam)  # add the variable name to col_list
                    print("Correlation between " + FPParam + " and " + str(vibName) + " is " + str(corrJ))  # report the variable name and correlation
                num_progress = round(i * 100 / FpNumber, 2)
                i += 1
                progress_data['getCorr'] = num_progress  # update the progress bar
            print(" Fetching complete!")  # finished reading the flight parameters in this file
        # Sort by absolute value, descending
        corr = np.array(corr_list)
        col = np.array(col_list)
        vib_FP_highCorr_FP = corr[np.argsort(np.abs(corr))[::-1]].tolist()
        vib_FP_highCol_FP = col[np.argsort(np.abs(corr))[::-1]].tolist()
        # Save the results, sorted by absolute value in descending order
        writeFile("Savedfile/" + vibName + "_FP_corr", vib_FP_highCorr_FP)
        writeFile("Savedfile/" + vibName + "_FP_col", vib_FP_highCol_FP)
    result = {"corr_list": vib_FP_highCorr_FP, "col_list": vib_FP_highCol_FP}
    return Response(json.dumps(result), mimetype='application/json')
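
# Standalone illustration of the ordering used above: argsort on the absolute
# correlations, reversed, sorts by |r| in descending order and reorders the
# column names the same way. The values below are made up.
import numpy as np

corr = np.array([0.10, -0.85, 0.40])
col = np.array(["alt", "rpm", "temp"])
order = np.argsort(np.abs(corr))[::-1]
print(corr[order].tolist())  # [-0.85, 0.4, 0.1]
print(col[order].tolist())   # ['rpm', 'temp', 'alt']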