def detectBuild(pathname,dictInfo):
    """Detect the build rooted at *pathname* and record it in *dictInfo*.

    Sets dictInfo['type'] to ' assemble', ' builder' or '' and, for
    builder trees, sets dictInfo['name'] to the distribution flavour.
    Returns the (mutated) dictInfo.
    """
    # NOTE(review): block structure reconstructed from a collapsed
    # one-line source — confirm the nesting against version control.
    shadowPath = pathJoin(pathname,'/etc/shadow')
    # An 'assemble' build is identified by a known hash of the root
    # password ("root") inside its /etc/shadow.
    if r"root:$1$JMvNh5xg$VnV1DyJdTcwuZ0hp5YiJG0:14349:0:::::" in \
        readFile(shadowPath):
        dictInfo['type'] = ' assemble'
    elif path.exists(pathJoin(pathname,"delta")) and \
        path.exists(pathJoin(pathname,"workspace")):
        # 'builder' trees carry both a "delta" and a "workspace" dir.
        dictInfo['type'] = " builder"
        # Flavour is read from the gentoo release file of the tree.
        issue = readFile(pathJoin(pathname,'etc/gentoo-release'))
        if "Server" in issue:
            if "Scratch" in issue:
                dictInfo['name'] = "CSS"
            else:
                dictInfo['name'] = "CDS"
        elif "Desktop" in issue:
            if "XFCE" in issue:
                dictInfo['name'] = "CLDX"
            elif "KDE" in issue:
                dictInfo['name'] = "CLD"
            elif "GNOME" in issue:
                dictInfo['name'] = "CLDG"
        elif "Scratch" in issue:
            dictInfo['name'] = "CLS"
    else:
        dictInfo['type'] = ''
    return dictInfo
def getOperState(iface):
    """Return the operational state of network interface *iface*.

    Reads the kernel's sysfs operstate file; any value other than
    "down" is reported as "up".
    """
    state = readFile("/sys/class/net/%s/operstate" % iface)
    return "down" if state == "down" else "up"
def makeFrequencyDictionary():
    """Count how often each word occurs across all files in ./english.

    Every line of every file is treated as one word; returns a plain
    dict mapping word -> occurrence count.
    """
    counts = {}
    for filename in os.listdir("english"):
        text = files.readFile("english" + os.sep + filename)
        for word in text.splitlines():
            counts[word] = counts.get(word, 0) + 1
    return counts
def concatenatefiles(country, dictsizes, xgram, source):
    """Prepend each n-gram dictionary to every subtitle file.

    For every size in *dictsizes*, the dictionary text is concatenated
    (separated by a blank line) in front of each file under
    subtitles/<country>txts/<source> and written to the matching
    "unzipped" destination directory, created lazily on first use.
    """
    # The "lowercase" source variant carries an extra filename prefix.
    prefix = source if source == "lowercase" else ""
    srcdir = "subtitles/%stxts/%s" % (country, source)
    for dictsize in dictsizes:
        ngrams = files.readFile(
            "subtitles/%stxts/dictsizes/%dgram/%s%s%dgramdict%d.txt"
            % (country, xgram, prefix, country, xgram, dictsize))
        dest = ("subtitles/%stxts/dictsizes/%dgram/%sunzipped/%sdict%d"
                % (country, xgram, prefix, country, dictsize))
        for filename in os.listdir(srcdir):
            print("concatenating %s %dgram %d with %s"
                  % (source, xgram, dictsize, filename))
            content = files.readFile(os.path.join(srcdir, filename))
            if not os.path.exists(dest):
                os.mkdir(dest)
            files.writeFile(os.path.join(dest, filename),
                            ngrams + "\n\n" + content)
    return
def getPkgUses(fullpkg):
    """Get USE and IUSE from package.

    *fullpkg* is "category/name".  Returns (use, iuse) — two lists of
    flags with any leading "+" stripped — or None when the package has
    no installed version under /var/db/pkg.
    """
    category, _sep, pkg = fullpkg.partition("/")
    pkgCategory = "/var/db/pkg/{0}".format(category)
    # Parse every directory entry; keep only versions of this package.
    matches = (reVerSplit.search(entry)
               for entry in listDirectory(pkgCategory))
    packages = [pv for pv in (reVerSplitToPV(m) for m in matches if m)
                if pv["PN"] == pkg]
    if not packages:
        return None
    # Use the last (highest) matching version's VDB directory.
    pkgDir = path.join(pkgCategory, packages[-1]["PF"])
    use = readFile(path.join(pkgDir, "USE")).strip().split()
    iuse = readFile(path.join(pkgDir, "IUSE")).strip().split()

    def _clean(flags):
        # Drop empty tokens and a leading "+" (default-enabled marker).
        return [f[1:] if f.startswith("+") else f for f in flags if f]

    return (_clean(use), _clean(iuse))
def concatenatefiles(country, dictsizes, xgram, source):
    """Prepend each n-gram dictionary to every subtitle file for *country*.

    NOTE(review): duplicate definition — an identically named
    concatenatefiles appears earlier in this file; this later one
    silently replaces it at import time.  Confirm and drop one.
    """
    for dictsize in dictsizes:
        # The "lowercase" source variant carries an extra filename prefix.
        ngrams = files.readFile(
            "subtitles/%stxts/dictsizes/%dgram/%s%s%dgramdict%d.txt" %
            (country, xgram, source if source == "lowercase" else "",
             country, xgram, dictsize))
        for filename in os.listdir("subtitles/%stxts/%s" % (country, source)):
            print "concatenating %s %dgram %d with %s" % (source, xgram,
                                                          dictsize, filename)
            content = files.readFile(
                os.path.join("subtitles/%stxts/%s" % (country, source),
                             filename))
            # Dictionary text goes in front, separated by a blank line.
            concatenated = ngrams + "\n\n" + content
            dest = ("subtitles/%stxts/dictsizes/%dgram/%sunzipped/%sdict%d" %
                    (country, xgram,
                     source if source == "lowercase" else "",
                     country, dictsize))
            # Destination directory is created lazily on first write.
            if not os.path.exists(dest):
                os.mkdir(dest)
            files.writeFile(os.path.join(dest, filename), concatenated)
    return
def populateWordDict(wordDict=None, write=True): '''this populates the wordDict from wordDict.py with the counts of all google's input''' if wordDict is None: wordDict = getWordObjectDictionary("dict.p") print "getting the ngrams" ngrams = files.readFile("all.grams") # all.grams is the file I got # from parsing all of google's input ngrams = ngrams.splitlines() populateWordDictHelper(ngrams, wordDict) populatePossesives(wordDict) print "wordDict populated" if write: files.writePickle("populatedDict.p", wordDict) return wordDict
def makedict(country, n, lengths): print ("loading ngrams from subtitles/%stxts/lowercase%s%dgrams.txt" % (country, country , n)) ngrams = pickle.loads(files.readFile("subtitles/%stxts/lowercase%s%dgrams.txt" % (country, country, n))) #find the frequencies of all the ngrams print "determining frequency..." fdist = nltk.FreqDist(ngrams) for length in lengths: #make a plaintext dictionary for each length in lengths top = fdist.keys()[:length] topstr = plaintext(top, n) #save each dictionary print ("saving to subtitles/%stxts/dictsizes/%dgram/lowercase%s%dgramdict%d.txt" % (country, n, country, n, length)) files.writeFile("subtitles/%stxts/dictsizes/%dgram/lowercase%s%dgramdict%d.txt" % (country, n, country, n, length), topstr) return
def makedict(country, n, lengths):
    """Build plaintext n-gram dictionaries of several sizes for *country*.

    NOTE(review): duplicate definition — an identically named makedict
    appears earlier in this file; this later one silently replaces it
    at import time.  Confirm and drop one.
    """
    print("loading ngrams from subtitles/%stxts/lowercase%s%dgrams.txt" %
          (country, country, n))
    # SECURITY NOTE(review): pickle.loads is only safe on trusted files.
    ngrams = pickle.loads(
        files.readFile("subtitles/%stxts/lowercase%s%dgrams.txt" %
                       (country, country, n)))
    #find the frequencies of all the ngrams
    print "determining frequency..."
    fdist = nltk.FreqDist(ngrams)
    for length in lengths:
        #make a plaintext dictionary for each length in lengths
        top = fdist.keys()[:length]
        topstr = plaintext(top, n)
        #save each dictionary
        print(
            "saving to subtitles/%stxts/dictsizes/%dgram/lowercase%s%dgramdict%d.txt"
            % (country, n, country, n, length))
        files.writeFile(
            "subtitles/%stxts/dictsizes/%dgram/lowercase%s%dgramdict%d.txt"
            % (country, n, country, n, length), topstr)
    return
def add2grams(wordDict=None, write=True): if wordDict is None: wordDict = files.readPickle("./populatedDict.p") for text in getBooks(): name = text[text.rfind(os.sep) + 1:] print "reading book: %s" % name book = files.readFile(text) print "generating the list of words" words = generateWordList(book) count = 0 for i in xrange(2, len(words) - 2): if words[i] not in wordDict: continue wordDict[words[i]].addWordAfter(words[i + 2], second=True) wordDict[words[i]].addWordAfter(words[i + 1], second=False) wordDict[words[i]].addWordBefore(words[i - 2], second=True) wordDict[words[i]].addWordBefore(words[i - 1], second=False) if count % 10000: print "%d words read" % count if write: files.writePickle("2gramDict.p", wordDict) return wordDict
values["top"] = rows[0][2] if rowSlices[-1][0] != "": values["bottom"] = rows[-1][2] if columnSlices[0][0] != "": values["left"] = columns[0][2] if columnSlices[-1][0] != "": values["right"] = columns[-1][2] template = "fullbox" if len(rows) == 1: template = "horizontalbox" elif len(columns) == 1: template = "verticalbox" scriptPath = os.path.dirname(os.path.realpath(sys.argv[0])) htmlTestTemplate = readFile(scriptPath+"/templates/testbox.html") htmlTemplate = readFile(scriptPath+"/templates/"+template+".html") scssTemplate = readFile(scriptPath+"/templates/"+template+".scss") html = pystache.render(htmlTemplate, values) scss = pystache.render(scssTemplate, values) htmlTest = pystache.render(htmlTestTemplate, {"name": boxName, "html": html}) writeFile(filename+"-test.html", htmlTest) writeFile(filename+".html", html) writeFile(filename+".scss", scss) print "Creating CSS file from SCSS..." with open(filename+".css", "w") as file: subprocess.call(["sass", filename+".scss"], stdout=file)
def on_go_button_clicked(self, widget):
    """Validate the form inputs and run gradient descent.

    Reads the x/y data files and the alpha/iterations/tolerance entries,
    logging every validation step; the algorithm runs only when every
    input is valid.

    BUGFIXES: xs/ys were unbound (NameError) when a data file was
    missing; a ValueError while parsing alpha/iterations/tolerance did
    not set the error flag, so gradient_descent could run with undefined
    parameters; the y-file "NULL list" message wrongly said "x's".
    """
    filex = self.filex_entry.get_text()
    filey = self.filey_entry.get_text()
    self.log_textbuffer.set_text("")
    error = False
    # BUGFIX: pre-initialise so the comparisons below cannot raise
    # NameError when a data file is missing.
    xs = NULL
    ys = NULL
    self.write_in_log("Iniciando\n", 1)
    self.write_in_log("Leyendo archivo de x's\n", 2)
    if files.verifyFile(filex):
        xstrings = files.readFile(filex)
        xs = self.verify_lists(xstrings)
        if xs == NULL:
            self.write_in_log("\tNo se encontro archivo de x's\n", 0)
            error = True
        else:
            self.write_in_log("\tValores de x: " + str(xs) + "\n", 0)
    else:
        self.write_in_log("\tNo se encontro archivo de x's\n", 0)
        error = True
    self.write_in_log("Leyendo archivo de y's\n", 2)
    if files.verifyFile(filey):
        ystrings = files.readFile(filey)
        ys = self.verify_lists(ystrings)
        if ys == NULL:
            # BUGFIX: message said "x's" in the y-file branch.
            self.write_in_log("\tNo se encontro archivo de y's\n", 0)
            error = True
        else:
            self.write_in_log("\tValores de y: " + str(ys) + "\n", 0)
    else:
        self.write_in_log("\tNo se encontro archivo de y's\n", 0)
        error = True
    if xs != NULL and ys != NULL:
        self.write_in_log("Validando corcordancia de datos (x's y y's)\n", 2)
        if len(xs) == len(ys):
            self.write_in_log("Los datos concuerdan\n", 0)
        else:
            self.write_in_log("Los datos no concuerdan\n", 1)
            error = True
    self.write_in_log("Validando alfa\n", 2)
    try:
        alpha = float(self.alpha_entry.get_text())
        self.write_in_log("\tAlfa: " + repr(alpha) + "\n", 0)
    except ValueError:
        self.write_in_log("\tAlfa no valido\n", 0)
        error = True  # BUGFIX: invalid alpha must block the run
    self.write_in_log("Validando numero de iteraciones\n", 2)
    try:
        iterations = int(self.iterations_entry.get_text())
        self.write_in_log("\tNumero de iteraciones: " + repr(iterations) + "\n", 0)
    except ValueError:
        self.write_in_log("\tNumero de iteraciones no valido\n", 0)
        error = True  # BUGFIX: invalid iterations must block the run
    self.write_in_log("Validando tolerancia\n", 2)
    try:
        tolerance = float(self.tolerance_entry.get_text())
        self.write_in_log("\tTolerancia: " + repr(tolerance) + "\n", 0)
    except ValueError:
        self.write_in_log("\tToleracia no valida\n", 0)
        error = True  # BUGFIX: invalid tolerance must block the run
    if not error:
        self.write_in_log("Parametros validos\n", 2)
        self.write_in_log("Iniciando algoritmo\n", 1)
        thetas = gradient_descent(xs, ys, alpha, tolerance, iterations)
        self.write_in_log("Valores teta: " + str(thetas) + "\n", 0)
        self.write_in_log("Finalizacion exitosa\n", 1)
    else:
        self.write_in_log("Parametros no validos\n", 2)
        self.write_in_log("Finalizacion prematura\n", 1)
pass finally: os.unlink(tmpFile.name) p.kill() if identifier: dirName = os.path.dirname(targetFilename) if dirName == "": dirName = "." cropped.save(dirName+"/"+name+extension) namedFiles.append((identifier,name+extension,area)) css = "" innerhtml = "" scriptPath = os.path.dirname(os.path.realpath(sys.argv[0])) cssTemplate = readFile(scriptPath+"/templates/imageArea.css") htmlTemplate = readFile(scriptPath+"/templates/testbox.html") for (identifier,filename,area) in namedFiles: css += pystache.render(cssTemplate, { "identifier": identifier, "filename": filename, "left": area.bounds[0], "top": area.bounds[1], "width": area.bounds[2]-area.bounds[0], "height": area.bounds[3]-area.bounds[1], "right": imageWidth-area.bounds[2], "bottom": imageHeight-area.bounds[3] }) if identifier[0] == '.': innerhtml += "<div class='"+identifier[1:]+"'></div>\n"
def getPkgSlot(pkg, prefix="/"):
    """Get package slot.

    Returns a list with the stripped SLOT value of every installed
    version of *pkg* under <prefix>/var/db/pkg.
    """
    pkgDir = path.join(prefix, "var/db/pkg")
    return [readFile(path.join(pkgDir, entry["CATEGORY"], entry["PF"],
                               "SLOT")).strip()
            for entry in isPkgInstalled(pkg, prefix)]
pivo = matriz[j][i] matriz = changeRows(matriz, i, j) B = changeRows(B, i, j) pivoting(matriz, i) # Atualiza as matrizes U e L for coluna in range(0, N): for linha in range(coluna + 1, N): L[linha][coluna] = matriz[linha][coluna] # Matriz L matriz[linha][coluna] = 0 # Matriz U return matriz, L, B matriz, rows, columns = readFile() # Matriz lida do arquivo texto print("\nMatriz A") imprimeMatriz(matriz) # Matriz identidade que B = matrizIdentidade(rows) # Matrizes L, U e B após a fatoração LU da matriz A U, L, B = gauss(matriz, B) print("----------------------------\n") print("Matriz L") imprimeMatriz(U)
def getNgramList(letter):
    """Return the lines of the first n-gram file matching *letter*.

    Scans the current directory; an empty list is returned when no
    n-gram file for the letter exists.
    """
    for entry in os.listdir("."):
        if not isNgramFile(entry):
            continue
        if parseLetterNgramFile(entry) == letter:
            return files.readFile(entry).splitlines()
    return []
def generateWordList(size=5, write=False):
    """Load the word list for *size* from english/<size>0.txt.

    Optionally pickles the list to pickle/wordlist<size>.p before
    returning it.
    """
    words = files.readFile("english/%d0.txt" % size).splitlines()
    if write:
        files.Pickle("pickle/wordlist%d.p" % size, content=words).write()
    return words