def gradient_descent(xs, ys, alpha, tolerance, iterations):
    '''
    Batch gradient descent for linear regression.

    xs         -- list of feature rows (mutated: a bias term 1 is inserted
                  at the front of every row)
    ys         -- list of single-element target rows ([y] per example)
    alpha      -- learning rate
    tolerance  -- stop when the cost change is <= tolerance
    iterations -- maximum number of descent steps

    Returns the learned theta vector, or None when xs and ys disagree in
    length. Also writes the per-iteration cost history via files.writeFile.
    '''
    m = len(xs)
    if m != len(ys):
        return None  # was NULL (an undefined name) in the original
    # prepend the bias feature x0 = 1 to every example row
    for row in xs:
        row.insert(0, 1)
    ranges = get_ranges(xs)  # NOTE(review): computed but never used here
    r = len(xs[0])
    # random initialisation of each parameter in [-1, 1]
    thetas = [random.uniform(-1, 1) for _ in range(r)]
    list_cost_functions = []
    cost_function = None  # was NULL (undefined name) in the original
    for n in range(iterations):
        # Compute the full gradient from the CURRENT thetas before applying
        # any update. The original mutated thetas[j] inside the j loop, so
        # later partial derivatives were taken at already-updated parameters;
        # batch gradient descent updates all parameters simultaneously.
        steps = [0] * r
        for j in range(r):
            accum = 0
            for i in range(m):
                hypothesis = 0
                for h in range(r):
                    hypothesis += xs[i][h] * thetas[h]
                accum += (hypothesis - ys[i][0]) * xs[i][j]
            steps[j] = alpha * accum / m
        for j in range(r):
            thetas[j] = thetas[j] - steps[j]
        new_cost_function = get_cost_function(thetas, xs, ys)
        list_cost_functions.append(str(new_cost_function))
        # stop once the cost improvement falls within tolerance
        if cost_function is not None and abs(cost_function - new_cost_function) <= tolerance:
            break
        cost_function = new_cost_function
    files.writeFile("costFunction", list_cost_functions)
    return thetas
def saveValue(self, name, value):
    '''
    Store value under name, creating a backing file on first use.
    '''
    debug.add('saving value ' + str(name), value)
    # on first save, pick an unused "<counter>.value" filename for this name
    if name not in self.names:
        debug.add('name not in self.names')
        counter = 0
        newName = str(counter) + '.value'
        # probe numbered filenames until one does not already exist in
        # the database directory
        while newName in listdir(self.path):
            counter += 1
            newName = str(counter) + '.value'
        debug.add('newname', newName)
        # record the file association for this table instance
        self.namePaths[name] = pathJoin(self.path, newName)
        # persist the new name association to the table metadata on disk
        writeFile(pathJoin(self.path, 'names.table'), pickle(self.namePaths))
        debug.add('namePaths', self.namePaths)
        # refresh the cached names list and count
        self.names = self.namePaths.keys()
        self.length = len(self.names)
    # serialize the value out to its backing file
    fileData = writeFile(self.namePaths[name], pickle(value))
    return fileData
def deleteValue(self, name):
    '''
    Remove the value stored under name along with its backing file.
    '''
    debug.add('deleting value ', name)
    # unknown names cannot be deleted
    if name not in self.names:
        return False
    # look up the backing file before discarding the metadata entry
    filePath = self.namePaths[name]
    del self.namePaths[name]
    # persist the updated metadata table
    writeFile(pathJoin(self.path, 'names.table'), pickle(self.namePaths))
    # refresh the cached names list and count
    self.names = self.namePaths.keys()
    self.length = len(self.names)
    # remove the file associated with the value, when present
    if pathExists(filePath):
        removeFile(filePath)
        return True
    return False
def __init__(self, path):
    '''
    DB table that stores each value as a file under path.

    Designed to reduce ram usage when reading from large databases,
    specifically for caches.

    # variables #
    .path       Filesystem directory where the table is stored.
    .names      Names of all stored values, as strings.
    .namePaths  Dict mapping each name to the path of its value file.
    .length     Number of names stored in this table.
    '''
    # root of the cache; value files are stored in this directory
    self.path = path
    # create the directory tree on first use
    if not pathExists(self.path):
        makedirs(self.path)
    debug.add('table path', self.path)
    # path prefix used for tables stored inside tables
    self.pathPrefix = ''
    # metadata file holding the name -> file path mapping
    # (fix: removed the unused `tempTable` local and the redundant
    # single-argument pathJoin() wrappers around already-joined paths)
    namesPath = pathJoin(self.path, 'names.table')
    # bootstrap an empty mapping if none exists yet
    if not pathExists(namesPath):
        writeFile(namesPath, pickle(dict()))
    # load the name -> path mapping
    self.namePaths = unpickle(loadFile(namesPath))
    debug.add('self.namePaths', self.namePaths)
    # cached collection of stored value names
    self.names = self.namePaths.keys()
    debug.add('self.names', self.names)
    # number of values stored on the table
    self.length = len(self.names)
    debug.add('self.length', self.length)
    # names protected from limit-based cleaning
    protectedPath = pathJoin(self.path, 'protected.table')
    if pathExists(protectedPath):
        self.protectedList = unpickle(loadFile(protectedPath))
    else:
        self.protectedList = []
    # optional cap on the number of stored values, loaded from disk when set
    limitPath = pathJoin(self.path, 'limit.table')
    if pathExists(limitPath):
        self.limit = unpickle(loadFile(limitPath))
    else:
        self.limit = None
def makengram(country, n, source): directory = "PycharmProjects\\TVSeries\\txt" global ngrams for filename in os.listdir(directory+source): #split each file in the country-specific directory into ngrams if filename[0] == ".": #skip the files that are of no use to us print "skipping", filename continue print "running", filename # content = file.readFile(directory+source+"/"+filename) content = open(directory+source+"/"+filename, 'r') print str(content) #first we have to spit each file into individual words words = nltk.word_tokenize(content) #then we can run the proper nltk function on the list of words to get the ngrams if n == 1: ngrams += words elif n == 2: ngrams += nltk.bigrams(words) elif n == 3: ngrams += nltk.trigrams(words) else: ngrams += nltk.util.ngrams(words, n) #save the list of ngrams in a text file that can easily be read by python later print "saving %s%s%s%dgrams.txt" % (directory, source, country, n) files.writeFile("%s%s%s%dgrams.txt" % (directory, source, country, n), (pickle.dumps(ngrams))) print "saved." return
def saveValue(self, name, value):
    '''
    Save a value with the name name and the value value.
    '''
    debug.add('saving value ' + str(name), value)
    if name not in self.names:
        # no backing file exists for this name yet, so create one
        debug.add('name not in self.names')
        taken = listdir(self.path)
        counter = 0
        candidate = str(counter) + '.value'
        # probe numbered filenames until a free one is found in the
        # database directory
        while candidate in taken:
            counter += 1
            candidate = str(counter) + '.value'
        debug.add('newname', candidate)
        # register the file association for this name
        self.namePaths[name] = pathJoin(self.path, candidate)
        # write the new name association to the table metadata on disk
        writeFile(pathJoin(self.path, 'names.table'), pickle(self.namePaths))
        debug.add('namePaths', self.namePaths)
        # refresh the cached names list and count
        self.names = self.namePaths.keys()
        self.length = len(self.names)
    # write the pickled value into its backing file
    fileData = writeFile(self.namePaths[name], pickle(value))
    return fileData
def makengram(country, n, source): directory = "PycharmProjects\\TVSeries\\txt" global ngrams for filename in os.listdir(directory + source): #split each file in the country-specific directory into ngrams if filename[0] == ".": #skip the files that are of no use to us print "skipping", filename continue print "running", filename # content = file.readFile(directory+source+"/"+filename) content = open(directory + source + "/" + filename, 'r') print str(content) #first we have to spit each file into individual words words = nltk.word_tokenize(content) #then we can run the proper nltk function on the list of words to get the ngrams if n == 1: ngrams += words elif n == 2: ngrams += nltk.bigrams(words) elif n == 3: ngrams += nltk.trigrams(words) else: ngrams += nltk.util.ngrams(words, n) #save the list of ngrams in a text file that can easily be read by python later print "saving %s%s%s%dgrams.txt" % (directory, source, country, n) files.writeFile("%s%s%s%dgrams.txt" % (directory, source, country, n), (pickle.dumps(ngrams))) print "saved." return
def __init__(self, path):
    '''
    DB table that stores each value as a file under path.

    Designed to reduce ram usage when reading from large databases,
    specifically for caches.

    # variables #
    .path       Filesystem directory where the table is stored.
    .names      Names of all stored values, as strings.
    .namePaths  Dict mapping each name to the path of its value file.
    .length     Number of names stored in this table.
    '''
    # root of the cache; value files are stored in this directory
    self.path = path
    # create the directory tree on first use
    if not pathExists(self.path):
        makedirs(self.path)
    debug.add('table path', self.path)
    # path prefix used for tables stored inside tables
    self.pathPrefix = ''
    # metadata file holding the name -> file path mapping
    # (fix: removed the unused `tempTable` local and the redundant
    # single-argument pathJoin() wrappers around already-joined paths)
    namesPath = pathJoin(self.path, 'names.table')
    # bootstrap an empty mapping if none exists yet
    if not pathExists(namesPath):
        writeFile(namesPath, pickle(dict()))
    # load the name -> path mapping
    self.namePaths = unpickle(loadFile(namesPath))
    debug.add('self.namePaths', self.namePaths)
    # cached collection of stored value names
    self.names = self.namePaths.keys()
    debug.add('self.names', self.names)
    # number of values stored on the table
    self.length = len(self.names)
    debug.add('self.length', self.length)
    # names protected from limit-based cleaning
    protectedPath = pathJoin(self.path, 'protected.table')
    if pathExists(protectedPath):
        self.protectedList = unpickle(loadFile(protectedPath))
    else:
        self.protectedList = []
    # optional cap on the number of stored values, loaded from disk when set
    limitPath = pathJoin(self.path, 'limit.table')
    if pathExists(limitPath):
        self.limit = unpickle(loadFile(limitPath))
    else:
        self.limit = None
def concatenatefiles(country, dictsizes, xgram, source): for dictsize in dictsizes: ngrams = files.readFile("subtitles/%stxts/dictsizes/%dgram/%s%s%dgramdict%d.txt" % (country, xgram, source if source=="lowercase" else "", country, xgram, dictsize)) for filename in os.listdir("subtitles/%stxts/%s" % (country, source)): print "concatenating %s %dgram %d with %s" % (source, xgram, dictsize, filename) content = files.readFile(os.path.join("subtitles/%stxts/%s" % (country, source),filename)) concatenated = ngrams + "\n\n" + content dest = ("subtitles/%stxts/dictsizes/%dgram/%sunzipped/%sdict%d" % (country, xgram, source if source=="lowercase" else "", country, dictsize)) if not os.path.exists(dest): os.mkdir(dest) files.writeFile(os.path.join(dest, filename), concatenated) return
def makedict(country, n, lengths): print ("loading ngrams from subtitles/%stxts/lowercase%s%dgrams.txt" % (country, country , n)) ngrams = pickle.loads(files.readFile("subtitles/%stxts/lowercase%s%dgrams.txt" % (country, country, n))) #find the frequencies of all the ngrams print "determining frequency..." fdist = nltk.FreqDist(ngrams) for length in lengths: #make a plaintext dictionary for each length in lengths top = fdist.keys()[:length] topstr = plaintext(top, n) #save each dictionary print ("saving to subtitles/%stxts/dictsizes/%dgram/lowercase%s%dgramdict%d.txt" % (country, n, country, n, length)) files.writeFile("subtitles/%stxts/dictsizes/%dgram/lowercase%s%dgramdict%d.txt" % (country, n, country, n, length), topstr) return
def setProtected(self, name):
    '''
    Mark name as protected from removal by limit cleaning.
    '''
    # filepath of the on-disk protected values list
    filePath = pathJoin(self.path, 'protected.table')
    # load the existing list from disk, or start a fresh one
    if pathExists(filePath):
        protectedList = unpickle(loadFile(filePath))
    else:
        protectedList = []
    # fix: avoid appending the same name on repeated calls
    if name not in protectedList:
        protectedList.append(name)
    # fix: keep the in-memory copy (maintained by __init__) in sync with
    # what is written to disk
    self.protectedList = protectedList
    # write the changes back to the protected list file
    writeFile(filePath, pickle(protectedList))
def setLimit(self, limit):
    '''
    Set the limit of values that are stored in this table.
    This ignores protected values.
    '''
    # file that persists the limit across instances
    filePath = pathJoin(self.path, 'limit.table')
    # set the limit in this instance
    self.limit = limit
    # fix: pickle before writing — __init__ reads this file back with
    # unpickle(loadFile(...)), so a raw write would not round-trip
    success = writeFile(filePath, pickle(limit))
    return success
def setProtected(self, name):
    '''
    Mark name as protected from removal by limit cleaning.
    '''
    # filepath of the on-disk protected values list
    filePath = pathJoin(self.path, 'protected.table')
    # load the existing list from disk, or start a fresh one
    if pathExists(filePath):
        protectedList = unpickle(loadFile(filePath))
    else:
        protectedList = []
    # fix: avoid appending the same name on repeated calls
    if name not in protectedList:
        protectedList.append(name)
    # fix: keep the in-memory copy (maintained by __init__) in sync with
    # what is written to disk
    self.protectedList = protectedList
    # write the changes back to the protected list file
    writeFile(filePath, pickle(protectedList))
def setLimit(self, limit):
    '''
    Set the limit of values that are stored in this table.
    This ignores protected values.
    '''
    # file that persists the limit across instances
    filePath = pathJoin(self.path, 'limit.table')
    # set the limit in this instance
    self.limit = limit
    # fix: pickle before writing — __init__ reads this file back with
    # unpickle(loadFile(...)), so a raw write would not round-trip
    success = writeFile(filePath, pickle(limit))
    return success
def concatenatefiles(country, dictsizes, xgram, source): for dictsize in dictsizes: ngrams = files.readFile( "subtitles/%stxts/dictsizes/%dgram/%s%s%dgramdict%d.txt" % (country, xgram, source if source == "lowercase" else "", country, xgram, dictsize)) for filename in os.listdir("subtitles/%stxts/%s" % (country, source)): print "concatenating %s %dgram %d with %s" % (source, xgram, dictsize, filename) content = files.readFile( os.path.join("subtitles/%stxts/%s" % (country, source), filename)) concatenated = ngrams + "\n\n" + content dest = ("subtitles/%stxts/dictsizes/%dgram/%sunzipped/%sdict%d" % (country, xgram, source if source == "lowercase" else "", country, dictsize)) if not os.path.exists(dest): os.mkdir(dest) files.writeFile(os.path.join(dest, filename), concatenated) return
def makedict(country, n, lengths): print("loading ngrams from subtitles/%stxts/lowercase%s%dgrams.txt" % (country, country, n)) ngrams = pickle.loads( files.readFile("subtitles/%stxts/lowercase%s%dgrams.txt" % (country, country, n))) #find the frequencies of all the ngrams print "determining frequency..." fdist = nltk.FreqDist(ngrams) for length in lengths: #make a plaintext dictionary for each length in lengths top = fdist.keys()[:length] topstr = plaintext(top, n) #save each dictionary print( "saving to subtitles/%stxts/dictsizes/%dgram/lowercase%s%dgramdict%d.txt" % (country, n, country, n, length)) files.writeFile( "subtitles/%stxts/dictsizes/%dgram/lowercase%s%dgramdict%d.txt" % (country, n, country, n, length), topstr) return
def deleteValue(self, name):
    '''
    Delete a value with name name.
    '''
    debug.add('deleting value ', name)
    if name in self.names:
        # look up the backing file before discarding the metadata entry
        filePath = self.namePaths.pop(name)
        # write the change to the database metadata file
        writeFile(pathJoin(self.path, 'names.table'), pickle(self.namePaths))
        # refresh the cached names list and count
        self.names = self.namePaths.keys()
        self.length = len(self.names)
    else:
        return False
    # delete the value's backing file when present
    if pathExists(filePath):
        removeFile(filePath)
        return True
    else:
        return False
print("----------------------------")

# (Portuguese note below, kept verbatim: for a system AX = B <==> LUX = B,
# where B is the identity matrix of the same order as A, X is A's inverse.)
"""
Em um sistema:
AX = B <==> LUX = B
onde X é uma matriz de mesma ordem de A
e B é uma matriz identidade de mesma ordem de A
Então X é a inversa de A
"""

# Compute the inverse matrix by solving LUX = B with B the identity
B = matrizInversa(L, U, B)
print("Matriz Inversa de A")
imprimeMatriz(B)

# Determinant from the U factor; each row permutation performed during
# elimination flips the sign, tracked by contadorPermut
det = calculaDeterminante(U, columns)
if (contadorPermut % 2) > 0:
    det = det*(-1)
print("Valor do determinante")
print(det)

# Write the inverse matrix and the determinant to a text file
writeFile(B, det)
# Factorize via Gaussian elimination
U, L, B = gauss(matriz, B)
print("----------------------------\n")
# NOTE(review): the labels and values below look swapped — "Matriz L"
# prints the variable U and "Matriz U" prints L. Either gauss() returns
# (L, U, B) and the unpacking names are reversed, or the labels are wrong;
# confirm against gauss() before changing anything.
print("Matriz L")
imprimeMatriz(U)
print("Matriz U")
imprimeMatriz(L)
print("----------------------------")

# (Portuguese note below, kept verbatim: for a system AX = B <==> LUX = B,
# where B is the identity matrix of the same order as A, X is A's inverse.)
"""
Em um sistema:
AX = B <==> LUX = B
onde X é uma matriz de mesma ordem de A
e B é uma matriz identidade de mesma ordem de A
Então X é a inversa de A
"""

# Compute the inverse matrix by solving LUX = B with B the identity
B = matrizInversa(L, U, B)
print("Matriz Inversa de A")
imprimeMatriz(B)

# Write the inverse matrix to a text file
writeFile(B)
# bottom value taken from the third field of the last row slice
values["bottom"] = rows[-1][2]
# left/right set only when the first/last column slice has a non-empty name
if columnSlices[0][0] != "":
    values["left"] = columns[0][2]
if columnSlices[-1][0] != "":
    values["right"] = columns[-1][2]

# choose the template: a single row or single column gets a 1-D box variant
template = "fullbox"
if len(rows) == 1:
    template = "horizontalbox"
elif len(columns) == 1:
    template = "verticalbox"

# templates are located next to the script itself
scriptPath = os.path.dirname(os.path.realpath(sys.argv[0]))
htmlTestTemplate = readFile(scriptPath+"/templates/testbox.html")
htmlTemplate = readFile(scriptPath+"/templates/"+template+".html")
scssTemplate = readFile(scriptPath+"/templates/"+template+".scss")

# render markup and styles from the collected slice values
html = pystache.render(htmlTemplate, values)
scss = pystache.render(scssTemplate, values)
htmlTest = pystache.render(htmlTestTemplate, {"name": boxName, "html": html})

# emit the standalone test page plus the html/scss fragments
writeFile(filename+"-test.html", htmlTest)
writeFile(filename+".html", html)
writeFile(filename+".scss", scss)

# compile the generated SCSS to CSS with the external `sass` tool,
# redirecting its stdout into the .css file
# NOTE: `file` shadows the Python 2 builtin of the same name
print "Creating CSS file from SCSS..."
with open(filename+".css", "w") as file:
    subprocess.call(["sass", filename+".scss"], stdout=file)
print "Done"
# NOTE(review): this chunk appears to start inside a loop over cropped image
# areas whose header is above this view; indentation here is a best guess.
# Save the cropped image and remember (identifier, filename, area) for the
# CSS/HTML generation pass below.
cropped.save(dirName+"/"+name+extension)
namedFiles.append((identifier, name+extension, area))

css = ""
innerhtml = ""
# templates are located next to the script itself
scriptPath = os.path.dirname(os.path.realpath(sys.argv[0]))
cssTemplate = readFile(scriptPath+"/templates/imageArea.css")
htmlTemplate = readFile(scriptPath+"/templates/testbox.html")
# one CSS rule per cropped image, positioned from its area bounds
# (bounds appear to be (left, top, right, bottom) pixel coordinates,
# given the width/height/right/bottom arithmetic — confirm with caller)
for (identifier, filename, area) in namedFiles:
    css += pystache.render(cssTemplate, {
        "identifier": identifier,
        "filename": filename,
        "left": area.bounds[0],
        "top": area.bounds[1],
        "width": area.bounds[2]-area.bounds[0],
        "height": area.bounds[3]-area.bounds[1],
        "right": imageWidth-area.bounds[2],
        "bottom": imageHeight-area.bounds[3]
    })
    # a leading '.' marks a class selector; otherwise the identifier is
    # presumably '#'-prefixed and becomes an id selector — TODO confirm
    if identifier[0] == '.':
        innerhtml += "<div class='"+identifier[1:]+"'></div>\n"
    else:
        innerhtml += "<div id='"+identifier[1:]+"'></div>\n"
# render the test page wrapping all the generated divs
html = pystache.render(htmlTemplate, {"name": os.path.basename(targetFilename), "html": innerhtml})
writeFile(targetFilename+".css", css)
writeFile(targetFilename+".html", html)