def buildPreProcessedData(self, filePath):
    utilityObj = Utilities()
    tupleOfDictionariesAndCorpusSize = utilityObj.readTweetsAndConstructDocDictionary(filePath)
    self.corpusSize = tupleOfDictionariesAndCorpusSize[2]
    self.docDictWithText = tupleOfDictionariesAndCorpusSize[3]
    termDocMatrixFromTweets = self.constructTermDocumentMatrix(tupleOfDictionariesAndCorpusSize)
    return termDocMatrixFromTweets
def __init__(self, debugFlag, db, output):
    """
    Initialises the class.
    """
    Utilities.__init__(self, debugFlag)
    self.db = db
    self.outputFile = output
def checkAccession(lastOrgPath, newPath):
    lastOrgFile = util.return_recursive_files(lastOrgPath)[0]
    newOrgFile = util.return_recursive_files(newPath)[0]
    oldAccession = int(os.path.splitext(os.path.basename(lastOrgFile))[0].split("_")[1])
    newAccession = int(os.path.splitext(os.path.basename(newOrgFile))[0].split("_")[1])
    if oldAccession > newAccession:
        return newPath
    else:
        return lastOrgPath
def __init__(self, debugFlag, db, output):
    """
    Initialises the class, and passes the pulsar catalog to it.
    """
    Utilities.__init__(self, debugFlag)
    self.db = db
    self.harmonics = [1, 0.5, 0.3, 0.25, 0.2, 0.16, 0.142, 0.125, 0.111, 0.1,
                      0.0909, 0.0833, 0.0769, 0.0714, 0.0666, 0.0625, 0.03125, 0.015625]
    self.width = 10   # The width of the image viewing panel.
    self.height = 8   # The height of the image viewing panel.
def test(self):
    ip = '127.0.0.1'
    Utilities.convertNumericIpToHex(ip)
    query = "SELECT location_ip from piwik_log_visit"
    self.cursor.execute(query)
    result = self.cursor.fetchall()
    print ip
    print result[0][0]
def __init__(self, debugFlag):
    """
    Default constructor.

    Parameters:
    debugFlag - the debugging flag. If set to True, then detailed debugging
                messages will be printed to the terminal during execution.
    """
    Utilities.__init__(self, debugFlag)
    self.numberOfScores = 22   # This is the default - can be set to other values.
    self.epsilon = 0.000005    # Used during score comparison.
def __init__(self, debugFlag, db, st, mt):
    """
    Initialises the class, and sets up initial variables.
    """
    Utilities.__init__(self, debugFlag)
    self.matcher = mt
    self.db = db
    self.settings = st
    # This call creates the headers for an output CSV file that will
    # be used to store shortened versions of candidate matches found.
    self.createCSVFile(self.matcher.outputPath)
def exec_global(self, request):
    """ Execute a global-scoped command. """
    command = request["command"]
    if command == "go back to perm":
        liq.set_var("selected", "perm")
        liq.send_command("perm-switch.skip")
        self.send_feedback(request)
    elif command == "rescan":
        result = Utilities.rescan()
        self.send_feedback(request, result)
    elif command == "select":
        studio = session.query(Studio).find_by(slug=request["studio"].decode('utf-8'))
        if studio is not None:
            studio.selected = True
            session.add(studio)
            session.commit()
            liq.set_var("selected", str(studio.slug))
            sylog("INFO: Selecting " + studio.slug)
            self.send_feedback(request)
        else:
            self.send_feedback(request, "error", "Unknown studio %s" % request["studio"])
def __mul__(self, right):
    """
    A true multiplication of two potentials would be defined as X * Y = Z,
    where the sets of variables z = x U y.  We would then identify the
    instantiations of x and y that are consistent with z and Z(z) = X(x)Y(y).
    We are generally going to be multiplying sepset potentials by clique
    potentials where the variables of a sepset potential are a subset of the
    variables of the clique.  Therefore we are going to assume in this
    operation that right's variables are a subset of self's.
    """
    # right should only be a DiscreteDistribution or a ContinuousDistribution
    # if it is a subset, and in that case it should be __imul__
    assert(not isinstance(right, DiscreteDistribution) and
           not isinstance(right, ConditionalDiscreteDistribution)), \
        "Attempt to Multiply Potential with incompatible type: Discrete or Conditional"
    if isinstance(right, (int, float, complex, long)):
        potential = copy.deepcopy(self)
        potential.table *= right
    else:
        nodeSet = self.__nodeSet_.union(right.__nodeSet_)
        potential = Potential(list(nodeSet))
        selfValues = [potential.nodes.index(node) for node in self.nodes]
        rightValues = [potential.nodes.index(node) for node in right.nodes]
        # Store the following lists so we don't have to recompute them on every iteration
        potAxes = range(potential.nDims)
        selfAxes = range(self.nDims)
        rightAxes = range(right.nDims)
        # OPTIMIZE: Should be able to do this without blindly iterating through dimensions.
        for seq in Utilities.sequence_generator(potential.dims):
            # OPTIMIZE: Could access the table directly, but would break down our abstraction
            potIndex = potential.generate_index(seq, potAxes)
            selfIndex = self.generate_index(seq[selfValues], selfAxes)
            rightIndex = right.generate_index(seq[rightValues], rightAxes)
            potential[potIndex] = self[selfIndex] * right[rightIndex]
    return potential
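# Illustration (added sketch, not part of the original codebase): the clique-times-sepset
# multiplication described in the __mul__ docstring, shown with plain numpy broadcasting.
# Assumes a clique over variables (A, B) and a sepset over (B,) with matching cardinalities;
# every clique entry (a, b) is scaled by the sepset entry for the consistent value of b.
import numpy as np

clique_table = np.array([[0.2, 0.8],
                         [0.5, 0.5]])   # indexed by (A, B)
sepset_table = np.array([0.3, 0.7])     # indexed by (B,)

product = clique_table * sepset_table[np.newaxis, :]
# product == [[0.06, 0.56],
#             [0.15, 0.35]]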
def getSpeciesListFromPATRIC(genomeFinderFilePath, bacterialGenomeDBPath, familyDBPath):
    genomeAccesionDict = {}
    genomePathDict = {}
    genomeFinder = open(genomeFinderFilePath, "r")
    for line in genomeFinder:
        lineData = line.split("\t")
        genomeName = lineData[1].strip()
        genomeStatus = lineData[21].strip()
        accession = lineData[19].strip()
        if (genomeStatus.strip() == "complete" and (not accession.strip() == "-") and (not accession.strip() == "")):
            genomeAccesionDict.update({accession: genomeName})
    genomeFinder.close()
    #out = open("/home/jain/Gram_Positive_Bacteria_Study/Organisms_Lists_from_PATRIC/Firmicutes/genomeAccessionMapfromPATRIC.txt","w")
    #out.write("Accession Number\tOrganism Name\n")
    for key, value in genomeAccesionDict.iteritems():
        genomePathDict.update({key: "null"})
        #out.write(key+"\t"+value+"\n")
    #out.close()
    genomesPath = util.return_recursive_dir_files(bacterialGenomeDBPath)
    genomePathDict = searchGenomes(genomesPath, genomeAccesionDict, genomePathDict)
    # Copy files to a new folder
    finalDir = familyDBPath
    #print genomePathDict
    for key, value in genomePathDict.iteritems():
        if value != "null":
            #print value
            name = value.split("/")[len(value.split("/")) - 1]
            shutil.copytree(value, finalDir + "/" + name)
    # Filter For Strains
    distinctSpeciesGenomeLocationDict = filterStrainsByFolderName(finalDir)
    #print len(distinctSpeciesGenomeLocationDict)
    deleteDir(finalDir, distinctSpeciesGenomeLocationDict)
    filterStrain(distinctSpeciesGenomeLocationDict)
def filterStrain(distinctSpeciesGenomeLocationDict):
    distinctSpeciesDict = {}
    distinctSpeciesDict_1 = {}
    for key, value in distinctSpeciesGenomeLocationDict.iteritems():
        strainName = key
        folderPath = value
        #print folderPath
        #print str(return_recursive_files(folderPath)) + "\n"
        #print strainName
        for f in util.return_recursive_files(folderPath):
            seq_record = SeqIO.parse(open(f), "genbank").next()
            accession = seq_record.annotations['accessions'][0]
            organism_tmp = seq_record.annotations['organism'].replace(' ', '_')
            organism_tmp_1 = re.sub('[\[\]]', "", organism_tmp)
            organism = '_'.join(organism_tmp_1.split('_')[:2])  #+"_"+accession
            #print organism
            if (distinctSpeciesDict.has_key(organism)):
                oldFilePath = distinctSpeciesDict[organism]
                old_record = SeqIO.parse(open(oldFilePath), "genbank").next()
                old_accession = old_record.annotations['accessions'][0]
                if (old_accession > accession):
                    shutil.rmtree(os.path.dirname(oldFilePath))
                    distinctSpeciesDict.update({organism: f})
                else:
                    shutil.rmtree(os.path.dirname(f))
            else:
                distinctSpeciesDict.update({organism: f})
def __init__(self, debugFlag):
    """
    Default constructor.

    Parameters:
    debugFlag - the debugging flag. If set to True, then detailed debugging
                messages will be printed to the terminal during execution.
    """
    Utilities.__init__(self, debugFlag)
    self.accuacy = -1.0
    self.radius = -1.0
    self.path = "Settings.txt"
    self.padding = 3600
    self.telescope = "Parkes"
def updateLevel(self):
    currentLevel = self.getLevel()
    currentHitPoints = self.getHitPoints()
    constitutionModifier = Utilities.calculateModifier(self.getConstitution())
    self.setLevel(currentLevel + 1)
    self.setHitPoints(currentHitPoints + self.hitpointsPerLevel + constitutionModifier)
    self.setAttackBonus(self.getAttackBonus() + 1)
def deleteDir(finalDir, distinctSpeciesGenomeLocationDict):
    dir = util.return_recursive_dir_files(finalDir)
    for d in dir:
        dirSplit = d.split("/")
        strainName = "_".join(dirSplit[len(dirSplit) - 1].split("_")[:2])
        if distinctSpeciesGenomeLocationDict.has_key(strainName):
            if distinctSpeciesGenomeLocationDict[strainName] != d:
                shutil.rmtree(d)
        else:
            #shutil.rmtree(d)
            print "not matching " + strainName
def attack(self, enemy):
    roll = Utilities.roll()
    strength = Paladin.getStrength(self)
    strengthModifier = Utilities.calculateModifier(strength)
    damage = Paladin.damage
    if roll == 20:
        damage *= 2
        strengthModifier *= 2
    attack = roll + strengthModifier + self.getLevel()
    enemyDex = enemy.getDexterity()
    enemyDexModifier = Utilities.calculateModifier(enemyDex)
    defense = enemy.getArmorClass() + enemyDexModifier
    if attack > defense:
        currentHP = enemy.getHitPoints()
        enemy.setHitPoints(currentHP - damage)
class DueDateProcess:
    #===========================================================================
    def __init__(self):
        self.ticketModel = TicketModel()
        self.util = Utilities()

    #===========================================================================
    def startTaskDueDate(self):
        try:
            # get tickets from database
            tuples = self.ticketModel.getTuplesDueDateNull()
            if (len(tuples) > 0):
                for tuple in tuples:
                    ticket = self.util.createTicketFromTuple(tuple)
                    dd = self.util.getDueDate(ticket['creationDate'], ticket['severity'])
                    self.ticketModel.updateTicketDueDate(ticket['ticketId'], dd)
                    print('ticket date: ' + str(ticket['creationDate']) + ' dueDate: ' + str(dd) + ' severity: ' + str(ticket['severity']))
        except:
            raise
def filterStrainsByFolderName(finalDir):
    genomesDir = util.return_recursive_dir_files(finalDir)
    distinctSpeciesGenomeLocationDict = {}
    for dir in genomesDir:
        dirSplit = dir.split("/")
        strainName = "_".join(dirSplit[len(dirSplit) - 1].split("_")[:2])
        #file = util.return_recursive_files(dir)[0]
        if distinctSpeciesGenomeLocationDict.has_key(strainName):
            lastOrgPath = distinctSpeciesGenomeLocationDict[strainName]
            newOrgPath = checkAccession(lastOrgPath, dir)
            distinctSpeciesGenomeLocationDict.update({strainName: newOrgPath})
        else:
            distinctSpeciesGenomeLocationDict.update({strainName: dir})
    return distinctSpeciesGenomeLocationDict
def getSearchResult(self, userQuery, termDocMatrix, vectorRetrievalObjWithPreProcessedData):
    utilityObj = Utilities()
    searchRes = {}
    dictQuery = {}
    tupleOfDictionariesAndCorpusSize = utilityObj.readQueryAndConstructQueryDictionary([userQuery])
    corpusSize = tupleOfDictionariesAndCorpusSize[2]
    termFreqDictOfQuery = tupleOfDictionariesAndCorpusSize[0]
    sortedTermFreqDictOfQuery = OrderedDict(sorted(termFreqDictOfQuery.items(), key=lambda t: t[0]))
    idfDict = tupleOfDictionariesAndCorpusSize[1]
    queryDocMatrix = self.constructTermDocumentMatrix(tupleOfDictionariesAndCorpusSize)
    for index in range(len(sortedTermFreqDictOfQuery)):
        term = sortedTermFreqDictOfQuery.keys()[index]
        dictQuery[term] = queryDocMatrix[index, 0]
    cosineSimilarityResult = {}
    indexOfQueryTermsInCorpusDict = []
    for i in range(vectorRetrievalObjWithPreProcessedData.corpusSize):
        tempRes = 0
        for term in dictQuery:
            if term in vectorRetrievalObjWithPreProcessedData.termFrequencyDictOfCorpus:
                indexOfTermInSortedCorpusTerms = self.orderingDictOfTerms[term]
                tempRes += termDocMatrix[indexOfTermInSortedCorpusTerms, i] * dictQuery[term]
        if tempRes > 0:
            cosineSimilarityResult[i] = tempRes
    sortedResult = sorted(cosineSimilarityResult.items(), key=lambda x: x[1], reverse=True)
    if len(cosineSimilarityResult) > 50:
        searchRes = sortedResult[0:50]
    else:
        searchRes = sortedResult
    return (searchRes, vectorRetrievalObjWithPreProcessedData.docDictWithText, cosineSimilarityResult, sortedResult)
def marginalize(self, other):
    """
    Return a new potential that is the marginalization of this potential
    given other.  This identifies the instantiations of self (s1,s2,...,sn)
    that are consistent with other and sum self(s1) + self(s2) + ... + self(sn).
    """
    new = copy.deepcopy(other)
    intersect = self.__nodeSet_.intersection(new.__nodeSet_)
    newAxes = range(new.nDims)
    sequence = Utilities.sequence_generator(other.dims)
    for seq in sequence:
        index = self.generate_index_node(seq, intersect)
        newIndex = new.generate_index(seq, newAxes)
        val = self[index]
        if isinstance(val, ndarray):
            val = val.sum()
        new[newIndex] = val
    return new
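# Illustration (added sketch, not part of the original codebase): the marginalization
# described in the docstring above, shown with plain numpy. Summing a joint table over
# (A, B) down to the shared variable (B,) adds up every instantiation of A that is
# consistent with each value of B.
import numpy as np

joint_table = np.array([[0.06, 0.56],
                        [0.15, 0.35]])      # indexed by (A, B)

marginal_over_B = joint_table.sum(axis=0)   # sum out A, keep B
# marginal_over_B == [0.21, 0.91]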
def handleVisitorInfo(self, visitorInfo):
    location_ip = Utilities.convertNumericIpToHex(visitorInfo['ip'])
    newVisitCount = self.mysql.getVisitCount(location_ip)
    if self.sadsDictionary.has_key(visitorInfo['ip']):
        currentVisitCount = self.sadsDictionary[visitorInfo['ip']]
        if currentVisitCount != newVisitCount - 1 and currentVisitCount != newVisitCount:
            print 'Warning: visit count jumped from ' + str(currentVisitCount) + " to " + str(newVisitCount)
    self.sadsDictionary[visitorInfo['ip']] = newVisitCount
    print self.sadsDictionary
def filterGenomes(rootDir):
    filter_genomes_list = {}
    for file in util.return_recursive_dir_files(rootDir):
        #print file
        files = util.return_recursive_files(file)
        fileName = ""
        if len(files) > 1:
            for f in files:
                seq_record = SeqIO.parse(open(f), "genbank").next()
                accession = seq_record.annotations['accessions'][0]
                definition = seq_record.description
                if ("plasmid" not in definition):
                    if fileName != "":
                        #print os.path.dirname(os.path.abspath(f))
                        filter_genomes_list.update({os.path.dirname(os.path.abspath(f)): ""})
                    fileName = accession + ".gbk"
            for f in files:
                if os.path.basename(f) != fileName:
                    #print f
                    os.remove(f)
    ## Deleting all the genomes having multiple chromosomes files
    #print len(filter_genomes_list)
    for folder in filter_genomes_list.iterkeys():
        shutil.rmtree(folder)
def searchGenomes(genomesPath, genomeAccesionDict, genomePathDict):
    for genome in genomesPath:
        genomeFiles = util.return_recursive_files(genome)
        for genomeFile in genomeFiles:
            fileName = os.path.basename(genomeFile)
            accession = ""
            for key in genomeAccesionDict.iterkeys():
                accessions = key.strip().split(",")
                #print accessions
                for a in accessions:
                    if "." in a:
                        accession = a.split(".")[0]
                    else:
                        accession = a
                    if (accession in fileName):
                        genomePathDict.update({key: genome})
                        break
    return genomePathDict
def __imul__(self, right):
    """
    This is the same operation as __mul__ except that if right.nodes is a
    subset of self.nodes, we do the multiplication in place, because there
    is no reason to make a copy, which wastes time and space.
    """
    if isinstance(right, (int, float, complex, long)):
        self.table *= right
    # FIXME: should be right.__nodeSet_ but doesn't work when right is DiscreteDistribution
    elif self.__nodeSet_.issuperset(right.nodes):
        # OPTIMIZE: There must be a way to do this without iterating over every value of table
        selfAxes = [self.nodes.index(node) for node in right.nodes]
        rightAxes = range(right.nDims)
        for seq in Utilities.sequence_generator(right.dims):
            selfIndex = self.generate_index(seq, selfAxes)
            # OPTIMIZE: Could index right.table directly, but this upholds our abstraction barrier
            rightIndex = right.generate_index(seq, rightAxes)
            self[selfIndex] *= right[rightIndex]
    else:
        # If potential will be over a different set of variables after
        # multiplication, might as well use full __mul__ version, which copies.
        self = self.__mul__(right)
    return self
def __init__(self, zone, crimes, longitude=10, latitude=10, num_agents=2, training=False, beta=-5, beta_3=-1):
    self.width = longitude
    self.height = latitude
    self.num_agents = num_agents
    self.beta = beta
    self.zone = zone
    self.beta_3 = beta_3
    self.grid = -1 * np.ones(shape=(self.height, self.width), dtype=int)
    self.done = False
    self.agents = np.empty([self.num_agents], dtype=GridWorld)
    #print(crimes.head())
    (self.all_reward_states, self.hist_lat, self.hist_long) = ut.lat_long_to_grid(
        crimes["latitude"], crimes["longitude"], self.width, self.height)
    print(self.all_reward_states)
    self.reward_states = {}
    self.max_iter = 500
    x = datetime.datetime.now()
    now = str(x)[0:10]
    self.now = now
    self.time = datetime.datetime.strptime(now, '%Y-%m-%d')
    self.visited_states = []
    self.k_coverage = 50 * self.num_agents   # last k steps to calculate coverage
    self.all_states = self.createAllPossibleIndex(self.height, self.width)
    self.reward_frequncy = 0.005 * self.num_agents
    self.reward_parameter = 11
    self.route = {}
def AddElim(self, Elim):
    '''
    DESCRIPTION:
        This function adds the elimination limits: values above 'ElimOver'
        or below 'ElimLow' will be discarded for the given labels.
    _____________________________________________________________
    INPUT:
        :param Elim: A dict with the values to be eliminated above and below.
                     Ex: Elim = {'ElimOver':{'RSC':3000}, 'ElimLow':{'TC':-1}}
    '''
    FlagKeyOver = True
    FlagKeyLow = True
    try:
        ElimLabOver = list(Elim['ElimOver'])
    except KeyError:
        FlagKeyOver = False
    except TypeError:
        FlagKeyOver = False
    try:
        ElimLabLow = list(Elim['ElimLow'])
    except KeyError:
        FlagKeyLow = False
    except TypeError:
        FlagKeyLow = False
    if not (FlagKeyOver) and not (FlagKeyLow):
        r = utl.ShowError('AddElim', 'OpenWundergrounds',
                          'No elimination was added, review the Elim parameter')
        raise KeyError
    if FlagKeyOver:
        for Lab in ElimLabOver:
            self.ElimOver[Lab] = Elim['ElimOver'][Lab]
    if FlagKeyLow:
        for Lab in ElimLabLow:
            self.ElimLow[Lab] = Elim['ElimLow'][Lab]
class wine_regression:
    func = Utilities()
    wine_data = pd.read_csv('wine_quality_train.csv')   # 1300 * 12
    test_data = pd.read_csv('wine_quality_test.csv')    # 299 * 12
    features = ['fixed acidity', 'volatile acidity', 'citric acid', 'residual sugar',
                'chlorides', 'free sulfur dioxide', 'total sulfur dioxide', 'density',
                'pH', 'sulphates', 'alcohol']   # 11 features
    f = np.array(wine_data[features])
    t = np.array(test_data[features])
    Y = np.array(wine_data['quality'])
    TY = np.array(test_data['quality'])
    m = len(f)
    x0 = np.ones(m)
    X = np.insert(f, 0, values=x0, axis=1)
    C = np.zeros(12)
    alpha = 0.0003
    test_len = len(t)
    tx0 = np.ones(test_len)
    TX = np.insert(t, 0, values=tx0, axis=1)
    initial_cost = func.cost_function(X, Y, C)
    newC, cost_history = func.gradient_descent(X, Y, C, alpha, 1000000)
    Y_pred = TX.dot(newC)
    print(np.round(newC, decimals=4))
    print(func.rmse(TY, Y_pred))
    print(func.r2_score(TY, Y_pred))
    fig = plt.subplots(figsize=(10, 10), dpi=300)
    y_range = np.arange(len(newC[1:]))
    plt.bar(y_range, newC[1:], align='center', alpha=0.5)
    for a, b in zip(y_range, newC[1:]):
        plt.text(a, b, np.round(b, decimals=4), horizontalalignment='center')
    plt.xticks(y_range, features, rotation=30)
    plt.ylabel('Linear Regression Model Parameters')
    plt.show()
def multiSearch(robot_inst, searchCase):
    # Case 0: Search using all methods
    #   Get variables
    #   Make copy of ROI in grayscale for LBP detection
    #   Make copy of ROI and apply mask for blob detection
    #   Search for robot
    #   Save coords and export
    if searchCase == 0:
        mask = robot_inst.mask
        cascadeList = robot_inst.cascades
        hsvROI = cv2.cvtColor(robot_inst.ROI, cv2.COLOR_BGR2HSV)
        grayROI = cv2.cvtColor(robot_inst.ROI, cv2.COLOR_BGR2GRAY)
        casLen = len(cascadeList)
        # One row per cascade, plus one extra row for the blob-detector result.
        relativeCoords = np.zeros((casLen + 1, 4))
        for i in range(casLen):
            relativeCoords[i] = Utilities.get_Loc(cascadeList[i])
        relativeCoords[casLen] = robot_inst.detector.detect()
    return
def orientation(self):
    """
    Prompt user for orientation of app's top level browsing contexts.
    Default is 'any'.
    """
    self.details['orientation'] = input(
        "Display Orientation " + str(Utilities.on(["M"], self.args)) +
        " (" + Colors.OKBLUE + "any" + Colors.ENDC + "): ")
    if self.details['orientation'].lower() == "any" \
            or self.details['orientation'].lower() == "natural" \
            or self.details['orientation'].lower() == "landscape" \
            or self.details['orientation'].lower() == "landscape-primary" \
            or self.details['orientation'].lower() == "landscape-secondary" \
            or self.details['orientation'].lower() == "portrait" \
            or self.details['orientation'].lower() == "portrait-primary" \
            or self.details['orientation'].lower() == "portrait-secondary":
        self.details['orientation'] = self.details['orientation'].lower()
    elif self.details['orientation'] == '':
        self.details['orientation'] = "any"
    else:
        print("Please enter one of the options from https://developer.mozilla.org/en-US/docs/Web/Manifest#orientation")
        self.orientation()
class OverdueProcess:
    #===========================================================================
    def __init__(self):
        self.ticketModel = TicketModel()
        self.util = Utilities()

    #===========================================================================
    def startTaskOverdue(self):
        try:
            # get tickets from database
            tuples = self.ticketModel.getTuplesOverdue()
            if (len(tuples) > 0):
                for tuple in tuples:
                    ticket = self.util.createTicketFromTuple(tuple)
                    if (datetime.now() >= ticket['dueDate']):
                        self.ticketModel.markTicketAsOverdue(ticket)
                        print('ticket ' + str(ticket['ticketId']) + ' is overdue')
        except:
            raise
class ShapeVisualizer(tk.Frame):
    def __init__(self, root):
        tk.Frame.__init__(self, root)
        self.root = root
        self.utilities = Utilities()
        self.init_components()
        self.root.minsize(640, 480)

    def init_components(self):
        entries = self.utilities.list_corpus('train/')
        # sorted() returns a new list; keep the result so the combobox entries are ordered.
        entries = sorted(entries, key=lambda str: str.split('/')[-1].split('.')[0])
        l1 = ttk.Label(self.root, text='Seleccione una Figura =>', font=('Verdana', 14))
        l1.grid(column=0, row=0)
        widthc, heightc = 1024, 768
        self.comboBoxShape = ttk.Combobox(self.root, values=entries, font=('Verdana', 14))
        self.comboBoxShape.grid(column=1, row=0, columnspan=3)
        self.comboBoxShape.bind('<<ComboboxSelected>>', self.comboShapeSelected)
        self.canvas = tk.Canvas(self.root, width=widthc, height=heightc)
        self.canvas.grid(column=0, row=1, columnspan=3)
        self.canvas.configure(background='white')
        self.photo = ImageTk.PhotoImage(Image.open('Logo-GIIATa-small.png'))
        self.img = self.canvas.create_image(widthc / 2, heightc / 2, anchor=tk.CENTER, image=self.photo)

    def comboShapeSelected(self, event):
        print(self.comboBoxShape.get())
        image = Image.open(self.comboBoxShape.get())
        self.photo = ImageTk.PhotoImage(image)
        self.canvas.itemconfig(self.img, image=self.photo)
def AskData(cls, Start=0, End=None):
    """
    Ask the user for data (a subset of UserDataList), where each entry is a
    dictionary of metadata describing the prompt, and return a list of results.
    """
    ListSubset = cls.UserDataList[Start:] if End is None else cls.UserDataList[Start:End + 1]
    ResultList = []
    for Data in ListSubset:
        Result = Util.GetUserInput(
            Data["Message"], Data["ValueType"], Data["Minimum"],
            Data["Maximum"], Data["PossibleValues"], Data["DefaultValue"])
        ResultList.append(Result)
    return ResultList
def SaveDictJson(Data, Pathout='', Name='Dictionary'):
    '''
    DESCRIPTION:
        This function saves a dictionary to a JSON file.
    _______________________________________________________________________
    INPUT:
        :param Data:    A dict, dictionary with the data.
        :param Pathout: A str, saving directory.
        :param Name:    A str, file name without extension.
    _______________________________________________________________________
    OUTPUT:
        Saves a JSON file with the dictionary.
    '''
    # Create folder
    utl.CrFolder(Pathout)
    with open(Pathout + Name + '.json', 'w') as f:
        json.dump(Data, f)
    return
def process(self, pattern, outputFileName, refFileName=None, hasHeader=None, chrNameList=None):
    chrList = range(1, 23) + ['X', 'Y', 'MT', 'M']
    if refFileName and not self.__isRefFileForHuman(refFileName):
        chrList = self.__getChrListFromRefFile(refFileName)
    if chrNameList:
        chrList = chrNameList
    fileList = self.__getFileListFromPatternAndChrList(pattern, chrList)
    if not fileList and chrList and chrList[0][:3] == 'chr':
        fileList = self.__getFileListFromPatternAndChrList(
            pattern, [chrName[3:] for chrName in chrList])
    # os.system('cat %s > %s' % (' '.join(fileList), outputFileName))
    toCompress = False
    if outputFileName[-3:] == '.gz':
        outputFileName = '.'.join(outputFileName.split('.')[:-1])
        toCompress = True
    if hasHeader:
        cmd = 'head -1 %s > %s' % (fileList[0], outputFileName)
        if os.path.basename(fileList[0]).split('.')[-1] == 'gz':
            cmd = 'zcat %s | head -1 > %s' % (fileList[0], outputFileName)
        Utilities.mySystem(cmd)
    for fileName in fileList:
        if os.path.basename(fileName).split('.')[-1] == 'gz':
            cmd = 'zcat'
            if hasHeader:
                cmd = "zcat %s | sed -n '1!p' >> %s" % (fileName, outputFileName)
            else:
                cmd += ' %s >> %s' % (fileName, outputFileName)
        else:
            cmd = 'cat'
            if hasHeader:
                cmd = "sed -n '1!p'"
            cmd += ' %s >> %s' % (fileName, outputFileName)
        Utilities.mySystem(cmd)
    if toCompress:
        Utilities.mySystem('gzip %s' % outputFileName)
from Utilities import Utilities
import urllib2
import json
import requests
from flask import redirect

util = Utilities()
mongoDB = util.mongoDBConnect()


class UserProfile:
    def __init__(self):
        pass

    def buildUserProfileFromFB(self, token):
        userProfileResponse = urllib2.urlopen(
            "https://graph.facebook.com/me?access_token=" + token).read()
        user = json.loads(userProfileResponse)
        userPictureResponse = requests.get('https://graph.facebook.com/' + str(user['id']) + '/picture?type=normal')
        preparedLinkUsername = ''.join(e for e in user['name'] if e.isalnum())
        profile = {
            "username": user['id'],
            # "usernameurl": preparedLinkUsername.lower(),
            "facebookid": "",
            "name": user['name'],
            "picture": str(userPictureResponse.url)
def processSingleLevel(pathOut, inFilePath, calibrationMap, level):
    # Find the absolute path to the output directory
    pathOut = os.path.abspath(pathOut)

    # inFilePath is a singleton file complete with path
    inFilePath = os.path.abspath(inFilePath)

    # (inpath, inFileName) = os.path.split(inFilePath)
    inFileName = os.path.split(inFilePath)[1]

    # Grab input name and extension
    fileName, extension = os.path.splitext(inFileName)  #[0]

    # Initialize the Utility logger, overwriting it if necessary
    if ConfigFile.settings["bL2Stations"] == 1 and level == 'L2':
        os.environ["LOGFILE"] = f'Stations_{fileName}_{level}.log'
    else:
        os.environ["LOGFILE"] = (fileName + '_' + level + '.log')
    msg = "Process Single Level"
    print(msg)
    Utilities.writeLogFile(msg, mode='w')  # <<---- Logging initiated here

    # If this is an HDF, assume it is not RAW, drop the level from fileName
    if extension == '.hdf':
        fileName = fileName.rsplit('_', 1)[0]

    # Check for base output directory
    if os.path.isdir(pathOut):
        pathOutLevel = os.path.join(pathOut, level)
    else:
        msg = "Bad output destination. Select new Output Data Directory."
        print(msg)
        Utilities.writeLogFile(msg)
        return False

    # Add output level directory if necessary
    if os.path.isdir(pathOutLevel) is False:
        os.mkdir(pathOutLevel)

    outFilePath = os.path.join(pathOutLevel, fileName + "_" + level + ".hdf")

    if level == "L1A" or level == "L1AQC" or level == "L1B" or level == "L1BQC":

        if level == "L1A":
            root = Controller.processL1a(inFilePath, outFilePath, calibrationMap)

        elif level == "L1AQC":
            ancillaryData = Controller.processAncData(MainConfig.settings["metFile"])

            # If called locally from Controller and not AnomalyDetection.py, then
            # try to load the parameter file for this cruise/configuration and update
            # ConfigFile.settings to reflect the appropriate parameterizations for this
            # particular file. If no parameter file exists, or this SAS file is not in it,
            # then fall back on the default ConfigFile.settings.
            anomAnalFileName = os.path.splitext(ConfigFile.filename)[0]
            anomAnalFileName = anomAnalFileName + '_anoms.csv'
            fp = os.path.join('Config', anomAnalFileName)
            if os.path.exists(fp):
                msg = 'Deglitching anomaly file found for this L1AQC. Using these parameters.'
                print(msg)
                Utilities.writeLogFile(msg)
                params = Utilities.readAnomAnalFile(fp)
                # If a parameterization has been saved in the AnomAnalFile, set the
                # properties in the local object for all sensors
                l1aqcfileName = fileName + '_L1AQC'
                if l1aqcfileName in params.keys():
                    ref = 0
                    for sensor in ['ES', 'LI', 'LT']:
                        print(f'{sensor}: Setting ConfigFile.settings to match saved parameterization. ')
                        ConfigFile.settings[f'fL1aqc{sensor}WindowDark'] = params[l1aqcfileName][ref + 0]
                        ConfigFile.settings[f'fL1aqc{sensor}WindowLight'] = params[l1aqcfileName][ref + 1]
                        ConfigFile.settings[f'fL1aqc{sensor}SigmaDark'] = params[l1aqcfileName][ref + 2]
                        ConfigFile.settings[f'fL1aqc{sensor}SigmaLight'] = params[l1aqcfileName][ref + 3]
                        ConfigFile.settings[f'fL1aqc{sensor}MinDark'] = params[l1aqcfileName][ref + 4]
                        ConfigFile.settings[f'fL1aqc{sensor}MaxDark'] = params[l1aqcfileName][ref + 5]
                        ConfigFile.settings[f'fL1aqc{sensor}MinMaxBandDark'] = params[l1aqcfileName][ref + 6]
                        ConfigFile.settings[f'fL1aqc{sensor}MinLight'] = params[l1aqcfileName][ref + 7]
                        ConfigFile.settings[f'fL1aqc{sensor}MaxLight'] = params[l1aqcfileName][ref + 8]
                        ConfigFile.settings[f'fL1aqc{sensor}MinMaxBandLight'] = params[l1aqcfileName][ref + 9]
                        ref += 10
                else:
                    msg = 'This file not found in parameter file. Resorting to values in ConfigFile.settings.'
                    print(msg)
                    Utilities.writeLogFile(msg)
            else:
                msg = 'No deglitching parameter file found. Resorting to default values. NOT RECOMMENDED. RUN ANOMALY ANALYSIS.'
                print(msg)
                Utilities.writeLogFile(msg)

            root = Controller.processL1aqc(inFilePath, outFilePath, calibrationMap, ancillaryData)

        elif level == "L1B":
            root = Controller.processL1b(inFilePath, outFilePath)

        elif level == "L1BQC":
            root = Controller.processL1bqc(inFilePath, outFilePath)

        # Confirm output file creation
        if os.path.isfile(outFilePath):
            modTime = os.path.getmtime(outFilePath)
            nowTime = datetime.datetime.now()
            if nowTime.timestamp() - modTime < 60:  # If the file exists and was created in the last minute...
                msg = f'{level} file produced: \n {outFilePath}'
                print(msg)
                Utilities.writeLogFile(msg)

    elif level == "L2":
        # Ancillary data from metadata have been read in at L1C,
        # and will be extracted from the ANCILLARY_METADATA group later
        root, outFilePath = Controller.processL2(inFilePath, outFilePath)

        if os.path.isfile(outFilePath):
            # Ensure that the L2 on file is recent before continuing with
            # SeaBASS files or reports
            modTime = os.path.getmtime(outFilePath)
            nowTime = datetime.datetime.now()
            if nowTime.timestamp() - modTime < 60:
                msg = f'{level} file produced: \n{outFilePath}'
                print(msg)
                Utilities.writeLogFile(msg)

                # Write SeaBASS
                if int(ConfigFile.settings["bL2SaveSeaBASS"]) == 1:
                    msg = f'Output SeaBASS for HDF: \n{outFilePath}'
                    print(msg)
                    Utilities.writeLogFile(msg)
                    SeaBASSWriter.outputTXT_Type2(outFilePath)
                # return True

    # Exempt station writing from reports (So as not to overwrite normal file reports...?)
    if root is None and ConfigFile.settings["bL2Stations"] == 1:
        print('No report written due to Station search, but root is None. Processing failed.')
        return False

    # If the process failed at any level, write a report and return
    if root is None and ConfigFile.settings["bL2Stations"] == 0:
        if ConfigFile.settings["bL2WriteReport"] == 1:
            Controller.writeReport(fileName, pathOut, outFilePath, level, inFilePath)
        return False

    # If L2 successful, write a report
    if level == "L2":
        if ConfigFile.settings["bL2WriteReport"] == 1:
            Controller.writeReport(fileName, pathOut, outFilePath, level, inFilePath)

    msg = f'Process Single Level: {outFilePath} - SUCCESSFUL'
    print(msg)
    Utilities.writeLogFile(msg)

    return True
def processL2(inFilePath, outFilePath):
    root = None
    if not os.path.isfile(inFilePath):
        print('No such input file: ' + inFilePath)
        return None, outFilePath

    # Process the data
    msg = ("ProcessL2: " + inFilePath)
    print(msg)
    Utilities.writeLogFile(msg)
    try:
        root = HDFRoot.readHDF5(inFilePath)
    except:
        msg = "Unable to open file. May be open in another application."
        Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None, outFilePath

    root = ProcessL2.processL2(root)

    outPath, filename = os.path.split(outFilePath)
    if root is not None:
        if ConfigFile.settings["bL2Stations"]:
            station = np.unique(
                root.getGroup("ANCILLARY").getDataset("STATION").columns["STATION"]).tolist()
            station = str(round(station[0] * 100) / 100)
            filename = f'STATION_{station}_{filename}'
            outFilePath = os.path.join(outPath, filename)

        # Create Plots
        # Radiometry
        if ConfigFile.settings['bL2PlotRrs'] == 1:
            Utilities.plotRadiometry(root, filename, rType='Rrs', plotDelta=True)
        if ConfigFile.settings['bL2PlotnLw'] == 1:
            Utilities.plotRadiometry(root, filename, rType='nLw', plotDelta=True)
        if ConfigFile.settings['bL2PlotEs'] == 1:
            Utilities.plotRadiometry(root, filename, rType='ES', plotDelta=True)
        if ConfigFile.settings['bL2PlotLi'] == 1:
            Utilities.plotRadiometry(root, filename, rType='LI', plotDelta=True)
        if ConfigFile.settings['bL2PlotLt'] == 1:
            Utilities.plotRadiometry(root, filename, rType='LT', plotDelta=True)

        # IOPs
        # These three should plot GIOP and QAA together (eventually, once GIOP is complete)
        if ConfigFile.products["bL2ProdadgQaa"]:
            Utilities.plotIOPs(root, filename, algorithm='qaa', iopType='adg', plotDelta=False)
        if ConfigFile.products["bL2ProdaphQaa"]:
            Utilities.plotIOPs(root, filename, algorithm='qaa', iopType='aph', plotDelta=False)
        if ConfigFile.products["bL2ProdbbpQaa"]:
            Utilities.plotIOPs(root, filename, algorithm='qaa', iopType='bbp', plotDelta=False)

        # This puts ag, Sg, and DOC on the same plot
        if ConfigFile.products["bL2Prodgocad"] and ConfigFile.products["bL2ProdSg"] \
                and ConfigFile.products["bL2Prodag"] and ConfigFile.products["bL2ProdDOC"]:
            Utilities.plotIOPs(root, filename, algorithm='gocad', iopType='ag', plotDelta=False)

    # Write output file
    if root is not None:
        try:
            root.writeHDF5(outFilePath)
            return root, outFilePath
        except:
            msg = "Unable to write file. May be open in another application."
            Utilities.errorWindow("File Error", msg)
            print(msg)
            Utilities.writeLogFile(msg)
            return None, outFilePath
    else:
        msg = "L2 processing failed. Nothing to output."
        if MainConfig.settings["popQuery"] == 0:
            Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None, outFilePath
def MALR(self, slope, intercept, Zmax=7000, Zmin=1000, LCLp=1800, FlagGraph=False, PathImg='', NameArch='', Name=''):
    '''
    DESCRIPTION:
        Script to find the moist adiabatic lapse rate from the given hourly
        data; it is computed for every hour, producing a separate graph per
        hour. This script uses the equations described by Del Genio (n.d.)
        in his thermodynamics course at Columbia University, which were
        programmed by Daniel Ruiz in the file
        'Claro River Profiles Final Version.xls'. For more information see
        the document 'Summary of key equations.docx', which contains a
        summary, written by Daniel Ruiz, of what Del Genio (n.d.) presents
        in the lecture notes.

        The lifting condensation level (LCL) is assumed from previous
        estimates; according to Cuevas (2015) the LCL lies between
        2190 m (dry) and 2140 m (wet) for a region in the PNN Los Nevados.
        Previous estimates by Daniel Ruiz (personal suggestion) place the
        value at 1800 masl; a deeper study is needed to establish where this
        point actually lies.
    _________________________________________________________________________
    INPUT:
        + slope: Slope for the temperature regression.
    _________________________________________________________________________
    OUTPUT:
        -
    '''
    # This section sets and computes some of the constants. These constants
    # are given in the file 'Claro River Profiles Final Version.xls', sheet
    # 'Constants', prepared by professor Daniel Ruiz.

    # Saturation vapour pressure at 0°C
    e_s0 = 6.11          # mb
    e_sp0 = e_s0*100     # Pa
    # Latent heat. This value is at 0°C and can be assumed constant.
    L = 2.5*10**6        # J/kg H_2O
    # Gas constant for water vapour
    R_v = 461            # J/K/kg
    # Temperature
    T_0 = 273.15         # K
    T = 288.15           # K
    Tc = T-T_0           # °C
    # Saturation water vapour pressure at 15°C
    e_s = e_sp0*np.exp((L/R_v)*((1/T_0)-(1/T)))   # Pa
    e_sh = e_s/100       # hPa
    # Epsilon (to check: what is this?)
    epsilon = 0.622      # R_d/R_v
    # Pressure (which pressure?)
    p = 80000            # Pa
    # Saturated mixing ratio
    W_s = epsilon*(e_s/p)
    # Gas constant for dry air
    R_d = 287            # J/K/kg
    DivCR = 3.5          # C_p/R_d
    # Specific heat for dry air at constant pressure
    C_p = R_d*DivCR      # J/K/kg
    # Dry adiabatic lapse rate
    Gamma_d = 9.8        # K/km
    # Moist adiabatic lapse rate
    Gamma_m = Gamma_d*((1+((L*W_s)/(R_d*T)))/((1+(((L**2)*W_s)/(C_p*R_v*(T**2))))))
    # Deltas
    PF = 273.15          # K
    DT = T-PF            # K
    DZ = DT/Gamma_m      # km
    # Additional data
    p_0 = 1013*100       # Pa
    H_s = 8631           # m
    z = -H_s*np.log(p/p_0)   # m
    # Freezing level
    FL = (DZ*1000) + z   # m
    # Assumed freezing level
    AFL = 4900           # m
    ADZ = (AFL-z)/1000   # km
    # Check what Tao is!!
    Tao = DT/ADZ         # K/km
    # R_d/C_p
    DivRC = R_d/C_p
    # Pressure 0
    P_00 = 1000          # hPa - mb
    # ------------------------------------------------------------
    # Build the height vector
    Al = np.arange(Zmin, Zmax+50, 50)   # Height vector every 50 metres
    Als = len(Al)                       # Vector size
    Headers = ['Mean Annual T [°C]', 'Mean Annual T [K]', 'e_s (T) [Pa]',
               'Atmospheric pressure [mbar]', 'Atmospheric pressure [Pa]',
               'W_s', 'Gamma_m [K/km]', 'Gamma_d [K/km]',
               'Profile T [K]-LCL='+str(LCLp), 'Profile T dry']
    Hes = len(Headers)
    # Build the matrix with all the values
    TM = np.zeros((Als, Hes))
    # Temperature from a previously fitted regression
    TM[:, 0] = slope*Al+intercept
    # Find the first value for which T <= 0
    x_0 = np.where(TM[:, 0] <= 0)[0]
    Al_0 = Al[x_0[0]]
    # Convert the temperature to Kelvin
    TM[:, 1] = TM[:, 0]+273.15
    # Initialise the vertical temperature profile
    TM[0, -2] = TM[0, 1]
    TM[0, -1] = TM[0, 1]
    # Compute the remaining values
    for ii, i in enumerate(Al):
        # Vapour pressure e_s [Pa]
        TM[ii, 2] = e_sp0*np.exp((L/R_v)*((1/T_0)-(1/TM[ii, 1])))
        # Atmospheric pressure [mbar] -> could be replaced with data!!
        # Computed with the hydrostatic equation p = 1009.28*exp(-z/H),
        # where H: scale height = 8631.
        TM[ii, 3] = 1009.28*np.exp(-i/H_s)
        # Atmospheric pressure [Pa]
        TM[ii, 4] = TM[ii, 3]*100
        # Mixing ratio W_s
        TM[ii, 5] = epsilon*(TM[ii, 2]/TM[ii, 4])
        # Moist adiabatic lapse rate Gamma_m
        TM[ii, 6] = Gamma_d*((1+((L*TM[ii, 5])/(R_d*TM[ii, 1])))/((1+(((L**2)*TM[ii, 5])/(C_p*R_v*(TM[ii, 1]**2))))))
        # Dry adiabatic lapse rate
        TM[ii, 7] = Gamma_d
        # Build the vertical temperature profile
        if ii > 0:
            # Vertical temperature profile [K]
            if i <= LCLp:
                # Dry adiabatic profile
                TM[ii, 8] = TM[ii-1, 8]-TM[ii, 7]*((i-Al[ii-1])/1000)
            else:
                # Moist adiabatic profile
                TM[ii, 8] = TM[ii-1, 8]-((TM[ii-1, 6]+TM[ii, 6])/2)*((i-Al[ii-1])/1000)
            # Dry adiabatic lapse rate profile
            TM[ii, 9] = TM[ii-1, 9]-TM[ii, 7]*((i-Al[ii-1])/1000)
    # Make the graph
    if FlagGraph:
        # Create the folder where the output is saved
        utl.CrFolder(PathImg)
        # Arrange the values for plotting
        x = np.where(Al <= LCLp)[0]
        xx = np.where(Al > LCLp)[0]
        # Plot parameters
        fH = 20
        fV = fH*(2/3)
        minorLocatorx = MultipleLocator(1)
        minorLocatory = MultipleLocator(100)
        F = plt.figure(figsize=utl.cm2inch(fH, fV))
        plt.rcParams.update({'font.size': 14, 'font.family': 'sans-serif',
                             'font.sans-serif': 'Arial'})
        plt.plot(TM[:, 0], Al, 'k-', label='Gradiente Ambiental')
        plt.plot(TM[x, 8]-273.15, Al[x], 'r--', label='Gradiente Adiabático Seco')
        plt.plot(TM[:, 9]-273.15, Al, 'r--')
        plt.plot(TM[xx, 8]-273.15, Al[xx], 'b--', label='Gradiente Adiabático Húmedo')
        plt.legend(loc=0, fontsize=12)
        plt.title(Name, fontsize=16)                     # Plot title
        plt.xlabel(u'Temperatura [°C]', fontsize=14)     # x-axis label
        plt.ylabel('Altura [msnm]', fontsize=14)         # y-axis label
        plt.gca().set_ylim([900, 4500])
        plt.gca().xaxis.set_minor_locator(minorLocatorx)
        plt.gca().yaxis.set_minor_locator(minorLocatory)
        plt.xlim([-5, 40])
        plt.savefig(PathImg + NameArch + '.png', format='png', dpi=200)
        plt.close('all')
    # Return the results
    return Al, TM
def get_listOfQueues(self, project):
    url = "/" + project + '/_apis/distributedtask/queues'
    return Utilities.getRequest(url)
def readRawFile(filepath, calibrationMap, contextMap, root):
    posframe = 1

    # Note: Prosoft adds posframe=1 to the GPS for some reason
    # print(contextMap.keys())
    #gpsGroup = contextMap["$GPRMC"]
    #ds = gpsGroup.getDataset("POSFRAME")
    #ds.appendColumn(u"COUNT", posframe)
    posframe += 1

    with open(filepath, 'rb') as f:
        while 1:
            # Reads binary file to find message frame tag
            pos = f.tell()
            b = f.read(RawFileReader.MAX_TAG_READ)
            f.seek(pos)

            if not b:
                break

            #print b
            for i in range(0, RawFileReader.MAX_TAG_READ):
                testString = b[i:].upper()
                #print("test: ", testString[:6])

                # Reset file position on max read
                if i == RawFileReader.MAX_TAG_READ - 1:
                    #f.read(RawFileReader.MAX_TAG_READ)
                    f.read(RawFileReader.RESET_TAG_READ)
                    break

                # Detects message type from frame tag
                if testString.startswith(b"SATHDR"):
                    #print("SATHDR")
                    if i > 0:
                        f.read(i)
                    hdr = f.read(RawFileReader.SATHDR_READ)
                    (k, v) = RawFileReader.readSATHDR(hdr)
                    root.attributes[k] = v
                    break
                else:
                    num = 0
                    for key in calibrationMap:
                        cf = calibrationMap[key]
                        if testString.startswith(cf.id.upper().encode("utf-8")):
                            if i > 0:
                                f.read(i)
                            pos = f.tell()
                            msg = f.read(RawFileReader.MAX_BLOCK_READ)
                            f.seek(pos)
                            gp = contextMap[cf.id]
                            # Only the first time through
                            if len(gp.attributes) == 0:
                                #gp.id += "_" + cf.id
                                gp.id = key
                                gp.attributes["CalFileName"] = key
                                gp.attributes["FrameTag"] = cf.id
                            # if key.startswith('SATPYR'):
                            #     print('curious...')
                            try:
                                num = cf.convertRaw(msg, gp)
                            except:
                                pmsg = f'Unable to convert the following raw message: {msg}'
                                print(pmsg)
                                Utilities.writeLogFile(pmsg)
                            if num >= 0:
                                # Generate POSFRAME
                                ds = gp.getDataset("POSFRAME")
                                if ds is None:
                                    ds = gp.addDataset("POSFRAME")
                                ds.appendColumn(u"COUNT", posframe)
                                posframe += 1
                                f.read(num)
                            break
                    if num > 0:
                        break
def testMovingAverage(self):
    array = [1, 2, 3, 4]
    average = Utilities.movingAverage(array, 1)
    self.assertEqual(average, [1.5, 2, 3, 3.5])
def writeReport(fileName, pathOut, outFilePath, level, inFilePath):
    print('Writing PDF Report...')
    numLevelDict = {'L1A': 1, 'L1AQC': 2, 'L1B': 3, 'L1BQC': 4, 'L2': 5}
    numLevel = numLevelDict[level]
    fp = os.path.join(pathOut, level, f'{fileName}_{level}.hdf')

    # Reports are written during failure at any level or success at L2.
    # The highest level successfully processed will have the correct configurations in the HDF attributes.
    # Try to open current level. If this fails, open the previous level and use all the parameters
    # from the attributes up to that level, then use the ConfigFile.settings for the current level parameters.
    try:
        # Processing successful at this level
        root = HDFRoot.readHDF5(fp)
        fail = 0
        root.attributes['Fail'] = 0
    except:
        fail = 1
        # Processing failed at this level. Open the level below it
        # This won't work for ProcessL1A looking back for RAW...
        if level != 'L1A':
            try:
                # Processing successful at the next lower level
                # Shift from the output to the input directory
                root = HDFRoot.readHDF5(inFilePath)
            except:
                msg = "Controller.writeReport: Unable to open HDF file. May be open in another application."
                Utilities.errorWindow("File Error", msg)
                print(msg)
                Utilities.writeLogFile(msg)
                return
        else:
            # Create a root with nothing but the fail flag in the attributes to pass to PDF reporting
            # PDF will contain parameters from ConfigFile.settings
            root = HDFRoot()
            root.id = "/"
            root.attributes["HYPERINSPACE"] = MainConfig.settings["version"]
            root.attributes['TIME-STAMP'] = 'Null'  # Collection time not preserved in failed RAW>L1A
            root.attributes['Fail'] = 1

    timeStamp = root.attributes['TIME-STAMP']
    title = f'File: {fileName} Collected: {timeStamp}'

    # Reports
    reportPath = os.path.join(pathOut, 'Reports')
    if os.path.isdir(reportPath) is False:
        os.mkdir(reportPath)
    dirPath = os.getcwd()
    inLogPath = os.path.join(dirPath, 'Logs')

    inPlotPath = os.path.join(pathOut, 'Plots')
    # The inPlotPath is going to be different for L1A-L1E than L2 for many cruises...
    # In that case, move up one directory
    if os.path.isdir(os.path.join(inPlotPath, 'L1AQC_Anoms')) is False:
        inPlotPath = os.path.join(pathOut, '..', 'Plots')
    outHDF = os.path.split(outFilePath)[1]

    if fail:
        outPDF = os.path.join(reportPath, f'{os.path.splitext(outHDF)[0]}_fail.pdf')
    else:
        outPDF = os.path.join(reportPath, f'{os.path.splitext(outHDF)[0]}.pdf')

    pdf = PDF()
    pdf.set_title(title)
    pdf.set_author(f'HyperInSPACE_{MainConfig.settings["version"]}')

    inLog = os.path.join(inLogPath, f'{fileName}_L1A.log')
    if os.path.isfile(inLog):
        print('Level 1A')
        pdf.print_chapter('L1A', 'Process RAW to L1A', inLog, inPlotPath, fileName, outFilePath, root)

    if numLevel > 1:
        print('Level 1AQC')
        inLog = os.path.join(inLogPath, f'{fileName}_L1A_L1AQC.log')
        if os.path.isfile(inLog):
            pdf.print_chapter('L1AQC', 'Process L1A to L1AQC', inLog, inPlotPath, fileName, outFilePath, root)

    if numLevel > 2:
        print('Level 1B')
        inLog = os.path.join(inLogPath, f'{fileName}_L1AQC_L1B.log')
        if os.path.isfile(inLog):
            pdf.print_chapter('L1B', 'Process L1AQC to L1B', inLog, inPlotPath, fileName, outFilePath, root)

    if numLevel > 3:
        print('Level 1BQC')
        inLog = os.path.join(inLogPath, f'{fileName}_L1B_L1BQC.log')
        if os.path.isfile(inLog):
            pdf.print_chapter('L1BQC', 'Process L1B to L1BQC', inLog, inPlotPath, fileName, outFilePath, root)

    if numLevel > 4:
        print('Level 2')
        # For L2, reset Plot directory
        inPlotPath = os.path.join(pathOut, 'Plots')
        inLog = os.path.join(inLogPath, f'{fileName}_L1BQC_L2.log')
        if os.path.isfile(inLog):
            pdf.print_chapter('L2', 'Process L1BQC to L2', inLog, inPlotPath, fileName, outFilePath, root)

    try:
        pdf.output(outPDF, 'F')
    except:
        msg = 'Unable to write the PDF file. It may be open in another program.'
        Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
def __init__(self):
    self.ticketModel = TicketModel()
    self.util = Utilities()
def testTotalChange(self):
    array = [{"a": 1}, {"a": 2}, {"a": 0.5}, {"a": 3}]
    change = Utilities.totalChange(array, lambda e: e["a"])
    self.assertEqual(change, 5)
import tweepy

from auth import auth, verify_credentials
from Utilities import Utilities, Search, Status
from stream import Stream_Lisitner

# create API object and set options
api = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)

# set stream
tweets_listner = Stream_Lisitner(api)
stream = tweepy.Stream(api.auth, tweets_listner)

search_tags = Utilities()
search_tags.read_in_resources('resources/status_info.txt')
print(search_tags.read_dict.keys())
search_tag_dict = search_tags.read_dict
stream.filter(track=[search_tag_dict.get('status_tags')],
              languages=[search_tag_dict.get('status_languages')])

# unpack search_req.txt
search_req = Utilities()
search_req.read_in_resources("resources/search_req.txt")
search_req_dict = search_req.read_dict
search = Search(search_req_dict)

# set search criteria
result = api.search(q=search.query_string, lang=search.lang, count=search.count)
for tweet in result:
    search.search_results.append(tweet)
instead_use_lemmanization = False

print("Bot-> I have 2 modes in which I can run")
print("Bot-> 1) Data Specific questions - Here you can choose the category of the data")
print("Bot-> 2) Overall questions - Here you ask the questions and I will decide which category it belongs to")
print("Bot-> Please Choose :- ", end="")

specific = False
databasename = ""
possible_db_name = os.listdir('../dataset')
ut = UT()


def properInput(x):
    global specific
    if x == '1' or x == '2':
        if x == '1':
            specific = True
        return True
    else:
        print("Please choose a proper input - Either 1 or 2 - ", end="")
        return False


def properDatabaseName(x, possible_db_name):
    global databasename
    # serialize weights to HDF5
    model.save_weights(
        config.get("model", "dir") + "/" +
        config.get("model", "name") + "W_{0}.h5".format(epoch))
    print("Saved model to disk")
    return


if __name__ == "__main__":
    # Specify number of particles to use and number of features
    nParticles = 60
    #nFeatures=51
    nFeatures = 47

    loader = ModelLoader((nParticles, nFeatures))
    model = loader.load()
    utils = Utilities(nParticles)

    history = Histories()
    history.set_up_config(config=config)
    history.on_train_begin()

    # Build the first training dataset
    print("TRAIN_DATA: ", TRAIN_DATA)
    X_train, Y, W_train, MVA_train = utils.BuildBatch(indir=TRAIN_DATA)

    for epoch in range(100):
        pool_local = ThreadPool(processes=1)

        # Shuffle loaded datasets and begin
        inds = range(len(X_train))
        np.random.shuffle(inds)
        X_epoch, Y_epoch, W_epoch, MVA_epoch = X_train[inds], Y[inds], W_train[inds], MVA_train[inds]
import tweepy

from Utilities import Utilities
import os, sys

currentdir = os.path.dirname(os.path.realpath(__file__))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)

# with open("resources/twt_secrets.txt", "r") as s:
#     secrets = s.readlines()
#     secrets_dict = {}
#     for s in range(len(secrets)):
#         secret = secrets[s].split("=")
#         secrets_dict[secret[0]] = secret[1].strip()

secrets = Utilities()
secrets.read_in_resources("resources/twt_secrets.txt")
secrets_dict = secrets.read_dict

API_KEY = secrets_dict["API_KEY"]
API_SECRET = secrets_dict["API_SECRET"]
ACCESS_TOKEN = secrets_dict["ACCESS_TOKEN"]
ACCESS_TOKEN_SECRET = secrets_dict["ACCESS_TOKEN_SECRET"]

auth = tweepy.OAuthHandler(API_KEY, API_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)


def verify_credentials(api):
    try:
        api.verify_credentials()
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 30 09:01:40 2019

@author: chandanav
"""
from JobShopLoader import JobShopLoader
from Utilities import Utilities
from SimulatedAnnealing import SimulatedAnnealing
import matplotlib.pyplot as plt

# Initialization of the objects
jobshopLoader = JobShopLoader()
utilitiesObj = Utilities()
simulatedAnnealing = SimulatedAnnealing()

# Loading the data from the input file
jobs = jobshopLoader.load("data.txt")

# Calling the search algorithm to find the best solution for scheduling the jobs on machines
cost, solution = simulatedAnnealing.search(jobshopLoader, utilitiesObj, maxTime=20, T=200,
                                           termination=10, halting=10, mode='random', shrink=0.8)

# Display the solution
def calculateReflectance(root, node, interval, enableQualityCheck, percentLt, performNIRCorrection, \
                         rhoSky=0.0256, enableWindSpeedCalculation=1, defaultWindSpeed=0.0, windSpeedData=None):
#def calculateReflectance(esData, liData, ltData, newRrsData, newESData, newLIData, newLTData):
    print("calculateReflectance")

    referenceGroup = node.getGroup("Reference")
    sasGroup = node.getGroup("SAS")

    esData = referenceGroup.getDataset("ES_hyperspectral")
    liData = sasGroup.getDataset("LI_hyperspectral")
    ltData = sasGroup.getDataset("LT_hyperspectral")

    newReflectanceGroup = root.getGroup("Reflectance")
    newRrsData = newReflectanceGroup.addDataset("Rrs")
    newESData = newReflectanceGroup.addDataset("ES")
    newLIData = newReflectanceGroup.addDataset("LI")
    newLTData = newReflectanceGroup.addDataset("LT")

    # Copy datasets to dictionary
    esData.datasetToColumns()
    esColumns = esData.columns
    tt2 = esColumns["Timetag2"]
    #esColumns.pop("Datetag")
    #tt2 = esColumns.pop("Timetag2")

    liData.datasetToColumns()
    liColumns = liData.columns
    #liColumns.pop("Datetag")
    #liColumns.pop("Timetag2")

    ltData.datasetToColumns()
    ltColumns = ltData.columns
    #ltColumns.pop("Datetag")
    #ltColumns.pop("Timetag2")

    # remove added LATPOS/LONPOS if added
    #if "LATPOS" in esColumns:
    #    esColumns.pop("LATPOS")
    #    liColumns.pop("LATPOS")
    #    ltColumns.pop("LATPOS")
    #if "LONPOS" in esColumns:
    #    esColumns.pop("LONPOS")
    #    liColumns.pop("LONPOS")
    #    ltColumns.pop("LONPOS")

    #if Utilities.hasNan(esData):
    #    print("Found NAN 1")
    #    sys.exit(1)
    #if Utilities.hasNan(liData):
    #    print("Found NAN 2")
    #    sys.exit(1)
    #if Utilities.hasNan(ltData):
    #    print("Found NAN 3")
    #    sys.exit(1)

    esLength = len(list(esColumns.values())[0])
    ltLength = len(list(ltColumns.values())[0])

    if ltLength > esLength:
        for col in ltColumns:
            col = col[0:esLength]
        for col in liColumns:
            col = col[0:esLength]

    windSpeedColumns = None

    # interpolate wind speed to match sensor time values
    if windSpeedData is not None:
        x = windSpeedData.getColumn("TIMETAG2")[0]
        y = windSpeedData.getColumn("WINDSPEED")[0]
        new_x = esData.data["Timetag2"].tolist()
        new_y = Utilities.interp(x, y, new_x)
        windSpeedData.columns["WINDSPEED"] = new_y
        windSpeedData.columns["TIMETAG2"] = new_x
        windSpeedData.columnsToDataset()
        windSpeedColumns = new_y

    #print("items:", esColumns.values())
    #print(ltLength,resolution)
    if interval == 0:
        for i in range(0, len(tt2) - 1):
            esSlice = ProcessL4.columnToSlice(esColumns, i, i + 1)
            liSlice = ProcessL4.columnToSlice(liColumns, i, i + 1)
            ltSlice = ProcessL4.columnToSlice(ltColumns, i, i + 1)
            ProcessL4.calculateReflectance2(root, esSlice, liSlice, ltSlice, newRrsData, newESData, newLIData, newLTData, \
                                            percentLt, enableQualityCheck, performNIRCorrection, \
                                            rhoSky, enableWindSpeedCalculation, defaultWindSpeed, windSpeedColumns)
    else:
        start = 0
        #end = 0
        endTime = Utilities.timeTag2ToSec(tt2[0]) + interval
        for i in range(0, len(tt2)):
            time = Utilities.timeTag2ToSec(tt2[i])
            if time > endTime:
                end = i - 1
                esSlice = ProcessL4.columnToSlice(esColumns, start, end)
                liSlice = ProcessL4.columnToSlice(liColumns, start, end)
                ltSlice = ProcessL4.columnToSlice(ltColumns, start, end)
                ProcessL4.calculateReflectance2(root, esSlice, liSlice, ltSlice, newRrsData, newESData, newLIData, newLTData, \
                                                percentLt, enableQualityCheck, performNIRCorrection, \
                                                rhoSky, enableWindSpeedCalculation, defaultWindSpeed, windSpeedColumns)
                start = i
                endTime = time + interval

        # Try converting any remaining
        end = len(tt2) - 1
        time = Utilities.timeTag2ToSec(tt2[end])
        if time < endTime:
            esSlice = ProcessL4.columnToSlice(esColumns, start, end)
            liSlice = ProcessL4.columnToSlice(liColumns, start, end)
            ltSlice = ProcessL4.columnToSlice(ltColumns, start, end)
            ProcessL4.calculateReflectance2(root, esSlice, liSlice, ltSlice, newRrsData, newESData, newLIData, newLTData, \
                                            percentLt, enableQualityCheck, performNIRCorrection, \
                                            rhoSky, enableWindSpeedCalculation, defaultWindSpeed, windSpeedColumns)

    # for i in range(0, int(esLength/resolution)):
    #     #print(i)
    #     start = i*resolution
    #     end = start+resolution
    #     esSlice = ProcessL4.columnToSlice(esColumns, start, end, i, resolution)
    #     liSlice = ProcessL4.columnToSlice(liColumns, start, end, i, resolution)
    #     ltSlice = ProcessL4.columnToSlice(ltColumns, start, end, i, resolution)
    #     ProcessL4.calculateReflectance2(root, node, esSlice, liSlice, ltSlice, newRrsData, newESData, newLIData, newLTData, enableQualityCheck, defaultWindSpeed, windSpeedColumns)

    newESData.columnsToDataset()
    newLIData.columnsToDataset()
    newLTData.columnsToDataset()
    newRrsData.columnsToDataset()

    return True
# ### **************************************************************************** ###
import configparser
from hermes_python.hermes import Hermes
from hermes_python.ffi.utils import MqttOptions
from hermes_python.ontology import *
from Utilities import Utilities
import io
import sys
import os

CONFIGURATION_ENCODING_FORMAT = "utf-8"
CONFIG_INI = "config.ini"

util = Utilities()


class SnipsConfigParser(configparser.ConfigParser):
    def to_dict(self):
        return {
            section: {
                option_name: option
                for option_name, option in self.items(section)
            }
            for section in self.sections()
        }


def read_configuration_file(configuration_file):
    try:
def testGroupAndMerge(self):
    array = [1, 2, 3, 4]
    change = Utilities.groupAndMerge(array, 2)
    self.assertEqual(change, [3, 7])
def printd(self):
    if len(self.id) != 0:
        pmsg = f'id: {self.id}'
        print(pmsg)
        Utilities.writeLogFile(pmsg)
def processL1bqc(inFilePath, outFilePath):
    root = None
    if not os.path.isfile(inFilePath):
        print('No such input file: ' + inFilePath)
        return None

    # Process the data
    print("ProcessL1bqc")
    try:
        root = HDFRoot.readHDF5(inFilePath)
    except:
        msg = "Unable to open file. May be open in another application."
        Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None

    root.attributes['In_Filepath'] = inFilePath
    root = ProcessL1bqc.processL1bqc(root)

    # Write output file
    if root is not None:
        try:
            root.writeHDF5(outFilePath)
        except:
            msg = "Unable to write file. May be open in another application."
            Utilities.errorWindow("File Error", msg)
            print(msg)
            Utilities.writeLogFile(msg)
            return None
    else:
        msg = "L1bqc processing failed. Nothing to output."
        if MainConfig.settings["popQuery"] == 0:
            Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None

    return root
def processL1a(inFilePath, outFilePath, calibrationMap):
    root = None
    if not os.path.isfile(inFilePath):
        msg = 'No such file...'
        Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None, 'Never'

    # Process the data
    msg = "ProcessL1a"
    print(msg)
    root = ProcessL1a.processL1a(inFilePath, calibrationMap)

    # Write output file
    if root is not None:
        try:
            root.writeHDF5(outFilePath)
        except:
            msg = 'Unable to write L1A file. It may be open in another program.'
            Utilities.errorWindow("File Error", msg)
            print(msg)
            Utilities.writeLogFile(msg)
            return None
    else:
        msg = "L1a processing failed. Nothing to output."
        if MainConfig.settings["popQuery"] == 0:
            Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None

    return root
def getHitPoints(self):
    from Utilities import Utilities
    constitution = self.getConstitution()
    modifier = Utilities.calculateModifier(constitution)
    return (self.hitpoints + modifier)
def processL1aqc(inFilePath, outFilePath, calibrationMap, ancillaryData):
    root = None
    if not os.path.isfile(inFilePath):
        print('No such input file: ' + inFilePath)
        return None

    # Process the data
    print("ProcessL1aqc")
    try:
        root = HDFRoot.readHDF5(inFilePath)
    except:
        msg = "Unable to open file. May be open in another application."
        Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None

    # At this stage the Anomanal parameterizations are current in ConfigFile.settings,
    # regardless of who called this method. This method will promote them to root.attributes.
    root = ProcessL1aqc.processL1aqc(root, calibrationMap, ancillaryData)

    # Write output file
    if root is not None:
        try:
            root.writeHDF5(outFilePath)
        except:
            msg = "Unable to write the file. May be open in another application."
            Utilities.errorWindow("File Error", msg)
            print(msg)
            Utilities.writeLogFile(msg)
            return None
    else:
        msg = "L1aqc processing failed. Nothing to output."
        if MainConfig.settings["popQuery"] == 0:
            Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None

    return root
def processL1b(inFilePath, outFilePath):
    root = None
    if not os.path.isfile(inFilePath):
        print('No such input file: ' + inFilePath)
        return None

    # Process the data
    msg = ("ProcessL1b: " + inFilePath)
    print(msg)
    Utilities.writeLogFile(msg)
    try:
        root = HDFRoot.readHDF5(inFilePath)
    except:
        msg = "Controller.processL1b: Unable to open HDF file. May be open in another application."
        Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None

    root = ProcessL1b.processL1b(root, outFilePath)

    # Write output file
    if root is not None:
        try:
            root.writeHDF5(outFilePath)
        except:
            msg = "Controller.ProcessL1b: Unable to write file. May be open in another application."
            Utilities.errorWindow("File Error", msg)
            print(msg)
            Utilities.writeLogFile(msg)
            return None
    else:
        msg = "L1b processing failed. Nothing to output."
        if MainConfig.settings["popQuery"] == 0:
            Utilities.errorWindow("File Error", msg)
        print(msg)
        Utilities.writeLogFile(msg)
        return None

    return root
    pygame.display.flip()
    print('Go back to the main page...')


if __name__ == '__main__':
    from Utilities import Utilities
    import platform
    from Communications import Communications

    pygame.init()
    DISPLAYSURF = pygame.display.set_mode((1200, 800))
    FONT = pygame.font.Font('freesansbold.ttf', 16)
    BIGFONT = pygame.font.Font('freesansbold.ttf', 32)
    pygame.display.set_caption('Flippy')

    utilities = Utilities(DISPLAYSURF, BIGFONT)

    name = 'laptop' if (platform.system() == 'Windows') else 'pi7'
    target = 'pi7' if (platform.system() == 'Windows') else 'laptop'
    comm = Communications('messages', 'localhost', name)
    comm.connectBroker()
    comm.setTarget(target)
    utilities.comm = comm

    line = TextBox('Enter exit to quit')
    pos = line.draw()
    line = TextBox('Chat:')
    pos = line.draw(pos)
    pygame.display.flip()

    run = True
    lastMsg = ''
def testGroup(self):
    array = [1, 2, 3, 4]
    change = Utilities.group(array, 2)
    self.assertEqual(change, [[1, 2], [3, 4]])