def run():
    """Import SMS messages from an XML backup file into the database.

    Parses the XML file named on the command line, converts each <sms>
    element into a Backup row, commits them all, then prints the result
    of find_data over the stored rows.
    """
    args = parser_cl()
    Base.metadata.create_all(engine)
    session = Session()
    root = get_xml_root(args.file)
    for sms in root:
        json_data = xml_to_dict(sms)
        # type == '1' marks an incoming message; anything else was sent
        # by us.  BUG FIX: the old encode()/decode() round-trip raised
        # AttributeError on the plain-str 'Me' branch under Python 3.
        if sms.get('type') == '1':
            json_data['sender'] = sms.get('contact_name')
        else:
            json_data['sender'] = 'Me'
        session.add(json_to_b(json_data))
    session.commit()
    # BUG FIX: .all was passed as a bound method instead of being called.
    ret = find_data(session.query(Backup).all())
    print(ret)
    session.close()
def university_basic_populate():
    """Seed the universities table from the scraped university.json file.

    BUG FIX: a fresh Session was previously created on every loop
    iteration but only the last one was ever committed and closed, so
    all earlier universities were silently dropped and their sessions
    leaked.  A single session now covers the whole import.
    """
    Base.metadata.create_all(engine)
    with open('../scrapers/university.json', 'r') as f:
        university_data = json.load(f)
    session = Session()
    for uni in university_data:
        university = University(uni["university_id"], uni["name"])
        university.set_extra_info(uni["type"], uni["website"],
                                  uni["oos_tuition"], uni["state_tuition"],
                                  uni["survey_year"])
        university.set_location(uni["longitude"], uni["latitude"],
                                uni["county_id"], uni["state"])
        university.set_enrollment(uni["enrolled_men"], uni["enrolled_women"])
        university.set_demographics(uni["demographics_asian"],
                                    uni["demographics_white"],
                                    uni["demographics_black"],
                                    uni["demographics_hispanic"],
                                    uni["demographics_other"])
        session.add(university)
        print("Added " + uni["name"])
    session.commit()
    session.close()
def add_twitch_clip(self, id):
    """Fetch a Twitch clip by id and persist it, keeping the in-memory
    caches (all_clips / all_clip_ids) in sync.

    Returns True on success; False when the clip is already stored,
    cannot be fetched, or the database write fails.
    """
    # Already stored: nothing to do.
    if storageSystem.get_twitch_clip(id) is not None:
        return False
    session = Session()
    clipObject = twitchmisc.getClipObjects([id])
    if len(clipObject) == 0 or clipObject[0] is None:
        session.close()  # BUG FIX: session used to leak on this path
        return False
    ret = False
    try:
        session.add(clipObject[0])
        session.commit()
        for clip in clipObject:
            if clip.id not in all_clip_ids:
                # Detach so the cached object stays usable after close().
                session.expunge(clip)
                all_clips.append(clip)
                all_clip_ids.append(clip.id)
        ret = True
    except Exception:  # narrowed from bare except; DB errors mean failure
        ret = False
    finally:
        session.close()
    return ret
def add_to_liked_clips(self, clip_id, user_id):
    """Create a LikedClips row linking user_id to clip_id.

    Generates a random 25-character id, retrying until it is unique.
    Returns True on success, False when the clip or the user does not
    exist.  Database errors propagate to the caller.
    """
    session = Session()
    if self.get_twitch_clip(clip_id) is None or not self.user_exists(user_id):
        session.close()
        return False
    chars = digits + ascii_lowercase + ascii_uppercase  # id alphabet
    new_id = None
    # Draw ids until one is not already taken.
    while new_id is None or session.query(LikedClips).filter(
            LikedClips.id == new_id).first() is not None:
        new_id = "".join(choice(chars) for _ in range(25))
    new_relationship = LikedClips(id=new_id, clip_id=clip_id, user_id=user_id)
    ret = False
    try:
        session.add(new_relationship)
        session.commit()
        ret = True
    finally:
        session.close()
    return ret
def import_procedures(procedures):
    """Load FHIR procedure resources into the Procedure table."""
    print("Importing Procedures")
    session = Session()
    count = 0
    for entry in procedures:
        # The subject reference looks like "Patient/<source_id>".
        src = entry['subject']['reference'].split('/')[1]
        patient_id = session.query(
            Patient.id).where(Patient.source_id == src).one().id
        try:
            performed = entry['performedDateTime']
        except KeyError:
            # Fall back to the start of the performed period.
            performed = entry['performedPeriod']['start']
        coding = entry['code']['coding'][0]
        session.add(Procedure(
            source_id=entry['id'],
            patient_id=patient_id,
            procedure_date=performed,
            type_code=coding['code'],
            type_code_system=coding['system']))
        count += 1
        print(".", end='', flush=True)  # Show progress
    session.commit()
    session.close()
    print(f"\nImported {count} procedures")
def new_user(self, email, username, password):
    """Register a new user account.

    Returns 0 on success, 1 when the email is already registered, and
    2 when the database insert fails.
    """
    session = Session()
    if self.email_to_id(email) is not None:
        session.close()  # BUG FIX: session used to leak on this path
        return 1
    chars = digits + ascii_lowercase + ascii_uppercase
    user_id = None
    # Draw random 25-char ids until one is unused.
    while user_id is None or session.query(User).filter(
            User.id == user_id).first() is not None:
        user_id = "".join(choice(chars) for _ in range(25))
    print("Using new user id: " + str(user_id))
    # Passwords are stored as unsalted SHA-512 hex digests.
    # NOTE(review): consider a salted KDF (bcrypt/scrypt) here.
    record = User(email,
                  hashlib.sha512(password.encode("utf-8")).hexdigest(),
                  user_id, username)
    try:
        session.add(record)
        session.commit()
        return 0
    except Exception:  # narrowed from bare except
        return 2
    finally:
        session.close()
def create_user():
    """Flask view: render the creation form (GET) or insert a user (POST).

    On POST, the phone number is normalized to digits only and must be
    exactly 10 digits long and not already present in the database.
    """
    if request.method == 'GET':
        return render_template('create_user.html')
    elif request.method == 'POST':
        name = request.form['name']
        # Strip everything except digits from the phone number.
        phone = re.sub(r"[^0-9]", '', request.form['phone'])
        if len(phone) != 10:
            return jsonify({"error": "Phone number is wrong length"}), 500
        session = Session()
        # Reject duplicate phone numbers.
        duplicate = session.query(User).filter(User.phone == phone).all()
        if duplicate:
            session.close()  # BUG FIX: session used to leak on this path
            return jsonify({"error": "This phone number is already in the system"}), 500
        # All tests have passed, so create and insert the user.
        session.add(User(name, phone))
        session.commit()
        session.close()
        # Redirect so the new user shows up in the full listing.
        return redirect(url_for('users'))
class DBQuery:
    """Context-managed wrapper around a SQLAlchemy session for Item CRUD."""

    # Ensure the schema exists as soon as the class is defined.
    Base.metadata.create_all(engine)

    def __init__(self):
        self.session = Session()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.session.close()

    def get_items(self):
        """Return every Item as a list of column dicts."""
        return [record.columns() for record in self.session.query(Item).all()]

    def add_item(self, values):
        """Insert a pre-built Item instance and commit."""
        self.session.add(values)
        self.session.commit()

    def update_item(self, item_id, values):
        """Apply a dict of column updates to the Item with item_id."""
        self.session.query(Item).filter(Item.id == item_id).update(values)
        self.session.commit()

    def delete_item(self, item_id):
        """Remove the Item with item_id."""
        self.session.query(Item).filter(Item.id == item_id).delete()
        self.session.commit()
def handler(event, context):
    """Lambda: promote a user to MENTOR and pre-create their pending chats.

    Requires an ADMIN bearer token; expects {"email": ...} in the body.
    """
    allowed = [
        UserType.ADMIN,
    ]
    ok, _ = check_auth(event['headers']['Authorization'], allowed)
    if not ok:
        return http_status.unauthorized()

    payload = json.loads(event["body"])
    user_email = payload.get('email')
    if not user_email:
        return http_status.bad_request()

    chat_freq = 4  # number of ONE_ON_ONE chats granted up front
    admin_update_user_attributes(user_email, {
        "custom:user_type": "MENTOR",
        "custom:declared_chats_freq": str(chat_freq),
        "custom:remaining_chats_freq": str(chat_freq),
    })
    admin_enable_user(user_email)

    session = Session()
    for _ in range(chat_freq):
        session.add(Chat(chat_type=ChatType["ONE_ON_ONE"],
                         chat_status=ChatStatus.PENDING,
                         senior_executive=user_email))
    session.commit()
    session.close()
    return http_status.success()
def save(self):
    # Persist this object in its own short-lived session: unsaved objects
    # (no id yet) are added and committed, then the object is detached so
    # it stays usable after close().
    # NOTE(review): when self.id is already set, expunge() runs on an
    # instance never added to this session — confirm self is attached in
    # that case, otherwise SQLAlchemy raises InvalidRequestError.
    s = Session()
    if not self.id:
        s.add(self)
        s.commit()
    s.expunge(self)
    s.close()
def offerTransaction(rem, dest, offer):
    """Send an Ether payment for an offer from `rem` to `dest` and log it.

    Builds, signs and broadcasts a Ropsten transaction whose data field
    carries the offer name, then records a Transaccion row.
    """
    destUser = User.get_by_email(dest)
    remUser = User.get_by_email(rem)
    nonce = web3.eth.getTransactionCount(remUser.blockHash)
    amount = offer.precio
    memo = "Pago por oferta: " + offer.nombre
    eth_amount = float(amount) / valorUDC  # convert UDC to ether
    tx = {
        'chainId': 3,  # 3 = Ropsten testnet
        'nonce': nonce,
        'to': destUser.blockHash,
        'value': web3.toWei(eth_amount, 'ether'),
        'gas': 50000,
        'gasPrice': web3.toWei(50, 'gwei'),
        'data': bytes(memo, 'utf8'),
    }
    signed_tx = web3.eth.account.signTransaction(tx, remUser.pk)
    tx_hash = web3.eth.sendRawTransaction(signed_tx.rawTransaction)
    # Log the transfer with a human-readable timestamp.
    stamp = datetime.now().strftime("%d-%m-%Y (%H:%M:%S.%f)")
    s = Session()
    s.add(Transaccion(stamp, tx_hash, rem, destUser.organizacion,
                      None, amount, "", ""))
    s.commit()
    s.close()
def updateGridTaskStatus(gridTask):
    """Refresh gridTask.status from GridWay via DRMAA and persist it.

    DONE only means the task has finished, not that it succeeded.
    """
    # Query GridWay for the task's status.
    (result, status, error) = drmaa_job_ps(gridTask.gwID)
    if result != DRMAA_ERRNO_SUCCESS:
        # BUG FIX: replaced Python 2 "print >> sys.stderr" syntax, which
        # raises TypeError under Python 3.
        print("updatingGridTaskStatus, drmaa_job_ps() failed for task "
              + gridTask.gwID + ". Error: %s" % (error), file=sys.stderr)
        gridTask.status = "DONE"  # DONE does not imply success, just finished
    else:
        if gridTask.status == "CLEAR":
            # Already finished and cleaned up; nothing to do here.
            return
        elif status == DRMAA_PS_UNDETERMINED or status == DRMAA_PS_QUEUED_ACTIVE:
            gridTask.status = "SUBMITTED"
        elif status == DRMAA_PS_RUNNING:
            # If needed, record the moment execution actually started.
            if gridTask.status == "SUBMITTED" or gridTask.status == "WAITING":
                gridTask.executionStartDate = datetime.now()
                print("Task " + gridTask.gwID + "with type " + gridTask.type
                      + " on host " + gridTask.host.hostname + " with ID="
                      + str(gridTask.host.id) + " has started its execution")
            gridTask.status = "RUNNING"
        elif status == DRMAA_PS_DONE or status == DRMAA_PS_FAILED:
            gridTask.status = "DONE"
    Session.add(gridTask)
def main(filename):
    """Create the schema, read articles from a CSV file and load them
    into the SQLite database.

    Parameters
    ----------
    filename : str
        Name of the CSV dataset.
    """
    Base.metadata.create_all(engine)  # generate the schema
    session = Session()               # open our session
    articles = pd.read_csv(filename)
    # Insert one Article row per CSV record.
    for index, row in articles.iterrows():
        logger.info('Loading article uid {} info DB'.format(row['uid']))
        session.add(Article(row['uid'], row['body'], row['host'],
                            row['newspaper_uid'], row['n_token_body'],
                            row['n_token_title'], row['title'], row['url']))
    session.commit()
    session.close()
def sendCoins(dest, amount, imgHash, urlProof):
    """Pay `amount` (in UDC) to `dest` for the current action and log it.

    Signs and broadcasts a Ropsten transaction from the app's test
    address, records a Transaccion row, and bumps the action's KPI by
    the value posted in the request form.
    """
    destUser = User.get_by_email(dest)
    nonce = web3.eth.getTransactionCount(test_address)
    # NOTE: `session` here is the Flask session, not a DB session.
    accion = Accion.getActionById(session['accionId'])
    eth_amount = float(amount) / valorUDC  # UDC -> ether
    memo = "acc:" + accion.nombre + " img: " + imgHash
    tx = {
        'chainId': 3,  # 3 = Ropsten testnet
        'nonce': nonce,
        'to': destUser.blockHash,
        'value': web3.toWei(eth_amount, 'ether'),
        'gas': 50000,
        'gasPrice': web3.toWei(50, 'gwei'),
        'data': bytes(memo, 'utf8'),
    }
    signed_tx = web3.eth.account.signTransaction(tx, private_key)
    tx_hash = web3.eth.sendRawTransaction(signed_tx.rawTransaction)
    stamp = datetime.now().strftime("%d-%m-%Y (%H:%M:%S.%f)")
    s = Session()
    s.add(Transaccion(stamp, tx_hash, accion.empresa, dest,
                      accion.campanya_id, amount, imgHash, urlProof))
    s.commit()
    # Increment the action's KPI counter by the value posted in the form.
    kpi = int(float(request.form['kpi']))
    s.query(Accion).filter(Accion.id == accion.id).update(
        {Accion.kpi: Accion.kpi + kpi}, synchronize_session=False)
    s.commit()
    s.close()
def updateStatus(self, gridTasks):
    """Recompute per-host running-slot counts from the RUNNING tasks and
    refresh each host's historical slot maxima, persisting every host."""
    # Reset the running counters before recounting.
    for host in self.hosts:
        host.currentSlotCount = 0
    for gridTask in gridTasks:
        if gridTask.status == "RUNNING":
            try:
                gridTask.host.currentSlotCount += 1
            except AttributeError:
                # Narrowed from bare except: tolerate tasks whose host is
                # missing/unset; any other error should surface.
                pass
    for host in self.hosts:
        host.maxSlotCountThisTime = max(host.currentSlotCount,
                                        host.maxSlotCountThisTime)
        if host.maxSlotCountThisTime > host.maxSlotCount:
            host.maxSlotCount = host.maxSlotCountThisTime
        Session.add(host)
def insert_attack(host, port):
    """Record one attack attempt (host, port) in the database."""
    Base.metadata.create_all(engine)  # make sure the schema exists
    db = Session()
    db.add(Attack(host, port))
    db.commit()
    db.close()
def Add_Department():
    """Prompt for a department name and insert it into the database."""
    dep_name = input("Enter Department Name: ")
    session = Session()
    session.add(Department(dep_name=dep_name))
    session.commit()
    session.close()  # BUG FIX: the session was never closed
def get_or_create_solution(n_queens, solution):
    """Return the stored SolutionModel for `solution`, creating it if new.

    The solution (a sequence of column positions) is serialized as a
    ';'-joined string used both for lookup and for storage.
    NOTE(review): the session is left open so the returned instance stays
    attached; consider a scoped/managed session instead.
    """
    session = Session()
    # Serialize once and reuse (previously built twice, identically).
    key = ";".join(str(v) for v in solution)
    sol = session.query(SolutionModel).filter_by(solution=key).first()
    if sol:
        return sol
    sol = SolutionModel(n_queens=n_queens, solution=key)
    session.add(sol)
    session.commit()
    return sol
def updateInfoAfterExecutionInPilots(self, gridTask): self.updateInfoAfterExecution(gridTask) #queremos aumentar el numero de tareas otra vez (porque lo restamos al enviarla, y el updateInfo... lo vuelve a restar) # -si ha acabado OK, para que el numero de pendientes se quede como estaba # - si NO ha acabado Ok, para que se vuelva a incrementar, ya que lo restamos al enviarla self.remainingSamples += gridTask.realSamples Session.add(self)
def getTask():
    """HTTP endpoint: claim the next WAITING parameter.

    Marks it RUNNING, stamps its start time, and returns its id in the
    template (-1 when no work is pending).
    """
    newParameter = Session.query(Parameter).filter(
        Parameter.status == "WAITING").first()
    if newParameter is None:  # idiom fix: was "== None"
        return template('{{id}}', id=-1)
    newParameter.status = "RUNNING"
    newParameter.executionStartDate = datetime.now()
    Session.add(newParameter)
    Session.commit()
    return template('{{id}}', id=newParameter.id)
def main(filename):
    """Load the UTF-8 articles CSV into the articles table."""
    Base.metadata.create_all(engine)
    session = Session()
    frame = pd.read_csv(filename, encoding='utf-8')
    for index, row in frame.iterrows():
        logger.info('Loading article uid: {}'.format(row['uid']))
        session.add(Article(row['uid'], row['body'], row['host'],
                            row['newspaper_uid'], row['n_tokens_body'],
                            row['n_tokens_title'], row['title'], row['url']))
    session.commit()
    session.close()
def AddPost():
    """Flask view: create a Post from the submitted form fields."""
    db = Session()
    record = Post(request.form['text'],
                  datetime.strptime(request.form['date'], '%Y-%m-%d'),
                  request.form['user'],
                  request.form['title'],
                  request.form['spoiler'])
    db.add(record)
    db.commit()
    db.close()
    return jsonify()
def failedTask(idInput=-1):
    """HTTP endpoint: return a failed parameter to the WAITING queue.

    Returns the parameter id, or the literal '-1' template when idInput
    cannot be parsed as an integer.
    """
    try:
        id = int(idInput)
    except (TypeError, ValueError):  # narrowed from bare except
        return template('-1')
    failedParameter = Session.query(Parameter).filter(Parameter.id == id).first()
    failedParameter.status = "WAITING"
    failedParameter.executionStartDate = None
    Session.add(failedParameter)
    Session.commit()
    return template('{{id}}', id=failedParameter.id)
def main(filename):
    """Read the articles CSV and persist each row as an Article."""
    Base.metadata.create_all(Engine)
    session = Session()
    data = pd.read_csv(filename)
    for index, row in data.iterrows():
        logger.info(f"Loading article uid {row['uid']} into DB...")
        session.add(Article(row['uid'], row['body'], row['host'],
                            row['newspaper_uid'], row['n_tokens_body'],
                            row['n_tokens_title'], row['title'], row['url']))
    session.commit()
    session.close()
def add_university_image_link():
    """Backfill the image_link column for every university listed in the
    scraped university.json file."""
    with open('../scrapers/university.json', 'r') as src:
        records = json.load(src)
    for record in records:
        # One short-lived session per university keeps each update isolated.
        db = Session()
        uni = db.query(University).filter(
            University.id == record["university_id"]).first()
        uni.set_image_link(record["image_link"])
        db.add(uni)
        db.commit()
        print(uni.image_link)
        db.close()
def addToTable(self, FS, IS, VS, CO, TVOC, cTemp, fTemp, humidity, pressure):
    """Append one sensor reading, timestamped now (UTC), to the readings
    table via the SQLAlchemy ORM.

    Base, Session and engine are all defined in base.py; the target
    should be the MySQL db of SensHost.
    """
    Base.metadata.create_all(engine)
    db = Session()
    timestamp = strftime("%Y-%m-%d %H:%M:%S", gmtime())
    db.add(Readings(timestamp, self.node.name, FS, IS, VS, CO, TVOC,
                    cTemp, fTemp, humidity, pressure))
    db.commit()
    db.close()
def main(filename):
    """Load the latin-1 encoded CSV of employee records into the DB."""
    Base.metadata.create_all(engine)
    session = Session()
    data = pd.read_csv(filename, encoding='latin1')
    for index, row in data.iterrows():
        logger.info('Loading article id {} into DB'.format(row['uid']))
        session.add(Article(row['uid'], row['area'], row['gerente'],
                            row['fecha'], row['banda'], row['calificacion'],
                            row['salario'], row['seguro']))
    session.commit()
    session.close()
def main(filename):
    """Persist every row of the articles CSV as an Article record."""
    Base.metadata.create_all(engine)
    session = Session()
    data = pd.read_csv(filename)
    for index, row in data.iterrows():
        logger.info("Loading article into DB")
        session.add(Article(row["uid"], row["body"], row["host"],
                            row["newspaper_uid"], row["n_tokens_title"],
                            row["title"], row["url"]))
    session.commit()
    session.close()
def main(filename): Base.metadata.create_all(Engine) #Genera el SCHEMA session = Session() articles = pd.read_csv(filename) for index, row in articles.iterrows(): logger.info('Loading article uid {} into DB'.format(row['uid'])) article = Article(row['uid'], row['body'], row['title'], row['newspaper_uid'], row['n_token_body'], row['n_token_title'], row['title'], row['url']) session.add(article) session.commit() session.close()
def analyzeParametricJobfile(self):
    """Expand the parametric job file into Parameter rows.

    Runs the external templateManager to expand the template into a
    temporary file, creates one Parameter per generated line, and
    commits them all.
    """
    tmpFile = "/tmp/tmpParameterFile"
    process = (monteraLocation + '/templateManager -c '
               + self.parametricJobFile + " > " + tmpFile)
    parameters = self.runProcess(process)  # side effect: writes tmpFile
    numTasks = 0
    # BUG FIX: the file handle returned by open() was never closed.
    with open(tmpFile, 'r') as fh:
        for line in fh:
            print("Parameter: " + line)
            Session.add(Parameter(line))
            numTasks += 1
    Session.commit()
def add_city_relationship():
    """Link each university to its City row using ids from university.json."""
    with open('../scrapers/university.json', 'r') as src:
        records = json.load(src)
    for record in records:
        db = Session()
        uni = db.query(University).filter(
            University.id == record["university_id"]).first()
        town = db.query(City).filter(City.id == record["city_id"]).first()
        uni.set_city(town)
        db.add(uni)
        db.commit()
        print(town.city_name)
        db.close()
def finishedTask(idInput=-1):
    """HTTP endpoint: mark a parameter as DONE and stamp its end time.

    Returns the parameter id, or the literal '-1' template when idInput
    cannot be parsed as an integer.
    """
    try:
        id = int(idInput)
    except (TypeError, ValueError):  # narrowed from bare except
        return template('-1')
    finishedParameter = Session.query(Parameter).filter(
        Parameter.id == id).first()
    finishedParameter.status = "DONE"
    finishedParameter.executionEndDate = datetime.now()
    Session.add(finishedParameter)
    Session.commit()
    # TODO: output data should be verified somewhere before marking DONE
    return template('{{id}}', id=finishedParameter.id)
def store(request):
    """Persist a new Corpus built from the posted title/body (its resume
    generated via rf.resume), then bounce back to the index page."""
    title = request.form.get("title")
    body = request.form.get("body")
    resume = rf.resume(body)
    Base.metadata.create_all(engine)
    db = Session()
    db.add(Corpus(title=title, body=body, resume=resume))
    db.commit()
    db.close()
    return redirect("http://127.0.0.1:5000/")
def handler(event, context):
    """Lambda: bulk-create chats for a senior executive.

    Requires an ADMIN bearer token.  Body: {"senior_executive": ...,
    "chats": [{chat_type, description?, tags?, fixed_date?}, ...]}.
    Chats with a fixed_date become ACTIVE immediately; the others stay
    PENDING and bump the executive's remaining-chats counter.
    """
    # Check authorization.
    authorized_user_types = [
        UserType.ADMIN
    ]
    success, _ = check_auth(event['headers']['Authorization'], authorized_user_types)
    if not success:
        return http_status.forbidden()

    # Validate the request body.
    body = json.loads(event["body"])
    senior_executive = body.get('senior_executive')
    chats_new = body.get('chats')
    if not senior_executive or not chats_new:
        return http_status.bad_request("invalid parameter(s): 'senior_executive, chats'")

    session = Session()
    for chat_new in chats_new:
        # chat_type must be present and a valid ChatType member.
        # NOTE(review): this path closes without rollback while the
        # fixed_date path below rolls back first — confirm whether the
        # asymmetry is intentional.
        if not chat_new.get('chat_type') or chat_new['chat_type'] not in ChatType.__members__:
            session.close()
            return http_status.bad_request("invalid parameter(s): 'chat_type'")
        chat_type = ChatType[chat_new['chat_type']]
        description = chat_new.get('description')
        tags = chat_new.get('tags')
        fixed_date = chat_new.get('fixed_date')

        # Some chat types require a fixed date; abort the whole batch if
        # one is missing.
        if chat_type in mandatory_date and not fixed_date:
            session.rollback()
            session.close()
            return http_status.bad_request("missing body attribute { fixed_date } with chat_type { %s }" % (chat_type.name))

        chat = Chat(
            chat_type=chat_type,
            description=description,
            chat_status=ChatStatus.PENDING,
            tags=tags,
            senior_executive=senior_executive
        )
        # NOTE(review): these Cognito counter updates are external side
        # effects and are NOT undone if a later iteration aborts — verify.
        admin_update_declared_chats_frequency(senior_executive, 1)

        if fixed_date:
            # Fixed-date chats are scheduled (midnight of that day) and
            # activated immediately.
            chat.fixed_date = datetime.fromtimestamp(fixed_date).replace(hour=0, minute=0,second=0, microsecond=0)
            chat.chat_status = ChatStatus.ACTIVE
        else:
            admin_update_remaining_chats_frequency(senior_executive, 1)
        session.add(chat)
    session.commit()
    session.close()
    return http_status.success()
def updateAdd():
    """Flask view: record that userID ordered maskID."""
    details = request.json
    db = Session()
    db.add(Order(details["userID"], details["maskID"]))
    db.commit()
    db.close()
    # Successfully updated the masks for the user.
    return '', 200
def main(filename):
    """Store every scraped article row from the CSV into the database.

    Expected columns: uid, body, title, url, newspaper_uid, host,
    token_title, token_body.
    """
    Base.metadata.create_all(Engine)
    session = Session()
    rows = pd.read_csv(filename)
    for index, row in rows.iterrows():
        logger.info(f'Saving article {index}')
        session.add(Article(row['uid'], row['body'], row['title'],
                            row['url'], row['newspaper_uid'], row['host'],
                            row['token_title'], row['token_body']))
    session.commit()
    session.close()
def popular_banco():
    """Seed the database with a sample person and one phone number.

    Emerson and Juca are instantiated but deliberately not persisted;
    only Natalia and her home phone are committed.
    """
    db = Session()
    emerson = Pessoa("Emerson")
    juca = Pessoa("Juca")
    natalia = Pessoa("Natalia")
    db.add(natalia)
    natalia_casa = Telefone("(48) 3242-1119", natalia)
    db.add(natalia_casa)
    db.commit()
    db.close()
def updateInfoAfterProfiling(self, gridTask):
    """Update this application's profile from a finished profiling task.

    Reads the task's execution_result XML (a series of (samples, time)
    pairs), derives weighted average per-sample and constant times,
    normalizes them by the host's Whetstone score and feeds them into
    self.profile.  On any inconsistency the host is flagged as having
    failed profiling instead.
    """
    # 1.- read the info for this application: a series of value pairs.
    execution_file = base.tmpExecutionFolder + "execution_result_" + gridTask.gwID + ".xml"
    try:
        doc = xml.dom.minidom.parse(execution_file)
    except:
        print("failed when profiling host " + gridTask.host.hostname + ". File " + execution_file + " could not be found")
        gridTask.host.updateInfoAfterFailedProfiling()
        Session.add(gridTask.host)
        return
    executionInfoList = doc.getElementsByTagName('results')
    executionResults = []
    for executionData in executionInfoList:
        profileInfoList = executionData.getElementsByTagName('profile')
        for profileInfo in profileInfoList:
            samples = int(profileInfo.getElementsByTagName("samples")[0].firstChild.data)  # TODO: remove "unicode" from TEXT
            time = int(profileInfo.getElementsByTagName("time")[0].firstChild.data)
            if samples == 0:
                print ("There was an error on host " + gridTask.host.hostname + ". File " + execution_file + ", no samples have been executed")
                gridTask.host.updateInfoAfterFailedProfiling()
                Session.add(gridTask.host)
                return
            executionResults.append([samples, time])
    # 2.- process the results.
    # TODO: this may very well be wrong.
    numSimulations = len(executionResults)
    if numSimulations == 0:
        print ("There was an error on host " + gridTask.host.hostname + ". File " + execution_file + ", no simulations performed")
        gridTask.host.updateInfoAfterFailedProfiling()
        Session.add(gridTask.host)
        return
    avgSampleTime = 0
    for i in range(numSimulations-1):
        # Incremental samples/time between consecutive measurements.
        newSamples = executionResults[i+1][0] - executionResults[i][0]
        newTime = executionResults[i+1][1] - executionResults[i][1]
        if newTime <=0:
            value = 0
            continue
        else:
            value = newTime / newSamples  ###### TODO: this is the key!
        # Weighted sample time, so later executions count more than the
        # first ones: they executed more samples, so their information is
        # more valuable.
        if i==0 or avgSampleTime == 0:
            avgSampleTime = value
        else:
            avgSampleTime = 0.6 * value + 0.4 * avgSampleTime
    acumConstantTime = 0
    # This has been modified to avoid zero values.
    for i in range(len(executionResults)):
        acumConstantTime += max(0, executionResults[i][1] - executionResults[i][0] * avgSampleTime)
    avgConstantTime = acumConstantTime / numSimulations
    # Normalize by host speed so efforts are host-independent.
    normalizedSampleEffort = avgSampleTime * gridTask.host.getWhetstones()
    normalizedConstantEffort = avgConstantTime * gridTask.host.getWhetstones()
    print("RESULTADO DEL PROFILING")
    print ("task id: " + gridTask.gwID)
    print("normalizado, this_sample_effort = " + str(normalizedSampleEffort))
    print("normalizado, this_constant_effort = " + str(normalizedConstantEffort))
    print("STATUS DEL PROFILING")
    print("normalizado, sample_effort = " + str(self.profile.sampleEffort))
    print("normalizado, constant_effort = " + str(self.profile.constantEffort))
    print("")
    if (normalizedSampleEffort < 0 ) or (normalizedConstantEffort < 0):
        print ("A normalized effort below zero makes no sense, dismissing profile")
        return
    self.profile.updateInfoAfterProfiling(normalizedConstantEffort, normalizedSampleEffort)
    Session.add(self.profile)
from GridTask import GridTask
import UserInterface

if __name__ == '__main__':
    # Entry point: set up the database schema, discover the available
    # hosts and submit profiling tasks for them.
    # NOTE(review): requirementsFile is assigned but never used below —
    # confirm whether it should be passed to Infrastructure.
    requirementsFile = "/soft/users/home/u5682/nfs/workspace/montera/FLUKA/fluka_example.mt"
    print("Starting connection with database")
    metadata = MetaData()
    myDBDesign = DBDesign()
    # Declare all table designs on the shared metadata object.
    hostDB = myDBDesign.HostDBDesign(metadata)
    applicationDB = myDBDesign.ApplicationDesign(metadata)
    appProfileDB = myDBDesign.AppProfileDBDesign(metadata)
    gridTaskDesignDB = myDBDesign.GridTaskDBDesign(metadata)
    parameterDesignDB = myDBDesign.parameterDBDesign(metadata)
    metadata.create_all(base.engine)
    print("Database is correct")
    print("")
    # Discover hosts via the 'lrms' view and queue profiling work.
    myInfrastructure = Infrastructure.Infrastructure('lrms')
    myInfrastructure.createHostProfilingTasks()
    Session.add(myInfrastructure)
    Session.commit()
# Seed the library database with a few authors and their books.
Base.metadata.create_all(engine)
session = Session()

author_1 = Author(name='J.R.R. Tolkien', birth=date(1892, 1, 3))
author_2 = Author(name='J.K. Rowling', birth=date(1965, 7, 31))
author_3 = Author(name='Stephen King', birth=date(1947, 9, 21))

book_1 = Book(title='The Hobbit', published_in=date(1937, 9, 21), author_id=1)
book_2 = Book(title='The Lord of the Rings', published_in=date(1954, 7, 29), author_id=1)
book_3 = Book(title='Harry Potter', published_in=date(1997, 6, 26), author_id=2)
book_4 = Book(title='Carrie', published_in=date(1974, 1, 1), author_id=3)
book_5 = Book(title='Salem Lot', published_in=date(1975, 1, 1), author_id=3)
book_6 = Book(title='The Shining', published_in=date(1977, 1, 1), author_id=3)
book_7 = Book(title='Rage', published_in=date(1977, 1, 1), author_id=3)

# add_all replaces the ten individual add() calls.
session.add_all([author_1, author_2, author_3,
                 book_1, book_2, book_3, book_4,
                 book_5, book_6, book_7])
session.commit()
session.close()
def createInfrastructureTasks(self, infrastructureTasks):
    """Scan GridWay's host list and build profiling tasks for new or
    changed hosts; under a pilot LRMS, also pad with fake wake-up tasks
    so pilots keep running.  Returns the list of tasks to submit."""
    print ("---------------------")
    print ("---------------------")
    print ("---------------------")
    print ("CREATE INFRASTRUCTURE TASKS")
    hostsToProfile = []
    hostList = obtainGWResources()
    for hostInfo in hostList:
        hostName = hostInfo.getElementsByTagName("HOSTNAME")[0].firstChild.data  # TODO: remove "unicode" from TEXT
        # Each attribute may be absent from the XML; default conservatively.
        try:
            foundArch = hostInfo.getElementsByTagName("ARCH")[0].firstChild.data
        except:
            foundArch=""
        try:
            foundCpuMHz = int(hostInfo.getElementsByTagName("CPU_MHZ")[0].firstChild.data)
        except:
            foundCpuMHz = 0
        try:
            foundLrms = hostInfo.getElementsByTagName("LRMS_NAME")[0].firstChild.data
        except:
            foundLrms = None
        try:
            freeNodeCount = int(hostInfo.getElementsByTagName("FREENODECOUNT")[0].firstChild.data)
        except:
            freeNodeCount = 0
        if foundLrms != None:
            if foundLrms == "jobmanager-pilot":
                # Only consider pilots with at least one available slot.
                if not freeNodeCount > 0:
                    continue
            # If a certain LRMS is desired, drop hosts with a different one.
            if self.lrms != None:
                if foundLrms != self.lrms:
                    continue
        # If the host is unknown, create a profiling task for it.
        currentHost = self.getHost(hostName)
        if currentHost == None:
            newHost = Host(hostName, arch=foundArch, cpuMHz = foundCpuMHz, lrms=foundLrms)
            self.hosts.append(newHost)
            hostsToProfile.append(newHost)
            # Store the new host on the database (failure resistance).
            Session.add(newHost)
        # If its information has changed, update the host record.
        elif (currentHost.arch != foundArch) or (currentHost.cpuMHz != foundCpuMHz):
            # TODO: decide what to do here — delete the old host or just
            # overwrite its information?  If the old one is deleted, what
            # happens with the tasks already executed there?  Not trivial.
            currentHost.arch = foundArch
            currentHost.cpuMHz = foundCpuMHz
            if currentHost.lrms == None:
                currentHost.lrms = foundLrms
            hostsToProfile.append(currentHost)
            Session.add(currentHost)
        elif currentHost.shouldBeProfiled():
            if currentHost.lrms == None:
                currentHost.lrms = foundLrms
            hostsToProfile.append(currentHost)
    # Host profiling: submission of 1 task per host.
    hostProfilingTasks = [ExecutionManager.createHostProfilingTask(host) for host in hostsToProfile for i in range(1)]
    siteTasks = []
    # Skip hosts that already have an infrastructure task in flight.
    for task in hostProfilingTasks:
        found=False
        for gridTask in infrastructureTasks:
            if gridTask.host.hostname == task.host.hostname:
                found=True
                break
        if not found:
            siteTasks.append(task)
    # This is for the first montera + gwpilot experiment: we want pilots
    # running, so we start them with these wake-up tasks.
    if self.lrms=="jobmanager-pilot":
        print ("creating fake profiling tasks")
        existingFakeTasks = len([task for task in infrastructureTasks if task.host.hostname=="" and task.status != "PENDING"])
        existingGoodPilots = len (self.getGoodHosts())
        existingProfilingTasks = len(hostProfilingTasks)
        #fakeTasksToCreate = base.maxRunningTasks - (existingFakeTasks + existingGoodPilots + existingProfilingTasks)
        fakeTasksToCreate = base.maxRunningTasks - existingFakeTasks
        print (" Desired tasks: " + str(base.maxRunningTasks))
        print (" Existing fake tasks: " + str(existingFakeTasks))
        print (" Existing good pilots: " + str(existingGoodPilots))
        print (" created: " + str(fakeTasksToCreate))
        emptyHost = FakeHost()
        fakeHostProfilingTasks = [ExecutionManager.createWakeUpask(emptyHost) for i in range(fakeTasksToCreate)]
        siteTasks+=fakeHostProfilingTasks
    return siteTasks
def updateInfoAfterProfiling(self, gridTask):
    """Update a site/host profile from a finished benchmark task.

    Parses the task's execution_result XML, validates that it really was
    a benchmark run on the expected host, then feeds whetstones, queue
    time and bandwidth into the host.  Any inconsistency marks the
    host's profiling as failed instead.
    """
    print ("Updating info after profiling site " + gridTask.host.hostname)
    print (" Task info:")
    print (" Task id: " + str(gridTask.id))
    print (" GW ID: " + gridTask.gwID)
    print (" desired host: " + gridTask.host.hostname)
    print (" Host type: " + str(gridTask.host.__class__))
    gridTask.status="CLEAR"
    # 1.- open the result file for this task.
    execution_file = base.tmpExecutionFolder + "execution_result_" + gridTask.gwID + ".xml"
    try:
        doc = xml.dom.minidom.parse(execution_file)
    except:
        print("failed when profiling host " + gridTask.host.hostname + ". File " + execution_file + " could not be found")
        gridTask.host.updateInfoAfterFailedProfiling()
        Session.add(gridTask)
        return
    executionInfoList = doc.getElementsByTagName('execution_info')
    for executionData in executionInfoList:
        try:
            gridTaskType = executionData.getElementsByTagName("type")[0].firstChild.data  # TODO: remove "unicode" from TEXT
            remoteHostName = executionData.getElementsByTagName("hostname")[0].firstChild.data
            whetstones = float(executionData.getElementsByTagName("whetstones")[0].firstChild.data)
            waitingTime = float(executionData.getElementsByTagName("execution_time")[0].firstChild.data)
            dataSize = float(executionData.getElementsByTagName("data_size")[0].firstChild.data)
        except:
            print("failed when profiling host " + gridTask.host.hostname + ". File " + execution_file + " could not be found")
            gridTask.host.updateInfoAfterFailedProfiling()
            Session.add(gridTask)
            return
    # 2.- process the results: sanity-check task type and host name.
    if gridTaskType != "benchmark":
        print ("ERROR when updating info from a site profiling")
        print("Incorrect task type, readed " + gridTaskType + " and should be \"benchmark\"")
        print (" considering the execution as failed")
        gridTask.host.updateInfoAfterFailedProfiling()
        Session.add(gridTask)
        return
    if remoteHostName != gridTask.host.hostname:
        print ("ERROR when updating info from a site profiling")
        print("Incorrect host name, readed" + remoteHostName + " and should be " + gridTask.host.hostname)
        print (" considering the execution as failed")
        gridTask.host.updateInfoAfterFailedProfiling()
        Session.add(gridTask)
        return
    # Derive transfer time and bandwidth from GridWay's accounting.
    totalActiveTime = InformationManager.readTotalActiveTime(gridTask.gwID)
    transferTime = totalActiveTime - waitingTime
    queueTime = InformationManager.readQueueTime(gridTask.gwID)
    if transferTime > 0:
        bandwidth = dataSize / transferTime
    else:
        bandwidth = -1  # sentinel: transfer time unusable
    # 3.- feed that info to the host.
    gridTask.host.updateInfoFromSuccessFulExecution(whetstones, queueTime, bandwidth)
    # 4.- delete temporary files (deletion currently disabled).
    try:
        #os.remove(execution_file)
        print ("IN application.py, I would be deleting " + execution_file)
        print ("Profiling file has been successfully deleted: " + execution_file)
    except:
        print ("Could not delete profiling file " + execution_file)
    gridTask.endDate = datetime.now()
# Interactive loop: let the user pick a table and append records to it
# until they type 'x' (or just press Enter).
while True:
    print('Add records:', '\n')
    print('Tables list:', '\n')
    print(tables, '\n')
    user_response = input('Enter \'table #\' or \'x\' to exit: ')
    if user_response.lower() == 'x' or user_response == '':
        break
    # BUG FIX: a non-numeric entry used to crash on int(); treat it the
    # same as an unknown table number.
    try:
        table_key = int(user_response)
    except ValueError:
        table_key = None
    table = tables.get(table_key, None)
    if table is not None:
        # Every column except the auto-generated primary key.
        columns = [col.name for col in table.__table__.columns
                   if col.name != 'id']
        print(columns)
        # Collect one value per column from the user.
        data = {}
        for col in columns:
            data[col] = input('type a value for {0}: '.format(col))
        if input('Add record? y/n') == 'y':
            record = table(**data)
            session.add(record)
            session.commit()
    else:
        print('Table not found', '\n')
def updateInfoAfterExecution(self, gridTask):
    """Process the XML result file of a finished execution task.

    Parses ``execution_result_<gwID>.xml``, validates the reported task
    type/host/timing data, updates the host's performance figures
    (whetstones, queue time, bandwidth) and the application's remaining
    sample count, and finally persists the task (and ``self``) via
    ``Session.add``.  On any validation failure the task is marked
    "CLEAR", the host is penalized via ``updateInfoAfterFailedExecution``,
    and the method returns early.

    :param gridTask: finished task; must expose ``gwID``, ``host``,
        ``status``, ``realSamples`` and ``endDate``.
    """
    print("Updating info after exetcution of task " + gridTask.gwID + " on host " + gridTask.host.hostname + " (hostID " + str(gridTask.host.id) + ")")
    gridTask.status="CLEAR"
    # 1.- open the result file corresponding to this task
    hostToUpdate = gridTask.host
    execution_file = base.tmpExecutionFolder + "/execution_result_" + gridTask.gwID + ".xml"
    # 1.- open the result file corresponding to this task
    try:
        doc = xml.dom.minidom.parse(execution_file)
    except:
        # Missing/unparseable result file counts as a failed execution.
        print("failed when updating info after execution. File " + execution_file + " could not be found")
        hostToUpdate.updateInfoAfterFailedExecution()
        Session.add(gridTask)
        return
    # If any of the expected output files is missing, also count the
    # execution as failed.
    for outputFile in self.outputFiles.split(","):
        # JOB_ID has to be replaced by gwID, as happens along the execution
        splittedFile = outputFile.split('JOB_ID')
        output=""
        for pos in range(len(splittedFile)):
            output += splittedFile[pos]
            if pos < len(splittedFile) -1:
                output+=gridTask.gwID
        if not os.path.exists(base.tmpExecutionFolder + "/" + output):
            print("failed when updating info after execution. output file " + base.tmpExecutionFolder + "/" + output + " could not be found")
            hostToUpdate.updateInfoAfterFailedExecution()
            Session.add(gridTask)
            return
    # Extract the reported metrics from the <execution_info> element(s).
    executionInfoList = doc.getElementsByTagName('execution_info')
    gridTaskType = None
    remoteHostName = None
    executionTime = None
    dataSize = None
    realSamples = None
    for executionData in executionInfoList:
        try:
            gridTaskType = executionData.getElementsByTagName("type")[0].firstChild.data
            #TODO: remove "unicode" from TEXT
            remoteHostName = executionData.getElementsByTagName("hostname")[0].firstChild.data
            executionTime = float(executionData.getElementsByTagName("execution_time")[0].firstChild.data)
            dataSize = float(executionData.getElementsByTagName("data_size")[0].firstChild.data)
            realSamples = int(executionData.getElementsByTagName("real_samples")[0].firstChild.data)
        except:
            # Any missing/malformed field aborts the update; task is
            # persisted as-is so the failure is recorded.
            print ("Error when reading execution file, exiting" )
            Session.add(gridTask)
            return
    # 2.- process the results (validate what the remote side reported)
    if gridTaskType != "execution":
        print ("ERROR when updating info from an application execution")
        print("Incorrect task type, expecting \"execution\"")
        gridTask.status = "CLEAR"
        Session.add(gridTask)
        return
    if remoteHostName != hostToUpdate.hostname:
        print ("ERROR when updating info from a application execution")
        print("Incorrect host name, expecting " + hostToUpdate.hostname)
        gridTask.status = "CLEAR"
        Session.add(gridTask)
        return
    if executionTime == 0:
        # Zero execution time would also divide-by-zero below.
        print ("ERROR when updating info from an application execution")
        print ("Execution time appears to be zero, and that's quite strange")
        gridTask.status = "CLEAR"
        hostToUpdate.updateInfoAfterFailedExecution()
        Session.add(gridTask)
        return
    totalActiveTime = InformationManager.readTotalActiveTime(gridTask.gwID)
    if totalActiveTime == -1:
        print ("ERROR when updating info from an application execution")
        print ("Could not read active time from GridWay log, considering that task failed")
        gridTask.status = "CLEAR"
        hostToUpdate.updateInfoAfterFailedExecution()
        Session.add(gridTask)
        return
    queueTime = InformationManager.readQueueTime(gridTask.gwID)
    if queueTime == -1:
        print ("ERROR when updating info from an application execution")
        print ("Could not read queue time from GridWay log, considering that task failed")
        gridTask.status = "CLEAR"
        hostToUpdate.updateInfoAfterFailedExecution()
        Session.add(gridTask)
        return
    # Transfer time is whatever active time was not spent executing;
    # bandwidth of -1 flags "unknown" when that difference is not positive.
    transferTime = totalActiveTime - executionTime
    if transferTime > 0:
        bandwidth = dataSize / transferTime
    else:
        bandwidth = -1
    # 3.- update the host's performance figures
    computationalEffort = self.profile.constantEffort + self.profile.sampleEffort * realSamples
    whetstones = computationalEffort / executionTime
    hostToUpdate.updateInfoFromSuccessFulExecution(whetstones, queueTime, bandwidth)
    # A success partially forgives earlier profiling failures.
    hostToUpdate.failedProfilings -=1
    # 4.- update the state of the task and the application
    gridTask.realSamples = realSamples
    gridTask.status = "CLEAR"
    self.remainingSamples -= realSamples
    print("APPLICATION UPDATE: " + str(self.remainingSamples) + "/" + str(self.desiredSamples) + " left")
    # 5.- delete temporary files (actual removal is currently disabled)
    try:
        #os.remove(execution_file)
        print ("In Application.py, I would be deletign" + execution_file)
        print ("Execution file has been successfully deleted: " + execution_file)
    except:
        print ("Could not delete profiling file " + execution_file)
    # 6.- update info on DB
    gridTask.endDate = datetime.now()
    Session.add(gridTask)
    Session.add(self)
def createInfrastructureTasks(self, infrastructureTasks):
    """Scan GridWay resources, register new hosts/pilots, and build tasks.

    Reads the resource list from ``obtainGWResources()``; for each entry it
    either creates a new ``Pilot``/``Host`` (persisting it via
    ``Session.add``) or updates an existing host's whetstone value/LRMS.
    Returns a list of profiling tasks: real host-profiling tasks for hosts
    in ``hostsToProfile`` (currently never populated — the profiling code
    is commented out) plus "fake" profiling tasks used to keep pilots
    bootstrapping up to ``base.maxRunningTasks``.

    :param infrastructureTasks: existing tasks, used to count already
        running fake tasks (``host.hostname == ""``).
    :return: list of profiling tasks to submit.
    """
    print ("-------------------")
    print ("-------------------")
    print ("createInfrastructureTasks- NewPilotInfrastructure")
    # self.showHosts()
    hostList = obtainGWResources()
    hostsToProfile = []
    print ("Analyzing resources ")
    for hostInfo in hostList:
        hostName = hostInfo.getElementsByTagName("HOSTNAME")[0].firstChild.data.strip().lower()
        #TODO: remove "unicode" from TEXT
        whetstones=0
        # Optional attributes: fall back to neutral defaults when absent.
        try:
            foundArch = hostInfo.getElementsByTagName("ARCH")[0].firstChild.data
        except:
            foundArch=""
        try:
            foundCpuMHz = int(hostInfo.getElementsByTagName("CPU_MHZ")[0].firstChild.data)
        except:
            foundCpuMHz = 0
        try:
            foundLrms = hostInfo.getElementsByTagName("LRMS_NAME")[0].firstChild.data.strip().lower()
        except:
            # LRMS is mandatory: a host without one is skipped entirely.
            foundLrms = None
            print ("Could not find LRMS for host " + hostName + ", skipping it")
            continue
        try:
            freeNodeCount = int(hostInfo.getElementsByTagName("FREENODECOUNT")[0].firstChild.data)
        except:
            freeNodeCount = 0
        if foundLrms == "jobmanager-pilot":
            # Only consider pilots with at least one free slot.
            if not freeNodeCount > 0:
                continue
            # NOTE(review): nesting below reconstructed from mangled source —
            # the PILOT_* variable parsing is assumed to apply to pilot
            # entries only; confirm against the original file's indentation.
            username = os.getenv("USER")
            # Pilot-published location: real worker node and site.
            genericStringArgs = hostInfo.getElementsByTagName("GENERIC_VAR_STR")
            for node in genericStringArgs:
                if node.attributes['NAME'].value =="PILOT_REAL_HOSTNAME":
                    workerNode = node.attributes['VALUE'].value.strip().lower()
                if node.attributes['NAME'].value =="PILOT_REAL_RESOURCE":
                    site = node.attributes['VALUE'].value.strip().lower()
            # Pilot-published performance (whetstones) for this user.
            genericIntArgs = hostInfo.getElementsByTagName("GENERIC_VAR_INT")
            for node in genericIntArgs:
                if node.attributes['NAME'].value =="PILOT_" + username + "_VAR_5":
                    whetstones = int(node.attributes['VALUE'].value.strip().lower())
            # Values above 65534 are treated as "not initialized".
            if whetstones > 65534:
                whetstones = 0
        # whetstones = 0
        # If the host is unknown, register it (and formerly: queue a
        # profiling task for it).
        currentHost = self.getHost(hostName)
        if currentHost == None:
            print ("Host/Pilot not found. hostname: " + hostName + ", LRMS: " + foundLrms)
            if foundLrms == "jobmanager-pilot":
                # Found a pilot: first look up its resource (create it if it
                # does not exist yet), then create a Pilot using it.
                pilotResource = base.Session.query(PilotResource).filter(PilotResource.site == site, PilotResource.workerNode == workerNode).first()
                if pilotResource == None:
                    print (" PilotResource was not found, creating a new one")
                    pilotResource = PilotResource(site, workerNode)
                print (" Creating a new Pilot in NewPilotInfrastructure.createInfrastructureTasks")
                newHost = Pilot(hostName, arch=foundArch, cpuMHz = foundCpuMHz, pilotResource = pilotResource, whetstones = whetstones)
                self.pilots.append(newHost)
                Session.add(newHost)
            else:
                print (" Creating a new Host in NewPilotInfrastructure.createInfrastructureTasks")
                newHost = Host(hostName, arch=foundArch, cpuMHz = foundCpuMHz, lrms=foundLrms)
                self.hosts.append(newHost)
                Session.add(newHost)
            # THIS WAS FOR PROFILING PILOTS THAT HAD NOT PUBLISHED THEIR
            # WHETSTONES; OTHERWISE IT IS NOT NEEDED.
            #===============================================================
            # if whetstones == 0 or whetstones > 65534:
            #    whetstones = 0
            #    print ("    Host to profile: " + hostName + ": whetstone value not initialized ")
            #    hostsToProfile.append(newHost)
            #    #store new host on database (failure resistance)
            #    Session.add(newHost)
            #===============================================================
        # If the published performance changed, update the host record.
        elif (currentHost.getWhetstones() != whetstones):
            # Uses a setter because it is a more involved operation,
            # encapsulated in that function.
            currentHost.setWhetstones(whetstones)
            Session.add(currentHost)
            print ("Host: " + hostName + " UPDATED, new whetstones=" + str(whetstones))
        elif currentHost.lrms == None:
            currentHost.lrms = foundLrms
    # Profiling of new sites (hostsToProfile is currently always empty).
    hostProfilingTasks = [ExecutionManager.createHostProfilingTask(host) for host in hostsToProfile for i in range(base.profilingTasksPerHost)]
    # We assume all pilots publish the performance variable, so no real
    # profiling is needed. In this new approach we still want a number of
    # benchmarks running in order to bootstrap pilots.
    print ("creating fake profiling tasks")
    existingFakeTasks = len([task for task in infrastructureTasks if task.host.hostname=="" and task.status != "PENDING"])
    existingGoodPilots = len (self.getGoodHosts())
    existingProfilingTasks = len(hostProfilingTasks)
    #fakeTasksToCreate = base.maxRunningTasks - (existingFakeTasks + existingGoodPilots + existingProfilingTasks)
    fakeTasksToCreate = base.maxRunningTasks - existingFakeTasks
    print (" Desired tasks: " + str(base.maxRunningTasks))
    print (" Existing fake tasks: " + str(existingFakeTasks))
    print (" Existing good pilots: " + str(existingGoodPilots))
    print (" created: " + str(fakeTasksToCreate))
    emptyHost = FakeHost()
    fakeHostProfilingTasks = [ExecutionManager.createFakeHostProfilingTask(emptyHost) for i in range(fakeTasksToCreate)]
    hostProfilingTasks+=fakeHostProfilingTasks
    return hostProfilingTasks