def old(iterations): p_start = time.time() connection = ENGINE.connect() db_time = [] i = 0 while i < 300: start = time.time() transaction = connection.begin() a = 0 while a < iterations: area = random.randrange(5, 60, 1) wu_id = random.randrange(5, 60, 1) connection.execute(AREA.update() .where(AREA.c.area_id == area) .values(workunit_id=wu_id, update_time=datetime.datetime.now())) a += 1 sleepytime = random.randrange(80, 140, 1) time.sleep(sleepytime/100.0) transaction.commit() print 'Time in DB {0}'.format(time.time() - start) db_time.append(time.time() - start) i += 1 total = 0 for dbtime in db_time: total += dbtime ave = total/len(db_time) print 'Total time: {0}'.format(total) print 'Ave per transaction: {0}'.format(ave) print 'Total program run time: {0}'.format(time.time() - p_start)
def old(iterations): p_start = time.time() connection = ENGINE.connect() db_time = [] i = 0 while i < 300: start = time.time() transaction = connection.begin() a = 0 while a < iterations: area = random.randrange(5, 60, 1) wu_id = random.randrange(5, 60, 1) connection.execute( AREA.update().where(AREA.c.area_id == area).values( workunit_id=wu_id, update_time=datetime.datetime.now())) a += 1 sleepytime = random.randrange(80, 140, 1) time.sleep(sleepytime / 100.0) transaction.commit() print 'Time in DB {0}'.format(time.time() - start) db_time.append(time.time() - start) i += 1 total = 0 for dbtime in db_time: total += dbtime ave = total / len(db_time) print 'Total time: {0}'.format(total) print 'Ave per transaction: {0}'.format(ave) print 'Total program run time: {0}'.format(time.time() - p_start)
def lock(time_p):
    """Hold one long transaction of ~99k AREA / PIXEL_RESULT updates, then roll back.

    Useful for provoking and observing row-lock contention; nothing is
    persisted because the whole transaction is rolled back at the end.

    :param time_p: unused; kept so existing callers keep working.
    """
    connection = ENGINE.connect()
    try:
        transaction = connection.begin()
        # The original `while True` + break executed for i = 999..100000
        # inclusive; an explicit range makes those bounds visible.
        for i in range(999, 100001):
            wu_id = random.randrange(5, 60, 1)
            connection.execute(AREA.update()
                               .where(AREA.c.area_id == wu_id)
                               .values(workunit_id=wu_id,
                                       update_time=datetime.datetime.now()))
            connection.execute(PIXEL_RESULT.update()
                               .where(PIXEL_RESULT.c.pxresult_id == wu_id)
                               .values(y=i, x=i))
        # Deliberately discard all of the work.
        transaction.rollback()
    finally:
        # Fix: the original never closed the connection (resource leak).
        connection.close()
def lock(time_p):
    """Open a single transaction, hammer AREA and PIXEL_RESULT with updates,
    then roll the entire batch back so no changes persist.

    The loop body runs for counter values 999 through 100000 inclusive,
    matching the original `while True` / break-at-100001 behaviour.

    :param time_p: unused; retained for interface compatibility.
    """
    connection = ENGINE.connect()
    try:
        transaction = connection.begin()
        counter = 999
        while counter <= 100000:
            target = random.randrange(5, 60, 1)
            connection.execute(AREA.update()
                               .where(AREA.c.area_id == target)
                               .values(workunit_id=target,
                                       update_time=datetime.datetime.now()))
            connection.execute(PIXEL_RESULT.update()
                               .where(PIXEL_RESULT.c.pxresult_id == target)
                               .values(y=counter, x=counter))
            counter += 1
        # All of the above is intentionally thrown away.
        transaction.rollback()
    finally:
        # Fix: close the connection (the original leaked it).
        connection.close()
def assimilate_handler(self, wu, results, canonical_result): """ Process the Results. """ self.logDebug("Start of assimilate_handler for wu %d\n", wu.id) connection = None transaction = None try: if wu.canonical_result: outFile = self.get_file_path(canonical_result) self.area = None if outFile: if os.path.isfile(outFile): pass else: self.logDebug("File [%s] not found\n", outFile) outFile = None if outFile: self.logDebug("Reading File [%s]\n", outFile) start = time.time() connection = ENGINE.connect() transaction = connection.begin() resultCount = self._process_result(connection, outFile, wu) if self.noinsert: transaction.rollback() else: if not resultCount: self.logCritical("No results were found in the output file\n") if self._area_id is None: self.logDebug("The Area was not found\n") else: connection.execute(AREA.update(). where(AREA.c.area_id == self._area_id). values(workunit_id = wu.id, update_time = datetime.datetime.now())) user_id_set = set() for result in results: if result.user and result.validate_state == boinc_db.VALIDATE_STATE_VALID: user_id = result.user.id if user_id not in user_id_set: user_id_set.add(user_id) connection.execute(AREA_USER.delete().where(AREA_USER.c.area_id == self._area_id)) insert = AREA_USER.insert() for user_id in user_id_set: connection.execute(insert, area_id=self._area_id, userid=user_id) time_taken = '{0:.2f}'.format(time.time() - start) self.logDebug("Saving %d results for workunit %d in %s seconds\n", resultCount, wu.id, time_taken) transaction.commit() connection.close() else: self.logCritical("The output file was not found\n") else: self.logDebug("No canonical_result for workunit\n") self.report_errors(wu) except: if transaction is not None: transaction.rollback() if connection is not None: connection.close() print "Unexpected error:", sys.exc_info()[0] traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]) self.logCritical("Unexpected error occurred, retrying...\n") return -1 return 0
def assimilate_handler(self, wu, results, canonical_result): """ Process the Results. """ self.logDebug("Start of assimilate_handler for wu %d\n", wu.id) connection = None transaction = None try: if wu.canonical_result: outFile = self.get_file_path(canonical_result) self.area = None if outFile: if os.path.isfile(outFile): pass else: self.logDebug("File [%s] not found\n", outFile) outFile = None if outFile: self.logDebug("Reading File [%s]\n", outFile) start = time.time() connection = ENGINE.connect() transaction = connection.begin() resultCount = self._process_result(connection, outFile, wu) if self.noinsert: transaction.rollback() else: if not resultCount: self.logCritical( "No results were found in the output file\n") if self._area_id is None: self.logDebug("The Area was not found\n") else: connection.execute(AREA.update().where( AREA.c.area_id == self._area_id).values( workunit_id=wu.id, update_time=datetime.datetime.now())) user_id_set = set() for result in results: if result.user and result.validate_state == boinc_db.VALIDATE_STATE_VALID: user_id = result.user.id if user_id not in user_id_set: user_id_set.add(user_id) connection.execute(AREA_USER.delete().where( AREA_USER.c.area_id == self._area_id)) insert = AREA_USER.insert() for user_id in user_id_set: connection.execute(insert, area_id=self._area_id, userid=user_id) time_taken = '{0:.2f}'.format(time.time() - start) self.logDebug( "Saving %d results for workunit %d in %s seconds\n", resultCount, wu.id, time_taken) transaction.commit() connection.close() else: self.logCritical("The output file was not found\n") else: self.logDebug("No canonical_result for workunit\n") self.report_errors(wu) except: if transaction is not None: transaction.rollback() if connection is not None: connection.close() print "Unexpected error:", sys.exc_info()[0] traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]) self.logCritical("Unexpected error occurred, retrying...\n") return -1 return 0
def assimilate_handler(self, wu, results, canonical_result): """ Process the Results. """ self.logDebug("Start of assimilate_handler for wu %d\n", wu.id) connection = None try: if wu.canonical_result: out_file = self.get_file_path(canonical_result) self.area = None if out_file: if os.path.isfile(out_file): pass else: self.logDebug("File [%s] not found\n", out_file) out_file = None if out_file: self.logDebug("Reading File [%s]\n", out_file) start = time.time() connection = ENGINE.connect() result_count = self._process_result(connection, out_file, wu) if self.noinsert: pass else: if not result_count: self.logCritical("No results were found in the output file\n") if self._area_id is None: self.logDebug("The Area was not found\n") else: self._database_queue.append(AREA.update() .where(AREA.c.area_id == self._area_id) .values(workunit_id=wu.id, update_time=datetime.datetime.now())) user_id_set = set() for result in results: if result.user and result.validate_state == boinc_db.VALIDATE_STATE_VALID: user_id = result.user.id if user_id not in user_id_set: user_id_set.add(user_id) self._database_queue.append(AREA_USER.delete().where(AREA_USER.c.area_id == self._area_id)) insert_area_user = AREA_USER.insert() insert_galaxy_user = GALAXY_USER.insert().prefix_with('IGNORE') for user_id in user_id_set: self._database_queue.append(insert_area_user.values(area_id=self._area_id, userid=user_id)) self._database_queue.append(insert_galaxy_user.values(galaxy_id=self._galaxy_id, userid=user_id)) # Copy the file to S3 s3helper = S3Helper() s3helper.add_file_to_bucket(get_sed_files_bucket(), get_key_sed(self._galaxy_name, self._run_id, self._galaxy_id, self._area_id), out_file, reduced_redundancy=True) time_taken = '{0:.2f}'.format(time.time() - start) self.logDebug("Saving %d results for workunit %d in %s seconds\n", result_count, wu.id, time_taken) self._run_pending_db_tasks(connection) connection.close() else: self.logCritical("The output file was not found\n") else: 
self.logDebug("No canonical_result for workunit\n") self.report_errors(wu) except: if connection is not None: connection.close() print "Unexpected error:", sys.exc_info()[0] traceback.print_exception(sys.exc_info()[0], sys.exc_info()[1], sys.exc_info()[2]) self.logCritical("Unexpected error occurred, retrying...\n") return -1 return 0