def __call__(self):
    """Run one round of the insert-then-query-all workload.

    Inserts a batch via run_insert_h() and logs its timing triple,
    then queries the whole database, logs the query timings and the
    current database size, and resets the connection state.  When the
    insert source is exhausted (StopIteration) the worker closes its
    connections and stops this Grinder worker thread.
    """
    try:
        res = self.testdb.run_insert_h()
        grinder.logger.info(
            "Insertion Results as (start time, end time, completion"
            " time): (%s, %s, %s)" % (res[0], res[1], res[2]))
        print("done insert")
    except StopIteration:
        # the test is complete -- close everything down and stop this
        # worker thread (presumably stopThisWorkerThread() does not
        # return, so the query below is skipped -- TODO confirm)
        grinder.logger.info("Insertion finished at: %s" % time.time())
        self.testdb.close_all()
        grinder.stopThisWorkerThread()
    res = self.testdb.run_query_all()
    grinder.logger.info(
        "Query Results as (start time, end time, completion"
        " time): (%s, %s, %s)" % (res[0], res[1], res[2]))
    # log db size; %s formatting also works if get_db_size() returns
    # an int (the original "..." + size would raise TypeError then)
    size = self.testdb.get_db_size()
    grinder.logger.info("The database size is now %s bytes." % size)
    self.testdb.reset_conn_state()
def __call__(self):
    """Run one round of the insert-then-windowed-query workload.

    Inserts a batch via run_insert_w() and logs its timing triple,
    then runs a fixed-size query (100 records from 1000 streams).
    Every one million inserted records it additionally runs the full
    query test and logs the database size.  When the insert source is
    exhausted (StopIteration) the worker shuts down cleanly.
    """
    # start this round
    try:
        res = self.testdb.run_insert_w()
        grinder.logger.info(
            "Insertion Results as (start time, end time, completion"
            " time): (%s, %s, %s)" % (res[0], res[1], res[2]))
    except StopIteration:
        # the test is complete -- close everything down and stop this
        # worker thread
        grinder.logger.info("Insertion finished at: %s" % time.time())
        self.testdb.close_all()
        grinder.stopThisWorkerThread()
    res = self.testdb.query(records=100, streams=1000)
    grinder.logger.info(
        "Query 100 records from 1000 streams Results as (start time,"
        " end time, completion time): (%s, %s, %s)"
        % (res[0], res[1], res[2]))
    if self.counter % 1000000 == 0:
        # run the full query test every one million records
        # NOTE(review): this fires only when counter lands exactly on
        # a multiple of 1,000,000; since counter advances by
        # self.streams per round, verify streams divides 1,000,000
        res = self.testdb.run_query_all()
        grinder.logger.info(
            "Query all records Results as (start time, end time,"
            " completion time): (%s, %s, %s)" % (res[0], res[1], res[2]))
        # log db size; %s formatting also works if get_db_size()
        # returns an int (plain "+" concatenation would raise then)
        size = self.testdb.get_db_size()
        grinder.logger.info("The database size is now %s bytes." % size)
    self.counter += self.streams  # add number of streams each time, since
                                  # we're adding a point to each stream
                                  # every round
    self.testdb.reset_conn_state()
def __call__(self):
    """Dispatch one scheduled round by phase.

    Rounds [0, callcountermax) run the insertion-phase test
    (part_one); rounds [callcountermax, callcountermax +
    callcounter_q_max) run the post-insertion test (part_two); any
    later round closes the database and stops this worker thread.
    The round counter advances after each dispatched round.
    """
    round_no = self.callcounter
    print("run #%s" % round_no)
    insert_rounds = self.callcountermax
    if round_no < insert_rounds:
        # insertion phase: the pre/during-insertion test
        self.part_one()
        if round_no == insert_rounds - 1:
            # final insertion round -- record completion to log
            grinder.logger.info("Insertion finished at: %s" % time.time())
    elif round_no < insert_rounds + self.callcounter_q_max:
        # query phase: the post-insertion test
        self.part_two()
    else:
        # both phases complete -- tear down and stop the worker
        self.testdb.close_all()
        grinder.stopThisWorkerThread()
    self.callcounter += 1
def __call__(self):
    """Run one round of the query-scaling experiment.

    Each round grows the query window by 10 records per stream, runs
    a full-database control query (to detect external interference),
    then runs the growing window query.  After 100 rounds the worker
    shuts down.
    """
    if self.callcounter >= 100:
        grinder.logger.info("Test complete")
        self.testdb.close_all()
        # presumably stopThisWorkerThread() raises/does not return,
        # so the queries below only run while the test is still
        # active -- TODO confirm against Grinder docs
        grinder.stopThisWorkerThread()
    else:
        self.callcounter += 1
        self.t1size += 10
    #start this round
    # this is the control. If the time that this takes varies, we know that
    # external factors are affecting our results.
    res = self.testdb.run_query_all()
    grinder.logger.info("Control Query Results as (start time, end time, " "completion" + " time): (" + str(res[0]) + ", " + str(res[1]) + ", " + str(res[2]) + ")")
    # actual experiment. the values for this will vary over time
    # this represents querying for data from an arbitrary time window of
    # fixed width
    # measure performance vs query size
    res = self.testdb.query(self.t1size, self.streams)
    grinder.logger.info("Query " + str(self.t1size*self.streams) + " items Results as (start time, end time, " "completion" + " time): (" + str(res[0]) + ", " + str(res[1]) + ", " + str(res[2]) + ")")
    # actual experiement. the values for this will vary over time.
    # this represents querying for the last x values for the stream, which
    # is very common
    # reset the connection and statement
    self.testdb.reset_conn_state()