def processSession(self, sess):
    #if len(sess["operations"]) > 1:
        #LOG.info(pformat(sess["operations"]))
        #LOG.info("-"*100)
    for op0, op1 in itertools.combinations(sess["operations"], 2):
        # Skip any pairs that reference the same collection
        if op0["collection"] == op1["collection"]: continue
        if op0["query_id"] == op1["query_id"]: continue
        if op0["collection"].endswith("$cmd"): continue
        if op1["collection"].endswith("$cmd"): continue

        content0 = workload.getOpContents(op0)
        if len(content0) == 0: continue
        content1 = workload.getOpContents(op1)
        if len(content1) == 0: continue

        values0 = self.extractValues(content0[0])
        #LOG.info("CONTENT0:\n" + pformat(op0))
        #LOG.info("VALUES0:\n" + pformat(values0))
        #LOG.info("="*120)
        #LOG.info("="*120)

        values1 = self.extractValues(content1[0])
        #LOG.info("CONTENT1:\n" + pformat(op1))
        #LOG.info("VALUES1:\n" + pformat(values1))

        assert op0["collection"] in self.comparisons, \
            op0["collection"] + "-->" + str(self.comparisons.keys())
        compare = self.comparisons[op0["collection"]]
        for key0, key1 in itertools.product(values0.keys(), values1.keys()):
            compare.addComparison(key0, values0[key0], op1["collection"], key1, values1[key1])
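# processSession() above leans on self.extractValues() to flatten one query document
# into a {field: value} map before the cross-collection comparison. Below is a minimal
# sketch of that flattening; the name extractValuesSketch and the one-level descent into
# operator clauses are assumptions for illustration, not the class's actual method.
def extractValuesSketch(query_doc):
    values = {}
    for field, value in query_doc.iteritems():
        if isinstance(value, dict):
            # descend one level so operator clauses such as {"$eq": 1234} yield 1234
            for inner in value.itervalues():
                values[field] = inner
        else:
            values[field] = value
    return values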
def dumpOp(fd, op):
    # Remove all of the resp_* fields
    for k in op.keys():
        for regex in STRIP_REGEXES:
            if regex.match(k):
                del op[k]
                break

    if op["type"] != constants.OP_TYPE_UPDATE:
        for k in ["update_multi", "update_upsert"]:
            if k in op:
                del op[k]
    ## IF
    if "query_aggregate" in op and op["query_aggregate"] == False:
        del op["query_aggregate"]
    ## FOR

    # Get $in stats if we have them
    inHistogram = None
    for op_contents in workload.getOpContents(op):
        inHistogram = computeInStats(op_contents)
        if not inHistogram is None:
            # We need to compute it for all other ops
            # with the same hash
            for other_op in QUERY_HASH_XREF[hash]:
                if other_op == op: continue
                for op_contents in workload.getOpContents(other_op):
                    computeInStats(op_contents, inHistogram)
            break
    ## FOR

    contents = pformat(convert(op))
    fd.write("Query Count: %.1f%%\n" % percentage)
    fd.write(contents + "\n")

    if not inHistogram is None:
        all_values = inHistogram.getAllValues()
        fd.write("\n$IN STATISTICS:\n")
        fd.write(" + min len: %d\n" % min(all_values))
        fd.write(" + max len: %d\n" % max(all_values))
        fd.write(" + avg len: %.2f\n" % numpy.average(all_values))
        fd.write(" + stdev:   %.2f\n" % numpy.std(all_values))
    return
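# dumpOp() relies on a computeInStats() helper that is not shown here. The sketch below
# illustrates one plausible contract that matches how dumpOp() uses it: return None when
# the query document has no $in clauses and no histogram was passed in; otherwise record
# the length of each $in list into a histogram that supports put()/getAllValues().
# _MiniHistogram and computeInStatsSketch are illustrative stand-ins, not the real code.
class _MiniHistogram(object):
    """Tiny stand-in for the histogram type whose getAllValues() dumpOp() calls."""
    def __init__(self):
        self.values = []
    def put(self, v):
        self.values.append(v)
    def getAllValues(self):
        return list(self.values)

def computeInStatsSketch(query_doc, histogram=None):
    for clause in query_doc.itervalues():
        if isinstance(clause, dict) and "$in" in clause:
            if histogram is None:
                histogram = _MiniHistogram()
            histogram.put(len(clause["$in"]))
    return histogram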
def guess_op_info(self, design, op):
    """
        Return a tuple containing the best index to use for this operation,
        a boolean flag that is true if that index covers the entire operation's
        query, the estimated size of that index, and the operation's slot size.
    """
    # Simply choose the index that has most of the fields
    # referenced in the operation.
    col_name = op["collection"]
    indexes = design.getIndexes(col_name)
    op_contents = workload.getOpContents(op)

    # Extract the keys from op_contents
    op_index_list = []
    for query in op_contents:
        for key in query.iterkeys():
            op_index_list.append(key)

    # Add the projection keys into op_index_list
    # op["query_fields"] is the projection
    hasProjectionField = False
    projectionFields = op.get("query_fields", None)
    if projectionFields:
        hasProjectionField = True
        for key in projectionFields.iterkeys():
            op_index_list.append(key)

    best_index = None
    best_ratio = None
    for i in xrange(len(indexes)):
        field_cnt = 0
        for indexKey in indexes[i]:
            indexMatch = indexKey in op_index_list
            # We can't use a field if it's being used in a regex operation
            if indexMatch and not workload.isOpRegex(op, field=indexKey):
                field_cnt += 1
            if not indexMatch or field_cnt >= len(op_index_list):
                break
        field_ratio = field_cnt / float(len(indexes[i]))
        if not best_index or field_ratio >= best_ratio:
            # If the ratios are the same, then choose the
            # one with the most keys
            if field_ratio == best_ratio:
                if len(indexes[i]) <= len(best_index):
                    continue
            if field_ratio != 0:
                best_index = indexes[i]
                best_ratio = field_ratio
    ## FOR
    if self.debug:
        LOG.debug("Op #%d - BestIndex:%s / BestRatio:%s", op["query_id"], best_index, best_ratio)

    # Check whether this is a covering index
    covering = False
    if hasProjectionField:
        if best_index and op["type"] == constants.OP_TYPE_QUERY:
            # Extract the keys from best_index
            best_index_list = []
            for index in best_index:
                best_index_list.append(index)

            if len(op_index_list) <= len(best_index_list):
                counter = 0
                while counter < len(op_index_list):
                    if op_index_list[counter] != best_index_list[counter]:
                        break
                    counter += 1
                if counter == len(op_index_list):
                    covering = True
            ## IF
        ## IF
    ## IF

    # Get the size of the best index
    if not self.no_index_size_estimation:
        index_size = 0
        col_info = self.state.collections[col_name]
        index_size += getIndexSize(col_info, best_index)

        if col_name in self.parent_to_children_map:
            children_set = self.parent_to_children_map[col_name]
            if len(children_set) > 0:
                for child in children_set:
                    col_info = self.state.collections[child]
                    index_size += getIndexSize(col_info, best_index)
                ## FOR
            ## IF
        ## IF
    ## IF
    else:
        index_size = 1

    # Get the slot size of this operation
    assert not col_name in self.child_collections, \
        "collection %s should not be queried.\n child_collections: %s\ndesign: \n%s" % \
        (col_name, self.child_collections, design)
    slot_size = 0
    if col_name in self.col_cost_map:
        slot_size = int(math.ceil(self.col_cost_map[col_name]))
    else:
        slot_size = 1
    if slot_size != 1:
        slot_size *= 100

    return best_index, covering, index_size, slot_size
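# A small, self-contained illustration of the ratio heuristic that guess_op_info() uses
# (field and index names are made up; the regex and covering-index checks are omitted):
def best_index_by_ratio(op_fields, candidate_indexes):
    best, best_ratio = None, 0.0
    for index in candidate_indexes:
        matched = 0
        for key in index:
            if key not in op_fields:
                break
            matched += 1
        ratio = matched / float(len(index))
        if ratio > best_ratio or (ratio == best_ratio and best is not None and len(index) > len(best)):
            best, best_ratio = index, ratio
    return best, best_ratio

# best_index_by_ratio(["user_id", "timestamp"],
#                     [("user_id",), ("user_id", "timestamp", "flag")])
# -> (("user_id",), 1.0)
# The single-key index wins because a fully matched short index scores a higher ratio (1.0)
# than a wider index that is only partially matched (2/3), mirroring the loop above; the
# tie-breaking branch only prefers the longer index when the ratios are exactly equal.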
def getCostImpl(self, design, num_nodes=None):
    """
        Estimate the Disk Cost for a design and a workload.
        Note: If this is being invoked from overallCost(), then diskCost() should be
              calculated before skewCost() because we will reuse the same histogram of
              how often nodes are touched in the workload.
    """
    # delta = self.__getDelta__(design)

    # Initialize all of the LRU buffers.
    # Since every lru has the same configuration, we can cache the first
    # initialization and then deepcopy it to the other lrus
    cache = None
    # for lru in self.buffers:
    #     cache = lru.initialize(design, delta, cache)
    #     LOG.info(lru)
    #     lru.validate()

    # Ok strap on your helmet, this is the magical part of the whole thing!
    self.buildEmbeddingCostDictionary(design)
    # print "Magic map: ", pformat(cost_map)
    # print "Magic list: ", child_collections

    # Outline:
    # + For each operation, we need to figure out what document(s) it's going
    #   to need to touch. From this we want to compute a unique hash signature
    #   for those documents so that we can identify what node those documents
    #   reside on and whether those documents are in our working set memory.
    #
    # + For each node, we are going to have a single LRU buffer that simulates
    #   the working set for all collections and indexes in the database.
    #   Document entries are going to be tagged based on whether they are
    #   part of an index or a collection.
    #
    # + Now when we iterate through each operation in our workload, we are
    #   going to need to first figure out what index (if any) it will need
    #   to use and how it will be used (i.e., equality look-up or range scan).
    #   We can then compute the hash for the look-up keys.
    #   If that key is in the LRU buffer, then we will update its entry's last
    #   accessed timestamp. If it's not, then we will increase the page hit
    #   counter and evict some other entry.
    #   After evaluating the target index, we will check whether the index
    #   covers the query. If it does, then we're done.
    #   If not, then we need to compute the hash for the "base" documents that it
    #   wants to access (i.e., in the collection). Then just as before, we
    #   will check whether it's in our buffer, make an eviction if not, and
    #   update our page hit counter.
    #   There are several additional corner cases that we need to handle:
    #     INSERT/UPDATE: Check whether it's an upsert query
    #     INSERT/UPDATE/DELETE: We assume that they're using a WAL and therefore
    #                           writing dirty pages is "free"
    #     UPDATE/DELETE: Check whether the "multi" flag is set to true, which will
    #                    tell us to stop the scan after the first matching document
    #                    is found.
    #
    # NOTE: We don't need to keep track of evicted tuples. It's either in the LRU buffer or not.
    # TODO: We may want to figure out how to estimate whether we are traversing
    #       indexes on the right-hand side of the tree. We could somehow preserve
    #       the sort order of the keys when we hash them...

    # Worst case is when every query requires a full collection scan
    # Best case, every query is satisfied by main memory
    totalWorst = 0
    totalCost = 0
    sess_ctr = 0
    total_index_penalty = 0
    total_worst_index_penalty = 0
    for sess in self.state.workload:
        for op in sess["operations"]:
            # Is the collection in the design? If not, ignore it
            if not design.hasCollection(op["collection"]):
                if self.debug:
                    LOG.debug("NOT in design: SKIP - All operations on %s", op["collection"])
                continue
            if design.isRelaxed(op["collection"]):
                if self.debug:
                    LOG.debug("Relaxed in design: SKIP - All operations on %s", op["collection"])
                continue
            col_info = self.state.collections[op["collection"]]

            # Initialize cache if necessary
            # We will always want to do this regardless of whether caching is enabled
            cache = self.state.getCacheHandle(col_info)

            # Check whether we have a cached index selection based on query_hashes
            indexKeys, covering, index_size, slot_size = \
                cache.best_index.get(op["query_hash"], (None, None, None, None))
            if indexKeys is None:
                indexKeys, covering, index_size, slot_size = self.guess_op_info(design, op)
                if self.state.cache_enable:
                    if self.debug:
                        self.state.cache_miss_ctr.put("best_index")
                    cache.best_index[op["query_hash"]] = (indexKeys, covering, index_size, slot_size)
            elif self.debug:
                self.state.cache_hit_ctr.put("best_index")

            pageHits = 0
            maxHits = 0
            indexKeyInsertionPenalty = 0
            worst_index_penalty = 0
            isRegex = self.state.__getIsOpRegex__(cache, op)

            try:
                opNodes = self.state.__getNodeIds__(cache, design, op)
            except:
                if self.debug:
                    LOG.warn("Failed to estimate touched nodes for op\n%s" % pformat(op))
                self.err_ctr += 1
                continue

            for content in workload.getOpContents(op):
                for node_id in opNodes:
                    lru = self.buffers[node_id]
                    self.total_op_contents += 1
                    maxHits += cache.fullscan_pages
                    indexKeyInsertionPenalty += self.getIndexKeyInsertionPenalty(indexKeys, content)
                    worst_index_penalty += 1

                    # If the slot size is too large, we consider it as a full page scan
                    if slot_size >= constants.SLOT_SIZE_LIMIT:
                        pageHits += cache.fullscan_pages
                        continue
                    ## FOR

                    # TODO: Need to handle whether it's a scan or an equality predicate
                    # TODO: We need to handle when we have a regex predicate. These are tricky
                    #       because they may use an index that will examine a subset of the
                    #       collection and then execute a regex on just those documents.

                    # If we have a target index, hit that up
                    if indexKeys and not isRegex:  # FIXME
                        documentId = cache.index_docIds.get(op["query_id"], None)
                        if documentId is None:
                            values = catalog.getFieldValues(indexKeys, content)
                            try:
                                documentId = hash(values)
                            except:
                                if self.debug:
                                    LOG.error("Failed to compute index documentIds for op #%d - %s\n%s",
                                              op["query_id"], values, pformat(op))
                                self.err_ctr += 1
                                break
                            if self.state.cache_enable:
                                if self.debug:
                                    self.state.cache_miss_ctr.put("index_docIds")
                                cache.index_docIds[op["query_id"]] = documentId
                        elif self.debug:
                            self.state.cache_hit_ctr.put("index_docIds")
                        ## IF

                        hits = lru.getDocumentFromIndex(indexKeys, index_size)
                        # print "hits: ", hits
                        pageHits += hits
                        # maxHits += hits if op['type'] == constants.OP_TYPE_INSERT else cache.fullscan_pages
                        if self.debug:
                            LOG.debug("Node #%02d: Estimated %d index scan pageHits for op #%d on %s.%s",
                                      node_id, hits, op["query_id"], op["collection"], indexKeys)

                    # If we don't have an index, then we know that it's a full scan because the
                    # collections are unordered
                    if not indexKeys:
                        if self.debug:
                            LOG.debug("No index available for op #%d. Will have to do full scan on '%s'",
                                      op["query_id"], op["collection"])
                        pageHits += cache.fullscan_pages
                        # maxHits += cache.fullscan_pages

                    # Otherwise, if it's not a covering index, then we need to hit up
                    # the collection to retrieve the whole document
                    elif not covering:
                        documentId = cache.collection_docIds.get(op["query_id"], None)
                        if documentId is None:
                            values = catalog.getAllValues(content)
                            try:
                                documentId = hash(values)
                            except:
                                if self.debug:
                                    LOG.error("Failed to compute collection documentIds for op #%d - %s\n%s",
                                              op["query_id"], values, pformat(op))
                                self.err_ctr += 1
                                break
                            if self.state.cache_enable:
                                if self.debug:
                                    self.state.cache_miss_ctr.put("collection_docIds")
                                cache.collection_docIds[op["query_id"]] = documentId
                        elif self.debug:
                            self.state.cache_hit_ctr.put("collection_docIds")
                        ## IF

                        hits = lru.getDocumentFromCollection(op["collection"], documentId, slot_size)
                        pageHits += hits
                        # maxHits += hits if op['type'] == constants.OP_TYPE_INSERT else cache.fullscan_pages
                        if self.debug:
                            LOG.debug("Node #%02d: Estimated %d collection scan pageHits for op #%d on %s",
                                      node_id, hits, op["query_id"], op["collection"])

                    # We have a covering index, which means that we don't have
                    # to do a look-up on the document in the collection.
                    # But we still need to increase maxHits so that the final
                    # ratio is counted correctly
                    # Yang seems happy with this...
                    else:
                        assert op["type"] != constants.OP_TYPE_INSERT
                        # maxHits += cache.fullscan_pages
                ## FOR (node)
            ## FOR (content)

            totalCost += pageHits
            totalWorst += maxHits
            total_index_penalty += indexKeyInsertionPenalty
            total_worst_index_penalty += worst_index_penalty
            if self.debug:
                LOG.debug("Op #%d on '%s' -> [pageHits:%d / worst:%d]",
                          op["query_id"], op["collection"], pageHits, maxHits)
            assert pageHits <= maxHits, \
                "Estimated pageHits [%d] is greater than worst [%d] for op #%d\n%s" % \
                (pageHits, maxHits, op["query_id"], pformat(op))
        ## FOR (op)
        sess_ctr += 1
    ## FOR (sess)

    self.total_index_insertion_penalty = total_index_penalty
    # Add the index insertion penalty to the total cost
    if not self.no_index_insertion_penalty:
        totalCost += total_index_penalty
        totalWorst += total_worst_index_penalty
    ## IF

    # The final disk cost is the ratio of our estimated disk access cost divided
    # by the worst possible cost for this design. If we don't have a worst case,
    # then the cost is simply zero
    if self.debug:
        LOG.info("Total operation contents %s, errors %s", self.total_op_contents, self.err_ctr)
    assert totalCost <= totalWorst, \
        "Estimated total pageHits [%d] is greater than worst case pageHits [%d]" % (totalCost, totalWorst)
    final_cost = float(totalCost) / float(totalWorst) if totalWorst else 0
    evicted = sum([lru.evicted for lru in self.buffers])
    LOG.info("Computed Disk Cost: %s [pageHits=%d / worstCase=%d / evicted=%d]",
             final_cost, totalCost, totalWorst, evicted)
    return final_cost
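# A minimal sketch of the per-node LRU working-set accounting that the outline in
# getCostImpl() describes. LRUBufferSketch is an illustrative stand-in for the real
# LRU buffer class behind self.buffers; it only models hit/miss/evict on hashed keys
# and ignores index vs. collection tagging, entry sizes, and slot sizes.
import collections

class LRUBufferSketch(object):
    def __init__(self, capacity):
        self.capacity = capacity
        self.entries = collections.OrderedDict()
        self.evicted = 0

    def touch(self, key):
        """Return 0 on a buffer hit, or 1 page hit on a miss (evicting if the buffer is full)."""
        if key in self.entries:
            self.entries.pop(key)               # refresh: move the entry to the MRU end
            self.entries[key] = True
            return 0
        if len(self.entries) >= self.capacity:
            self.entries.popitem(last=False)    # evict the least-recently-used entry
            self.evicted += 1
        self.entries[key] = True
        return 1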
def estimateNodes(self, design, op):
    """
        For the given operation and a design object, return an estimate of
        a list of node ids that we think that the query will be executed on
    """
    results = set()
    broadcast = True
    shardingKeys = design.getShardKeys(op['collection'])

    if self.debug:
        LOG.debug("Computing node estimate for Op #%d [sharding=%s]",
                  op['query_id'], shardingKeys)

    # Inserts always go to a single node
    if op['type'] == constants.OP_TYPE_INSERT:
        # Get the documents that they're trying to insert and then
        # compute their hashes based on the sharding key
        # Because there is no logical replication, each document will
        # be inserted in one and only one node
        for content in workload.getOpContents(op):
            values = catalog.getFieldValues(shardingKeys, content)
            results.add(self.computeTouchedNode(values))
        ## FOR
        broadcast = False

    # Network costs of SELECT, UPDATE, DELETE queries are based off
    # of using the sharding key in the predicate
    elif len(op['predicates']) > 0:
        predicate_types = set()
        for k, v in op['predicates'].iteritems():
            if design.inShardKeyPattern(op['collection'], k):
                broadcast = False
                predicate_types.add(v)

        if self.debug:
            LOG.debug("Op #%d %s Predicates: %s [broadcast=%s / predicateTypes=%s]",
                      op['query_id'], op['collection'], op['predicates'], broadcast, list(predicate_types))

        ## ----------------------------------------------
        ## PRED_TYPE_REGEX
        ## ----------------------------------------------
        if not broadcast and constants.PRED_TYPE_REGEX in predicate_types:
            # Any query that is using a regex on the sharding key must be broadcast to every node
            # It's not completely accurate but it's just easier that way
            broadcast = True

        ## ----------------------------------------------
        ## PRED_TYPE_RANGE
        ## ----------------------------------------------
        elif not broadcast and constants.PRED_TYPE_RANGE in predicate_types:
            # If it's a scan, then we need to first figure out what
            # node they will start the scan at, and then just approximate
            # what it will do by adding N nodes to the touched list starting
            # from that first node. We will wrap around to zero
            num_touched = self.guessNodes(design, op['collection'], k)
            if self.debug:
                LOG.info("Estimating that Op #%d on '%s' touches %d nodes",
                         op["query_id"], op["collection"], num_touched)
            for content in workload.getOpContents(op):
                values = catalog.getFieldValues(shardingKeys, content)
                if self.debug:
                    LOG.debug("%s -> %s", shardingKeys, values)
                try:
                    node_id = self.computeTouchedNode(values)
                except:
                    if self.debug:
                        LOG.error("Unexpected error when computing touched nodes\n%s" % pformat(values))
                    raise
                for i in xrange(num_touched):
                    if node_id >= self.num_nodes:
                        node_id = 0
                    results.add(node_id)
                    node_id += 1
                ## FOR
            ## FOR

        ## ----------------------------------------------
        ## PRED_TYPE_EQUALITY
        ## ----------------------------------------------
        elif not broadcast and constants.PRED_TYPE_EQUALITY in predicate_types:
            broadcast = False
            for content in workload.getOpContents(op):
                values = catalog.getFieldValues(shardingKeys, content)
                results.add(self.computeTouchedNode(values))
            ## FOR

        ## ----------------------------------------------
        ## BUSTED!
        ## ----------------------------------------------
        elif not broadcast:
            raise Exception("Unexpected predicate types '%s' for op #%d" % (list(predicate_types), op['query_id']))
    ## IF

    if broadcast:
        if self.debug:
            LOG.debug("Op #%d on '%s' is a broadcast query to all nodes",
                      op["query_id"], op["collection"])
        map(results.add, xrange(0, self.num_nodes))

    map(self.nodeCounts.put, results)
    self.op_count += 1
    return results
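# estimateNodes() funnels every targeted look-up through self.computeTouchedNode().
# One plausible minimal implementation, assuming straight hash partitioning on the
# extracted shard-key values (the real method may hash or partition differently):
def computeTouchedNodeSketch(values, num_nodes):
    # values is the (hashable) tuple returned by catalog.getFieldValues()
    return hash(values) % num_nodes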
def computeWorkloadStats(self):
    """Process Workload Trace"""
    LOG.info("Computing database statistics from workload trace")

    # We'll maintain a local cache of Collection objects so that
    # we only have to look-up/create each one once, and then we can
    # save them all together at the end
    collectionCache = {}
    for sess in self.metadata_db.Session.fetch():
        start_time = None
        end_time = None

        for op in sess['operations']:
            # We need to know the total number of queries that we've seen
            self.total_ops += 1

            # The start_time is the timestamp of when the first query occurs
            if not start_time:
                start_time = op['query_time']
            start_time = min(start_time, op['query_time'])

            # The end_time is the timestamp of when the last response arrives
            if 'resp_time' in op and op['resp_time']:
                end_time = max(end_time, op['resp_time'])
            elif not end_time and op['query_time']:
                end_time = op['query_time']

            # Get the collection information object
            # We will use this to store the number of times each key is referenced in a query
            if not op['collection'] in collectionCache:
                if op['collection'] in constants.IGNORED_COLLECTIONS or op['collection'].endswith("$cmd"):
                    continue
                col_info = self.metadata_db.Collection.one({'name': op['collection']})
                if not col_info:
                    col_info = self.metadata_db.Collection()
                    col_info['name'] = op['collection']
                collectionCache[op['collection']] = col_info
                col_info['workload_queries'] = 0

            col_info = collectionCache[op['collection']]
            col_info['workload_queries'] += 1

            if not 'predicates' in op or not op['predicates']:
                op['predicates'] = {}

            try:
                for content in workload.getOpContents(op):
                    self.processOpFields(col_info['fields'], op, content)
            except:
                LOG.error("Unexpected error for operation #%d in Session #%d\n%s",
                          op['query_id'], sess['session_id'], pformat(op))
                raise
        ## FOR (operations)

        if start_time and end_time:
            sess['start_time'] = start_time
            sess['end_time'] = end_time
        if self.debug:
            LOG.debug("Updating Session #%d" % sess['session_id'])
        try:
            sess.save()
        except:
            LOG.error("Failed to update Session #%d", sess['session_id'])
            raise
    ## FOR (sessions)

    # Save off all our skanky stink boxes right here...
    for col_info in collectionCache.itervalues():
        col_info.save()
def estimateNodes(self, design, op, num_nodes=None):
    """
        For the given operation and a design object, return an estimate of
        a list of node ids that we think that the query will be executed on
    """
    results = set()
    broadcast = True
    shardingKeys = design.getShardKeys(op['collection'])

    if self.debug:
        LOG.debug("Computing node estimate for Op #%d [sharding=%s]",
                  op['query_id'], shardingKeys)

    # If there are no sharding keys, all requests on this collection will be
    # routed to the primary node. We assume that node 0 is the primary node.
    if len(shardingKeys) == 0:
        broadcast = False
        results.add(0)

    # Inserts always go to a single node
    elif op['type'] == constants.OP_TYPE_INSERT:
        # Get the documents that they're trying to insert and then
        # compute their hashes based on the sharding key
        # Because there is no logical replication, each document will
        # be inserted in one and only one node
        for content in workload.getOpContents(op):
            values = catalog.getFieldValues(shardingKeys, content)
            results.add(self.computeTouchedNode(op['collection'], shardingKeys, values, num_nodes))
        ## FOR
        broadcast = False

    # Network costs of SELECT, UPDATE, DELETE queries are based off
    # of using the sharding key in the predicate
    elif len(op['predicates']) > 0:
        predicate_fields = set()
        predicate_types = set()
        for k, v in op['predicates'].iteritems():
            if design.inShardKeyPattern(op['collection'], k):
                predicate_fields.add(k)
                predicate_types.add(v)
        if len(predicate_fields) == len(shardingKeys):
            broadcast = False

        if self.debug:
            LOG.debug("Op #%d %s Predicates: %s [broadcast=%s / predicateTypes=%s]",
                      op['query_id'], op['collection'], op['predicates'], broadcast, list(predicate_types))

        ## ----------------------------------------------
        ## PRED_TYPE_REGEX
        ## ----------------------------------------------
        if not broadcast and constants.PRED_TYPE_REGEX in predicate_types:
            # Any query that is using a regex on the sharding key must be broadcast to every node
            # It's not completely accurate but it's just easier that way
            broadcast = True

        ## ----------------------------------------------
        ## PRED_TYPE_RANGE
        ## ----------------------------------------------
        elif not broadcast and constants.PRED_TYPE_RANGE in predicate_types:
            broadcast = True

        ## ----------------------------------------------
        ## PRED_TYPE_EQUALITY
        ## ----------------------------------------------
        elif not broadcast and constants.PRED_TYPE_EQUALITY in predicate_types:
            broadcast = False
            for content in workload.getOpContents(op):
                values = catalog.getFieldValues(shardingKeys, content)
                results.add(self.computeTouchedNode(op['collection'], shardingKeys, values, num_nodes))
            ## FOR

        ## ----------------------------------------------
        ## BUSTED!
        ## ----------------------------------------------
        elif not broadcast:
            raise Exception("Unexpected predicate types '%s' for op #%d" % (list(predicate_types), op['query_id']))
    ## IF

    if broadcast:
        if self.debug:
            LOG.debug("Op #%d on '%s' is a broadcast query to all nodes",
                      op["query_id"], op["collection"])
        map(results.add, xrange(0, self.colNumNodes(num_nodes, op["collection"])))

    map(self.nodeCounts.put, results)
    self.op_count += 1
    return results