def txn():
    index = random.randint(0, NUM_SHARDS - 1)
    shard_name = "shard" + str(index)
    counter = SSFSMSimpleCounterShard.get_by_key_name(shard_name)
    if counter is None:
        counter = SSFSMSimpleCounterShard(key_name=shard_name)
    counter.count += value
    counter.put()
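# Hedged sketch, not part of the original file: txn() above reads `value`
# and NUM_SHARDS from an enclosing scope, so it is presumably nested in a
# wrapper and executed with db.run_in_transaction(), following the standard
# App Engine sharded-counter pattern. The names `increment` and `get_count`
# below are illustrative assumptions, not the original API.
import random

from google.appengine.ext import db


def increment(value):
    """Transactionally add `value` to one randomly chosen counter shard."""
    def txn():
        index = random.randint(0, NUM_SHARDS - 1)
        shard_name = "shard" + str(index)
        counter = SSFSMSimpleCounterShard.get_by_key_name(shard_name)
        if counter is None:
            counter = SSFSMSimpleCounterShard(key_name=shard_name)
        counter.count += value
        counter.put()
    db.run_in_transaction(txn)


def get_count():
    """Read the counter by summing every shard (eventually consistent)."""
    return sum(shard.count for shard in SSFSMSimpleCounterShard.all())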
def fsm_calculate_run_time():
    """Fantasm does not give callbacks when it is done, so the end time must
    be figured out by another job, using the last-modified date on the
    output entities.
    """
    # Get the last job which was run for the subset benchmark on fsm.
    q = Record.all()
    q.filter('engine_type =', 'fsm')
    q.filter('benchmark =', 'subset')
    q.order('-start')
    results = q.fetch(1)
    if len(results) == 0:
        logging.error("Unable to find a record for fsm/subset")
        return False
    q = None
    record = None
    for ii in results:
        if ii.state == "Done":
            logging.error("Last FSM end time has already been calculated")
        logging.info(str(ii.num_entities))
        q = SSFSMSimpleCounterShard.all()
        if not q:
            logging.error("No query returned for SubSet results")
            return False
        record = ii
    # Scan all counter shards for the newest modification timestamp.
    max_date = None
    while True:
        results = q.fetch(1000)
        for ii in results:
            date = ii.modified
            if max_date is None or max_date < date:
                max_date = date
        if len(results) < 1000:
            break
        # Resume after the last batch; without advancing the cursor, each
        # fetch() would return the same first 1000 entities and loop forever.
        q.with_cursor(q.cursor())
    if not max_date:
        logging.error("Unable to calculate the max date for FSM/subset")
        return False
    record.state = "Done"
    record.end = max_date
    delta = record.end - record.start
    record.total = float(delta.days * 86400 + delta.seconds) + \
        float(delta.microseconds) / 1000000
    record.put()
    return True
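# Hedged sketch, not part of the original file: the docstring above says the
# end time "must be figured out with another job". One way to kick that job
# off is a countdown task aimed at the post() handler below, which already
# understands the fsm_cleanup parameter. The function name, the 60-second
# delay, and the lack of a retry/re-enqueue policy are assumptions.
from google.appengine.api import taskqueue


def schedule_fsm_cleanup():
    """Enqueue a delayed task that triggers the fsm_cleanup branch of post()."""
    taskqueue.add(url='/subset', params={'fsm_cleanup': '1'}, countdown=60)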
def post(self):
    if self.request.get("fsm_cleanup"):
        if fsm_calculate_run_time():
            self.redirect("/subset")
        else:
            self.response.out.write("Error calculating run time of FSM/subset")
        # Return like the other branches; falling through would run the
        # remaining checks after the response has been written.
        return
    if self.request.get("reset_fsm_count"):
        for c in SSFSMSimpleCounterShard.all():
            c.delete()
        self.redirect('/subset')
        return
    if self.request.get("reset_mr_count"):
        for c in SSMRSimpleCounterShard.all():
            c.delete()
        self.redirect('/subset')
        return
    if self.request.get("reset_pl_count"):
        for c in SSPLSimpleCounterShard.all():
            c.delete()
        self.redirect('/subset')
        return
    if self.request.get("compute"):
        engine = self.request.get("engine")
        dataset = self.request.get("dataset")
        user = self.request.get('user')
        data = SubSetDataSet.get_by_key_name(dataset)
        record = Record(engine_type=engine,
                        dataset=dataset,
                        benchmark="subset",
                        num_entities=data.num_entries,
                        entries_per_pipe=data.entries_per_pipe,
                        user=user,
                        state="Running")
        if engine == "fsm":
            record.put()
            # Reset the FSM shard counters before starting a new run.
            for c in SSFSMSimpleCounterShard.all():
                c.delete()
            context = {}
            context['user'] = str(user)
            context['num_entries'] = int(data.num_entries)
            fsm.startStateMachine('SubSet', [context])
            self.redirect('/subset')
        elif engine == "pipeline":
            for c in SSPLSimpleCounterShard.all():
                c.delete()
            mypipeline = SubSetPipeline(data.num_entries)
            mypipeline.start()
            record.pipeline_id = mypipeline.pipeline_id
            record.put()
            self.redirect('/subset')
            #self.redirect(mypipeline.base_path + "/status?root=" + mypipeline.pipeline_id)
        elif engine == "mr":
            for c in SSMRSimpleCounterShard.all():
                c.delete()
            # Target roughly 1,000 entities per shard; it is not obvious
            # that this is the ideal batch size.
            if data.num_entries > 1000:
                shards = data.num_entries / 1000
            else:
                shards = 1
            kind = get_class(data.num_entries)
            mapreduce_id = control.start_map(
                name="Wordcount with just mappers",
                handler_spec="subset.mr.subset_mapper",
                reader_spec="mapreduce.input_readers.DatastoreInputReader",
                mapper_parameters={
                    "entity_kind": "data.subset." + kind,
                    "processing_rate": 500,
                },
                mapreduce_parameters={
                    model.MapreduceSpec.PARAM_DONE_CALLBACK: '/subset/mr/callback',
                },
                shard_count=shards,
                queue_name="default",
            )
            record.mr_id = mapreduce_id
            record.put()
            self.redirect('/subset')
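# Hedged sketch, not part of the original file: a possible handler for the
# '/subset/mr/callback' done-callback registered above. The mapreduce
# library POSTs to that URL with the job id in the 'Mapreduce-Id' request
# header (its done-callback convention); the class name and the Record
# bookkeeping here are assumptions, not the original implementation.
import datetime
import logging

from google.appengine.ext import webapp


class MRCallbackHandler(webapp.RequestHandler):
    def post(self):
        mr_id = self.request.headers.get('Mapreduce-Id')
        record = Record.all().filter('mr_id =', mr_id).get()
        if record is None:
            logging.error("No record found for mapreduce job %s", mr_id)
            return
        # Mark the run finished and store the elapsed time in seconds.
        record.state = "Done"
        record.end = datetime.datetime.now()
        delta = record.end - record.start
        record.total = float(delta.days * 86400 + delta.seconds) + \
            float(delta.microseconds) / 1000000
        record.put()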