def txn():
    # Pick a random shard and increment its count. Running this inside a
    # datastore transaction keeps the read-modify-write of one shard atomic.
    index = random.randint(0, NUM_SHARDS - 1)
    shard_name = "shard" + str(index)
    counter = PLSimpleCounterShard.get_by_key_name(shard_name)
    if counter is None:
        counter = PLSimpleCounterShard(key_name=shard_name)
    counter.count += value
    counter.put()
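# A minimal usage sketch (an assumption, not part of the original source): on
# App Engine, a sharded-counter write like txn() is normally executed through
# db.run_in_transaction so the per-shard read-modify-write above is atomic.
# txn() closes over `value` and NUM_SHARDS, so in the real source it appears
# to be nested inside an increment helper; the wrapper below and its name
# increment_pl_counter are hypothetical.
def increment_pl_counter():
    # Assumes `value` and NUM_SHARDS are bound in the surrounding scope.
    from google.appengine.ext import db  # standard GAE datastore API
    db.run_in_transaction(txn)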
def post(self):
    if self.request.get("fsm_cleanup"):
        if fsm_calculate_run_time():
            self.redirect("/agg")
        else:
            self.response.out.write("Error calculating run time of FSM/aggregate")
        return
    if self.request.get("reset_fsm_count"):
        for c in FSMSimpleCounterShard.all():
            c.delete()
        self.redirect('/agg')
        return
    if self.request.get("reset_mr_count"):
        for c in MRSimpleCounterShard.all():
            c.delete()
        self.redirect('/agg')
        return
    if self.request.get("reset_pl_count"):
        for c in PLSimpleCounterShard.all():
            c.delete()
        self.redirect('/agg')
        return
    if self.request.get("compute"):
        engine = self.request.get("engine")
        dataset = self.request.get("dataset")
        user = self.request.get('user')
        data = AggDataSet.get_by_key_name(dataset)
        record = Record(engine_type=engine,
                        dataset=dataset,
                        benchmark="aggregate",
                        num_entities=data.num_entries,
                        entries_per_pipe=data.entries_per_pipe,
                        user=user,
                        state="Running")
        if engine == "fsm":
            record.put()
            # Reset the sharded count before starting a new run.
            for c in FSMSimpleCounterShard.all():
                c.delete()
            context = {}
            context['user'] = str(user)
            context['num_entries'] = int(data.num_entries)
            fsm.startStateMachine('Aggregate', [context])
            self.redirect('/agg')
        elif engine == "fsm_fan_in":
            # Same as "fsm", but the Aggregate2 machine uses fan-in.
            record.put()
            # Reset the sharded count before starting a new run.
            for c in FSMSimpleCounterShard.all():
                c.delete()
            context = {}
            context['user'] = str(user)
            context['num_entries'] = int(data.num_entries)
            fsm.startStateMachine('Aggregate2', [context])
            self.redirect('/agg')
        elif engine == "pipeline":
            # Reset the sharded count before starting a new run.
            for c in PLSimpleCounterShard.all():
                c.delete()
            mypipeline = AggregatePipeline(data.num_entries)
            mypipeline.start()
            record.pipeline_id = mypipeline.pipeline_id
            record.put()
            self.redirect('/agg')
            #self.redirect(mypipeline.base_path + "/status?root=" + mypipeline.pipeline_id)
        elif engine == "mr":
            # Reset the sharded count before starting a new run.
            for c in MRSimpleCounterShard.all():
                c.delete()
            # Target roughly 1,000 entities per shard (one shard for small
            # runs); it is not clear whether this is ideal.
            if data.num_entries > 1000:
                shards = data.num_entries / 1000
            else:
                shards = 1
            kind = get_class(data.num_entries)
            mapreduce_id = control.start_map(
                name="Wordcount with just mappers",
                handler_spec="aggregate.mr.aggregate_mapper",
                reader_spec="mapreduce.input_readers.DatastoreInputReader",
                mapper_parameters={
                    "entity_kind": "data.aggregate." + kind,
                    "processing_rate": 500,
                },
                mapreduce_parameters={
                    model.MapreduceSpec.PARAM_DONE_CALLBACK: '/agg/mr/callback'},
                shard_count=shards,
                queue_name="default",
            )
            record.mr_id = mapreduce_id
            record.put()
            self.redirect('/agg')
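# A hedged sketch (not in the original handler) of how the sharded counts that
# post() resets are read back: the benchmark total is the sum of count across
# every shard entity. Shown for the pipeline counter; the FSM and MapReduce
# counters would be read the same way. get_pl_count is a hypothetical name.
def get_pl_count():
    total = 0
    for counter in PLSimpleCounterShard.all():
        total += counter.count
    return total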