def addTaskRange(initialStopStep, globalStop, unique, namespace, batchSize=5, stepsBack=1600, callback=''):
    '''
    Task Ranges will branch out into weekly clumps for all algs.
    Remove startAlg, stopAlg from task name and use stepRange value instead.
    Must calculate expected count to be found for completion entities.
    '''
    import meSchema
    from google.appengine.api import namespace_manager
    from math import ceil
    namespace_manager.set_namespace('')
    startAlg = 1
    stopAlg = int(meSchema.meAlg.all(keys_only=True).order('-__key__').get().name())
    # Pre-calculate the number of batches for the callback to check against.
    numWeeks = ((globalStop - initialStopStep) / 400) + 1
    numAlgs = stopAlg - startAlg + 1
    batchesWeek = ceil(numAlgs / float(batchSize))
    totalBatches = int(numWeeks * batchesWeek)
    JobID = meTools.buildJobID(namespace, unique, globalStop, initialStopStep, stepsBack)
    # Probably need to add a WorkQueue clear function in case of overlapping JobIDs.
    for i in range(initialStopStep, globalStop + 1, 400):
        stopStep = i
        startStep = stopStep - stepsBack
        stepRange = [startStep]
        name = 'Main-backTestResult-' + JobID + '-' + str(stopStep).rjust(7, '0')
        meTools.taskAdd(name, '/backtest/doBackTests', 'default', 0.5,
                        startAlg=startAlg, stopAlg=stopAlg, stopStep=stopStep,
                        batchSize=batchSize, stepRange=stepRange, uniquifier=unique,
                        namespace=namespace, JobID=JobID, totalBatches=totalBatches,
                        callback=callback)
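
# Illustrative sketch (hypothetical values, not read from the datastore): reproduces
# the batch arithmetic in addTaskRange so the totalBatches figure the callback waits
# for can be sanity-checked by hand. Not called anywhere in the job flow.
def _exampleAddTaskRangeBatchCount():
    from math import ceil
    initialStopStep, globalStop = 6400, 8400   # hypothetical 6-week span (400 steps/week)
    startAlg, stopAlg, batchSize = 1, 53, 5    # hypothetical alg range and batch size
    numWeeks = ((globalStop - initialStopStep) / 400) + 1            # 6 weekly clumps
    batchesWeek = ceil((stopAlg - startAlg + 1) / float(batchSize))  # ceil(53 / 5.0) = 11
    return int(numWeeks * batchesWeek)                               # 66 expected batches
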
def doAllLiveAlgs(initialStopStep, stepRange, globalStop, namespace, name, callback=""):
    JobID = meTools.buildJobID(namespace, name, globalStop, initialStopStep, stepRange)
    # techneCount = len(FTLtype)*len(NRtype)
    numWeeks = ((globalStop - initialStopStep) / 400) + 1
    totalBatches = numWeeks
    for i in range(initialStopStep, globalStop + 1, 400):
        stopStep = i
        calculateWeeklyLiveAlgs(stopStep, stepRange, namespace, JobID, totalBatches, callback)
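
# Minimal sketch with hypothetical bounds: confirms that the weekly loop in
# doAllLiveAlgs fires exactly totalBatches times, which is the count the
# completion callback is expected to check against.
def _exampleWeeklyLiveAlgBatchCount():
    initialStopStep, globalStop = 6400, 8400   # hypothetical
    numWeeks = ((globalStop - initialStopStep) / 400) + 1
    weeklyStops = range(initialStopStep, globalStop + 1, 400)   # 6400, 6800, ..., 8400
    assert len(weeklyStops) == numWeeks                         # 6 calls == 6 batches
    return numWeeks
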
def startSim(namespace, unique, globalStop, initialStop, stepRange, goNext):
    from google.appengine.api.datastore import Key
    JobID = meTools.buildJobID(namespace, unique, globalStop, initialStop, stepRange)
    persistStops = meSchema.WorkQueue(key_name=JobID, globalStop=globalStop, initialStop=initialStop)
    meTools.memPut_multi({persistStops.key().name(): persistStops}, priority=1)
    if not globalStop >= initialStop:
        raise BaseException('globalStop: %s is not >= initialStop: %s' % (globalStop, initialStop))
    # Latest desire written so far; desire key names begin with the zero-padded step number.
    lastDesire = meSchema.desire.all(keys_only=True).filter('__key__ <', Key.from_path('desire', '1000000_0000_00')).order('-__key__').get()
    if lastDesire:
        lastDesireStop = int(lastDesire.name().split('_')[0])
    else:
        lastDesireStop = 1
    desireFunc.primeDesireCache(lastDesireStop)
    for step in range(lastDesireStop, globalStop + 1):
        desireFunc.doDesires(step)
    if goNext == 'true':
        doNext(JobID, 'weeklyDesires', '')
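
# Minimal sketch of the desire key-name parsing used in startSim. The sample key
# name is hypothetical; the only assumption carried over from the code above is
# that desire key names begin with an underscore-delimited, zero-padded step number.
def _exampleLastDesireStop():
    sampleKeyName = '0006400_0012_05'                  # hypothetical desire key name
    lastDesireStop = int(sampleKeyName.split('_')[0])  # -> 6400
    return lastDesireStop                              # doDesires would resume from here
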
def fanoutTaskAdd(stopStep, startStep, globalStop, namespace, unique, model, callback=''):
    from google.appengine.api import namespace_manager
    namespace_manager.set_namespace(namespace)
    # Partition the range of steps into batches that can be added in parallel.
    stepRange = stopStep - startStep
    if stepRange == 800:
        stepBlock = 2800
    elif stepRange == 1600:
        stepBlock = 2000
    else:
        # Only 800- and 1600-step windows have a defined block size.
        namespace_manager.set_namespace('')
        raise ValueError('Unsupported stepRange: %s; expected 800 or 1600.' % stepRange)
    JobID = meTools.buildJobID(namespace, unique, globalStop, stopStep, stepRange)
    totalBatches = ((globalStop - stopStep) / stepBlock) + 1
    for i in range(stopStep, globalStop + 1, stepBlock):
        newStopStep = i
        newStartStep = newStopStep - stepRange
        newGlobalStop = min(newStopStep + stepBlock - 1, globalStop)
        subTaskname = 'RVals-' + JobID + '-' + model + '-' + str(newStopStep) + '-' + str(newStartStep) + '-0'
        meTools.taskAdd(subTaskname, '/calculate/compounds/calculateCompounds', 'default', 0.5,
                        stopStep=newStopStep, startStep=newStartStep, globalStop=newGlobalStop,
                        name=unique, i=0, cursor='', model=model, JobID=JobID,
                        callback=callback, totalBatches=totalBatches, taskname='')
    namespace_manager.set_namespace('')
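
# Minimal sketch of the fan-out partitioning in fanoutTaskAdd, with hypothetical
# bounds: a 1600-step window maps to 2000-step blocks, so the batch count and the
# (stopStep, globalStop) pair handed to each subtask can be checked by hand.
# Uses // to mirror the Python 2 integer division of / above.
def _exampleFanoutPartition():
    stopStep, startStep, globalStop = 6400, 4800, 10000   # hypothetical
    stepRange = stopStep - startStep                       # 1600 -> stepBlock = 2000
    stepBlock = 2000
    totalBatches = ((globalStop - stopStep) // stepBlock) + 1   # 2 batches
    blocks = [(i, min(i + stepBlock - 1, globalStop))
              for i in range(stopStep, globalStop + 1, stepBlock)]
    assert blocks == [(6400, 8399), (8400, 10000)]
    return totalBatches, blocks
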