Ejemplo n.º 1
0
def get_test_by_name(name):
    """Look up a registered test by name and return a sanitised copy.

    Args:
        name: string key the test was registered under (see register_test).

    Returns:
        A dict describing the test with the non-serialisable entries
        stripped by _cleantest, or None when no test is registered
        under that name.
    """
    # The original declared ``global _tests`` here; the global statement
    # is only needed for assignment, and this function only reads the
    # module-level dict, so it has been dropped.
    logdebug("name: %s" % name)
    return _cleantest(_tests.get(name))
Ejemplo n.º 2
0
 def runtask(*args, **kwargs):
     """Pickle the wrapped call and enqueue it as a task queue task.

     The callable f, its arguments and passthroughargs (all taken from
     the enclosing scope) are serialised with cloudpickle into the task
     payload.  If the payload exceeds the task queue size limit, the
     payload is parked in the datastore (_TaskToRun) and a small stub
     task carrying only the datastore key is enqueued instead, flagged
     with "_run_from_datastore" so the runner fetches it back out.
     """
     pickled = cloudpickle.dumps((f, args, kwargs, passthroughargs))
     logdebug("task pickle length: %s" % len(pickled))
     if get_dump():
         # Optional verbose dump of everything being serialised.
         logdebug("f:")
         dumper(f)
         logdebug("args:")
         dumper(args)
         logdebug("kwargs:")
         dumper(kwargs)
         logdebug("passthroughargs:")
         dumper(passthroughargs)
     try:
         task = taskqueue.Task(payload=pickled, **taskkwargscopy)
         return task.add(queue, transactional=transactional)
     except taskqueue.TaskTooLargeError:
         # Payload too big for the queue: log each component's size,
         # store the full payload in the datastore, and enqueue a stub
         # task that points at it.
         pickledf = cloudpickle.dumps(f)
         pickleda = cloudpickle.dumps(args)
         pickledk = cloudpickle.dumps(kwargs)
         pickledp = cloudpickle.dumps(passthroughargs)
         logexception(
             "task too large, need to use datastore (%s, %s, %s, %s)" %
             (len(pickledf), len(pickleda), len(pickledk), len(pickledp)))
         if parent:
             key = _TaskToRun(data=pickled, parent=parent).put()
         else:
             key = _TaskToRun(data=pickled).put()
         rfspickled = cloudpickle.dumps((None, [key], {}, {
             "_run_from_datastore": True
         }))
         task = taskqueue.Task(payload=rfspickled, **taskkwargscopy)
         return task.add(queue, transactional=transactional)
Ejemplo n.º 3
0
def OnProgressF(futurekey):
    """Progress callback: propagate a future's progress up to its parent.

    Loads the future identified by *futurekey* and, when it exists and has
    a parent, enqueues a task that recomputes the parent's progress as the
    sum of all of its children's progress values.
    """
    futureobj = futurekey.get() if futurekey else None
    # BUGFIX: the original dereferenced futureobj.parentkey without first
    # checking that futureobj was loaded, raising AttributeError when
    # futurekey was None or the entity was missing.
    if futureobj and futureobj.parentkey:
        taskkwargs = futureobj.get_taskkwargs()

        logdebug("Enter OnProgressF: %s" % futureobj)

        @task(**taskkwargs)
        def UpdateParent(parentkey):
            logdebug("***************************************************")
            logdebug("Enter UpdateParent: %s" % parentkey)
            logdebug("***************************************************")

            # Re-fetch the parent and recompute its progress from all
            # children, unconditionally (even if it already has a result).
            parent = parentkey.get()
            logdebug("1: %s" % parent)
            if parent:
                logdebug("2")
                progress = 0
                for childfuture in get_children(parentkey):
                    logdebug("3: %s" % childfuture)
                    progress += childfuture.get_progress()
                logdebug("4: %s" % (progress))
                parent.set_progress(progress)

        UpdateParent(futureobj.parentkey)
Ejemplo n.º 4
0
def _cleantest(test):
    """Return a copy of *test* without its non-serialisable entries.

    The stored test descriptor carries the callable ("f") and its task
    keyword arguments ("taskkwargs"); both are stripped from the copy so
    the result is safe to serialise.  Returns None for a falsy input.
    """
    logdebug(test)
    if not test:
        return None
    cleaned = dict(test)
    for fieldname in ("f", "taskkwargs"):
        if fieldname in cleaned:
            del cleaned[fieldname]
    return cleaned
Ejemplo n.º 5
0
    def toplevel(futurekey, *args, **kwargs):
        """Top-level future for futureparallel: aggregates child results.

        OnAllChildSuccess collects every child's result into a list once
        all children have finished, setting success or failure on this
        (parent) future.  NOTE(review): this excerpt appears truncated --
        the code that launches the child futures is not shown here.
        """
        logdebug("Enter futureparallel.toplevel: %s" % futurekey)

        def OnAllChildSuccess():
            # Invoked when a child finishes; only acts once the parent is
            # fully initialised and ready for a result.
            logdebug("Enter OnAllChildSuccess: %s" % futurekey)
            parentfuture = futurekey.get() if futurekey else None
            if parentfuture and not parentfuture.has_result():
                if not parentfuture.initialised or not parentfuture.readyforresult:
                    # Raising here causes the caller to retry later.
                    raise Exception("Parent not initialised, retry")

                @ndb.transactional()
                def get_children_trans():
                    return get_children(parentfuture.key)

                children = get_children_trans()

                logdebug("children: %s" % [child.key for child in children])
                if children:
                    result = []
                    error = None
                    finished = True
                    # Accumulate results; stop at the first child error.
                    for childfuture in children:
                        logdebug("childfuture: %s" % childfuture.key)
                        if childfuture.has_result():
                            try:
                                childresult = childfuture.get_result()
                                logdebug("childresult(%s): %s" %
                                         (childfuture.status, childresult))
                                result += [childfuture.get_result()]
                                logdebug("intermediate result:%s" % result)
                            except Exception, ex:
                                logdebug("haserror:%s" % repr(ex))
                                error = ex
                                break
                        else:
                            logdebug("noresult")
                            finished = False

                    if error:
                        logwarning(
                            "Internal error, child has error in OnAllChildSuccess: %s"
                            % error)
                        parentfuture.set_failure(error)
                    elif finished:
                        logdebug("result: %s" % result)
                        parentfuture.set_success(result)
                    else:
                        # Some child still pending; wait for a later call.
                        logdebug(
                            "child not finished in OnAllChildSuccess, skipping"
                        )
                else:
                    logwarning(
                        "Internal error, parent has no children in OnAllChildSuccess"
                    )
                    parentfuture.set_failure(Exception("no children found"))
Ejemplo n.º 6
0
    def InvokeMap(futurekey, key, **kwargs):
        """Load the entity for *key* and apply mapf to it within a future.

        Raises RetryTaskException when the entity cannot be loaded, so the
        task queue retries the fetch.
        """
        logdebug("Enter InvokeMap: %s" % key)
        try:
            entity = key.get()
            if not entity:
                raise RetryTaskException("couldn't get object for key %s" %
                                         key)
            return mapf(futurekey, entity, **kwargs)
        finally:
            logdebug("Leave InvokeMap: %s" % key)
Ejemplo n.º 7
0
            def higherlevelcompose(lop, rop):
                """Combine two composed-blob descriptors into one.

                lop/rop are dicts carrying "blobname" and "count" entries
                (or falsy when a side is absent).  Both blobs are composed
                into a new GCS blob; when their combined component count
                exceeds 1020 they are first flattened by copying
                (presumably to stay under GCS's compose component limit --
                confirm).  Returns the new descriptor, or whichever
                operand is non-empty when only one side is present.
                """
                try:
                    retval = None
                    if lop and rop:
                        blobnames = [lop.get("blobname"), rop.get("blobname")]
                        blobs = getblobsbyname(gcsbucket, *blobnames)
                        if len(blobs) == 2:
                            ltotalcomponent_count = sum(
                                [blob.component_count for blob in blobs])
                            logdebug("ltotalcomponent_count: %s" %
                                     ltotalcomponent_count)
                            if ltotalcomponent_count > 1020:
                                # Flatten each blob by copying it, delete
                                # the originals, and continue with the
                                # copies.
                                logdebug("doing copying")
                                newblobnames = [
                                    "%s-copy" % blobname
                                    for blobname in blobnames
                                ]
                                for ix, blob in enumerate(blobs):
                                    try:
                                        copyblob(gcsbucket, blob,
                                                 newblobnames[ix])
                                    except Exception:
                                        logexception("deleteblobs(copy)")
                                try:
                                    deleteblobs(gcsbucket, blobs)
                                except Exception:
                                    logexception("deleteblobs(copy)")

                                blobnames = newblobnames
                                blobs = getblobsbyname(gcsbucket, *blobnames)

                            if len(blobs) == 2:
                                # Compose into the final target name at the
                                # top level, otherwise into a stable name
                                # derived from the operand names.
                                llocalfilename = gcstargetfilename if istop else GenerateStableId(
                                    blobnames[0] + blobnames[1])
                                lfilename = "%s/%s-%s" % (gcstargetprefix,
                                                          "composed",
                                                          llocalfilename)
                                retval = composeblobs(gcsbucket, lfilename,
                                                      blobs)
                                retval["count"] = lop.get(
                                    "count", 0) + rop.get("count", 0)
                                try:
                                    deleteblobs(gcsbucket, blobs)
                                except Exception:
                                    logexception("deleteblobs")
                        else:
                            raise Exception("Can't load blobs")
                    else:
                        # Only one (or neither) side present: pass through.
                        retval = lop if lop else rop
                    return retval
                except Exception, ex:
                    logexception("higherlevelcompose")
                    raise ex
    def MapOverRange(futurekey, startbyte, endbyte, weight, **kwargs):
        """Map over a byte range of the GCS file, sharding recursively.

        Reads one page of lines from gcspath between startbyte and
        endbyte, optionally schedules pagemapf over the page as a child
        future, then schedules a child MapOverRange future per remaining
        sub-range.  Raises FutureReadyForResult while children are
        outstanding so the future machinery waits for them; returns
        len(page) for a leaf shard with no children.
        """
        logdebug("Enter MapOverRange: %s, %s, %s" %
                 (startbyte, endbyte, weight))

        # Idiom fix: "is not None" instead of "not ... is None".
        linitialresult = initialresult if initialresult is not None else 0
        loncombineresultsf = oncombineresultsf if oncombineresultsf else lambda a, b: a + b

        try:
            # open file at gcspath for read
            with gcs.open(gcspath) as gcsfile:
                page, ranges = hwalk(gcsfile, pagesize, 2, startbyte, endbyte)

            lweightUsed = 0
            if pagemapf:
                # 5% of this shard's weight is assigned to the page-map
                # child future.
                lonallchildsuccessf = GenerateOnAllChildSuccess(
                    futurekey, linitialresult, loncombineresultsf)
                taskkwargs["futurename"] = "pagemap %s of %s,%s" % (
                    len(page), startbyte, endbyte)
                lweightUsed = weight * 0.05
                future(pagemapf,
                       parentkey=futurekey,
                       onallchildsuccessf=lonallchildsuccessf,
                       weight=lweightUsed,
                       **taskkwargs)(page)
            else:
                pass
                #setlocalprogress(futurekey, len(page))

            if ranges:
                # Split the remaining weight evenly across the sub-ranges.
                newweight = (weight -
                             lweightUsed) / len(ranges) if weight else None
                for arange in ranges:
                    taskkwargs["futurename"] = "shard %s" % (arange)

                    lonallchildsuccessf = GenerateOnAllChildSuccess(
                        futurekey, linitialresult if pagemapf else len(page),
                        loncombineresultsf)

                    future(MapOverRange,
                           parentkey=futurekey,
                           onallchildsuccessf=lonallchildsuccessf,
                           weight=newweight,
                           **taskkwargs)(arange[0],
                                         arange[1],
                                         weight=newweight)

            if ranges or pagemapf:
                raise FutureReadyForResult("still going")
            else:
                return len(page)
        finally:
            logdebug("Leave MapOverRange: %s, %s, %s" %
                     (startbyte, endbyte, weight))
Ejemplo n.º 9
0
    def toplevel(futurekey, *args, **kwargs):
        """Top-level future for futuresequence: run flist elements in order.

        childonsuccessforindex builds the per-child success callback: it
        accumulates each child's result and either marks this top-level
        future successful after the last element of flist, or launches the
        next element as a new child future.  NOTE(review): this excerpt is
        truncated -- the outer try in childonsuccess has no handlers shown
        and the launch of the first child is not visible.
        """
        logdebug("Enter futuresequence.toplevel: %s" % futurekey)

        def childonsuccessforindex(index, results):
            logdebug("Enter childonsuccessforindex: %s, %s, %s" %
                     (futurekey, index, json.dumps(results, indent=2)))

            def childonsuccess(childfuturekey):
                logdebug("Enter childonsuccess: %s, %s, %s" %
                         (futurekey, index, childfuturekey))
                logdebug("results: %s" % json.dumps(results, indent=2))
                try:
                    childfuture = GetFutureAndCheckReady(childfuturekey)

                    try:
                        result = childfuture.get_result()
                    except Exception, ex:
                        # Child failed: propagate the failure to the
                        # top-level future.
                        toplevelfuture = futurekey.get()
                        if toplevelfuture:
                            toplevelfuture.set_failure(ex)
                        else:
                            raise Exception(
                                "Can't load toplevel future for failure")
                    else:
                        logdebug("result: %s" % json.dumps(result, indent=2))
                        newresults = results + [result]
                        islast = (index == (len(flist) - 1))

                        if islast:
                            logdebug("islast")
                            toplevelfuture = futurekey.get()
                            if toplevelfuture:
                                logdebug("setting top level success")
                                toplevelfuture.set_success_and_readyforesult(
                                    newresults)
                            else:
                                raise Exception(
                                    "Can't load toplevel future for success")
                        else:
                            # Not the last element: chain the next one,
                            # passing the accumulated results along.
                            logdebug("not last")
                            taskkwargs["futurename"] = "%s [%s]" % (
                                futurenameprefix if futurenameprefix else "-",
                                index + 1)
                            future(flist[index + 1],
                                   parentkey=futurekey,
                                   onsuccessf=childonsuccessforindex(
                                       index + 1, newresults),
                                   weight=weight /
                                   len(flist) if weight else None,
                                   timeoutsec=timeoutsec,
                                   maxretries=maxretries,
                                   **taskkwargs)(newresults)
Ejemplo n.º 10
0
 def InvokeMap(key, **kwargs):
     """Fetch the entity for *key* and hand it to mapf.

     A missing entity either raises RetryTaskException (so the task queue
     retries) or is silently skipped when skipmissing is set.
     """
     logdebug("Enter InvokeMap: %s" % key)
     try:
         obj = key.get()
         if obj:
             mapf(obj, **kwargs)
         elif not skipmissing:
             raise RetryTaskException("couldn't get object for key %s" %
                                      key)
     finally:
         logdebug("Leave InvokeMap: %s" % key)
Ejemplo n.º 11
0
    def getvalue(*args, **kwargs):
        """Memoising wrapper: return the cached value of f or compute it.

        The memcache key is cachekey when provided, otherwise derived from
        f and the call arguments via make_flash.  On a miss the result is
        computed and stored with an expiry of expiresec seconds.
        """
        lcachekey = cachekey if cachekey else make_flash(f, *args, **kwargs)

        cached = memcache.get(lcachekey)  #@UndefinedVariable
        if cached is not None:
            logdebug("HIT: %s" % lcachekey)
            return cached

        logdebug("MISS: %s" % lcachekey)
        cached = f(*args, **kwargs)
        memcache.add(key=lcachekey, value=cached,
                     time=expiresec)  #@UndefinedVariable
        return cached
Ejemplo n.º 12
0
    def MapOverRange(keyrange, **kwargs):
        """Process one page of keys in *keyrange*, then recurse on the rest.

        Fetches up to pagesize keys matching ndbquery within keyrange,
        hands them to pagemapf, and when more results remain splits the
        remaining range and recurses into each sub-range.
        """
        logdebug("Enter MapOverRange: %s" % keyrange)

        lquery = ndbquery() if callable(ndbquery) else ndbquery

        # Ensure the range has an explicit end key before filtering.
        _fixkeyend(keyrange, kind)

        rangequery = keyrange.filter_ndb_query(lquery)

        logdebug(rangequery)

        pagekeys, _, lmore = rangequery.fetch_page(pagesize, keys_only=True)

        if pagemapf:
            pagemapf(pagekeys)

        if lmore and pagekeys:
            # Continue from the last key seen to the end of the range,
            # splitting the remainder into sub-ranges.
            remainder = KeyRange(pagekeys[-1], keyrange.key_end,
                                 keyrange.direction, False,
                                 keyrange.include_end)
            subranges = remainder.split_range()
            logdebug("krlist: %s" % subranges)
            for subrange in subranges:
                MapOverRange(subrange)
        logdebug("Leave MapOverRange: %s" % keyrange)
Ejemplo n.º 13
0
    def MapOverRange(startpos, endpos, **kwargs):
        """Walk the byte range [startpos, endpos] of the GCS file.

        hwalk yields one page of lines plus any remaining sub-ranges; the
        sub-ranges are processed recursively, then pagemapf is applied to
        this page.
        """
        logdebug("Enter MapOverRange: %s, %s" % (startpos, endpos))

        # open file at gcspath for read
        with gcs.open(gcspath) as gcsfile:
            page, ranges = hwalk(gcsfile, pagesize, initialshards, startpos,
                                 endpos)

        for arange in (ranges or []):
            MapOverRange(arange[0], arange[1])

        if pagemapf:
            pagemapf(page)

        logdebug("Leave MapOverRange: %s, %s" % (startpos, endpos))
Ejemplo n.º 14
0
            def childonsuccess(childfuturekey):
                """Per-child success callback (truncated excerpt).

                NOTE(review): this fragment ends mid-function -- the else
                branch and the enclosing try's handlers are not shown.
                """
                logdebug("Enter childonsuccess: %s, %s, %s" %
                         (futurekey, index, childfuturekey))
                logdebug("results: %s" % json.dumps(results, indent=2))
                try:
                    childfuture = GetFutureAndCheckReady(childfuturekey)

                    try:
                        result = childfuture.get_result()
                    except Exception, ex:
                        # Child failed: fail the top-level future.
                        toplevelfuture = futurekey.get()
                        if toplevelfuture:
                            toplevelfuture.set_failure(ex)
                        else:
                            raise Exception(
                                "Can't load toplevel future for failure")
                    else:
Ejemplo n.º 15
0
 def _set_local_progress_for_success(self):
     """On success, bring local progress up to this future's weight.

     A future whose local progress is below its weight and which has no
     children can claim its full weight as local progress; futures with
     children leave aggregation to the normal progress calculation.
     """
     progressObj = self._get_progressobject()
     logdebug("progressObj = %s" % progressObj)
     weight = self.get_weight(progressObj) or 0
     logdebug("weight = %s" % weight)
     localprogress = self.get_localprogress(progressObj)
     logdebug("localprogress = %s" % localprogress)
     if localprogress < weight and not self.GetChildren():
         logdebug("No children, we can auto set localprogress from weight")
         self.set_localprogress(weight)
Ejemplo n.º 16
0
            def _futurewrapper(headers):
                """Run the future's payload f, translating its outcome.

                Enforces maxretries via the task queue retry-count header,
                records the future's weight, then calls f and maps the
                future machinery's control-flow exceptions onto future
                state (readyforresult / initialised / failure).
                NOTE(review): the excerpt ends at the PermanentTaskFailure
                handler; the success path is not shown here.
                """
                if maxretries:
                    lretryCount = 0
                    try:
                        # X-Appengine-Taskretrycount holds how many times
                        # this task has been retried.
                        lretryCount = int(
                            headers.get("X-Appengine-Taskretrycount",
                                        0)) if headers else 0
                    except:
                        logexception(
                            "Failed trying to get retry count, using 0")

                    if lretryCount > maxretries:
                        raise PermanentTaskFailure(
                            "Too many retries of Future")

                logdebug("inner, futurekey=%s" % futurekey)
                futureobj2 = futurekey.get()
                if futureobj2:
                    futureobj2.set_weight(weight)  # if weight >= 1 else 1)
                else:
                    # Raising makes the task queue retry until the future
                    # entity becomes visible.
                    raise Exception("Future not ready yet")

                try:
                    logdebug("args, kwargs=%s, %s" % (args, kwargs))
                    result = f(futurekey, *args, **kwargs)

                except FutureReadyForResult:
                    # f has outstanding children; mark ready for result.
                    futureobj3 = futurekey.get()
                    if futureobj3:
                        futureobj3.set_readyforesult()

                except FutureNotReadyForResult:
                    futureobj4 = futurekey.get()
                    if futureobj4:
                        futureobj4.set_initialised()

                except PermanentTaskFailure, ptf:
                    # Permanent failure: record it, then re-raise so the
                    # task queue does not retry.
                    try:
                        futureobj5 = futurekey.get()
                        if futureobj5:
                            futureobj5.set_failure(ptf)
                    finally:
                        raise ptf
Ejemplo n.º 17
0
def ndbshardedpagemap(pagemapf=None,
                      ndbquery=None,
                      initialshards=10,
                      pagesize=100,
                      **taskkwargs):
    """Page over all results of ndbquery in parallel key-range shards.

    The keyspace of the query's kind is split into initialshards ranges;
    each range is walked page by page in its own task, with pagemapf
    applied to every page of keys.
    """

    @task(**taskkwargs)
    def MapOverRange(keyrange, **kwargs):
        # Process one page of keys within keyrange, then split and recurse
        # on whatever remains.
        logdebug("Enter MapOverRange: %s" % keyrange)

        lquery = ndbquery() if callable(ndbquery) else ndbquery

        _fixkeyend(keyrange, kind)

        rangequery = keyrange.filter_ndb_query(lquery)

        logdebug(rangequery)

        pagekeys, _, lmore = rangequery.fetch_page(pagesize, keys_only=True)

        if pagemapf:
            pagemapf(pagekeys)

        if lmore and pagekeys:
            remainder = KeyRange(pagekeys[-1], keyrange.key_end,
                                 keyrange.direction, False,
                                 keyrange.include_end)
            subranges = remainder.split_range()
            logdebug("krlist: %s" % subranges)
            for subrange in subranges:
                MapOverRange(subrange)
        logdebug("Leave MapOverRange: %s" % keyrange)

    basequery = ndbquery() if callable(ndbquery) else ndbquery

    kind = basequery.kind

    krlist = KeyRange.compute_split_points(kind, initialshards)
    logdebug("first krlist: %s" % krlist)

    for kr in krlist:
        MapOverRange(kr)
Ejemplo n.º 18
0
def _launch_task(pickled, name, headers):
    """Execute a pickled task payload, guarding against non-taskqueue calls.

    Logs the request headers, verifies the request really originated from
    the task queue (XSRF protection), then unpickles and runs the payload.
    PermanentTaskFailure aborts the task without re-raising; any other
    exception is logged and re-raised so the task queue retries it.
    """
    try:
        lheaderpairs = ["%s:%s" % (key, value) for key, value in headers.items()]
        logdebug(", ".join(lheaderpairs))

        if not isFromTaskQueue(headers):
            raise PermanentTaskFailure(
                'Detected an attempted XSRF attack: we are not executing from a task queue.'
            )

        logdebug('before run "%s"' % name)
        _run(pickled, headers)
        logdebug('after run "%s"' % name)
    except PermanentTaskFailure:
        logexception("Aborting task")
    except:
        logexception("failure")
        raise
Ejemplo n.º 19
0
    def intask(self, nameprefix, f, *args, **kwargs):
        """Run f(*args, **kwargs) inside a (possibly named) task.

        When nameprefix is given, the task is named "<nameprefix>-<id>" so
        duplicate launches collapse onto one task; otherwise any inherited
        name is dropped.  Name-clash errors from the task queue mean the
        task was already launched and are skipped.
        """
        taskkwargs = self.get_taskkwargs()

        # BUGFIX: name must always be bound; the original left it
        # undefined when nameprefix was falsy, so the except handlers
        # below raised NameError instead of logging the skip.
        name = None
        if nameprefix:
            name = "%s-%s" % (nameprefix, self.key.id())
            taskkwargs["name"] = name
        elif taskkwargs.get("name"):
            del taskkwargs["name"]
        taskkwargs["transactional"] = False

        @task(**taskkwargs)
        def dof():
            f(*args, **kwargs)

        try:
            # run the wrapper task, and if it fails due to a name clash just
            # skip it (it was already kicked off by an earlier attempt to
            # construct this future).
            dof()
        except taskqueue.TombstonedTaskError:
            logdebug("skip adding task %s (already been run)" % name)
        except taskqueue.TaskAlreadyExistsError:
            logdebug("skip adding task %s (already running)" % name)
Ejemplo n.º 20
0
def register_test(f=None, name=None, description=None, tags=None, weight=100, **taskkwargs):
    """Register f as a named test in the module-level _tests registry.

    Usable directly or as a decorator (with or without arguments): when f
    is None a functools.partial is returned so the remaining arguments can
    be applied later.

    Args:
        f: the test callable.
        name: registry key; defaults to "<module>.<name>" of f.
        description: human-readable description stored with the test.
        tags: optional list of tag strings.  BUGFIX: the original used a
            mutable default argument (tags=[]), sharing one list across
            every registration that omitted tags; each registration now
            gets a fresh list.
        weight: relative weight of the test (default 100).
        **taskkwargs: task queue arguments stored for later execution.

    Returns:
        f unchanged, so the function works as a decorator.

    Raises:
        Exception: when the resolved name is not a string.
    """
    if not f:
        return functools.partial(register_test, name=name, description=description, tags=tags, weight=weight, **taskkwargs)

    global _tests

    lname = name if name else "%s.%s" % (f.__module__, f.__name__)

    if not isinstance(lname, basestring):
        raise Exception("name must be a string")

    _tests[lname] = {
        "f": f,
        "name": lname,
        "description": description,
        "tags": tags if tags is not None else [],
        "weight": weight,
        "taskkwargs": taskkwargs
    }

    logdebug(_tests)

    return f
Ejemplo n.º 21
0
    def set_localprogress(self, value):
        """Set this future's local progress, propagating upward if needed.

        Persists the new local value, raises calculatedprogress to match
        it when the new value exceeds it, kicks off recalculation of the
        parent's progress, and fires the on-progress callback.
        """
        progressobj = self._get_progressobject()
        localprogress = self.get_localprogress(progressobj)
        calculatedprogress = self.get_calculatedprogress(progressobj)
        # Only write when the value actually changes.
        if localprogress != value:
            progressobj.localprogress = value
            logdebug("localprogress: %s" % value)
            # Local progress can only push the calculated progress up,
            # never down.
            lneedupd = value > calculatedprogress
            if lneedupd:
                logdebug("setting calculated progress")
                progressobj.calculatedprogress = value

            progressobj.put()

            # Parent recalculation happens after the entity is persisted.
            if lneedupd:
                logdebug("kicking off calculate parent progress")
                self._calculate_parent_progress()

            self._callOnProgress()
Ejemplo n.º 22
0
 def InvokeMap(futurekey, line, **kwargs):
     """Apply mapf to a single input line, with entry/exit logging."""
     logdebug("Enter InvokeMap: %s" % line)
     try:
         retval = mapf(line, **kwargs)
         return retval
     finally:
         logdebug("Leave InvokeMap: %s" % line)
Ejemplo n.º 23
0
    def GCSCombineToTarget(futurekey, startindex, finishindex, istop,
                           **kwargs):
        """Recursively compose source blobs [startindex, finishindex) into one.

        Ranges of up to 32 files are composed directly; larger ranges are
        split in two and handled by child futures whose results are merged
        by higherlevelcompose.  NOTE(review): this excerpt appears
        truncated -- the try opened below has no except/finally shown.
        """
        logdebug("Enter GCSCombineToTarget: %s, %s" %
                 (startindex, finishindex))
        try:

            def higherlevelcompose(lop, rop):
                # Merge two child results (dicts with "blobname"/"count")
                # by composing their blobs into a new one; blobs whose
                # combined component count exceeds 1020 are first
                # flattened by copying.
                try:
                    retval = None
                    if lop and rop:
                        blobnames = [lop.get("blobname"), rop.get("blobname")]
                        blobs = getblobsbyname(gcsbucket, *blobnames)
                        if len(blobs) == 2:
                            ltotalcomponent_count = sum(
                                [blob.component_count for blob in blobs])
                            logdebug("ltotalcomponent_count: %s" %
                                     ltotalcomponent_count)
                            if ltotalcomponent_count > 1020:
                                logdebug("doing copying")
                                newblobnames = [
                                    "%s-copy" % blobname
                                    for blobname in blobnames
                                ]
                                for ix, blob in enumerate(blobs):
                                    try:
                                        copyblob(gcsbucket, blob,
                                                 newblobnames[ix])
                                    except Exception:
                                        logexception("deleteblobs(copy)")
                                try:
                                    deleteblobs(gcsbucket, blobs)
                                except Exception:
                                    logexception("deleteblobs(copy)")

                                blobnames = newblobnames
                                blobs = getblobsbyname(gcsbucket, *blobnames)

                            if len(blobs) == 2:
                                llocalfilename = gcstargetfilename if istop else GenerateStableId(
                                    blobnames[0] + blobnames[1])
                                lfilename = "%s/%s-%s" % (gcstargetprefix,
                                                          "composed",
                                                          llocalfilename)
                                retval = composeblobs(gcsbucket, lfilename,
                                                      blobs)
                                retval["count"] = lop.get(
                                    "count", 0) + rop.get("count", 0)
                                try:
                                    deleteblobs(gcsbucket, blobs)
                                except Exception:
                                    logexception("deleteblobs")
                        else:
                            raise Exception("Can't load blobs")
                    else:
                        retval = lop if lop else rop
                    return retval
                except Exception, ex:
                    logexception("higherlevelcompose")
                    raise ex

            onallchildsuccessf = GenerateOnAllChildSuccess(futurekey,
                                                           None,
                                                           higherlevelcompose,
                                                           failonerror=False)

            numfiles = finishindex - startindex

            if numfiles > 32:
                # Too many files to compose in one call: split the range
                # in two and recurse via child futures, then signal that
                # this future is waiting on children.
                ranges = CalculateFileRanges(startindex, finishindex, 2)
                logdebug("ranges:%s" % ranges)
                for r in ranges:
                    futurename = "split %s" % (r, )
                    future(GCSCombineToTarget,
                           futurename=futurename,
                           onallchildsuccessf=onallchildsuccessf,
                           parentkey=futurekey,
                           weight=r[1] - r[0],
                           **taskkwargs)(r[0], r[1], False)
                raise FutureReadyForResult()
            else:
                # Small enough: compose the slice of source blobs directly.
                lblobs = list(listbucket(
                    gcsbucket, gcssourceprefix))[startindex:finishindex]
                lfilename = "%s/%s" % (gcstargetprefix, gcstargetfilename
                                       if istop else "composed-%s-%s" %
                                       (startindex, finishindex))
                retval = composeblobs(gcsbucket, lfilename, lblobs)
                return retval
Ejemplo n.º 24
0
def _fixkeyend(keyrange, kind):
    """Give an open-ended KeyRange an explicit end key.

    When only key_start is set, guess an end key for the kind and apply
    it if it actually extends beyond the start.
    """
    if keyrange.key_start and not keyrange.key_end:
        guessed = KeyRange.guess_end_key(kind, keyrange.key_start)
        if guessed and guessed > keyrange.key_start:
            logdebug("Fixing end: %s" % guessed)
            keyrange.key_end = guessed
Ejemplo n.º 25
0
 def ProcessPage(keys):
     """Invoke the map function for every key in this page, logging each."""
     index = 0
     for key in keys:
         logdebug("Key #%s: %s" % (index, key))
         InvokeMap(key)
         index += 1
Ejemplo n.º 26
0
                        futureobj5 = futurekey.get()
                        if futureobj5:
                            futureobj5.set_failure(ptf)
                    finally:
                        raise ptf
                else:
                    futureobj6 = futurekey.get()
                    if futureobj6:
                        futureobj6.set_success_and_readyforesult(result)

            try:
                # run the wrapper task, and if it fails due to a name clash just skip it (it was already kicked off by an earlier
                # attempt to construct this future).
                _futurewrapper()
            except taskqueue.TombstonedTaskError:
                logdebug("skip adding task (already been run)")
            except taskqueue.TaskAlreadyExistsError:
                logdebug("skip adding task (already running)")

            return futureobj

        manualRetries = 0
        while True:
            try:
                return runfuturetrans()
            except Timeout, tex:
                if manualRetries < 10:
                    manualRetries += 1
                    time.sleep(manualRetries * 5)
                else:
                    raise tex
Ejemplo n.º 27
0
    def OnAllChildSuccess():
        """Aggregate child results into the parent future, when complete.

        Folds each finished child's result into an accumulator (starting
        from initialvalue, combined via combineresultf).  When every child
        has a result the parent succeeds with the accumulated value; a
        child error either fails the parent (failonerror) or is re-raised
        so the caller retries.
        """
        logdebug("Enter GenerateOnAllChildSuccess: %s" % parentkey)
        parentfuture = parentkey.get() if parentkey else None
        if parentfuture and not parentfuture.has_result():
            # The parent must be fully initialised before aggregation;
            # raising triggers a retry of this callback.
            if not parentfuture.initialised or not parentfuture.readyforresult:
                raise Exception("Parent not initialised, retry")

            @ndb.transactional()
            def get_children_trans():
                return get_children(parentfuture.key)

            children = get_children_trans()

            logdebug("children: %s" % [child.key for child in children])
            if children:
                result = initialvalue
                error = None
                finished = True
                # Fold results in child order; stop at the first error.
                for childfuture in children:
                    logdebug("childfuture: %s" % childfuture.key)
                    if childfuture.has_result():
                        try:
                            childresult = childfuture.get_result()
                            logdebug("childresult(%s): %s" %
                                     (childfuture.status, childresult))
                            result = combineresultf(result, childresult)
                            logdebug("hasresult:%s" % result)
                        except Exception, ex:
                            logdebug("haserror:%s" % repr(ex))
                            error = ex
                            break
                    else:
                        logdebug("noresult")
                        finished = False

                if error:
                    logwarning(
                        "Internal error, child has error in OnAllChildSuccess: %s"
                        % error)
                    if failonerror:
                        parentfuture.set_failure(error)
                    else:
                        raise error
                elif finished:
                    logdebug("result: %s" % result)
                    parentfuture.set_success(
                        result)  #(result, initialamount, keyrange))
                else:
                    # Some child still pending; wait for a later call.
                    logdebug(
                        "child not finished in OnAllChildSuccess, skipping")
            else:
                logwarning(
                    "Internal error, parent has no children in OnAllChildSuccess"
                )
                parentfuture.set_failure(Exception("no children found"))
Ejemplo n.º 28
0
        def UpdateParent(parentkey):
            """Recompute the parent future's progress from all children."""
            logdebug("***************************************************")
            logdebug("Enter UpdateParent: %s" % parentkey)
            logdebug("***************************************************")

            parent = parentkey.get()
            logdebug("1: %s" % parent)
            if not parent:
                return
            logdebug("2")
            progress = 0
            for child in get_children(parentkey):
                logdebug("3: %s" % child)
                progress += child.get_progress()
            logdebug("4: %s" % (progress))
            parent.set_progress(progress)
Ejemplo n.º 29
0
def futureparallel(ffseq,
                   parentkey=None,
                   onsuccessf=None,
                   onfailuref=None,
                   onallchildsuccessf=None,
                   onprogressf=None,
                   weight=None,
                   timeoutsec=1800,
                   maxretries=None,
                   futurenameprefix=None,
                   **taskkwargs):
    logdebug("Enter futureparallel: %s" % len(ffseq))
    flist = list(ffseq)

    taskkwargs[
        "futurename"] = "%s (top level)" % futurenameprefix if futurenameprefix else "parallel"

    @future(parentkey=parentkey,
            onsuccessf=onsuccessf,
            onfailuref=onfailuref,
            onallchildsuccessf=onallchildsuccessf,
            onprogressf=onprogressf,
            weight=weight,
            timeoutsec=timeoutsec,
            maxretries=maxretries,
            **taskkwargs)
    def toplevel(futurekey, *args, **kwargs):
        logdebug("Enter futureparallel.toplevel: %s" % futurekey)

        def OnAllChildSuccess():
            logdebug("Enter OnAllChildSuccess: %s" % futurekey)
            parentfuture = futurekey.get() if futurekey else None
            if parentfuture and not parentfuture.has_result():
                if not parentfuture.initialised or not parentfuture.readyforresult:
                    raise Exception("Parent not initialised, retry")

                @ndb.transactional()
                def get_children_trans():
                    return get_children(parentfuture.key)

                children = get_children_trans()

                logdebug("children: %s" % [child.key for child in children])
                if children:
                    result = []
                    error = None
                    finished = True
                    for childfuture in children:
                        logdebug("childfuture: %s" % childfuture.key)
                        if childfuture.has_result():
                            try:
                                childresult = childfuture.get_result()
                                logdebug("childresult(%s): %s" %
                                         (childfuture.status, childresult))
                                result += [childfuture.get_result()]
                                logdebug("intermediate result:%s" % result)
                            except Exception, ex:
                                logdebug("haserror:%s" % repr(ex))
                                error = ex
                                break
                        else:
                            logdebug("noresult")
                            finished = False

                    if error:
                        logwarning(
                            "Internal error, child has error in OnAllChildSuccess: %s"
                            % error)
                        parentfuture.set_failure(error)
                    elif finished:
                        logdebug("result: %s" % result)
                        parentfuture.set_success(result)
                    else:
                        logdebug(
                            "child not finished in OnAllChildSuccess, skipping"
                        )
                else:
                    logwarning(
                        "Internal error, parent has no children in OnAllChildSuccess"
                    )
                    parentfuture.set_failure(Exception("no children found"))

        for ix, ff in enumerate(flist):
            taskkwargs["futurename"] = "%s [%s]" % (
                futurenameprefix if futurenameprefix else "parallel", ix)
            future(ff,
                   parentkey=futurekey,
                   onallchildsuccessf=OnAllChildSuccess,
                   weight=weight / len(flist) if weight else None,
                   timeoutsec=timeoutsec,
                   maxretries=maxretries,
                   **taskkwargs)()

        logdebug("Leave futureparallel.toplevel: %s" % futurekey)
        raise FutureReadyForResult("parallel started")
# Ejemplo n.º 30
# 0
        def runfuturetrans():
            logdebug("runfuture: parentkey=%s" % parentkey)

            immediateancestorkey = ndb.Key(
                parentkey.kind(), parentkey.id()) if parentkey else None

            taskkwargscopy = dict(taskkwargs)
            if not "name" in taskkwargscopy:
                # can only set transactional if we're not naming the task
                taskkwargscopy["transactional"] = True
                newfutureId = str(uuid.uuid4())  # id doesn't need to be stable
            else:
                # if we're using a named task, we need the key to remain stable in case of transactional retries
                # what can happen is that the task is launched, but the transaction doesn't commit.
                # retries will then always fail to launch the task because it is already launched.
                # therefore retries need to use the same future key id, so that once this transaction does commit,
                # the earlier launch of the task will match up with it.
                taskkwargscopy["transactional"] = False
                newfutureId = GenerateStableId(taskkwargs["name"])

            newkey = ndb.Key(_Future, newfutureId, parent=immediateancestorkey)

            #         logdebug("runfuture: ancestorkey=%s" % immediateancestorkey)
            #         logdebug("runfuture: newkey=%s" % newkey)

            futureobj = _Future(
                key=newkey
            )  # just use immediate ancestor to keep entity groups at local level, not one for the entire tree

            futureobj.parentkey = parentkey  # but keep the real parent key for lookups

            if onsuccessf:
                futureobj.onsuccessfser = cloudpickle.dumps(onsuccessf)
            if onfailuref:
                futureobj.onfailurefser = cloudpickle.dumps(onfailuref)
            if onallchildsuccessf:
                futureobj.onallchildsuccessfser = cloudpickle.dumps(
                    onallchildsuccessf)
            if onprogressf:
                futureobj.onprogressfser = cloudpickle.dumps(onprogressf)
            futureobj.taskkwargsser = cloudpickle.dumps(taskkwargs)

            #         futureobj.onsuccessfser = yccloudpickle.dumps(onsuccessf) if onsuccessf else None
            #         futureobj.onfailurefser = yccloudpickle.dumps(onfailuref) if onfailuref else None
            #         futureobj.onallchildsuccessfser = yccloudpickle.dumps(onallchildsuccessf) if onallchildsuccessf else None
            #         futureobj.onprogressfser = yccloudpickle.dumps(onprogressf) if onprogressf else None
            #         futureobj.taskkwargsser = yccloudpickle.dumps(taskkwargs)

            #         futureobj.set_weight(weight if weight >= 1 else 1)

            futureobj.timeoutsec = timeoutsec

            futureobj.name = futurename

            futureobj.put()
            #         logdebug("runfuture: childkey=%s" % futureobj.key)

            futurekey = futureobj.key
            logdebug("outer, futurekey=%s" % futurekey)

            @task(includeheaders=True, **taskkwargscopy)
            def _futurewrapper(headers):
                if maxretries:
                    lretryCount = 0
                    try:
                        lretryCount = int(
                            headers.get("X-Appengine-Taskretrycount",
                                        0)) if headers else 0
                    except:
                        logexception(
                            "Failed trying to get retry count, using 0")

                    if lretryCount > maxretries:
                        raise PermanentTaskFailure(
                            "Too many retries of Future")

                logdebug("inner, futurekey=%s" % futurekey)
                futureobj2 = futurekey.get()
                if futureobj2:
                    futureobj2.set_weight(weight)  # if weight >= 1 else 1)
                else:
                    raise Exception("Future not ready yet")

                try:
                    logdebug("args, kwargs=%s, %s" % (args, kwargs))
                    result = f(futurekey, *args, **kwargs)

                except FutureReadyForResult:
                    futureobj3 = futurekey.get()
                    if futureobj3:
                        futureobj3.set_readyforesult()

                except FutureNotReadyForResult:
                    futureobj4 = futurekey.get()
                    if futureobj4:
                        futureobj4.set_initialised()

                except PermanentTaskFailure, ptf:
                    try:
                        futureobj5 = futurekey.get()
                        if futureobj5:
                            futureobj5.set_failure(ptf)
                    finally:
                        raise ptf
                else: