Example #1
def base_start(request):
    session = safe_get_session(request)
    data = get_data(request)

    dump_io_to_file(session, "start", request)

    with get_collection(session) as col:
        col_handler = SyncCollectionHandler(col)

        # FIXME: for the moment we simply ignore the 'offset' parameter and hope it works
        # as long as we only use the V1 scheduler
        output = col_handler.start(min_usn=data.get("minUsn"),
                                   lnewer=data.get("lnewer"),
                                   offset=data.get("offset"))

        ## The following stores values that are needed by subsequent calls in a sync flow and
        ## are produced as a side-effect of start. This obviously needs to be completely rethought!!!
        ##
        ## We also need the dump_base in the media, so we set it for the first time in meta,
        ## as start isn't called if there is no main db sync needed.
        ##
        ## FIXME: this needs serious testing, at least for:
        ## - a failure followed by starting again with a new sync
        session["min_usn"] = col_handler.min_usn
        session["max_usn"] = col_handler.max_usn
        session["lnewer"] = col_handler.lnewer
        session.save()

        resp = JsonResponse(output)

    dump_io_to_file(session, "start", resp)

    return resp
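
The comments in base_start show that min_usn, max_usn and lnewer are stored on the session only so that later calls in the sync flow can recover them. The handler's constructor is not part of these examples, so the following is a minimal sketch, purely an assumption, of how a later view's SyncCollectionHandler(col, session=session) might read those values back:

# Hypothetical sketch only: restore the bounds that base_start saved on the session.
class SyncCollectionHandler:
    def __init__(self, col, session=None):
        self.col = col
        if session is not None:
            # values written by base_start as a side-effect of start()
            self.min_usn = session.get("min_usn")
            self.max_usn = session.get("max_usn")
            self.lnewer = session.get("lnewer")
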
Example #2
def base_finish(request):
    session = safe_get_session(request)

    dump_io_to_file(session, "finish", request)

    with get_collection(session) as col:
        col_handler = SyncCollectionHandler(col, session=session)

        resp = HttpResponse(col_handler.finish())
    dump_io_to_file(session, "finish", resp)

    return resp
Example #3
def base_sanityCheck2(request):
    session = safe_get_session(request)
    data = get_data(request)

    dump_io_to_file(session, "sanityCheck2", request)

    with get_collection(session) as col:
        col_handler = SyncCollectionHandler(col, session=session)
        output = col_handler.sanityCheck2(client=data.get("client"))
        resp = JsonResponse(output)
    dump_io_to_file(session, "sanityCheck2", resp)

    return resp
Example #4
def base_applyGraves(request):
    session = safe_get_session(request)
    data = get_data(request)

    dump_io_to_file(session, "applyGraves", request)

    with get_collection(session) as col:
        col_handler = SyncCollectionHandler(col, session=session)
        col_handler.applyGraves(chunk=data.get("chunk"))

        resp = JsonResponse({})
    dump_io_to_file(session, "applyGraves", resp)

    return resp
Example #5
def base_applyChanges(request):
    session = safe_get_session(request)
    data = get_data(request)

    dump_io_to_file(session, "applyChanges", request)

    with get_collection(session) as col:
        col_handler = SyncCollectionHandler(col, session=session)

        ## experiment: apply the changes in chunks rather than all in one go
        # output = col_handler.applyChanges(changes=data.get("changes"))
        # session["tablesLeft"] = col_handler.tablesLeft
        # cache.set(session.skey, session)

        output = col_handler.applyChanges(changes=data.get("changes"))
        resp = JsonResponse(output)

    dump_io_to_file(session, "applyChanges", resp)
    return resp
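
The commented-out lines above hint at a chunked variant where the number of tables still to be sent (tablesLeft) is kept on the session so that later requests know whether more data is pending. A minimal sketch of that idea, reusing the names from the comment (tablesLeft, session.skey) and assuming Django's cache framework:

from django.core.cache import cache

def apply_changes_in_chunks(col_handler, session, data):
    # Apply the changes, then remember how many tables still have rows to send.
    output = col_handler.applyChanges(changes=data.get("changes"))
    session["tablesLeft"] = col_handler.tablesLeft  # attribute named in the comment above
    cache.set(session.skey, session)                # skey as used in the comment above
    return output

def more_chunks_pending(session):
    # Companion check a later chunk request could use before sending another batch.
    return bool(session.get("tablesLeft"))
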
Example #6
def base_chunk(request):
    session = safe_get_session(request)  # performs auth; raises an error if not authenticated

    dump_io_to_file(session, "chunk", request)

    with get_collection(session) as col:
        col_handler = SyncCollectionHandler(col, session=session)

        ## FIXME: this is where the chunking needs to happen. The original call was to col_handler.chunk(),
        ## which ran on a persistent thread holding a database cursor that returns chunks of up to 250 rows
        ## and can be called as many times as required until none are left. That is obviously not workable
        ## in our context. all_data_to_sync_down instead updates the db to say that everything has already
        ## been transferred, though in theory the same chunking logic could be kept by using a cache value
        ## in place of the cursor (we can cache the rows returned but not the cursor itself).

        all_new_data = col_handler.all_data_to_sync_down()

        resp = JsonResponse(all_new_data, safe=False)

    dump_io_to_file(session, "chunk", resp)

    return resp
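
The FIXME in base_chunk describes the original cursor-based chunk(), which handed out up to 250 rows per call, and suggests the same behaviour could be reproduced by caching the rows instead of the cursor. A minimal sketch of that cache-backed approach, assuming all_data_to_sync_down() returns the rows as a list and reusing session.skey from the earlier comment (both assumptions):

from django.core.cache import cache

CHUNK_SIZE = 250  # batch size mentioned in the FIXME above

def cached_chunk(session, col_handler):
    # Hypothetical: fetch everything once, cache the remaining rows, and hand out
    # CHUNK_SIZE of them per request until none are left, mimicking the cursor-based
    # chunk() without keeping a cursor alive between requests.
    key = "chunk_rows:%s" % session.skey
    rows = cache.get(key)
    if rows is None:
        rows = col_handler.all_data_to_sync_down()
    batch, rest = rows[:CHUNK_SIZE], rows[CHUNK_SIZE:]
    if rest:
        cache.set(key, rest)
    else:
        cache.delete(key)
    return {"done": not rest, "rows": batch}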