Example #1
def querylocks(request):

    try:
        key = request.GET["key"].strip()
    except KeyError:
        # Fail.  Missing required arguments.
        return HttpResponse("0<br>Missing arguments.")

    authquery = Uploader.objects.filter(key=key)

    # Authenticate the uploader.
    try:
        uploader = authquery[0]
    except IndexError:
        # Fail. No auth key match.
        return HttpResponse("0<br>Authentication failed.")
    else:
        uploaderid = uploader.id

    lock_triples = BucketLockManager.query_locks(uploaderid)

    pairs = ""
    numlocks = 0

    for (court, casenum, nonce) in lock_triples:
        pairs += "%s,%s,%s<br>" % (court, casenum, nonce)
        numlocks += 1

    return HttpResponse("%d<br>%s" % (numlocks, pairs))
Example #2
def lock(request):

    try:
        key = request.GET["key"].strip()
        court = request.GET["court"].strip()
        casenum = request.GET["casenum"].strip()
        one_per_uploader = 1 if request.GET.get('one_per_uploader') else 0
    except KeyError:
        # Fail.  Missing required arguments.
        return HttpResponse("0<br>Missing arguments.")

    authquery = Uploader.objects.filter(key=key)

    # Authenticate the uploader.
    try:
        uploader = authquery[0]
    except IndexError:
        # Fail. No auth key match.
        return HttpResponse("0<br>Authentication failed.")
    else:
        uploaderid = uploader.id

    # Try to grab the lock.
    lock_nonce, errmsg = BucketLockManager.get_lock(court, casenum,
                                                    uploaderid, one_per_uploader)

    if lock_nonce:
        return HttpResponse("1<br>%s" % lock_nonce)
    else:
        return HttpResponse("0<br>%s" % errmsg)
Example #3
def unlock(request):

    try:
        key = request.GET["key"].strip()
        court = request.GET["court"].strip()
        casenum = request.GET["casenum"].strip()
        modified = bool(int(request.GET["modified"]))
        ignore_nonce = bool(int(request.GET["nononce"]))
    except KeyError:
        # Fail.  Missing required arguments.
        return HttpResponse("0<br>Missing arguments.")
    except ValueError:
        return HttpResponse("0<br>Invalid integer boolean for 'modified'.")

    authquery = Uploader.objects.filter(key=key)

    # Authenticate the uploader.
    try:
        uploader = authquery[0]
    except IndexError:
        # Fail. No auth key match.
        return HttpResponse("0<br>Authentication failed.")
    else:
        uploaderid = uploader.id

    dropped, errmsg = BucketLockManager.drop_lock(court, casenum, uploaderid,
                                                  modified=modified,
                                                  ignore_nonce=ignore_nonce)

    if dropped:
        return HttpResponse("1")
    else:
        return HttpResponse("0<br>%s" % errmsg)
Example #4
def querylocks(request):
    try:
        key = request.GET["key"].strip()
    except KeyError:
        # Fail.  Missing required arguments.
        return HttpResponse("0<br>Missing arguments.")

    authquery = Uploader.objects.filter(key=key)

    # Authenticate the uploader.
    try:
        uploader = authquery[0]
    except IndexError:
        # Fail. No auth key match.
        return HttpResponse("0<br>Authentication failed.")
    else:
        uploaderid = uploader.id

    lock_triples = BucketLockManager.query_locks(uploaderid)

    pairs = ""
    numlocks = 0
    for (court, casenum, nonce) in lock_triples:
        pairs += "%s,%s,%s<br>" % (court, casenum, nonce)
        numlocks += 1

    return HttpResponse("%d<br>%s" % (numlocks, pairs))
Example #5
def unlock(request):
    try:
        key = request.GET["key"].strip()
        court = request.GET["court"].strip()
        casenum = request.GET["casenum"].strip()
        modified = bool(int(request.GET["modified"]))
        ignore_nonce = bool(int(request.GET["nononce"]))
    except KeyError:
        # Fail.  Missing required arguments.
        return HttpResponse("0<br>Missing arguments.")
    except ValueError:
        return HttpResponse("0<br>Invalid integer boolean for 'modified'.")

    authquery = Uploader.objects.filter(key=key)

    # Authenticate the uploader.
    try:
        uploader = authquery[0]
    except IndexError:
        # Fail. No auth key match.
        return HttpResponse("0<br>Authentication failed.")
    else:
        uploaderid = uploader.id

    dropped, errmsg = BucketLockManager.drop_lock(court,
                                                  casenum,
                                                  uploaderid,
                                                  modified=modified,
                                                  ignore_nonce=ignore_nonce)

    if dropped:
        return HttpResponse("1")
    else:
        return HttpResponse("0<br>%s" % errmsg)
Example #6
def lock(request):
    try:
        key = request.GET["key"].strip()
        court = request.GET["court"].strip()
        casenum = request.GET["casenum"].strip()
        one_per_uploader = 1 if request.GET.get('one_per_uploader') else 0
    except KeyError:
        # Fail.  Missing required arguments.
        return HttpResponse("0<br>Missing arguments.")

    authquery = Uploader.objects.filter(key=key)

    # Authenticate the uploader.
    try:
        uploader = authquery[0]
    except IndexError:
        # Fail. No auth key match.
        return HttpResponse("0<br>Authentication failed.")
    else:
        uploaderid = uploader.id

    # Try to grab the lock.
    lock_nonce, errmsg = BucketLockManager.get_lock(court, casenum, uploaderid,
                                                    one_per_uploader)

    if not lock_nonce or lock_nonce == 'bigdoc':
        return HttpResponse("0<br>%s" % errmsg)
    else:
        return HttpResponse("1<br>%s" % lock_nonce)
Example #7
def _cron_get_updates():
    ''' Async fetch and update after a lock has been unlocked. '''

    # Calculate the TIMEOUT cutoff
    now = datetime.datetime.now()
    timeout_delta = datetime.timedelta(seconds=LOCK_TIMEOUT)
    timeout_cutoff = now - timeout_delta

    # Set both ready and expired locks to the 'processing' state.
    readylocks = BucketLockManager.mark_ready_for_processing(timeout_cutoff)
    expiredlocks = BucketLockManager.mark_expired_for_processing(timeout_cutoff)

    # Then, go through the ready locks one-by-one with HTTP waiting.
    for lock in readylocks:
        _cron_fetch_update(lock)
    for expiredlock in expiredlocks:
        court = unicode(expiredlock.court)
        casenum = unicode(expiredlock.casenum)
        print "  %s.%s lock expired." % (court, casenum)
        _cron_fetch_update(expiredlock)
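
The cutoff is plain datetime arithmetic: any lock last touched more than LOCK_TIMEOUT seconds ago counts as expired. A quick worked example, using an illustrative LOCK_TIMEOUT value:

import datetime

LOCK_TIMEOUT = 600  # illustrative value, in seconds

now = datetime.datetime(2011, 5, 1, 12, 0, 0)
cutoff = now - datetime.timedelta(seconds=LOCK_TIMEOUT)
print cutoff  # 2011-05-01 11:50:00
# A lock last modified at 11:45 falls before the cutoff and is expired;
# one touched at 11:55 is still live.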
Example #8
def _cron_fetch_update(lock):
    court = unicode(lock.court)
    casenum = unicode(lock.casenum)
    nonce = unicode(lock.nonce)

    docketstring, fetcherror = IADirect.get_docket_string(court, casenum)

    if not docketstring:
        # Couldn't get the docket.  Try again later.

        if nonce:
            BucketLockManager.try_lock_later(lock)
        else:
            lock.delete()
        print "  %s.%s couldn't fetch the docket: %d" % (court, casenum,
                                                         fetcherror)
        return

    ia_docket, message = DocketXML.parse_xml_string(docketstring)

    if not ia_docket:
        # Docket parsing error.

        if nonce:
            BucketLockManager.try_lock_later(lock)
        else:
            lock.delete()
        print "  %s.%s docket parsing error: %s" % (court, casenum, message)
        return
    elif ia_docket.nonce == nonce or not nonce:
        # Got the docket and it is either:
        #  1. up-to-date (nonce match), or
        #  2. expired (ignore nonce)
        # In both scenarios, update the local DB.
        DocumentManager.update_local_db(ia_docket, ignore_available=0)

        print "  %s.%s fetched and DB updated." % (court, casenum)

        ia_docket_orig_hash = hash(pickle.dumps(ia_docket))

        local_docket = DocumentManager.create_docket_from_local_documents(
            court, casenum)

        if local_docket:
            ia_docket.merge_docket(local_docket)

        ia_docket_after_local_merge_hash = hash(pickle.dumps(ia_docket))

        if ia_docket_orig_hash != ia_docket_after_local_merge_hash:
            print " After fetch, some locally stored information was " \
                  "missing from %s.%s. Local info addition scheduled." % (
                      court, casenum)
            UploadHandler.do_me_up(ia_docket)

        # Remove the lock.
        lock.delete()
    else:
        # Got the docket but it is not up to date.  Try again later.
        BucketLockManager.try_lock_later(lock)
        print "  %s.%s fetched, wait more." % (court, casenum)
Example #9
def _cron_fetch_update(lock):
    court = unicode(lock.court)
    casenum = unicode(lock.casenum)
    nonce = unicode(lock.nonce)

    docketstring, fetcherror = IADirect.get_docket_string(court, casenum)

    if not docketstring:
        # Couldn't get the docket.  Try again later.

        if nonce:
            BucketLockManager.try_lock_later(lock)
        else:
            lock.delete()
        print "  %s.%s couldn't fetch the docket: %d" % (court, casenum,
                                                         fetcherror)
        return

    ia_docket, message = DocketXML.parse_xml_string(docketstring)

    if not ia_docket:
        # Docket parsing error.

        if nonce:
            BucketLockManager.try_lock_later(lock)
        else:
            lock.delete()
        print "  %s.%s docket parsing error: %s" % (court, casenum,
                                                    message)
        return
    elif ia_docket.nonce == nonce or not nonce:
        # Got the docket and it is either:
        #  1. up-to-date (nonce match), or
        #  2. expired (ignore nonce)
        # In both scenarios, update the local DB.
        DocumentManager.update_local_db(ia_docket, ignore_available=0)

        print "  %s.%s fetched and DB updated." % (court, casenum)

        ia_docket_orig_hash = hash(pickle.dumps(ia_docket))

        local_docket = DocumentManager.create_docket_from_local_documents(
            court, casenum)

        if local_docket:
            ia_docket.merge_docket(local_docket)

        ia_docket_after_local_merge_hash = hash(pickle.dumps(ia_docket))

        if ia_docket_orig_hash != ia_docket_after_local_merge_hash:
            print " After fetch, some locally stored information was missing from %s.%s. Local info addition scheduled."  % (court, casenum)
            UploadHandler.do_me_up(ia_docket)

        # Remove the lock.
        lock.delete()
    else:
        # Got the docket but it is not up to date.  Try again later.
        BucketLockManager.try_lock_later(lock)
        print "  %s.%s fetched, wait more." % (court, casenum)
Example #10
def _cron_get_updates():
    """ Async fetch and update after a lock has been unlocked. """

    # Calculate the TIMEOUT cutoff
    now = datetime.datetime.now()
    timeout_delta = datetime.timedelta(seconds=LOCK_TIMEOUT)
    timeout_cutoff = now - timeout_delta

    # Set both ready and expired locks to the 'processing' state.
    readylocks = BucketLockManager.mark_ready_for_processing(timeout_cutoff)
    # expiredlocks = BucketLockManager.mark_expired_for_processing(timeout_cutoff)

    # Then, go through the ready locks one-by-one with HTTP waiting.
    for lock in readylocks:
        _cron_fetch_update(lock)
Example #11
def _cron_put_pickles():

    # Get uploader credentials.
    uploader_query = Uploader.objects.filter(key=AUTH_KEY)
    try:
        RECAP_UPLOADER_ID = uploader_query[0].id
    except IndexError:
        print "  could not find uploader with key=%s" % AUTH_KEY
        return

    # Get all ready pickles
    query = PickledPut.objects.filter(ready=1, processing=0) \
                              .order_by('-filename')

    # Set all ready pickles to the processing state
    #for ppentry in query:
    #    ppentry.processing = 1
    #    ppentry.save()

    # Keep track of court, casenum.  Only lock and unlock once for each case.
    curr_court = None
    curr_casenum = None
    lock_nonce = None

    # Process pickles one at a time.
    for ppentry in query:

        filename = ppentry.filename

        ppmeta = IACommon.get_meta_from_filename(filename)

        court = ppmeta["court"]
        casenum = ppmeta["casenum"]

        # Make sure we have the lock for this case.

        if curr_court == court and curr_casenum == casenum:
            # Same case as the previous ppentry.

            if not lock_nonce:
                # Skip if we don't have the lock already.
                # ppentry.processing = 0
                # ppentry.save()
                continue

            # Otherwise, we already have the lock, so continue.

        else:
            # Switching to a new case.

            # Drop the current lock (from previous case), if necessary.
            if curr_court and curr_casenum:
                dropped, errmsg = BucketLockManager.drop_lock(curr_court,
                                                              curr_casenum,
                                                              RECAP_UPLOADER_ID,
                                                              nolocaldb=1)
                if not dropped:
                    print "  %s.%s someone stole my lock?" % \
                        (court, unicode(casenum))

            # Grab new lock
            curr_court = court
            curr_casenum = casenum

            lock_nonce, errmsg = BucketLockManager.get_lock(court, casenum,
                                                            RECAP_UPLOADER_ID,
                                                            one_per_uploader=1)

            if not lock_nonce:
                print "  Passing on %s.%s: %s" % (court, casenum, errmsg)

                # We don't have a lock, so don't drop the lock in the next loop
                curr_court = None
                curr_casenum = None
                continue

        # We'll always have the lock here.

        # Unpickle the object
        obj, unpickle_msg = unpickle_object(filename)

        # Two cases for the unpickled object: Request or DocketXML
        if obj and ppentry.docket:
            _cron_process_docketXML(obj, ppentry)

        elif obj:
            # Dispatch the PUT request

            _cron_process_PDF(obj, ppentry)

        else:
            # Unpickling failed.
            # If unpickling fails, it could mean that another cron job
            # has already finished this PP - not sure how to distinguish this.
            print "  %s %s (Another cron job completed?)" % (filename,
                                                             unpickle_msg)

            # Delete the entry from the DB
            ppentry.delete()
            # Delete the pickle file
            delete_pickle(filename)

    # Drop last lock
    if curr_court and curr_casenum:
        dropped, errmsg = BucketLockManager.drop_lock(curr_court, curr_casenum,
                                                      RECAP_UPLOADER_ID,
                                                      nolocaldb=1)
        if not dropped:
            print "  %s.%s someone stole my lock??" % (court, unicode(casenum))
Example #12
def do_me_up(docket):
    ''' Download, merge and update the docket with IA. '''
    # Pickle this object for do_me_up by the cron process.

    court = docket.get_court()
    casenum = docket.get_casenum()

    docketname = IACommon.get_docketxml_name(court, casenum)

    # Check if this docket is already scheduled to be processed.
    query = PickledPut.objects.filter(filename=docketname)

    try:
        ppentry = query[0]
    except IndexError:
        # Not already scheduled, so schedule it now.
        ppentry = PickledPut(filename=docketname, docket=1)

        try:
            ppentry.save()
        except IntegrityError:
            # Try again.
            do_me_up(docket)
        else:
            # Pickle this object.
            pickle_success, msg = IA.pickle_object(docket, docketname)

            if pickle_success:
                # Ready for processing.
                ppentry.ready = 1
                ppentry.save()

                logging.info("do_me_up: ready. %s" % (docketname))
            else:
                # Pickle failed, remove from DB.
                ppentry.delete()
                logging.error("do_me_up: %s %s" % (msg, docketname))

    else:
        # Already scheduled.
        # If there is a lock for this case, it's being uploaded. Don't merge now
        locked = BucketLockManager.lock_exists(court, casenum)
        if ppentry.ready and not locked:
            # Docket is waiting to be processed by cron job.

            # Revert state back to 'not ready' so we can do local merge.
            ppentry.ready = 0
            ppentry.save()

            # Fetch and unpickle the waiting docket.
            prev_docket, unpickle_msg = IA.unpickle_object(docketname)

            if prev_docket:

                # Do the local merge.
                prev_docket.merge_docket(docket)

                # Pickle it back
                pickle_success, pickle_msg = \
                    IA.pickle_object(prev_docket, docketname)

                if pickle_success:
                    # Merged and ready.
                    ppentry.ready = 1
                    ppentry.save()
                    logging.info("do_me_up: merged and ready. %s" %(docketname))
                else:
                    # Re-pickle failed, delete.
                    ppentry.delete()
                    logging.error("do_me_up: re-%s %s" % (pickle_msg,
                                                          docketname))

            else:
                # Unpickle failed
                ppentry.delete()
                IA.delete_pickle(docketname)
                logging.error("do_me_up: %s %s" % (unpickle_msg, docketname))


        # Ignore if in any of the other three possible state...
        #   because another cron job is already doing work on this entity
        # Don't delete DB entry or pickle file.
        elif ppentry.ready and locked:
            pass
            #logging.debug("do_me_up: %s discarded, processing conflict." %
            #              (docketname))
        elif not ppentry.ready and not locked:
            pass
            #logging.debug("do_me_up: %s discarded, preparation conflict." %
            #              (docketname))
        else:
            logging.error("do_me_up: %s discarded, inconsistent state." %
                          (docketname))
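
The tail of the function is effectively a two-bit state machine over (ppentry.ready, lock exists). Spelled out as a standalone helper, purely for readability (the function is mine, not the project's):

def classify(ready, locked):
    # Mirrors the four branches at the end of do_me_up.
    if ready and not locked:
        return "waiting for cron: merge locally now"
    if ready and locked:
        return "processing conflict: leave alone"
    if not ready and not locked:
        return "preparation conflict: leave alone"
    return "inconsistent state: log an error"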
Example #13
def _cron_put_pickles():
    # Get uploader credentials.
    uploader_query = Uploader.objects.filter(key=AUTH_KEY)
    try:
        RECAP_UPLOADER_ID = uploader_query[0].id
    except IndexError:
        print "  could not find uploader with key=%s" % AUTH_KEY
        return

    # Get all ready pickles
    query = PickledPut.objects.filter(ready=1, processing=0) \
        .order_by('-filename')

    # Set all ready pickles to the processing state
    # for ppentry in query:
    #    ppentry.processing = 1
    #    ppentry.save()

    # Keep track of court, casenum.  Only lock and unlock once for each case.
    curr_court = None
    curr_casenum = None
    lock_nonce = None

    # Process pickles one at a time.
    for ppentry in query:

        filename = ppentry.filename

        ppmeta = IACommon.get_meta_from_filename(filename)

        court = ppmeta["court"]
        casenum = ppmeta["casenum"]

        # Make sure we have the lock for this case.

        if curr_court == court and curr_casenum == casenum:
            # Same case as the previous ppentry.

            if not lock_nonce:
                # Skip if we don't have the lock already.
                # ppentry.processing = 0
                # ppentry.save()
                continue

            # Otherwise, we already have the lock, so continue.

        else:
            # Switching to a new case.

            # Drop the current lock (from previous case), if necessary.
            if curr_court and curr_casenum:
                dropped, errmsg = BucketLockManager.drop_lock(
                    curr_court, curr_casenum, RECAP_UPLOADER_ID, nolocaldb=1)
                if not dropped:
                    print "  %s.%s someone stole my lock?" % \
                          (court, unicode(casenum))

            # Grab new lock
            curr_court = court
            curr_casenum = casenum

            lock_nonce, errmsg = BucketLockManager.get_lock(court,
                                                            casenum,
                                                            RECAP_UPLOADER_ID,
                                                            one_per_uploader=1)
            if not lock_nonce:
                print "  Passing on %s.%s: %s" % (court, casenum, errmsg)

            if not lock_nonce or lock_nonce == 'bigdoc':
                # We don't have a lock, so don't drop the lock in the next loop
                curr_court = None
                curr_casenum = None
                continue

        # We'll always have the lock here.

        # Unpickle the object
        obj, unpickle_msg = unpickle_object(filename)

        # Two cases for the unpickled object: Request or DocketXML
        if obj and ppentry.docket:
            print "Processing docket: %s" % filename
            _cron_process_docketXML(obj, ppentry)

        elif obj:
            # Dispatch the PUT request

            _cron_process_PDF(obj, ppentry)

        else:
            # Unpickling failed
            # If unpickling fails, it could mean that another cron job
            # has already finished this PP - not sure how to distinguish this
            print "  %s %s (Another cron job completed?)" % (filename,
                                                             unpickle_msg)

            # Delete the entry from the DB
            ppentry.delete()
            # Delete the pickle file
            delete_pickle(filename)

    # Drop last lock
    if curr_court and curr_casenum:
        dropped, errmsg = BucketLockManager.drop_lock(curr_court,
                                                      curr_casenum,
                                                      RECAP_UPLOADER_ID,
                                                      nolocaldb=1)
        if not dropped:
            print "  %s.%s someone stole my lock??" % (court, unicode(casenum))
Example #14
def do_me_up(docket):
    """ Download, merge and update the docket with IA. """
    # Pickle this object for do_me_up by the cron process.

    court = docket.get_court()
    casenum = docket.get_casenum()

    docketname = IACommon.get_docketxml_name(court, casenum)

    # Check if this docket is already scheduled to be processed.
    query = PickledPut.objects.filter(filename=docketname)

    try:
        ppentry = query[0]
    except IndexError:
        # Not already scheduled, so schedule it now.
        ppentry = PickledPut(filename=docketname, docket=1)

        try:
            ppentry.save()
        except IntegrityError:
            # Try again.
            do_me_up(docket)
        else:
            # Pickle this object.
            pickle_success, msg = IA.pickle_object(docket, docketname)

            if pickle_success:
                # Ready for processing.
                ppentry.ready = 1
                ppentry.save()

                logging.info("do_me_up: ready. %s" % (docketname))
            else:
                # Pickle failed, remove from DB.
                ppentry.delete()
                logging.error("do_me_up: %s %s" % (msg, docketname))

    else:
        # Already scheduled.
        # If there is a lock for this case, it's being uploaded. Don't merge now
        locked = BucketLockManager.lock_exists(court, casenum)
        if ppentry.ready and not locked:
            # Docket is waiting to be processed by cron job.

            # Revert state back to 'not ready' so we can do local merge.
            ppentry.ready = 0
            ppentry.save()

            # Fetch and unpickle the waiting docket.
            prev_docket, unpickle_msg = IA.unpickle_object(docketname)

            if prev_docket:

                # Do the local merge.
                prev_docket.merge_docket(docket)

                # Pickle it back
                pickle_success, pickle_msg = \
                    IA.pickle_object(prev_docket, docketname)

                if pickle_success:
                    # Merged and ready.
                    ppentry.ready = 1
                    ppentry.save()
                    logging.info(
                        "do_me_up: merged and ready. %s" % (docketname))
                else:
                    # Re-pickle failed, delete.
                    ppentry.delete()
                    logging.error("do_me_up: re-%s %s" % (pickle_msg,
                                                          docketname))

            else:
                # Unpickle failed
                ppentry.delete()
                IA.delete_pickle(docketname)
                logging.error("do_me_up: %s %s" % (unpickle_msg, docketname))


        # Ignore if in any of the other three possible state...
        # because another cron job is already doing work on this entity
        # Don't delete DB entry or pickle file.
        elif ppentry.ready and locked:
            pass
            #logging.debug("do_me_up: %s discarded, processing conflict." %
            #              (docketname))
        elif not ppentry.ready and not locked:
            pass
            #logging.debug("do_me_up: %s discarded, preparation conflict." %
            #              (docketname))
        else:
            logging.error("do_me_up: %s discarded, inconsistent state." %
                          (docketname))