Example #1
0
def addClassicData(request):
    """Fetch feed data for every channel and store its aggregation.

    Reads the optional ``records``, ``start`` and ``end`` query
    parameters.  When neither ``records`` nor ``start`` is supplied,
    defaults to 8000 records starting from the beginning of the feed.
    """
    results = request.GET.get('records', None)
    start = request.GET.get('start', None)
    end = request.GET.get('end', None)

    # Default window: 8000 records, empty start marker.
    if not (results or start):
        results, start = 8000, ""

    for channel in Channel.objects.all():
        getFeedData(channel.data_id, start, results)
        storeAggregatedData(channel.id, start, end)

    return HttpResponse("Done")
Example #2
0
def addClassicData(request):
    """Pull feed data for each channel and aggregate it.

    Query parameters (all optional): ``records`` (how many records to
    fetch), ``start`` and ``end``.  If both ``records`` and ``start``
    are absent, 8000 records from an empty start marker are requested.
    """
    params = request.GET
    results = params.get('records', None)
    start = params.get('start', None)
    end = params.get('end', None)

    if not results:
        if not start:
            # Neither parameter given: fall back to the default window.
            results = 8000
            start = ""

    channels = Channel.objects.all()
    for channel in channels:
        getFeedData(channel.data_id, start, results)
        storeAggregatedData(channel.id, start, end)

    return HttpResponse("Done")
Example #3
0
def storecalculatedData():
    """Compute and store aggregated data under a cache-based lock.

    Uses ``cache.add`` as an atomic lock so that only one worker runs
    the aggregation at a time.  Returns whatever ``storeAggregatedData``
    returns, or ``None`` when another worker already holds the lock.
    """
    # Implementing locking of tasks.
    # FIX: hashlib.md5 requires bytes in Python 3, so encode the text first.
    text = "creating aggregated data"
    task_hexdigest = md5(text.encode("utf-8")).hexdigest()
    lock_id = "{0}-lock-{1}".format("storecalculatedData", task_hexdigest)

    # cache.add fails if the key already exists
    acquire_lock = lambda: cache.add(lock_id, "true", LOCK_EXPIRE)

    # memcache delete is very slow, but we have to use it to take
    # advantage of using add() for atomic locking
    release_lock = lambda: cache.delete(lock_id)

    if acquire_lock():
        try:
            logger.info("Start storage of data")
            # FIX: the completion log was unreachable after `return`;
            # capture the result so it can be logged before returning.
            result = storeAggregatedData()
            logger.info("Storage finished")
            return result
        finally:
            release_lock()
        # (dead `return` after try/finally removed)

    # FIX: the %s placeholder had no argument; pass the lock id lazily.
    logger.debug(
        "Aggregate data %s is already being calculated by another worker",
        lock_id)
Example #4
0
def storecalculatedData():
    """Store aggregated data, guarded by an atomic cache lock.

    ``cache.add`` only succeeds if the key is absent, which makes it an
    atomic lock: only one worker performs the aggregation at a time.
    Returns the result of ``storeAggregatedData`` on success, ``None``
    when another worker holds the lock.
    """
    # Implementing locking of tasks.
    # FIX: hashlib.md5 requires bytes in Python 3, so encode the text first.
    text = 'creating aggregated data'
    task_hexdigest = md5(text.encode('utf-8')).hexdigest()
    lock_id = '{0}-lock-{1}'.format('storecalculatedData', task_hexdigest)

    # cache.add fails if the key already exists
    acquire_lock = lambda: cache.add(lock_id, 'true', LOCK_EXPIRE)

    # memcache delete is very slow, but we have to use it to take
    # advantage of using add() for atomic locking
    release_lock = lambda: cache.delete(lock_id)

    if acquire_lock():
        try:
            logger.info("Start storage of data")
            # FIX: the completion log sat unreachable after `return`;
            # capture the result first so both can happen.
            result = storeAggregatedData()
            logger.info("Storage finished")
            return result
        finally:
            release_lock()
        # (dead `return` after try/finally removed)

    # FIX: supply the missing argument for the %s placeholder, lazily.
    logger.debug(
        'Aggregate data %s is already being calculated by another worker',
        lock_id)