Example 1
def serverOnline(request):
    """Handle a server online/offline notification.

    Online notifications carry no new information (the server is assumed
    to already be online) and are ignored.  Offline notifications mark
    the server offline via ``setOffline`` and return a JSON status body.
    """
    logging.info("Handling request: %s", request.REQUEST.items())

    isOnline = util.requestStringToBool(request, "online")
    if isOnline:
        # Nothing to do -- the server should already be recorded as online.
        logging.info("Ignoring online notification")
        return HttpResponse()

    address = request.REQUEST.get("serverAddress")
    result = {"message": "Update succeeded"}
    setOffline(request, address, result)
    return HttpResponse(json_encode(result))
Example 2
def instanceOnline(request):
    """Record an instance coming online or going offline.

    Derives a 64-bit instance id from the request (hashing non-integer
    ids with MD5), creates the instance from form data on first sight or
    refreshes its fields otherwise, then bulk-writes all pending entities
    and returns a JSON status response.
    """
    # logging.info("Handling request: %s", request.REQUEST.items())
    try:
        rawId = request.REQUEST.get("instanceId")
        if isinstance(rawId, int):
            instanceId = int(rawId)
        else:
            # Hash non-integer ids down to a positive 63-bit integer.
            instanceId = int(hashlib.md5(rawId).hexdigest(), 16) % 0x7FFFFFFFFFFFFFFF

        online = util.requestStringToBool(request, "online")
        baseUri = request.REQUEST.get("baseUri")
        serverAddress = request.REQUEST.get("serverAddress")

        instance = models.Instance.gql("where instanceId = :1", instanceId).get()
        if instance is None:
            # First sighting of this instance -- build it from the form data.
            form = InstanceForm(request.REQUEST)
            if not form.is_valid():
                logging.info("ERROR: Form invalid for online instances:\n%s" % repr(form.errors))
                return HttpResponseBadRequest("ERROR: Form invalid for instances:\n%s" % repr(form.errors))
            instance = form.save(commit=False)
        else:
            # Known instance -- refresh its mutable fields.
            instance.baseUri = baseUri
            instance.online = online
            instance.serverAddress = serverAddress

        # Collect every entity to write so a single bulk put suffices.
        pendingData = [instance]

        jsonResponse = {"message": "Update succeeded"}
        updateOnlineInstance(request, instanceId, online, serverAddress, pendingData, jsonResponse)
        db.put(pendingData)

        return HttpResponse(json_encode(jsonResponse))
    except DeadlineExceededError:
        logging.error("Deadline exceeded!!")
        logging.exception("DeadlineExceededError")
        return HttpResponse("DeadlineExceededError")
    except MemoryError:
        logging.error("MemoryError!!")
        logging.exception("MemoryError")
        return HttpResponse("MemoryError")
    except AssertionError:
        logging.error("AssertionError!!")
        logging.exception("AssertionError")
        return HttpResponse("AssertionError")
Example 3
def search(request):
    """Search MetaFile entities by keyword and media type.

    Reads paging parameters (``startPage``, ``itemsPerPage``), keyword
    text, and per-media-type boolean flags from the request, builds a GQL
    query restricted to online, non-taken-down files ordered by online
    instance count, and returns the pickled result page as the response.
    """
    logging.info('Handling search request: %s', request.REQUEST.items())

    keywords = request.REQUEST.get('keywords')
    startPage = int(request.REQUEST.get('startPage'))
    itemsPerPage = int(request.REQUEST.get('itemsPerPage'))

    os = request.REQUEST.get('os')
    applications = util.requestStringToBool(request, 'applications')
    audio = util.requestStringToBool(request, 'audio')
    docs = util.requestStringToBool(request, 'documents')
    images = util.requestStringToBool(request, 'images')
    video = util.requestStringToBool(request, 'video')

    logging.info('About to query')

    limit = itemsPerPage
    # startPage was already converted to int above -- no second conversion.
    offset = startPage * limit

    mediaTypes = toMediaTypes(os, applications, audio, docs, images, video)
    logging.info('Created media types: %s', mediaTypes)

    query = 'where tags in :tags and numOnlineInstances > 0 and takenDown = false'
    addTypes = len(mediaTypes) > 0
    if addTypes:
        query += " and mediaType in :mediaTypes"
    query += ' order by numOnlineInstances desc'
    logging.info('Created query: %s', query)

    if addTypes:
        logging.info('Querying with types: %s', mediaTypes)
        gqlQuery = models.MetaFile.gql(query, tags=keywords.split(), mediaTypes=mediaTypes)
    else:
        logging.info('Querying with no types')
        gqlQuery = models.MetaFile.gql(query, tags=keywords.split())

    logging.info('Querying for limit, offset: %s, %s', limit, offset)
    # Both count() and fetch() return a maximum of 1000 results.
    metaFiles = gqlQuery.fetch(limit, offset=offset)

    # TODO: count() is broken with ListProperty, so report the size of the
    # returned page as the total result count for now.
    # See: http://code.google.com/p/googleappengine/issues/detail?id=586
    totalResults = len(metaFiles)
    logging.info('Got total results: %s', totalResults)

    if metaFiles is None:
        # Defensive fallback: fetch() is documented to return a list, so
        # this path should be unreachable.
        logging.info('No results...')
        return HttpResponse(searchPickler.pickle([], 0, 0, 0, keywords))

    pickled = searchPickler.pickle(metaFiles, itemsPerPage, startPage,
                                   totalResults, keywords)
    return HttpResponse(pickled)