Example #1
    def detailAmazon(asin, responseGroup):

        memcacheKey = 'detailAmazon_' + asin + '_' + responseGroup

        # return memcached detail if available
        detail = memcache.get(memcacheKey)

        if detail is not None:
            logging.info('')
            logging.info('-------------- detailAmazon HIT --------------')
            logging.info(memcacheKey)
            logging.info('')
            logging.info('')

            return detail

        # get detail from source
        else:
            amazon = bottlenose.Amazon(Keys.getKey(AMAZON_ACCESS_KEY), Keys.getKey(AMAZON_SECRET_KEY), AMAZON_ASSOCIATE_TAG)
            response = amazon.ItemLookup(ItemId=asin, IdType='ASIN', ResponseGroup=responseGroup)

            logging.info('')
            logging.info('-------------- detailAmazon MISS --------------')
            logging.info(memcacheKey)
            logging.info('')
            logging.info('')

            # cache amazon detail for 1 day
            if not memcache.add(memcacheKey, response, 86400):
                logging.error('detailAmazon: Memcache set failed')
                logging.error(memcacheKey)

            return response
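This get-or-add memcache idiom recurs throughout these examples. A minimal generic sketch of the same pattern (the cached helper and its fetch callable are hypothetical, not part of the original code):

    def cached(memcacheKey, fetch, ttl=86400):
        # return memcached value if available
        value = memcache.get(memcacheKey)
        if value is not None:
            return value

        # compute on a miss, then cache for ttl seconds
        value = fetch()
        if not memcache.add(memcacheKey, value, ttl):
            logging.error('cached: Memcache add failed')
            logging.error(memcacheKey)
        return value

detailAmazon could then reduce to a one-line lookup, e.g. cached(memcacheKey, lambda: amazon.ItemLookup(ItemId=asin, IdType='ASIN', ResponseGroup=responseGroup)).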
Example #2
    def writeToS3(file, fileType):

        # generate filename
        filename = str(uuid.uuid4()) + '.' + fileType

        # get s3 bucket
        s3conn = S3Connection(Keys.get_key(S3_ACCESS_KEY), Keys.get_key(S3_SECRET_KEY), is_secure=False)
        bucket = s3conn.get_bucket(ASSET_BUCKET, validate=False)

        # create new S3 key
        k = bucket.new_key(filename)

        # set mimetype, with a fallback so mimeType is never unbound
        if (fileType == 'jpg'):
            mimeType = 'image/jpeg'
        elif (fileType == 'png'):
            mimeType = 'image/png'
        elif (fileType == 'gif'):
            mimeType = 'image/gif'
        elif (fileType == 'css'):
            mimeType = 'text/css'
        elif (fileType == 'js'):
            mimeType = 'application/javascript'
        else:
            mimeType = 'application/octet-stream'

        k.content_type = mimeType

        # write file from response string and set public-read permission
        k.set_contents_from_string(file, headers=AWS_HEADERS, replace=True, policy=AWS_ACL)
        k.set_acl('public-read')

        url = S3_URL + ASSET_BUCKET + '/' + filename

        return url
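The if/elif chain above maps file extensions to MIME types; a dict lookup is an equivalent, shorter sketch that also makes the fallback explicit (the MIME_TYPES name is an assumption, not from the original):

    MIME_TYPES = {
        'jpg': 'image/jpeg',
        'png': 'image/png',
        'gif': 'image/gif',
        'css': 'text/css',
        'js': 'application/javascript',
    }

    # default for unrecognized extensions
    mimeType = MIME_TYPES.get(fileType, 'application/octet-stream')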
Example #3
def setAPIKey(request):

    if all(k in request.GET for k in ('key_name', 'key_value')):

        # get user parameters
        keyName = request.GET.get('key_name')
        keyValue = request.GET.get('key_value')

        # set key only when both parameters are present
        Keys.setKey(keyName, keyValue)
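As originally written, Keys.setKey sat outside the if block and raised a NameError whenever a parameter was missing; the version above guards it. A more defensive sketch of the same view reports missing parameters explicitly (the error response is an assumption; old-style Django HttpResponse is used to match Example #6):

    def setAPIKey(request):

        # require both parameters
        if not all(k in request.GET for k in ('key_name', 'key_value')):
            return HttpResponse('missing key_name or key_value', status=400)

        # set key
        Keys.setKey(request.GET.get('key_name'), request.GET.get('key_value'))
        return HttpResponse('done', mimetype='text/html')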
Example #4
def parseGTReleasedList(response):

    results = []

    # s3 connection
    s3conn = S3Connection(Keys.getKey(S3_ACCESS_KEY), Keys.getKey(S3_SECRET_KEY), is_secure=False)
    # get s3 bucket
    s3bucket = s3conn.get_bucket(RELEASED_LIST_BUCKET, validate=False)

    html = etree.HTML(response)

    rowSel = CSSSelector('.week li')
    urlSel = CSSSelector('h3 a')
    imageSel = CSSSelector('h3 a img')
    dateSel = CSSSelector('p span')
    platformsSel = CSSSelector('.platforms span')

    for row in rowSel(html):

        try:
            urlElement = urlSel(row)
            imageElement = imageSel(row)
            dateElement = dateSel(row)
            platformsElement = platformsSel(row)

            url = urlElement[0].get('href').strip()
            imageURL = imageElement[0].get('src').strip()
            name = imageElement[0].get('alt').strip()
            date = dateElement[0].text.strip()
            platforms = platformsElement[0].text.strip()

            # update imageURL width
            imageURL = imageURL.split('?')[0] + '?width=120'

            if name:

                # get filename and extension
                filename = imageURL.split('/')[-1].split('?')[0]
                extension = filename.split('.')[-1].split('?')[0]

                # copy GT image to S3 bucket
                image = copyImageToS3(s3conn, s3bucket, RELEASED_LIST_BUCKET, imageURL, filename, extension)
                listObj = {'name': name, 'GTPage': url, 'calendarDate': date, 'releaseDate': date, 'mediumImage': image, 'platforms': platforms}

                # append to output list
                results.append(listObj)

        except IndexError:
            logging.error('parseGTReleasedList: IndexError')

    # return results
    return results
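Each CSSSelector call returns a list of matching elements, which is why the loop catches IndexError when a row lacks an expected child. A small hypothetical helper can make that intent explicit:

    def firstText(selector, row, default=''):
        # stripped text of the first match, or a default when absent
        elements = selector(row)
        return elements[0].text.strip() if elements else default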
Example #5
def parseIGNReviewedList(response):

    results = []
    titleIndex = {}

    # s3 connection
    s3conn = S3Connection(Keys.getKey(S3_ACCESS_KEY), Keys.getKey(S3_SECRET_KEY), is_secure=False)
    # get s3 bucket
    s3bucket = s3conn.get_bucket(REVIEWED_LIST_BUCKET, validate=False)

    html = etree.HTML(response)

    rowSel = CSSSelector('.itemList-item')
    nameSel = CSSSelector('.item-title a')
    imageSel = CSSSelector('.grid_3.alpha img')
    dateSel = CSSSelector('.grid_3:nth-child(3) div')
    platformSel = CSSSelector('.item-platform')

    for row in rowSel(html):

        try:
            nameElement = nameSel(row)
            imageElement = imageSel(row)
            dateElement = dateSel(row)
            platformElement = platformSel(row)

            name = nameElement[0].text.strip()
            url = nameElement[0].get('href').strip()
            imageURL = imageElement[0].get('src').strip()
            date = dateElement[0].text.strip()
            displayDate = dateElement[0].text.strip()
            platform = platformElement[0].text.strip()

            # check if title name already added to list
            if (name not in titleIndex):

                # copy IGN image to S3 bucket
                # get filename and extension
                filename = imageURL.split('/')[-1]
                extension = filename.split('.')[-1]
                image = copyImageToS3(s3conn, s3bucket, REVIEWED_LIST_BUCKET, imageURL, filename, extension)

                listObj = {'name': name, 'IGNPage': url, 'calendarDate': displayDate, 'platforms': platform, 'releaseDate': date, 'mediumImage': image}
                results.append(listObj)

                # add to title index
                titleIndex[name] = True

        except IndexError:
            logging.error('parseIGNReviewedList: IndexError')

    # return results
    return results
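titleIndex is only used for membership tests, so its values are never read. In idiomatic Python a set does the same deduplication job; a sketch of the substitution:

    titleIndex = set()

    # later, inside the row loop
    if name not in titleIndex:
        titleIndex.add(name)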
Example #6
def copyAssetsToS3(s3conn=None):

    # create an s3 connection unless one was supplied
    # (the original overwrote the s3conn argument unconditionally)
    if s3conn is None:
        s3conn = S3Connection(Keys.getKey(S3_ACCESS_KEY), Keys.getKey(S3_SECRET_KEY), is_secure=False)

    # assets
    assetList = [

        # sprite
        'http://static.gamedex.net/images/sprites.png',

        # images
        'http://static.gamedex.net/images/bg_tile.png',
        'http://static.gamedex.net/images/bg_tile_light.png',
        'http://static.gamedex.net/images/bg_tile_light2.png',
        'http://static.gamedex.net/images/chosen-sprite.png',
        'http://static.gamedex.net/images/glyphicons-halflings-white.png',
        'http://static.gamedex.net/images/glyphicons-halflings.png',
        'http://static.gamedex.net/images/guide1.png',
        'http://static.gamedex.net/images/guide2.png',
        'http://static.gamedex.net/images/guide3.png',
        'http://static.gamedex.net/images/header_tile.png',
        'http://static.gamedex.net/images/jquery.ui.stars.gif',
        'http://static.gamedex.net/images/loading_bar.gif',
        'http://static.gamedex.net/images/logo.png',
        'http://static.gamedex.net/images/logo_small.png',
        'http://static.gamedex.net/images/no_selection_placeholder.png',
        'http://static.gamedex.net/images/select2.png',
        'http://static.gamedex.net/images/site_description.png',
        'http://static.gamedex.net/images/site_features.png',
        'http://static.gamedex.net/images/site_features_detail.png',
        'http://static.gamedex.net/images/title_bar_center.png',
        'http://static.gamedex.net/images/title_bar_dark_center.png',
        'http://static.gamedex.net/images/title_bar_dark_left.png',
        'http://static.gamedex.net/images/title_bar_dark_right.png',
        'http://static.gamedex.net/images/title_bar_left.png',
        'http://static.gamedex.net/images/title_bar_right.png',
        'http://static.gamedex.net/images/video-js.png',

        # css
        'http://static.gamedex.net/css/bootstrap.css',
        'http://static.gamedex.net/css/gamedex.css',

        # scripts
        'http://static.gamedex.net/dist/scripts.min.js',

    ]

    # iterate urls and copy to s3
    for url in assetList:
        copyUrlToS3(url, s3conn)

    return HttpResponse('done', mimetype='text/html')
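copyUrlToS3 itself is not among these examples. A plausible sketch, assuming it pairs urlfetch with the writeToS3 helper from Examples #2 and #9 (the name comes from the code above, but the body shown here is an assumption):

    def copyUrlToS3(url, s3conn):
        # derive extension for writeToS3's mimetype mapping
        fileType = url.split('.')[-1]

        # fetch asset and re-upload via writeToS3
        response = urlfetch.fetch(url)
        if response.status_code == 200:
            return writeToS3(response.content, fileType)

        logging.error('copyUrlToS3: fetch failed')
        logging.error(url)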
Example #7
def parseUpcomingList(response):

    results = []

    # s3 connection
    s3conn = S3Connection(Keys.getKey(S3_ACCESS_KEY), Keys.getKey(S3_SECRET_KEY), is_secure=False)
    # get s3 bucket
    s3bucket = s3conn.get_bucket(UPCOMING_LIST_BUCKET, validate=False)

    html = etree.HTML(response)

    tableSel = CSSSelector('#table-section-index .game-row')
    nameSel = CSSSelector('.title-game a')
    imageSel = CSSSelector('.box-art img')
    dateSel = CSSSelector('td:nth-child(3)')

    for row in tableSel(html):

        try:
            nameElement = nameSel(row)
            imageElement = imageSel(row)
            dateElement = dateSel(row)

            name = nameElement[0].text.strip()
            url = nameElement[0].get('href').strip()
            imageURL = imageElement[0].get('src').strip()
            date = dateElement[0].text.strip()
            displayDate = dateElement[0].text.strip()

            # copy IGN image to S3 bucket
            # get filename and extension
            filename = imageURL.split('/')[-1]
            extension = filename.split('.')[-1]
            image = copyImageToS3(s3conn, s3bucket, UPCOMING_LIST_BUCKET, imageURL, filename, extension)

            # detect Dec 31, 20XX - signifies unknown date > change to TBA 20XX
            dateParts = date.split(',')
            if (dateParts[0] == 'Dec 31'):
                displayDate = 'TBA' + dateParts[1]

            listObj = {'name': name, 'IGNPage': url, 'calendarDate': displayDate, 'releaseDate': date, 'mediumImage': image}
            results.append(listObj)

        except IndexError:
            logging.error('parseUpcomingList: IndexError')

    # return results
    return results
Example #8
    def searchAmazon(keywords, browseNode, responseGroup, searchIndex, page):

        amazon = bottlenose.Amazon(Keys.getKey(AMAZON_ACCESS_KEY), Keys.getKey(AMAZON_SECRET_KEY), AMAZON_ASSOCIATE_TAG)

        # memcache key
        memcacheKey = 'searchAmazon_' + keywords + '_' + browseNode + '_' + responseGroup + '_' + searchIndex + '_' + page

        # return memcached search if available
        search = memcache.get(memcacheKey)

        if search is not None:
            logging.info('')
            logging.info('-------------- searchAmazon CACHE HIT --------------')
            logging.info(memcacheKey)
            logging.info('')
            logging.info('')

            return search

        else:
            # Availability='Available', Condition='All', MerchantId='Amazon', MinimumPrice='800', MaximumPrice='13500'
            if browseNode == '0':
                response = amazon.ItemSearch(SearchIndex=searchIndex, Title=keywords, ResponseGroup=responseGroup, ItemPage=page, Sort='salesrank')
            else:
                response = amazon.ItemSearch(SearchIndex=searchIndex, Title=keywords, ResponseGroup=responseGroup, ItemPage=page, Sort='salesrank', MinimumPrice='800', MaximumPrice='13500', BrowseNode=browseNode)

            logging.info('')
            logging.info('-------------- searchAmazon MISS --------------')
            logging.info(memcacheKey)
            logging.info('')
            logging.info('')

            # cache amazon search for 1 day
            if not memcache.add(memcacheKey, response, 86400):
                logging.error('searchAmazon: Memcache set failed')
                logging.error(memcacheKey)

            return response
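The miss branch originally returned search, which is always None there; returning response fixes it. A hypothetical call against the branch that skips the BrowseNode filter (all argument values are illustrative; 'VideoGames' and 'Medium' are standard Product Advertising API SearchIndex and ResponseGroup values):

    # '0' skips the BrowseNode filter, per the branch above
    results = searchAmazon('zelda', '0', 'Medium', 'VideoGames', '1')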
Example #9
    def writeToS3(file, fileType):

        # generate filename
        filename = str(uuid.uuid4()) + '.' + fileType

        # get s3 bucket
        s3conn = S3Connection(Keys.get_key(S3_ACCESS_KEY),
                              Keys.get_key(S3_SECRET_KEY),
                              is_secure=False)
        bucket = s3conn.get_bucket(ASSET_BUCKET, validate=False)

        # create new S3 key
        k = bucket.new_key(filename)

        # set mimetype, with a fallback so mimeType is never unbound
        if (fileType == 'jpg'):
            mimeType = 'image/jpeg'
        elif (fileType == 'png'):
            mimeType = 'image/png'
        elif (fileType == 'gif'):
            mimeType = 'image/gif'
        elif (fileType == 'css'):
            mimeType = 'text/css'
        elif (fileType == 'js'):
            mimeType = 'application/javascript'
        else:
            mimeType = 'application/octet-stream'

        k.content_type = mimeType

        # write file from response string and set public-read permission
        k.set_contents_from_string(file,
                                   headers=AWS_HEADERS,
                                   replace=True,
                                   policy=AWS_ACL)
        k.set_acl('public-read')

        url = S3_URL + ASSET_BUCKET + '/' + filename

        return url
Example #10
def giantBombAPICall(resource, queryParameters):

    parameters = urllib.urlencode(queryParameters)
    memcacheKey = 'giantBombAPICall_' + resource + '_' + parameters

    # return memcached detail if available
    giantbombData = memcache.get(memcacheKey)

    if giantbombData is not None:

        logging.info('')
        logging.info('-------------- giantBombAPICall HIT --------------')
        logging.info(memcacheKey)
        logging.info('')
        logging.info('')

        return giantbombData

    else:
        logging.info('')
        logging.info('-------------- giantBombAPICall MISS --------------')
        logging.info(memcacheKey)
        logging.info('')
        logging.info('')

        # http://api.giantbomb.com/search/?api_key=xxxxxxxxxxxxxxxxxxx&format=xml&query=killzone
        api_string = 'http://www.giantbomb.com/api/' + resource + '/?api_key=' + Keys.getKey(GIANTBOMB_API_KEY) + '&format=json&' + urllib.urlencode(queryParameters)

        # fetch - request gzip explicitly (urlfetch returns the raw
        # compressed body when Accept-Encoding is set by hand)
        headers = {'Accept-Encoding': 'gzip'}
        response = urlfetch.fetch(api_string, method='GET', headers=headers, follow_redirects=False, deadline=30)

        # decompress only when the response is actually gzipped
        content = response.content
        if response.headers.get('Content-Encoding') == 'gzip':
            content = gzip.GzipFile(fileobj=StringIO.StringIO(content)).read()

        if response.status_code == 200:

            # cache giantbomb detail for 1 day
            if not memcache.add(memcacheKey, content, 86400):
                logging.error('giantBombAPICall: Memcache set failed')
                logging.error(memcacheKey)

            return content

        else:
            return False
Example #11
def giantBombAPICall(resource, queryParameters):

    parameters = urllib.urlencode(queryParameters)
    memcacheKey = "giantBombAPICall_" + resource + "_" + parameters

    # return memcached detail if available
    giantbombData = memcache.get(memcacheKey)

    if giantbombData is not None:

        logging.info("")
        logging.info("-------------- giantBombAPICall HIT --------------")
        logging.info(memcacheKey)
        logging.info("")
        logging.info("")

        return giantbombData

    else:
        logging.info("")
        logging.info("-------------- giantBombAPICall MISS --------------")
        logging.info(memcacheKey)
        logging.info("")
        logging.info("")

        # http://api.giantbomb.com/search/?api_key=xxxxxxxxxxxxxxxxxxx&format=xml&query=killzone
        api_string = (
            "http://api.giantbomb.com/"
            + resource
            + "/?api_key="
            + Keys.getKey(GIANTBOMB_API_KEY)
            + "&format=json&"
            + urllib.urlencode(queryParameters)
        )
        req = urllib2.Request(api_string, headers={"Accept-Encoding": "gzip"})

        opener = urllib2.build_opener()
        f = opener.open(req)

        # urllib2 does not decompress gzip automatically
        data = f.read()
        if f.info().get("Content-Encoding") == "gzip":
            data = gzip.GzipFile(fileobj=StringIO.StringIO(data)).read()
        jsonResponse = json.loads(data)

        # cache giantbomb detail for 1 day
        if not memcache.add(memcacheKey, jsonResponse, 86400):
            logging.error("detailGiantBomb: Memcache set failed")
            logging.error(memcacheKey)

        return jsonResponse
Example #12
def parseIGNUpcomingList(response):

    results = []
    titleIndex = {}

    # s3 connection
    s3conn = S3Connection(Keys.getKey(S3_ACCESS_KEY), Keys.getKey(S3_SECRET_KEY), is_secure=False)
    # get s3 bucket
    s3bucket = s3conn.get_bucket(UPCOMING_LIST_BUCKET, validate=False)

    html = etree.HTML(response)

    rowSel = CSSSelector('.itemList-item')
    nameSel = CSSSelector('.item-title a')
    imageSel = CSSSelector('.grid_3.alpha img')
    dateSel = CSSSelector('.releaseDate')
    platformSel = CSSSelector('.item-platform')

    for row in rowSel(html):

        try:
            nameElement = nameSel(row)
            imageElement = imageSel(row)
            dateElement = dateSel(row)
            platformElement = platformSel(row)

            name = nameElement[0].text.strip()
            url = nameElement[0].get('href').strip()
            imageURL = imageElement[0].get('src').strip()
            date = dateElement[0].text.strip()
            displayDate = dateElement[0].text.strip()
            platform = platformElement[0].text.strip()

            # check if title name already added to list
            if (name not in titleIndex):

                # copy IGN image to S3 bucket
                # get filename and extension
                filename = imageURL.split('/')[-1]
                extension = filename.split('.')[-1]
                image = copyImageToS3(s3conn, s3bucket, UPCOMING_LIST_BUCKET, imageURL, filename, extension)

                # detect TBA 20XX - signifies unknown date > change to real date: Dec 31, 20XX
                dateParts = date.split(' ')
                if (dateParts[0] == 'TBA'):
                    date = 'Dec 31, ' + dateParts[1]

                # detect Q1
                elif (dateParts[0] == 'Q1'):
                    date = 'Mar 31, ' + dateParts[1]
                # detect Q2
                elif (dateParts[0] == 'Q2'):
                    date = 'Jun 30, ' + dateParts[1]
                # detect Q3
                elif (dateParts[0] == 'Q3'):
                    date = 'Sep 30, ' + dateParts[1]
                # detect Q4
                elif (dateParts[0] == 'Q4'):
                    date = 'Dec 31, ' + dateParts[1]

                listObj = {'name': name, 'IGNPage': url, 'calendarDate': displayDate, 'platforms': platform, 'releaseDate': date, 'mediumImage': image}
                results.append(listObj)

                # add to title index
                titleIndex[name] = True

        except IndexError:
            logging.error('parseIGNUpcomingList: IndexError')

    # return results
    return results
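The quarter-to-date mapping above (June and September have 30 days, hence the corrected Jun 30 and Sep 30) is the inverse of the Dec 31 check in Example #7, and lends itself to a table-driven sketch (the QUARTER_END name is an assumption):

    QUARTER_END = {
        'TBA': 'Dec 31',
        'Q1': 'Mar 31',
        'Q2': 'Jun 30',
        'Q3': 'Sep 30',
        'Q4': 'Dec 31',
    }

    # dateParts[0] is 'TBA' or 'Q1'..'Q4', dateParts[1] is the year
    if dateParts[0] in QUARTER_END:
        date = QUARTER_END[dateParts[0]] + ', ' + dateParts[1]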