Beispiel #1
0
def _post_thumbnails(blogitem):
    """Return a list of thumbnail descriptions for every file attached
    to *blogitem*, ordered by the date each file was added.

    Each entry carries the full-size URL/size plus "small", "big" and
    "bigger" variants with url/alt/width/height.
    """
    attachments = BlogFile.objects.filter(blogitem=blogitem).order_by("add_date")

    sizes = (
        ("small", "120x120"),
        ("big", "230x230"),
        ("bigger", "370x370"),  # iPhone 6 is 375
    )

    thumbnails = []
    for attachment in attachments:
        # The database row can outlive the file on disk; skip those.
        if not os.path.isfile(attachment.file.path):
            continue
        full = thumbnail(attachment.file, "2000x2000", upscale=False, quality=100)
        entry = {"full_url": full.url, "full_size": full.size}
        for label, geometry in sizes:
            scaled = thumbnail(attachment.file, geometry, quality=81)
            entry[label] = {
                "url": scaled.url,
                "alt": getattr(attachment, "title", blogitem.title),
                "width": scaled.width,
                "height": scaled.height,
            }
        thumbnails.append(entry)
    return thumbnails
Beispiel #2
0
def _post_thumbnails(blogitem):
    """Build thumbnail metadata for each image file attached to *blogitem*.

    Files are processed in the order they were added; rows whose file is
    missing from disk are skipped.
    """
    queryset = BlogFile.objects.filter(blogitem=blogitem).order_by("add_date")

    result = []

    for record in queryset:
        if not os.path.isfile(record.file.path):
            # Row exists but the underlying file is gone; nothing to show.
            continue
        original = thumbnail(
            record.file, "1000x1000", upscale=False, quality=100
        )
        info = {"full_url": original.url, "full_size": original.size}
        variants = (
            ("small", "120x120"),
            ("big", "230x230"),
            ("bigger", "370x370"),  # iPhone 6 is 375
        )
        for name, size in variants:
            scaled = thumbnail(record.file, size, quality=81)
            info[name] = {
                "url": scaled.url,
                "alt": getattr(record, "title", blogitem.title),
                "width": scaled.width,
                "height": scaled.height,
            }
        result.append(info)
    return result
Beispiel #3
0
def podcast(request, id, slug=None):
    """Render the detail page for a single podcast.

    Also schedules background (Celery) repair tasks as side effects:
    re-downloading a missing or corrupt image, fetching the iTunes
    lookup, and downloading episodes when none exist yet.
    """
    podcast = get_object_or_404(Podcast, id=id)
    context = {}
    context['podcast'] = podcast
    context['page_title'] = podcast.name
    episodes = Episode.objects.filter(
        podcast=podcast
    ).order_by('-published')
    if podcast.image and is_html_document(podcast.image.path):
        # An HTML error page was saved where an image should be; discard
        # it and re-download in the background.
        # Bug fix: was a Python 2 `print` statement (syntax error on Py3).
        print("Found a podcast.image that wasn't an image")
        podcast.image = None
        podcast.save()
        redownload_podcast_image.delay(podcast.id)
    elif not podcast.image and podcast.image_url:
        redownload_podcast_image.delay(podcast.id)

    if podcast.itunes_lookup is None:
        fetch_itunes_lookup.delay(podcast.id)

    if not episodes.exists():
        download_episodes_task.delay(podcast.id)

    context['episodes'] = episodes
    try:
        context['thumb'] = thumbnail(podcast.image, '300x300')
    except IOError:
        # image is so busted it can't be turned into a thumbnail
        podcast.image = None
        podcast.save()
        context['thumb'] = None
        redownload_podcast_image.delay(podcast.id)
    return render(request, 'podcasttime/podcast.html', context)
Beispiel #4
0
def redownload_podcast_image(podcast_id):
    """Re-fetch the image for the given podcast and validate it.

    A download can "succeed" (right content-type, non-zero size) yet not
    be a real image; PIL raises IOError when thumbnailing such files, in
    which case the image is discarded. Any other failure is recorded as
    a PodcastError and re-raised.
    """
    pod = Podcast.objects.get(id=podcast_id)
    try:
        pod.download_image()
        # If the download worked we must be able to build a thumbnail
        # from it — that is the real validity check.
        assert pod.image
        try:
            thumbnail(pod.image, "300x300")
        except IOError:
            print("Not a valid image if thumbnails can't be made")
            pod.image = None
            pod.save()
        else:
            print("Worked!")
    except Exception:
        print("Failed!")
        PodcastError.create(pod)
        raise
Beispiel #5
0
def redownload_podcast_image(podcast_id):
    """Download a fresh image for podcast ``podcast_id`` and sanity-check it.

    Images that cannot be thumbnailed (PIL IOError) are cleared; other
    failures create a PodcastError record and propagate.
    """
    target = Podcast.objects.get(id=podcast_id)
    try:
        target.download_image()
        # Downloads with the right content-type and a non-zero size can
        # still be garbage; making a thumbnail is the definitive test.
        assert target.image
        try:
            thumbnail(target.image, "300x300")
            print("Worked!")
        except IOError:
            print("Not a valid image if thumbnails can't be made")
            target.image = None
            target.save()
    except Exception:
        print("Failed!")
        PodcastError.create(target)
        raise
Beispiel #6
0
def post_thumbnails(request, oid):
    """Return JSON describing thumbnails (and delete URLs) for every file
    attached to the blog item identified by *oid*."""
    blogitem = get_object_or_404(BlogItem, oid=oid)
    attachments = BlogFile.objects.filter(blogitem=blogitem).order_by('add_date')

    images = []

    for attachment in attachments:
        big = thumbnail(attachment.file, '1000x1000', upscale=False, quality=100)
        delete_url = reverse('delete_post_thumbnail', args=(attachment.pk,))
        data = {
            'full_url': big.url,
            'delete_url': delete_url,
        }
        for label, geometry in (('small', '120x120'), ('big', '230x230')):
            scaled = thumbnail(attachment.file, geometry, quality=81)
            data[label] = {
                'url': scaled.url,
                'alt': getattr(attachment, 'title', blogitem.title),
                'width': scaled.width,
                'height': scaled.height,
            }
        images.append(data)
    return http.JsonResponse({'images': images})
Beispiel #7
0
def picks_data(request):
    """Return a JSON page of picks, newest first, with their podcasts.

    The ``page`` GET parameter selects the page; non-integer values fall
    back to page 1 and out-of-range values to the last page.
    """
    context = {}
    qs = Picked.objects.all().order_by('-modified')

    paginator = Paginator(qs, 5)  # XXX make this something bigger like 15
    page = request.GET.get('page')
    try:
        paged = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        paged = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        paged = paginator.page(paginator.num_pages)

    items = []
    # XXX ALL of this needs to be optimized
    for pick in paged:
        podcasts = []
        for podcast in pick.podcasts.all().order_by('-times_picked'):
            podcasts.append({
                'name': podcast.name,
                # Bug fix: was the `cond and a or b` idiom, which yields
                # None whenever the thumbnail URL is falsy; use a real
                # conditional expression instead.
                'image': (
                    thumbnail(podcast.image, '300x300').url
                    if podcast.image else None
                ),
                'times_picked': podcast.times_picked,
                'id': podcast.id,
                'slug': podcast.get_or_create_slug(),
            })
        items.append({'podcasts': podcasts, 'id': pick.id})
    context['items'] = items

    pagination = {
        'has_previous': paged.has_previous(),
        'has_next': paged.has_next(),
        'number': paged.number,
        'num_pages': paginator.num_pages,
    }
    if pagination['has_previous']:
        pagination['previous_page_number'] = paged.previous_page_number()
    if pagination['has_next']:
        pagination['next_page_number'] = paged.next_page_number()
    context['pagination'] = pagination

    return http.JsonResponse(context)
Beispiel #8
0
def _render_blog_post(request, oid, screenshot_mode=False):
    """Render a single blog post page.

    Looks the post up by ``oid`` (trailing slash stripped, then a
    case-insensitive retry), 404s on unpublished-in-the-future posts,
    and builds a template context with neighbours, screenshot/open-graph
    images, and the (possibly truncated) comment tree.
    """
    if oid.endswith("/"):
        oid = oid[:-1]
    try:
        post = BlogItem.objects.get(oid=oid)
    except BlogItem.DoesNotExist:
        # Retry case-insensitively before giving up with a 404.
        try:
            post = BlogItem.objects.get(oid__iexact=oid)
        except BlogItem.DoesNotExist:
            if oid == "add":
                return redirect(reverse("add_post"))
            raise http.Http404(oid)

    # If you try to view a blog post that is beyond one day in the
    # future it should raise a 404 error.
    future = timezone.now() + datetime.timedelta(days=1)
    if post.pub_date > future:
        raise http.Http404("not published yet")

    # Reasons for not being here
    if request.method == "HEAD":
        return http.HttpResponse("")
    elif request.method == "GET" and (
        request.GET.get("replypath") or request.GET.get("show-comments")
    ):
        return http.HttpResponsePermanentRedirect(request.path)

    # attach a field called `_absolute_url` which depends on the request
    base_url = "https://" if request.is_secure() else "http://"
    base_url += RequestSite(request).domain
    post._absolute_url = base_url + reverse("blog_post", args=(post.oid,))

    context = {"post": post, "screenshot_mode": screenshot_mode}
    if request.path != "/plog/blogitem-040601-1":
        try:
            context["previous_post"] = post.get_previous_by_pub_date()
        except BlogItem.DoesNotExist:
            context["previous_post"] = None
        try:
            context["next_post"] = post.get_next_by_pub_date(pub_date__lt=utc_now())
        except BlogItem.DoesNotExist:
            context["next_post"] = None

    if post.screenshot_image:
        context["screenshot_image"] = thumbnail(
            post.screenshot_image, "1280x1000", quality=90
        ).url
        if context["screenshot_image"].startswith("//"):
            # facebook is not going to like that
            context["screenshot_image"] = "https:" + context["screenshot_image"]
    else:
        context["screenshot_image"] = None

    # Cheat a little and make the open graph image absolute if need be.
    if post.open_graph_image and "://" not in post.open_graph_image:
        post.open_graph_image = request.build_absolute_uri(post.open_graph_image)

    comments = (
        BlogComment.objects.filter(blogitem=post, approved=True)
        .order_by("add_date")
        .only(
            "oid",
            "blogitem_id",
            "parent_id",
            "approved",
            "comment_rendered",
            "add_date",
            "name",
        )
    )
    comments_truncated = False
    count_comments = post.count_comments()
    if request.GET.get("comments") != "all":
        # Show only the most recent MAX_RECENT_COMMENTS comments.
        slice_m, slice_n = (
            max(0, count_comments - settings.MAX_RECENT_COMMENTS),
            count_comments,
        )
        if count_comments > settings.MAX_RECENT_COMMENTS:
            comments_truncated = settings.MAX_RECENT_COMMENTS
        # Bug fix: was `comments = comments = comments[...]` (a duplicated
        # assignment typo); harmless but clearly unintended.
        comments = comments[slice_m:slice_n]

    # Group comments by their parent id so the template can walk the tree.
    all_comments = defaultdict(list)
    for comment in comments:
        all_comments[comment.parent_id].append(comment)

    context["comments_truncated"] = comments_truncated
    context["count_comments"] = count_comments
    context["all_comments"] = all_comments
    if request.path != "/plog/blogitem-040601-1":
        context["related_by_keyword"] = get_related_posts_by_keyword(post, limit=5)
        context["show_buttons"] = not screenshot_mode
    context["show_carbon_ad"] = not screenshot_mode
    context["home_url"] = request.build_absolute_uri("/")
    context["page_title"] = post.title
    context["pub_date_years"] = THIS_YEAR - post.pub_date.year
    return render(request, "plog/post.html", context)
Beispiel #9
0
def _render_blog_post(request, oid, page=None, screenshot_mode=False):
    """Render a single blog post with paginated comments.

    `page` is a 1-based comment page number; None means page 1, and an
    explicit "/p1" style URL is redirected to the canonical post URL.
    Pages beyond MAX_BLOGCOMMENT_PAGES (or beyond the actual number of
    root comments) raise 404. `screenshot_mode` suppresses buttons and
    ads in the rendered template.
    """
    if oid.endswith("/"):
        oid = oid[:-1]
    try:
        post = BlogItem.objects.get(oid=oid)
    except BlogItem.DoesNotExist:
        # Retry case-insensitively before 404ing.
        try:
            post = BlogItem.objects.get(oid__iexact=oid)
        except BlogItem.DoesNotExist:
            if oid == "add":
                return redirect(reverse("add_post"))
            raise http.Http404(oid)

    # If you try to view a blog post that is beyond 10 days in the
    # future it should raise a 404 error.
    future = timezone.now() + datetime.timedelta(days=10)
    if post.pub_date > future:
        raise http.Http404("not published yet")

    if page is None:
        page = 1
    else:
        page = int(page)
        if page == 1:
            # Canonicalize: /page/1 is the same as the bare post URL.
            return redirect("blog_post", oid)

    if page > settings.MAX_BLOGCOMMENT_PAGES:
        raise http.Http404("Gone too far")

    # Reasons for not being here
    if request.method == "HEAD":
        return http.HttpResponse("")
    elif request.method == "GET" and (request.GET.get("replypath")
                                      or request.GET.get("show-comments")):
        # Legacy query-string URLs are permanently redirected to the
        # plain post path.
        return http.HttpResponsePermanentRedirect(request.path)

    # attach a field called `_absolute_url` which depends on the request
    base_url = get_base_url(request)
    post._absolute_url = base_url + reverse("blog_post", args=(post.oid, ))

    context = {"post": post, "screenshot_mode": screenshot_mode}
    if "/plog/blogitem-040601-1" not in request.path:
        try:
            context["previous_post"] = post.get_previous_by_pub_date()
        except BlogItem.DoesNotExist:
            context["previous_post"] = None
        try:
            context["next_post"] = post.get_next_by_pub_date(
                pub_date__lt=timezone.now())
        except BlogItem.DoesNotExist:
            context["next_post"] = None

    if post.screenshot_image:
        context["screenshot_image"] = thumbnail(post.screenshot_image,
                                                "1280x1000",
                                                quality=90).url
        if context["screenshot_image"].startswith("//"):
            # facebook is not going to like that
            context[
                "screenshot_image"] = "https:" + context["screenshot_image"]
    else:
        context["screenshot_image"] = None

    # Cheat a little and make the open graph image absolute if need be.
    if post.open_graph_image and "://" not in post.open_graph_image:
        post.open_graph_image = request.build_absolute_uri(
            post.open_graph_image)

    blogcomments = BlogComment.objects.filter(blogitem=post, approved=True)

    # Only the fields the template needs — keeps the comment query light.
    only = (
        "oid",
        "blogitem_id",
        "parent_id",
        "approved",
        "comment_rendered",
        "add_date",
        "name",
    )
    # Root comments are paginated; replies are always fetched in full and
    # attached to whichever root comment they belong to.
    root_comments = (blogcomments.filter(
        parent__isnull=True).order_by("add_date").only(*only))

    replies = blogcomments.filter(
        parent__isnull=False).order_by("add_date").only(*only)

    count_comments = blogcomments.count()

    root_comments_count = root_comments.count()

    if page > 1:
        # 404 rather than render an empty page past the last one.
        if (page - 1) * settings.MAX_RECENT_COMMENTS > root_comments_count:
            raise http.Http404("Gone too far")

    slice_m, slice_n = get_blogcomment_slice(root_comments_count, page)
    root_comments = root_comments[slice_m:slice_n]

    comments_truncated = False
    if root_comments_count > settings.MAX_RECENT_COMMENTS:
        comments_truncated = settings.MAX_RECENT_COMMENTS

    # Group by parent_id so the template can walk the comment tree;
    # key None holds the root comments.
    all_comments = defaultdict(list)
    for comment in root_comments:
        all_comments[comment.parent_id].append(comment)

    for comment in replies:
        all_comments[comment.parent_id].append(comment)

    context["comments_truncated"] = comments_truncated
    context["count_comments"] = count_comments
    context["all_comments"] = all_comments
    if "/plog/blogitem-040601-1" not in request.path:
        context["related_by_keyword"] = get_related_posts_by_keyword(post,
                                                                     limit=5)
        context["show_buttons"] = not screenshot_mode
    context["show_carbon_ad"] = not screenshot_mode
    # context["show_carbon_ad"] = 0
    # context["show_carbon_native_ad"] = context["show_carbon_ad"]
    # Disabled as of Aug 2019 because the $$$ profit was too small and not
    # worth the web perf "drag" that it costs.
    context["show_carbon_native_ad"] = False
    context["home_url"] = request.build_absolute_uri("/")
    context["page_title"] = post.title
    context["pub_date_years"] = THIS_YEAR - post.pub_date.year
    context["page"] = page
    if page < settings.MAX_BLOGCOMMENT_PAGES:
        # But is there even a next page?!
        if page * settings.MAX_RECENT_COMMENTS < root_comments_count:
            context["paginate_uri_next"] = reverse("blog_post",
                                                   args=(post.oid, page + 1))

    if page > 1:
        context["paginate_uri_previous"] = reverse("blog_post",
                                                   args=(post.oid, page - 1))

    # The `post.open_graph_image` is a string. It looks something like this:
    # '/cache/1e/a7/1ea7b1a42e9161.png' and it would get rendered
    # into the template like this:
    #    <meta property="og:image" content="/cache/1e/a7/1ea7b1a42e9161.png">
    # But post-processing will make this an absolute URL. And that might
    # not pick up the smarts that `get_base_url(request)` can do so
    # turn this into a control template context variable.
    absolute_open_graph_image = None
    if post.open_graph_image:
        absolute_open_graph_image = base_url + urlparse(
            post.open_graph_image).path
    context["absolute_open_graph_image"] = absolute_open_graph_image

    context["not_published_yet"] = post.pub_date > timezone.now()

    response = render(request, "plog/post.html", context)
    response["x-server"] = "django"
    # If it hasn't been published yet, don't cache-control it.
    if context["not_published_yet"]:
        add_never_cache_headers(response)
    return response
Beispiel #10
0
 def get_thumbnail(self, *args, **kwargs):
     """Return a thumbnail built from this podcast's image.

     All positional and keyword arguments are forwarded to thumbnail().
     """
     source = self.image
     assert source, "podcast must have an image"
     return thumbnail(source, *args, **kwargs)
Beispiel #11
0
 def get_thumbnail(self, *args, **kwargs):
     """Delegate to thumbnail() using this podcast's image, passing any
     extra arguments straight through."""
     assert self.image, "podcast must have an image"
     image = self.image
     return thumbnail(image, *args, **kwargs)
Beispiel #12
0
def _render_blog_post(request, oid, screenshot_mode=False):
    """Render a single blog post page.

    Looks the post up by `oid` (trailing slash stripped, case-insensitive
    retry), 404s posts published more than a day into the future, and
    builds the template context with neighbours, screenshot image,
    comment tree (staff see unapproved comments too) and related posts.
    `screenshot_mode` suppresses the buttons/ads in the template.
    """
    if oid.endswith('/'):
        oid = oid[:-1]
    try:
        post = BlogItem.objects.get(oid=oid)
    except BlogItem.DoesNotExist:
        # Retry case-insensitively before giving up with a 404.
        try:
            post = BlogItem.objects.get(oid__iexact=oid)
        except BlogItem.DoesNotExist:
            if oid == 'add':
                return redirect(reverse('add_post'))
            raise http.Http404(oid)

    # If you try to view a blog post that is beyond one day in the
    # future it should raise a 404 error.
    future = timezone.now() + datetime.timedelta(days=1)
    if post.pub_date > future:
        raise http.Http404('not published yet')

    # Reasons for not being here
    if request.method == 'HEAD':
        return http.HttpResponse('')
    elif (
        request.method == 'GET' and
        (request.GET.get('replypath') or request.GET.get('show-comments'))
    ):
        # Legacy query-string URLs are permanently redirected to the
        # plain post path.
        return http.HttpResponsePermanentRedirect(request.path)

    # attach a field called `_absolute_url` which depends on the request
    base_url = 'https://' if request.is_secure() else 'http://'
    base_url += RequestSite(request).domain
    post._absolute_url = base_url + reverse('blog_post', args=(post.oid,))

    context = {
        'post': post,
        'screenshot_mode': screenshot_mode,
    }
    try:
        context['previous_post'] = post.get_previous_by_pub_date()
    except BlogItem.DoesNotExist:
        context['previous_post'] = None
    try:
        context['next_post'] = post.get_next_by_pub_date(
            pub_date__lt=utc_now()
        )
    except BlogItem.DoesNotExist:
        context['next_post'] = None

    if post.screenshot_image:
        context['screenshot_image'] = thumbnail(
            post.screenshot_image,
            '1280x1000',
            quality=90
        ).url
        if context['screenshot_image'].startswith('//'):
            # facebook is not going to like that
            context['screenshot_image'] = (
                'https:' + context['screenshot_image']
            )
    else:
        context['screenshot_image'] = None

    comments = (
        BlogComment.objects
        .filter(blogitem=post)
        .order_by('add_date')
    )
    # Staff can see unapproved comments; everyone else only approved ones.
    if not request.user.is_staff:
        comments = comments.filter(approved=True)

    comments_truncated = False
    if request.GET.get('comments') != 'all':
        # Cap the page at the first 100 comments unless ?comments=all.
        comments = comments[:100]
        if post.count_comments() > 100:
            comments_truncated = 100

    # Group comments by parent_id so the template can walk the tree;
    # key None holds the root comments.
    all_comments = defaultdict(list)
    for comment in comments:
        all_comments[comment.parent_id].append(comment)
    context['comments_truncated'] = comments_truncated
    context['all_comments'] = all_comments
    context['related'] = get_related_posts(post)
    context['show_buttons'] = (
        not screenshot_mode and
        not settings.DEBUG and
        request.path != '/plog/blogitem-040601-1'
    )
    context['show_fusion_ad'] = (
        not screenshot_mode and
        not settings.DEBUG
    )
    context['home_url'] = request.build_absolute_uri('/')
    context['page_title'] = post.title
    return render(request, 'plog/post.html', context)
Beispiel #13
0
def podcast_data(request, id, slug=None):
    """Return a JSON description of one podcast and its episodes.

    Schedules background (Celery) tasks as side effects: refreshing
    stale episode lists (older than 7 days, rate-limited via the cache),
    re-downloading missing/broken images, and fetching the iTunes lookup.
    """
    podcast = get_object_or_404(Podcast, id=id, slug__iexact=slug)
    context = {}
    context.update({
        'id': podcast.id,
        'slug': podcast.slug,
        'name': podcast.name,
        'url': podcast.url,
        'image_url': podcast.image_url,
        'times_picked': podcast.times_picked,
        'total_seconds': podcast.total_seconds,
        'last_fetch': podcast.last_fetch,
        'modified': podcast.modified,
    })
    if podcast.error:
        context['_has_error'] = True
    if (
        not podcast.last_fetch or
        podcast.last_fetch < timezone.now() - datetime.timedelta(days=7)
    ):
        # Stale (or never-fetched) episodes: kick off a refresh, but use
        # a short-lived cache key so we only enqueue it once a minute.
        cache_key = 'updating:episodes:{}'.format(podcast.id)
        if not cache.get(cache_key):
            cache.set(cache_key, True, 60)
            download_episodes_task.delay(podcast.id)
            context['_updating'] = True
    episodes = Episode.objects.filter(
        podcast=podcast
    ).order_by('-published')
    if podcast.image and is_html_document(podcast.image.path):
        # An HTML error page was saved where an image should be; discard
        # it and re-download in the background.
        # Bug fix: was a Python 2 `print` statement (syntax error on Py3).
        print("Found a podcast.image that wasn't an image")
        podcast.image = None
        podcast.save()
        redownload_podcast_image.delay(podcast.id)
    elif not podcast.image and podcast.image_url:
        redownload_podcast_image.delay(podcast.id)

    if podcast.itunes_lookup is None:
        fetch_itunes_lookup.delay(podcast.id)

    if not episodes.exists():
        download_episodes_task.delay(podcast.id)
    context['episodes_count'] = episodes.count()
    context['episodes'] = []
    for episode in episodes:
        context['episodes'].append({
            'duration': episode.duration,
            'published': episode.published,
            'guid': episode.guid,
        })
    try:
        thumb = thumbnail(podcast.image, '300x300')
        context['thumb'] = {
            'url': thumb.url,
            'width': thumb.width,
            'height': thumb.height,
        }
    except IOError:
        # image is so busted it can't be turned into a thumbnail
        podcast.image = None
        podcast.save()
        context['thumb'] = None
        redownload_podcast_image.delay(podcast.id)
    return http.JsonResponse(context)
Beispiel #14
0
def find(request):
    """Search for podcasts by explicit ids, iTunes lookup, or free text.

    Three modes, chosen from the GET parameters:
    * ``ids`` — fetch those podcasts, preserving the given order;
    * ``itunes`` + ``q`` — search the iTunes API, creating Podcast rows
      for previously unseen results;
    * ``q`` — local name search (prefix, then full-text, then substring),
      capped at 5 results.
    Returns JSON with the matched items and the query string.
    """
    if not (request.GET.get('ids') or request.GET.get('q')):
        return http.HttpResponseBadRequest('no ids or q')

    found = []
    max_ = 5
    q = None

    if request.GET.get('ids'):
        ids = [int(x) for x in request.GET['ids'].split(',')]
        found = Podcast.objects.filter(id__in=ids)
        # rearrange them in the order they were
        found = sorted(found, key=lambda x: ids.index(x.id))
        # for podcast in found:
        #     if not podcast.last_fetch:
        #         download_episodes_task.delay(podcast.id)
    elif request.GET.get('itunes'):
        q = request.GET['q']
        try:
            results = itunes_search(
                q,
                attribute='titleTerm',
                timeout=6,
            )['results']
        except (ReadTimeout, ConnectTimeout):
            results = []

        for result in results:
            try:
                podcast = Podcast.objects.get(
                    url=result['feedUrl'],
                    name=result['collectionName']
                )
            except Podcast.DoesNotExist:
                podcast = Podcast.objects.create(
                    name=result['collectionName'],
                    url=result['feedUrl'],
                    itunes_lookup=result,
                    image_url=result['artworkUrl600'],
                )
                try:
                    podcast.download_image(timeout=3)
                except (ReadTimeout, ConnectTimeout):
                    redownload_podcast_image(podcast.id)
                download_episodes_task.delay(podcast.id)
            found.append(podcast)
    else:
        q = request.GET['q']

        base_qs = Podcast.objects.filter(error__isnull=True)
        # Pass 1: cheap prefix match on the name.
        podcasts = base_qs.filter(name__istartswith=q)
        for podcast in podcasts[:max_]:
            found.append(podcast)
        if len(q) > 2:
            # Pass 2: Postgres full-text search on the name.
            sql = (
                "to_tsvector('english', name) @@ "
                "plainto_tsquery('english', %s)"
            )
            podcasts = base_qs.exclude(
                id__in=[x.id for x in found]
            ).extra(
                where=[sql],
                params=[q]
            )[:max_]
            for podcast in podcasts[:max_]:
                if len(found) >= max_:
                    break
                found.append(podcast)
        if len(q) > 1:
            # Pass 3: plain substring match to fill any remaining slots.
            podcasts = base_qs.filter(name__icontains=q).exclude(
                id__in=[x.id for x in found]
            )
            for podcast in podcasts[:max_]:
                if len(found) >= max_:
                    break
                found.append(podcast)

    def episodes_meta(podcast):
        # Episode count and total hours, cached for a day per podcast.
        episodes_cache_key = 'episodes-meta%s' % podcast.id
        meta = cache.get(episodes_cache_key)
        if meta is None:
            episodes = Episode.objects.filter(podcast=podcast)
            episodes_count = episodes.count()
            total_hours = None
            if episodes_count:
                total_seconds = episodes.aggregate(
                    Sum('duration')
                )['duration__sum']
                if total_seconds:
                    total_hours = total_seconds / 3600.0
            else:
                # Nothing downloaded yet; fetch in the background.
                download_episodes_task.delay(podcast.id)
            meta = {
                'count': episodes_count,
                'total_hours': total_hours,
            }
            if episodes_count:
                cache.set(episodes_cache_key, meta, 60 * 60 * 24)
        return meta

    # Bug fix throughout this loop: the diagnostics used Python 2 `print`
    # statements, which are syntax errors on Python 3.
    items = []
    for podcast in found:
        if podcast.image and is_html_document(podcast.image.path):
            print("Found a podcast.image that wasn't an image")
            podcast.image = None
            podcast.save()
        if podcast.image:
            if podcast.image.size < 1000:
                # Suspiciously tiny file — log details and re-download.
                print("IMAGE LOOKS SUSPICIOUS")
                print(podcast.image_url)
                print(repr(podcast), podcast.id)
                print(podcast.url)
                print(repr(podcast.image.read()))
                podcast.download_image()
        thumb_url = None
        if podcast.image:
            try:
                thumb_url = thumbnail(
                    podcast.image,
                    '100x100',
                    quality=81,
                    upscale=False
                ).url
                thumb_url = make_absolute_url(thumb_url, request)
            except IOError:
                import sys
                print("BAD IMAGE!")
                print(sys.exc_info())
                print(repr(podcast.image))
                print(repr(podcast), podcast.url)
                print()
                podcast.image = None
                podcast.save()
                redownload_podcast_image.delay(podcast.id)
        else:
            redownload_podcast_image.delay(podcast.id)

        # Temporarily put here
        if podcast.itunes_lookup is None:
            fetch_itunes_lookup.delay(podcast.id)

        meta = episodes_meta(podcast)
        episodes_count = meta['count']
        total_hours = meta['total_hours']
        items.append({
            'id': podcast.id,
            'name': podcast.name,
            'image_url': thumb_url,
            'episodes': episodes_count,
            'hours': total_hours,
            'last_fetch': podcast.last_fetch,
            'slug': podcast.get_or_create_slug(),
            'url': reverse(
                'podcasttime:podcast_slug',
                args=(podcast.id, podcast.get_or_create_slug())
            ),
        })
    return http.JsonResponse({
        'items': items,
        'q': q,
    })
Beispiel #15
0
def podcasts_data(request):
    """Return a paginated JSON listing of podcasts.

    Optional GET parameters: ``search`` (free-text filter), ``ids``
    (comma-separated id filter) and ``page``. Results are ordered by
    times picked (descending) then name, 15 per page, and each item is
    annotated with aggregated episode counts and durations.
    """
    context = {}
    search = request.GET.get('search', '').strip()
    ids = request.GET.get('ids')

    podcasts = Podcast.objects.all()
    if search:
        podcasts = _search_podcasts(search, podcasts)

    if ids:
        ids = [int(x) for x in ids.split(',') if x.strip()]
        podcasts = podcasts.filter(id__in=ids)

    podcasts = podcasts.order_by('-times_picked', 'name')

    paginator = Paginator(podcasts, 15)
    page = request.GET.get('page')
    try:
        paged = paginator.page(page)
    except PageNotAnInteger:
        # If page is not an integer, deliver first page.
        paged = paginator.page(1)
    except EmptyPage:
        # If page is out of range (e.g. 9999), deliver last page of results.
        paged = paginator.page(paginator.num_pages)

    context['count'] = paginator.count

    # Aggregate episode counts/durations for just this page's podcasts
    # in a single query instead of one query per podcast.
    # past = timezone.now() - datetime.timedelta(days=365)
    episodes = Episode.objects.filter(
        podcast__in=paged,
        # published__gte=past,
    ).values(
        'podcast_id',
    ).annotate(
        duration=Sum('duration'),
        count=Count('podcast_id'),
    )
    episode_counts = {}
    episode_seconds = {}
    for x in episodes:
        episode_counts[x['podcast_id']] = x['count']
        episode_seconds[x['podcast_id']] = x['duration']

    items = []
    for podcast in paged:
        item = {
            'id': podcast.id,
            'name': podcast.name,
            # Bug fix: these two used the `cond and a or b` idiom, which
            # silently yields None whenever the middle value is falsy;
            # use real conditional expressions instead.
            'image': (
                thumbnail(podcast.image, '300x300').url
                if podcast.image else None
            ),
            'times_picked': podcast.times_picked,
            'slug': podcast.get_or_create_slug(),
            'last_fetch': (
                podcast.last_fetch.isoformat()
                if podcast.last_fetch else None
            ),
            'modified': podcast.modified.isoformat(),
            'episode_count': episode_counts.get(podcast.id, 0),
            'episode_seconds': episode_seconds.get(podcast.id, 0),
        }
        items.append(item)

    context['items'] = items

    pagination = {
        'has_previous': paged.has_previous(),
        'has_next': paged.has_next(),
        'number': paged.number,
        'num_pages': paginator.num_pages,
    }
    if pagination['has_previous']:
        pagination['previous_page_number'] = paged.previous_page_number()
    if pagination['has_next']:
        pagination['next_page_number'] = paged.next_page_number()
    context['pagination'] = pagination
    return http.JsonResponse(context)