# Beispiel #1
# 0
def detalle_episodio(item):
    """Fill in episode details for an RTVCM episode item.

    Sets thumbnail, geolock flag, duration, aired date and the playable
    media URL on *item* by querying the Flumotion API, then returns the
    same item. On any failure ``item.media_url`` is left empty so callers
    can detect the miss without crashing.
    """
    logger.info("tvalacarta.rtvcm.detalle_episodio")

    # The numeric video id is the trailing "video-NNN" part of the URL.
    idvideo = scrapertools.find_single_match(item.url, r"video-(\d+)$")
    url = "http://api.rtvcm.webtv.flumotion.com/pods/" + idvideo + "?extended=true"
    data = scrapertools.cache_page(url)

    try:
        json_object = jsontools.load_json(data)

        # Drop the query string (sizing params) from the thumbnail URL.
        item.thumbnail = json_object["video_image_url"].split("?")[0]
        item.geolocked = "0"
        item.duration = scrapertools.parse_duration_secs(json_object["duration"])
        item.aired_date = scrapertools.parse_date(item.title)

        # Resolve the actual media URL through the matching server module.
        from servers import rtvcm as servermodule
        video_urls = servermodule.get_video_url(item.url)
        item.media_url = video_urls[0][1]

    except Exception:
        # Best-effort: log the traceback and leave media_url empty.
        import traceback
        print(traceback.format_exc())
        item.media_url = ""

    return item
# Beispiel #2
# 0
def detalle_episodio(item):
    """Fill in episode details for an RTVCM episode item.

    Sets thumbnail, geolock flag, duration, aired date and the playable
    media URL on *item* by querying the Flumotion API, then returns the
    same item. On any failure ``item.media_url`` is left empty so callers
    can detect the miss without crashing.
    """
    logger.info("tvalacarta.rtvcm.detalle_episodio")

    # The numeric video id is the trailing "video-NNN" part of the URL.
    idvideo = scrapertools.find_single_match(item.url, r"video-(\d+)$")
    url = "http://api.rtvcm.webtv.flumotion.com/pods/" + idvideo + "?extended=true"
    data = scrapertools.cache_page(url)

    try:
        json_object = jsontools.load_json(data)

        # Drop the query string (sizing params) from the thumbnail URL.
        item.thumbnail = json_object["video_image_url"].split("?")[0]
        item.geolocked = "0"
        item.duration = scrapertools.parse_duration_secs(
            json_object["duration"])
        item.aired_date = scrapertools.parse_date(item.title)

        # Resolve the actual media URL through the matching server module.
        from servers import rtvcm as servermodule
        video_urls = servermodule.get_video_url(item.url)
        item.media_url = video_urls[0][1]

    except Exception:
        # Best-effort: log the traceback and leave media_url empty.
        import traceback
        print(traceback.format_exc())
        item.media_url = ""

    return item
# Beispiel #3
# 0
def detalle_episodio(item):
    """Fill in episode details for a PakaPaka episode item.

    Duration and aired date come from the conectate.gob.ar search API;
    the playable media URL is resolved through the pakapaka server
    module. On failure ``item.media_url`` is left empty and the item is
    still returned.
    """
    # Pull duration and date from conectate.
    rec_id = scrapertools.find_single_match(item.url, r"videos/(\d+)")
    data = scrapertools.cache_page("http://www.conectate.gob.ar/sitios/conectate/busqueda/buscar?rec_id=" + rec_id)
    scrapeddate = scrapertools.find_single_match(data, '"fecha_creacion"\:"([^"]+)"')

    # Fall back to the plain "fecha" field when "fecha_creacion" is absent.
    if scrapeddate == "":
        scrapeddate = scrapertools.find_single_match(data, '"fecha"\:"([^"]+)"')

    # JSON escapes slashes ("\/") inside the date string; undo that.
    item.aired_date = scrapertools.parse_date(scrapeddate.replace("\\/", "/"))

    scrapedduration = scrapertools.find_single_match(data, '"duracion_segundos":"(\d+)"')
    item.duration = scrapertools.parse_duration_secs(scrapedduration)

    # Now fetch the PakaPaka page (warms the cache for get_video_url).
    data = scrapertools.cache_page(item.url)

    item.geolocked = "0"
    try:
        from servers import pakapaka as servermodule
        video_urls = servermodule.get_video_url(item.url)
        item.media_url = video_urls[0][1]
    except Exception:
        # Best-effort: log the traceback and leave media_url empty.
        import traceback
        print(traceback.format_exc())
        item.media_url = ""

    return item
# Beispiel #4
# 0
def detalle_episodio(item):
    """Fill in episode details for a PakaPaka episode item.

    Duration and aired date come from the conectate.gob.ar search API;
    the playable media URL is resolved through the pakapaka server
    module. On failure ``item.media_url`` is left empty and the item is
    still returned.
    """
    # Pull duration and date from conectate.
    rec_id = scrapertools.find_single_match(item.url, r"videos/(\d+)")
    data = scrapertools.cache_page(
        "http://www.conectate.gob.ar/sitios/conectate/busqueda/buscar?rec_id="
        + rec_id)
    scrapeddate = scrapertools.find_single_match(
        data, '"fecha_creacion"\:"([^"]+)"')

    # Fall back to the plain "fecha" field when "fecha_creacion" is absent.
    if scrapeddate == "":
        scrapeddate = scrapertools.find_single_match(data,
                                                     '"fecha"\:"([^"]+)"')

    # JSON escapes slashes ("\/") inside the date string; undo that.
    item.aired_date = scrapertools.parse_date(scrapeddate.replace("\\/", "/"))

    scrapedduration = scrapertools.find_single_match(
        data, '"duracion_segundos":"(\d+)"')
    item.duration = scrapertools.parse_duration_secs(scrapedduration)

    # Now fetch the PakaPaka page (warms the cache for get_video_url).
    data = scrapertools.cache_page(item.url)

    item.geolocked = "0"
    try:
        from servers import pakapaka as servermodule
        video_urls = servermodule.get_video_url(item.url)
        item.media_url = video_urls[0][1]
    except Exception:
        # Best-effort: log the traceback and leave media_url empty.
        import traceback
        print(traceback.format_exc())
        item.media_url = ""

    return item
# Beispiel #5
# 0
def videos(item):
    """Build the list of playable video Items for a Euronews program.

    Maps the public program URL onto the JSON API endpoint, then creates
    one Item per entry that actually carries a video. Entries that fail
    to parse are logged and skipped.
    """
    logger.info("tvalacarta.euronews videos")
    itemlist = []

    # http://es.euronews.com/api/program/futuris
    # http://es.euronews.com/programas/futuris
    data = scrapertools.cache_page(
        item.url.replace("/programas/", "/api/program/"))

    json_objects = jsontools.load_json(data)

    for json_object in json_objects:
        # Pre-bind so the error log below never hits an unbound name
        # when json_object["title"] itself raises.
        title = ""
        try:
            title = json_object["title"]
            url = json_object["canonical"]
            # Substitute concrete dimensions into the templated image URL.
            thumbnail = json_object["images"][0]["url"].replace(
                "{{w}}", "960").replace("{{h}}", "540")
            plot = json_object["leadin"]

            import datetime
            aired_date = datetime.datetime.fromtimestamp(
                json_object["publishedAt"]).strftime('%Y-%m-%d %H:%M:%S')
            logger.info("aired_date=" + repr(aired_date))

            # Probe for a video; if absent, the exception skips this entry.
            video_element = json_object["videos"][0]

            try:
                duration = json_object["videos"][0]["duration"]
                logger.info("duration=" + duration)
                # API reports milliseconds; floor-divide so the result is an
                # int on both Python 2 and 3 (plain / would yield a float
                # string like "12.0" on Python 3).
                duration = scrapertools.parse_duration_secs(
                    str(int(duration) // 1000))
                logger.info("duration=" + duration)
            except Exception:
                duration = ""

            itemlist.append(
                Item(channel=CHANNELNAME,
                     action="play",
                     server="euronews",
                     title=title,
                     url=url,
                     thumbnail=thumbnail,
                     plot=plot,
                     aired_date=aired_date,
                     duration=duration,
                     show=item.show,
                     folder=False))
        except Exception:
            import traceback
            logger.info("Error al cargar " + title + ", " +
                        traceback.format_exc())

    return itemlist
# Beispiel #6
# 0
def videos(item):
    """Build the list of playable video Items for a Euronews program.

    Maps the public program URL onto the JSON API endpoint, then creates
    one Item per entry that actually carries a video. Entries that fail
    to parse are logged and skipped.
    """
    logger.info("tvalacarta.euronews videos")
    itemlist = []

    # http://es.euronews.com/api/program/futuris
    # http://es.euronews.com/programas/futuris
    data = scrapertools.cache_page(item.url.replace("/programas/", "/api/program/"))

    json_objects = jsontools.load_json(data)

    for json_object in json_objects:
        # Pre-bind so the error log below never hits an unbound name
        # when json_object["title"] itself raises.
        title = ""
        try:
            title = json_object["title"]
            url = json_object["canonical"]
            # Substitute concrete dimensions into the templated image URL.
            thumbnail = json_object["images"][0]["url"].replace("{{w}}", "960").replace("{{h}}", "540")
            plot = json_object["leadin"]

            import datetime
            aired_date = datetime.datetime.fromtimestamp(json_object["publishedAt"]).strftime('%Y-%m-%d %H:%M:%S')
            logger.info("aired_date=" + repr(aired_date))

            # Probe for a video; if absent, the exception skips this entry.
            video_element = json_object["videos"][0]

            try:
                duration = json_object["videos"][0]["duration"]
                logger.info("duration=" + duration)
                # API reports milliseconds; floor-divide so the result is an
                # int on both Python 2 and 3 (plain / would yield a float
                # string like "12.0" on Python 3).
                duration = scrapertools.parse_duration_secs(str(int(duration) // 1000))
                logger.info("duration=" + duration)
            except Exception:
                duration = ""

            itemlist.append(Item(channel=CHANNELNAME, action="play", server="euronews", title=title, url=url, thumbnail=thumbnail, plot=plot, aired_date=aired_date, duration=duration, show=item.show, folder=False))
        except Exception:
            import traceback
            logger.info("Error al cargar " + title + ", " + traceback.format_exc())

    return itemlist
# Beispiel #7
# 0
# -*- coding: utf-8 -*-