Example #1
import os
import re
import tempfile
import urllib.request

import ffmpy
import requests


def download_video(url: str, path: str) -> int:
    # Append /.json to the post URL so reddit returns the post data as JSON.
    if not url.endswith('/'):
        url += '/'
    url += '.json'

    response = requests.get(url).json()

    # This will fail if the URL is not a reddit post or the post is not a video.
    try:
        is_video = response[0]['data']['children'][0]['data']['is_video']
        if not is_video:
            return 1
    except (KeyError, IndexError, TypeError):
        return 1

    # Fetch the DASH manifest that lists the separate audio and video streams.
    mpd_list_link = response[0]['data']['children'][0]['data']['media'][
        'reddit_video']['dash_url']
    mpd_xml = requests.get(mpd_list_link).text

    # Build the download links for the video and audio streams.
    base_link = response[0]['data']['children'][0]['data']['url']
    data = re.findall('<BaseURL>(.*?)</BaseURL>', mpd_xml)
    hq_video_link = base_link + '/' + data[0]
    audio_link = base_link + '/' + data[-1]

    # Download both streams into temporary files.
    video_fd, temp_video_path = tempfile.mkstemp()
    audio_fd, temp_audio_path = tempfile.mkstemp()
    os.close(video_fd)
    os.close(audio_fd)
    urllib.request.urlretrieve(hq_video_link, temp_video_path)
    urllib.request.urlretrieve(audio_link, temp_audio_path)

    # Mux the two streams into a single output file without re-encoding.
    ff = ffmpy.FFmpeg(
        inputs={temp_video_path: None, temp_audio_path: None},
        outputs={path: '-c copy'})

    # Failsafe: always remove the temp files, even if ffmpeg fails.
    try:
        ff.run()
    except ffmpy.FFRuntimeError:
        return 1
    finally:
        os.remove(temp_audio_path)
        os.remove(temp_video_path)

    return 0
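
The function returns 0 on success and 1 on any failure, so callers only need to check the return value. A minimal usage sketch, assuming a hypothetical reddit video post URL and output filename (both values are placeholders, not taken from the original example):

# The post URL and output path below are made-up placeholders.
if download_video(
        'https://www.reddit.com/r/videos/comments/abc123/example_post/',
        'merged_video.mp4') == 0:
    print('saved merged_video.mp4')
else:
    print('download failed')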
Example #3
import requests
from bs4 import BeautifulSoup

def get_id(html_id, website="http://coolsite.com"):  # parameters with a default value must come after those without one (see the sketch below this example)
    response = requests.get(website)
    parsed_html = BeautifulSoup(response.content, features="html.parser")
    return parsed_html.find(id=html_id)
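
The comment on the definition refers to Python's rule that parameters with default values must come after parameters without them, which is also what makes the website argument optional at the call site. A short sketch, using a made-up element id purely for illustration:

# "main-content" is a made-up id used only to illustrate the call.
element = get_id("main-content")                       # uses the default website
element = get_id("main-content", website="http://coolsite.com")

# Reversing the parameter order would not even compile:
# def get_id(website="http://coolsite.com", html_id):  # SyntaxError: a parameter
#     ...                                              # without a default follows one with a default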