示例#1
0
 async def latency_error(self, ctx, error):
     """Error handler for the latency command.

     Bot owners bypass a cooldown by re-invoking the command; cooldown
     hits from anyone else are deliberately ignored, and every other
     error is delegated to the shared helper.
     """
     # Anything other than a cooldown goes straight to the generic handler.
     if not isinstance(error, commands.CommandOnCooldown):
         helper.handle_error(ctx, error)
         return
     # Cooldown hit: let the bot owner through, silently drop for others.
     if await self.is_owner(ctx.author):
         await ctx.reinvoke()
示例#2
0
def download_videos(course_id, folder):
    """Download every video of a DataCamp course into *folder*.

    For each chapter, scrapes the chapter's "continue" page for either
    projector keys (video_type 1) or direct ``//videos...`` references
    (video_type 2), resolves the mp4 link via the projector page, and
    hands the actual download off to ``helper.download_file``.

    course_id -- course identifier used in the URLs and regexes.
    folder    -- destination directory for the downloaded .mp4 files.
    """
    chapters = get_course_chapters(course_id)
    display_text = True  # print the "Downloading videos..." banner only once
    for chapter in chapters['user_chapters']:
        page = con.session.get(
            'https://www.datacamp.com/courses/{}/chapters/{}/continue'.format(
                course_id, chapter['chapter_id']))
        # Type 1: projector keys embedded in the chapter page.
        video_ids = set(
            re.findall(r';(course_{}_[\d|\w]+)&'.format(course_id), page.text))
        video_type = 1
        if len(video_ids) == 0:
            # Type 2 fallback: protocol-relative //videos... references.
            video_ids = set(
                re.findall(r'(//videos.[/|\w|:|.|-]+[^/])&', page.text))
            video_type = 2
        if len(video_ids) == 0:
            sys.stdout.write(f'{bcolors.FAIL}No videos found!{bcolors.ENDC}\n')
            return

        if display_text:
            sys.stdout.write(
                f'{bcolors.BOLD}Downloading videos...{bcolors.ENDC}\n')
            display_text = False

        for video_id in video_ids:
            # Retry the projector request until it succeeds.
            while True:
                try:
                    if video_type == 1:
                        video_page = con.session.get(
                            'https://projector.datacamp.com/?projector_key=' +
                            video_id)
                    elif video_type == 2:
                        video_page = con.session.get(
                            'https://projector.datacamp.com/?video_hls=' +
                            video_id)
                except Exception:
                    # FIX: was a bare `except:`, which also swallowed
                    # KeyboardInterrupt/SystemExit and (combined with the
                    # `continue` in this retry loop) made the loop
                    # impossible to interrupt from the keyboard.
                    helper.handle_error(con)
                    continue
                break
            soup = BeautifulSoup(video_page.text, 'html.parser')
            video_url = json.loads(
                soup.find("input", {"id": "videoData"})['value'])

            link = video_url['video_mp4_link']

            if link is None:
                sys.stdout.write(
                    f'{bcolors.FAIL}Videos cannot be downloaded!{bcolors.ENDC}\n'
                )
                return
            if link.endswith('mp4') and not link.startswith('http'):
                # Protocol-relative link ("//host/..."): force https.
                link = 'https://' + link[2:]
                name = link.split('/')[-1]
            else:
                if video_type == 1:
                    video_name_url = json.loads(
                        soup.find("input", {"id": "slideDeckData"})['value'])
                    link_name = video_name_url['plain_video_mp4_link']
                    if link_name is not None:
                        name = link_name.split('/')[-1]
                    else:
                        # No plain mp4 link: derive the filename from the
                        # audio track's basename instead.
                        name = video_url['audio_link'].split('/')[-1].split(
                            '.')[0] + '.mp4'
                elif video_type == 2:
                    link_name = video_url['video_mp4_link']
                    name = link_name.split('/')[-1]
                if name.count('_') > 1:
                    # Strip the leading "<prefix>_" segment from the filename.
                    name = name.split('_')[1:]
                    name = '_'.join(name)
            file_path = os.path.join(folder, name)

            # Skip files already downloaded on a previous run.
            if helper.file_exist(file_path):
                continue
            helper.download_file(con, link, file_path)