def _download_clip(slug, args):
    """Look up a clip by slug, choose a target filename, and download it."""
    print_out("<dim>Looking up clip...</dim>")
    clip = twitch.get_clip(slug)
    if not clip:
        raise ConsoleError("Clip '{}' not found".format(slug))

    duration = utils.format_duration(clip["durationSeconds"])
    print_out(
        "Found: <green>{}</green> by <yellow>{}</yellow>, playing <blue>{}</blue> ({})".format(
            clip["title"],
            clip["broadcaster"]["displayName"],
            clip["game"]["name"],
            duration,
        ))

    target = _clip_target_filename(clip, args)
    print_out("Target: <blue>{}</blue>".format(target))

    # Prompt before clobbering an existing file unless --overwrite was given.
    if not args.overwrite and path.exists(target):
        answer = input("File exists. Overwrite? [Y/n]: ")
        if answer.lower().strip() not in ("", "y"):
            raise ConsoleError("Aborted")
        args.overwrite = True

    url = get_clip_authenticated_url(slug, args.quality)
    print_out("<dim>Selected URL: {}</dim>".format(url))
    print_out("<dim>Downloading clip...</dim>")
    download_file(url, target)
    print_out("Downloaded: <blue>{}</blue>".format(target))
def download(video_id, max_workers, format='mkv', start=None, end=None, **kwargs):
    """Download a video: fetch its playlist, download the VOD pieces
    concurrently, join them into one file, then delete the pieces.

    Raises ConsoleError on an invalid time range or when no VODs match.
    """
    video_id = parse_video_id(video_id)

    if start and end and end <= start:
        raise ConsoleError("End time must be greater than start time")

    print_out("Looking up video...")
    video = twitch.get_video(video_id)
    print_out("Found: <blue>{}</blue> by <yellow>{}</yellow>".format(
        video['title'], video['channel']['display_name']))

    print_out("Fetching access token...")
    access_token = twitch.get_access_token(video_id)

    print_out("Fetching playlists...")
    playlists = twitch.get_playlists(video_id, access_token)
    quality, playlist_url = _select_quality(playlists)

    print_out("\nFetching playlist...")
    base_url, filenames = twitch.get_playlist_urls(playlist_url, start, end)

    if not filenames:
        raise ConsoleError("No vods matched, check your start and end times")

    # Create a temp dir to store downloads if it doesn't exist
    directory = '{}/twitch-dl/{}/{}'.format(tempfile.gettempdir(), video_id, quality)
    pathlib.Path(directory).mkdir(parents=True, exist_ok=True)
    print_out("Download dir: {}".format(directory))

    print_out("Downloading {} VODs using {} workers...".format(
        len(filenames), max_workers))
    paths = _download_files(base_url, directory, filenames, max_workers)

    print_out("\n\nJoining files...")
    target = _video_target_filename(video, format)
    _join_vods(directory, paths, target)

    print_out("\nDeleting vods...")
    # Bug fix: the loop variable was named `path`, shadowing the
    # module-level `os.path` alias used by other functions in this file.
    for vod_path in paths:
        os.unlink(vod_path)

    print_out("\nDownloaded: {}".format(target))
def _join_vods(playlist_path, target, overwrite, video):
    """Join downloaded VODs into `target` with ffmpeg, stamping the output
    with artist/title metadata.

    Raises ConsoleError if ffmpeg exits with a non-zero status.
    """
    command = [
        "ffmpeg",
        "-i", playlist_path,
        "-c", "copy",
        "-metadata", "artist={}".format(video["creator"]["displayName"]),
        "-metadata", "title={}".format(video["title"]),
        "-metadata", "encoded_by=twitch-dl",
        "-stats",
        "-loglevel", "warning",
    ]

    # Bug fix: -y was previously appended after the output file; ffmpeg
    # ignores trailing options, so --overwrite had no effect. Options must
    # precede the output file they apply to.
    if overwrite:
        command.append("-y")
    command.append(target)

    print_out("<dim>{}</dim>".format(" ".join(command)))
    result = subprocess.run(command)
    if result.returncode != 0:
        raise ConsoleError("Joining files failed")
def _join_vods(playlist_path, target):
    """Concatenate downloaded VOD segments into `target` with ffmpeg.

    Uses the concat demuxer, resampling audio to smooth over timestamp
    gaps between segments. Raises ConsoleError if ffmpeg fails.
    """
    command = [
        "ffmpeg",
        # Bug fix: these are input options and must come *before* -i;
        # previously they were placed after -i, where ffmpeg treats them
        # as output options (so -f concat would set the *output* format).
        "-f", "concat",
        "-safe", "0",
        "-segment_time_metadata", "1",
        "-i", playlist_path,
        "-c", "copy",
        "-af", "aselect=concatdec_select,aresample=async=1",
        "-stats",
        "-loglevel", "warning",
        # -y must precede the output file to take effect.
        "-y",
        target,
    ]

    print_out("<dim>{}</dim>".format(" ".join(command)))
    result = subprocess.run(command)
    if result.returncode != 0:
        raise ConsoleError("Joining files failed")
def _get_clip_url(clip, args): qualities = clip["videoQualities"] # Quality given as an argument if args.quality: selected_quality = args.quality.rstrip( "p") # allow 720p as well as 720 for q in qualities: if q["quality"] == selected_quality: return q["sourceURL"] available = ", ".join([str(q["quality"]) for q in qualities]) msg = "Quality '{}' not found. Available qualities are: {}".format( args.quality, available) raise ConsoleError(msg) # Ask user to select quality print_out("\nAvailable qualities:") for n, q in enumerate(qualities): print_out("{}) {} [{} fps]".format(n + 1, q["quality"], q["frameRate"])) print_out() no = utils.read_int("Choose quality", min=1, max=len(qualities), default=1) selected_quality = qualities[no - 1] return selected_quality["sourceURL"]
def _clip_target_filename(clip, args):
    """Render the --output template for `clip` and return the filename.

    Raises ConsoleError when the template references an unknown key.
    """
    created_date, created_time = clip["createdAt"].split("T")

    # The container format is taken from the first quality's source URL.
    _, extension = path.splitext(clip["videoQualities"][0]["sourceURL"])
    extension = extension.lstrip(".")

    subs = {
        "channel": clip["broadcaster"]["displayName"],
        "channel_login": clip["broadcaster"]["login"],
        "date": created_date,
        "datetime": clip["createdAt"],
        "format": extension,
        "game": clip["game"]["name"],
        "game_slug": utils.slugify(clip["game"]["name"]),
        "id": clip["id"],
        "time": created_time,
        "title": utils.titlify(clip["title"]),
        "title_slug": utils.slugify(clip["title"]),
    }

    try:
        return args.output.format(**subs)
    except KeyError as ex:
        raise ConsoleError(
            "Invalid key {} used in --output. Supported keys are: {}".format(
                ex, ", ".join(subs.keys())))
def _download_clip(slug, args):
    """Download a clip by slug, naming the file from the broadcaster login
    and the slugified clip title."""
    print_out("<dim>Looking up clip...</dim>")
    clip = twitch.get_clip(slug)
    if not clip:
        raise ConsoleError("Clip '{}' not found".format(slug))

    print_out(
        "Found: <green>{}</green> by <yellow>{}</yellow>, playing <blue>{}</blue> ({})".format(
            clip["title"],
            clip["broadcaster"]["displayName"],
            clip["game"]["name"],
            utils.format_duration(clip["durationSeconds"]),
        ))

    url = _get_clip_url(clip, args)
    print_out("<dim>Selected URL: {}</dim>".format(url))

    # Derive the extension (e.g. ".mp4") from the URL path.
    extension = Path(urlparse(url).path).suffix
    filename = "{}_{}{}".format(
        clip["broadcaster"]["login"], utils.slugify(clip["title"]), extension)

    print_out("Downloading clip...")
    download_file(url, filename)
    print_out("Downloaded: {}".format(filename))
def get_channel_videos(channel_id, limit, sort, type="archive", game_ids=None, after=None):
    """Fetch one page of a channel's videos via the GraphQL API.

    `channel_id` is the channel login. Returns the `videos` connection
    (totalCount, pageInfo, edges). Raises ConsoleError if the channel
    does not exist.
    """
    # Avoid a mutable default argument; [] also renders as a valid empty
    # GraphQL list literal when no game filter is given.
    if game_ids is None:
        game_ids = []

    query = """
    {{
      user(login: "{channel_id}") {{
        videos(
          first: {limit},
          type: {type},
          sort: {sort},
          after: "{after}",
          options: {{
            gameIDs: {game_ids}
          }}
        ) {{
          totalCount
          pageInfo {{
            hasNextPage
          }}
          edges {{
            cursor
            node {{
              id
              title
              publishedAt
              broadcastType
              lengthSeconds
              game {{
                name
              }}
              creator {{
                login
                displayName
              }}
            }}
          }}
        }}
      }}
    }}
    """

    # Bug fix: the login value was a literal placeholder string, so the
    # `channel_id` format key was never substituted into the query.
    query = query.format(
        channel_id=channel_id,
        game_ids=game_ids,
        after=after if after else "",
        limit=limit,
        sort=sort.upper(),
        type=type.upper(),
    )

    response = gql_query(query)

    if not response["data"]["user"]:
        raise ConsoleError("Channel {} not found".format(channel_id))

    return response["data"]["user"]["videos"]
def _download_video(video_id, max_workers, format='mp4', start=None, end=None, keep=False, **kwargs):
    """Download a video's VOD segments, join them, and clean up.

    Fork-specific variant: logs progress via _log(), and writes the joined
    file into a hardcoded "videos/Download/" directory.
    NOTE(review): user-facing messages are intentionally in French; the
    hardcoded output directory should probably be configurable — confirm.
    """
    # Validate the requested time range before doing any network work.
    if start and end and end <= start:
        raise ConsoleError("End time must be greater than start time")

    _log(video_id, "Recherche la video ")
    video = twitch.get_video(video_id)
    _log(video_id, "Informations sur {}".format(video['title']))
    print_out("Trouvé: <blue>{}</blue> by <yellow>{}</yellow>".format(
        video['title'], video['channel']['display_name']))

    access_token = twitch.get_access_token(video_id)

    _log(video_id, "Obtention de la liste des fichiers...")
    playlists = twitch.get_playlists(video_id, access_token)
    parsed = m3u8.loads(playlists)
    selected = _select_quality(parsed.playlists)

    # print_out("\nListe...")
    # Fetch the variant playlist for the selected quality.
    response = requests.get(selected.uri)
    response.raise_for_status()
    playlist = m3u8.loads(response.text)

    # Segment URIs in the playlist are relative to the playlist's own URL.
    base_uri = re.sub("/[^/]+$", "/", selected.uri)
    target_dir = _crete_temp_dir(base_uri)
    filenames = list(_get_files(playlist, start, end))

    # Save playlists for debugging purposes
    with open(target_dir + "playlists.m3u8", "w") as f:
        f.write(playlists)
    with open(target_dir + "playlist.m3u8", "w") as f:
        f.write(response.text)

    # print_out("\nTélécharge {} VODs avec {} threads dans {}".format(
    #     len(filenames), max_workers, target_dir))
    file_paths = download_files(video_id, base_uri, target_dir, filenames, max_workers)

    target = _video_target_filename(video, format)
    print_out("\nCible: {}".format(target))

    # Join the downloaded segments into the final output file.
    _join_vods(target_dir, file_paths, "videos/Download/{}".format(target))

    if keep:
        print_out("\nTemporary files not deleted: {}".format(target_dir))
    else:
        # print_out("\nSupprime le fichier temporaire...")
        shutil.rmtree(target_dir)

    print_out("Fichier téléchargé: {}".format(target))
    _log(video_id, "Terminé {}".format(target))
def parse_playlist(url, playlist, start, end):
    """Parse an m3u8 playlist body.

    Returns (base_url_template, files): a URL template for segment names
    and the list of segment files between `start` and `end` (seconds).
    Raises ConsoleError when start/end exceed the video duration or the
    duration tag is missing.
    """
    base_url = re.sub("/[^/]+$", "/{}", url)

    # Bug fix: the fractional part must match a literal dot (\.), not any
    # character. Also guard against a playlist without the duration tag
    # instead of crashing with AttributeError on `match.group`.
    match = re.search(r"#EXT-X-TWITCH-TOTAL-SECS:(\d+)(\.\d+)?", playlist)
    if not match:
        raise ConsoleError("Invalid playlist, EXT-X-TWITCH-TOTAL-SECS not found")
    total_seconds = int(match.group(1))

    # Now that video duration is known, validate start and end max values
    if start and start > total_seconds:
        raise ConsoleError(
            "Start time {} greater than video duration {}".format(
                timedelta(seconds=start), timedelta(seconds=total_seconds)))

    if end and end > total_seconds:
        raise ConsoleError("End time {} greater than video duration {}".format(
            timedelta(seconds=end), timedelta(seconds=total_seconds)))

    files = list(_get_files(playlist, start, end))
    return base_url, files
def _get_playlist_by_name(playlists, quality): for name, _, uri in playlists: if name == quality: return uri available = ", ".join([name for (name, _, _) in playlists]) msg = "Quality '{}' not found. Available qualities are: {}".format( quality, available) raise ConsoleError(msg)
def download(video_id, max_workers, format='mkv', start=None, end=None, keep=False, **kwargs):
    """Download a video by ID or URL: pick a quality, download the VOD
    segments concurrently to a temp dir, join them into one file, and
    optionally keep the temp dir for debugging.

    Raises ConsoleError on an invalid time range.
    """
    video_id = _parse_video_id(video_id)

    # Validate the requested time range before doing any network work.
    if start and end and end <= start:
        raise ConsoleError("End time must be greater than start time")

    print_out("Looking up video...")
    video = twitch.get_video(video_id)
    print_out("Found: <blue>{}</blue> by <yellow>{}</yellow>".format(
        video['title'], video['channel']['display_name']))

    print_out("Fetching access token...")
    access_token = twitch.get_access_token(video_id)

    print_out("Fetching playlists...")
    playlists = twitch.get_playlists(video_id, access_token)
    parsed = m3u8.loads(playlists)
    selected = _select_quality(parsed.playlists)

    print_out("\nFetching playlist...")
    response = requests.get(selected.uri)
    response.raise_for_status()
    playlist = m3u8.loads(response.text)

    # Segment URIs are relative to the playlist's own URL.
    base_uri = re.sub("/[^/]+$", "/", selected.uri)
    target_dir = _crete_temp_dir(base_uri)
    filenames = list(_get_files(playlist, start, end))

    # Save playlists for debugging purposes
    with open(target_dir + "playlists.m3u8", "w") as f:
        f.write(playlists)
    with open(target_dir + "playlist.m3u8", "w") as f:
        f.write(response.text)

    print_out("\nDownloading {} VODs using {} workers to {}".format(
        len(filenames), max_workers, target_dir))
    _download_files(base_uri, target_dir, filenames, max_workers)

    print_out("\n\nJoining files...")
    target = _video_target_filename(video, format)
    _join_vods(target_dir, filenames, target)

    if keep:
        print_out("\nTemporary files not deleted: {}".format(target_dir))
    else:
        print_out("\nDeleting temporary files...")
        shutil.rmtree(target_dir)

    print_out("Downloaded: {}".format(target))
def download(args):
    """Dispatch to the video or clip downloader based on what the given
    identifier parses as. Raises ConsoleError when it parses as neither."""
    dispatch = (
        (utils.parse_video_identifier, _download_video),
        (utils.parse_clip_identifier, _download_clip),
    )

    for parse, handler in dispatch:
        identifier = parse(args.video)
        if identifier:
            return handler(identifier, args)

    raise ConsoleError("Invalid input: {}".format(args.video))
def authenticated_get(url, params=None, headers=None):
    """GET `url` with the Client-ID header attached.

    Raises ConsoleError with the API's message on HTTP 400; raises
    requests.HTTPError for other error statuses. Returns the response.
    """
    # Bug fix: the mutable default dicts were shared across calls, and the
    # caller's headers dict was mutated in place. Copy instead.
    headers = {} if headers is None else dict(headers)
    headers['Client-ID'] = CLIENT_ID

    response = requests.get(url, params, headers=headers)

    if response.status_code == 400:
        data = response.json()
        raise ConsoleError(data["message"])

    response.raise_for_status()
    return response
def authenticated_post(url, data=None, json=None, headers=None):
    """POST to `url` with the Client-ID header attached.

    Raises ConsoleError with the API's message on HTTP 400; raises
    requests.HTTPError for other error statuses. Returns the response.
    """
    # Bug fix: the mutable default dict was shared across calls, and the
    # caller's headers dict was mutated in place. Copy instead.
    headers = {} if headers is None else dict(headers)
    headers['Client-ID'] = CLIENT_ID

    response = requests.post(url, data=data, json=json, headers=headers)

    if response.status_code == 400:
        data = response.json()
        raise ConsoleError(data["message"])

    response.raise_for_status()
    return response
def _parse_video_id(video_id): """This can be either a integer ID or an URL to the video on twitch.""" if re.search(r"^\d+$", video_id): return int(video_id) match = re.search(r"^https://www.twitch.tv/videos/(\d+)(\?.+)?$", video_id) if match: return int(match.group(1)) raise ConsoleError( "Invalid video ID given, expected integer ID or Twitch URL")
def _get_game_ids(names): if not names: return [] game_ids = [] for name in names: print_out("<dim>Looking up game '{}'...</dim>".format(name)) game_id = twitch.get_game_id(name) if not game_id: raise ConsoleError("Game '{}' not found".format(name)) game_ids.append(int(game_id)) return game_ids
def authenticated_get(url, params=None, headers=None):
    """GET `url` with the Client-ID header attached.

    Raises ConsoleError with the API's message for any 4xx status; raises
    requests.HTTPError for 5xx. Returns the response.
    """
    # Bug fix: the mutable default dicts were shared across calls, and the
    # caller's headers dict was mutated in place. Copy instead.
    headers = {} if headers is None else dict(headers)
    headers['Client-ID'] = CLIENT_ID

    response = requests.get(url, params, headers=headers)

    if 400 <= response.status_code < 500:
        data = response.json()
        # TODO: this does not look nice in the console since data["message"]
        # can contain a JSON encoded object.
        raise ConsoleError(data["message"])

    response.raise_for_status()
    return response
def download(args):
    """Match the given argument against video/clip URL patterns and
    dispatch to the appropriate downloader. Raises ConsoleError when
    nothing matches."""
    dispatch = (
        (VIDEO_PATTERNS, 'id', _download_video),
        (CLIP_PATTERNS, 'slug', _download_clip),
    )

    for patterns, group, handler in dispatch:
        for pattern in patterns:
            match = re.match(pattern, args.video)
            if match:
                return handler(match.group(group), args)

    raise ConsoleError("Invalid video: {}".format(args.video))
def get_clip_authenticated_url(slug, quality):
    """Return a playable clip URL with the playback-access signature and
    token appended as query parameters."""
    print_out("<dim>Fetching access token...</dim>")
    access_token = twitch.get_clip_access_token(slug)

    if not access_token:
        raise ConsoleError("Access token not found for slug '{}'".format(slug))

    base_url = _get_clip_url(access_token, quality)

    playback = access_token["playbackAccessToken"]
    query = urlencode({
        "sig": playback["signature"],
        "token": playback["value"],
    })

    return "{}?{}".format(base_url, query)
def info(args):
    """Print info (or JSON) for a video or clip given by args.identifier.

    Raises ConsoleError when the identifier matches neither, or when the
    video/clip does not exist.
    """
    video_id = utils.parse_video_identifier(args.identifier)
    if video_id:
        print_log("Fetching video...")
        video = twitch.get_video(video_id)
        if not video:
            raise ConsoleError("Video {} not found".format(video_id))

        print_log("Fetching access token...")
        access_token = twitch.get_access_token(video_id)

        print_log("Fetching playlists...")
        playlists = twitch.get_playlists(video_id, access_token)

        # `video` is guaranteed truthy here (checked above), so the
        # previous redundant `if video:` guard was removed.
        if args.json:
            video_json(video, playlists)
        else:
            video_info(video, playlists)
        return

    clip_slug = utils.parse_clip_identifier(args.identifier)
    if clip_slug:
        print_log("Fetching clip...")
        clip = twitch.get_clip(clip_slug)
        if not clip:
            raise ConsoleError("Clip {} not found".format(clip_slug))

        if args.json:
            print_json(clip)
        else:
            clip_info(clip)
        return

    # Bug fix: this previously referenced args.video, which does not exist
    # on this command's namespace and raised AttributeError instead of the
    # intended ConsoleError.
    raise ConsoleError("Invalid input: {}".format(args.identifier))
def get_channel_videos(channel_id, limit, sort, type="archive", game_ids=None, after=None):
    """Fetch one page of a channel's videos via the GraphQL API, selecting
    the fields defined in VIDEO_FIELDS.

    `channel_id` is the channel login. Returns the `videos` connection.
    Raises ConsoleError if the channel does not exist.
    """
    # Avoid a mutable default argument; [] also renders as a valid empty
    # GraphQL list literal when no game filter is given.
    if game_ids is None:
        game_ids = []

    query = """
    {{
      user(login: "{channel_id}") {{
        videos(
          first: {limit},
          type: {type},
          sort: {sort},
          after: "{after}",
          options: {{
            gameIDs: {game_ids}
          }}
        ) {{
          totalCount
          pageInfo {{
            hasNextPage
          }}
          edges {{
            cursor
            node {{
              {fields}
            }}
          }}
        }}
      }}
    }}
    """

    # Bug fix: the login value was a literal placeholder string, so the
    # `channel_id` format key was never substituted into the query.
    query = query.format(
        channel_id=channel_id,
        game_ids=game_ids,
        after=after if after else "",
        limit=limit,
        sort=sort.upper(),
        type=type.upper(),
        fields=VIDEO_FIELDS)

    response = gql_query(query)

    if not response["data"]["user"]:
        raise ConsoleError("Channel {} not found".format(channel_id))

    return response["data"]["user"]["videos"]
def videos(channel_name, limit, offset, sort, **kwargs):
    """List a channel's videos.

    Raises ConsoleError when the user does not exist. User-facing
    messages are in French in this fork.
    """
    user = twitch.get_user(channel_name)
    if not user:
        raise ConsoleError("Utilisateur {} non trouvé.".format(channel_name))

    videos = twitch.get_channel_videos(user["id"], limit, offset, sort)

    if not videos['videos']:
        print_out("Pas de vidéos")
        return

    # Dead code removed: first/last/total were computed here but never
    # used (no paging summary is printed in this fork).
    for video in videos['videos']:
        print_video(video)
def _join_vods(playlist_path, target, overwrite):
    """Join downloaded VODs into `target` with ffmpeg.

    Raises ConsoleError if ffmpeg exits with a non-zero status.
    """
    command = [
        "ffmpeg",
        "-i", playlist_path,
        "-c", "copy",
        "-stats",
        "-loglevel", "warning",
    ]

    # Bug fix: -y was previously appended after the output file; ffmpeg
    # ignores trailing options, so the overwrite flag had no effect.
    if overwrite:
        command.append("-y")
    command.append(target)

    print_out("<dim>{}</dim>".format(" ".join(command)))
    result = subprocess.run(command)
    if result.returncode != 0:
        raise ConsoleError("Joining files failed")
def get_channel_clips(channel_id, period, limit, after=None):
    """
    List channel clips.

    At the time of writing this:
    * filtering by game name returns an error
    * sorting by anything but VIEWS_DESC or TRENDING returns an error
    * sorting by VIEWS_DESC and TRENDING returns the same results
    * there is no totalCount
    """
    query = """
    {{
      user(login: "{channel_id}") {{
        clips(first: {limit}, after: "{after}", criteria: {{ period: {period}, sort: VIEWS_DESC }}) {{
          pageInfo {{
            hasNextPage
            hasPreviousPage
          }}
          edges {{
            cursor
            node {{
              {fields}
            }}
          }}
        }}
      }}
    }}
    """

    # Bug fix: the login value was a literal placeholder string, so the
    # `channel_id` format key was never substituted into the query.
    query = query.format(
        channel_id=channel_id,
        after=after if after else "",
        limit=limit,
        period=period.upper(),
        fields=CLIP_FIELDS)

    response = gql_query(query)

    user = response["data"]["user"]
    if not user:
        raise ConsoleError("Channel {} not found".format(channel_id))

    return response["data"]["user"]["clips"]
def videos(channel_name, limit, offset, sort, **kwargs):
    """List a channel's videos with a paging summary.

    Raises ConsoleError when the user does not exist.
    """
    print_out("Looking up user...")
    user = twitch.get_user(channel_name)
    if not user:
        raise ConsoleError("User {} not found.".format(channel_name))

    print_out("Loading videos...")
    videos = twitch.get_channel_videos(user["id"], limit, offset, sort)
    items = videos['videos']

    if not items:
        print_out("No videos found")
        return

    print_out("<yellow>Showing videos {}-{} of {}</yellow>".format(
        offset + 1, offset + len(items), videos["_total"]))

    for video in items:
        _print_video(video)
def _download_clip(slug, args):
    """Download a clip by slug to a filename derived from the clip data."""
    print_out("<dim>Looking up clip...</dim>")
    clip = twitch.get_clip(slug)
    if not clip:
        raise ConsoleError("Clip '{}' not found".format(slug))

    print_out(
        "Found: <green>{}</green> by <yellow>{}</yellow>, playing <blue>{}</blue> ({})".format(
            clip["title"],
            clip["broadcaster"]["displayName"],
            clip["game"]["name"],
            utils.format_duration(clip["durationSeconds"]),
        ))

    url = _get_clip_url(clip, args)
    print_out("<dim>Selected URL: {}</dim>".format(url))

    target = _clip_target_filename(clip)

    print_out("Downloading clip...")
    download_file(url, target)
    print_out("Downloaded: {}".format(target))
def _video_target_filename(video, args):
    """Render the --output template for `video` and return the filename.

    Raises ConsoleError when the template references an unknown key.
    """
    published_date, published_time = video['publishedAt'].split("T")

    substitutions = {
        "channel": video["creator"]["displayName"],
        "channel_login": video["creator"]["login"],
        "date": published_date,
        "datetime": video["publishedAt"],
        "format": args.format,
        "game": video["game"]["name"],
        "game_slug": utils.slugify(video["game"]["name"]),
        "id": video["id"],
        "time": published_time,
        "title": utils.titlify(video["title"]),
        "title_slug": utils.slugify(video["title"]),
    }

    try:
        return args.output.format(**substitutions)
    except KeyError as ex:
        raise ConsoleError(
            "Invalid key {} used in --output. Supported keys are: {}".format(
                ex, ", ".join(substitutions.keys())))
def _download_video(video_id, args):
    """Download a video's VOD segments, join them with ffmpeg, and clean up.

    Fork-specific variant: skips the download when a marker file already
    exists under ~/.twitchdownloads/, and saves the video metadata as JSON
    via save_json_video() before downloading.
    """
    # Validate the requested time range before doing any network work.
    if args.start and args.end and args.end <= args.start:
        raise ConsoleError("End time must be greater than start time")

    # NOTE(review): presence of a marker file under ~/.twitchdownloads is
    # treated as "already downloaded" — presumably written elsewhere; verify.
    if os.path.isfile(str(Path.home()) + "/.twitchdownloads/" + video_id):
        print("File already downloaded")
        return

    print_out("<dim>Looking up video...</dim>")
    video = twitch.get_video(video_id)
    save_json_video(video_id, video, _video_target_filename(video, "json"))
    print_out("Found: <blue>{}</blue> by <yellow>{}</yellow>".format(
        video['title'], video['channel']['display_name']))

    print_out("<dim>Fetching access token...</dim>")
    access_token = twitch.get_access_token(video_id)

    print_out("<dim>Fetching playlists...</dim>")
    playlists_m3u8 = twitch.get_playlists(video_id, access_token)
    playlists = list(_parse_playlists(playlists_m3u8))
    # Use the quality from --quality if given, otherwise prompt the user.
    playlist_uri = (_get_playlist_by_name(playlists, args.quality)
                    if args.quality else _select_playlist_interactive(playlists))

    print_out("<dim>Fetching playlist...</dim>")
    response = requests.get(playlist_uri)
    response.raise_for_status()
    playlist = m3u8.loads(response.text)

    # Segment URIs in the playlist are relative to the playlist's own URL.
    base_uri = re.sub("/[^/]+$", "/", playlist_uri)
    target_dir = _crete_temp_dir(base_uri)
    vod_paths = _get_vod_paths(playlist, args.start, args.end)

    # Save playlists for debugging purposes
    with open(path.join(target_dir, "playlists.m3u8"), "w") as f:
        f.write(playlists_m3u8)
    with open(path.join(target_dir, "playlist.m3u8"), "w") as f:
        f.write(response.text)

    print_out("\nDownloading {} VODs using {} workers to {}".format(
        len(vod_paths), args.max_workers, target_dir))
    path_map = download_files(base_uri, target_dir, vod_paths, args.max_workers)

    # Make a modified playlist which references downloaded VODs
    # Keep only the downloaded segments and skip the rest
    org_segments = playlist.segments.copy()
    playlist.segments.clear()
    for segment in org_segments:
        if segment.uri in path_map:
            segment.uri = path_map[segment.uri]
            playlist.segments.append(segment)

    playlist_path = path.join(target_dir, "playlist_downloaded.m3u8")
    playlist.dump(playlist_path)

    print_out("\n\nJoining files...")
    target = _video_target_filename(video, args.format)
    _join_vods(playlist_path, target)

    if args.keep:
        print_out(
            "\n<dim>Temporary files not deleted: {}</dim>".format(target_dir))
    else:
        print_out("\n<dim>Deleting temporary files...</dim>")
        shutil.rmtree(target_dir)

    print_out("\nDownloaded: <green>{}</green>".format(target))
def _download_video(video_id, args):
    """Download a video's VOD segments and join them with ffmpeg.

    Supports --overwrite (with interactive prompt), --no-join (leave raw
    segments in the temp dir), and --keep (don't delete temp files).
    Raises ConsoleError on an invalid time range, missing video, or user
    abort at the overwrite prompt.
    """
    # Validate the requested time range before doing any network work.
    if args.start and args.end and args.end <= args.start:
        raise ConsoleError("End time must be greater than start time")

    print_out("<dim>Looking up video...</dim>")
    video = twitch.get_video(video_id)

    if not video:
        raise ConsoleError("Video {} not found".format(video_id))

    print_out("Found: <blue>{}</blue> by <yellow>{}</yellow>".format(
        video['title'], video['creator']['displayName']))

    # Resolve the output filename early so we can prompt about overwriting
    # before spending time on downloads.
    target = _video_target_filename(video, args)
    print_out("Output: <blue>{}</blue>".format(target))

    if not args.overwrite and path.exists(target):
        response = input("File exists. Overwrite? [Y/n]: ")
        if response.lower().strip() not in ["", "y"]:
            raise ConsoleError("Aborted")
        # Remember the decision so ffmpeg is invoked with overwrite enabled.
        args.overwrite = True

    print_out("<dim>Fetching access token...</dim>")
    access_token = twitch.get_access_token(video_id)

    print_out("<dim>Fetching playlists...</dim>")
    playlists_m3u8 = twitch.get_playlists(video_id, access_token)
    playlists = list(_parse_playlists(playlists_m3u8))
    # Use the quality from --quality if given, otherwise prompt the user.
    playlist_uri = (_get_playlist_by_name(playlists, args.quality)
                    if args.quality else _select_playlist_interactive(playlists))

    print_out("<dim>Fetching playlist...</dim>")
    response = requests.get(playlist_uri)
    response.raise_for_status()
    playlist = m3u8.loads(response.text)

    # Segment URIs in the playlist are relative to the playlist's own URL.
    base_uri = re.sub("/[^/]+$", "/", playlist_uri)
    target_dir = _crete_temp_dir(base_uri)
    vod_paths = _get_vod_paths(playlist, args.start, args.end)

    # Save playlists for debugging purposes
    with open(path.join(target_dir, "playlists.m3u8"), "w") as f:
        f.write(playlists_m3u8)
    with open(path.join(target_dir, "playlist.m3u8"), "w") as f:
        f.write(response.text)

    print_out("\nDownloading {} VODs using {} workers to {}".format(
        len(vod_paths), args.max_workers, target_dir))
    path_map = download_files(base_uri, target_dir, vod_paths, args.max_workers)

    # Make a modified playlist which references downloaded VODs
    # Keep only the downloaded segments and skip the rest
    org_segments = playlist.segments.copy()
    playlist.segments.clear()
    for segment in org_segments:
        if segment.uri in path_map:
            segment.uri = path_map[segment.uri]
            playlist.segments.append(segment)

    playlist_path = path.join(target_dir, "playlist_downloaded.m3u8")
    playlist.dump(playlist_path)

    if args.no_join:
        print_out("\n\n<dim>Skipping joining files...</dim>")
        print_out("VODs downloaded to:\n<blue>{}</blue>".format(target_dir))
        return

    print_out("\n\nJoining files...")
    _join_vods(playlist_path, target, args.overwrite, video)

    if args.keep:
        print_out(
            "\n<dim>Temporary files not deleted: {}</dim>".format(target_dir))
    else:
        print_out("\n<dim>Deleting temporary files...</dim>")
        shutil.rmtree(target_dir)

    print_out("\nDownloaded: <green>{}</green>".format(target))