Compare commits


42 Commits
2.3.0 ... queue

SHA1 Message Date
da51ffc31f Use a queue+workers instead of semaphore 2024-08-30 15:35:12 +02:00
8c68132ddb Trim line when printing table, simplify code 2024-08-30 13:39:41 +02:00
75423c7671 Show playlists in a table 2024-08-30 13:34:19 +02:00
7dae0e23cf Show description with some spacing
Looks nicer
2024-08-30 13:34:08 +02:00
dc99ee51bc Improve logging a bit when downloading 2024-08-30 11:58:15 +02:00
2c9420c43d Update changelog 2024-08-30 11:47:14 +02:00
4a86cb16c8 Use relative paths in generated m3u8 playlist
Since we're using relative paths for init segments, and paths are
relative to the path where the playlist is located, this seems sensible.
Allows the folder to be moved, and the playlist will still work.
2024-08-30 11:44:07 +02:00
cfefae1e69 Fix query to fetch HD qualities 2024-08-30 11:43:48 +02:00
ac7cdba28e Download init segments
These seem to occur in hd quality playlists like 1440p.
2024-08-30 11:42:54 +02:00
2feef136ca Ignore video files 2024-08-30 11:42:21 +02:00
d85aeade86 Update changelog 2024-08-30 08:25:52 +02:00
4db4285d9a Handle videoQualities being null 2024-08-30 08:06:31 +02:00
707b2da934 Improve types 2024-08-30 08:05:16 +02:00
2846925c91 Allow newer versions of m3u8 2024-08-30 08:05:03 +02:00
8f5d7f022f Reinstate "VODs" in progress output 2024-08-30 07:59:51 +02:00
ff4a514b8a Handle video URLs containing account name
fixes #162
2024-08-30 07:55:58 +02:00
f57612ffcc Refactor download_all to work when count is unknown 2024-08-30 07:37:50 +02:00
5be97b9e13 Fix tests 2024-08-28 15:21:22 +02:00
b6e7f8b36c Make Progress work when file count is not known 2024-08-28 15:18:43 +02:00
42f7a9a1a5 Fix a crash when downloading clips 2024-08-28 14:21:22 +02:00
789d3d1939 Add --target-dir option to clips command 2024-08-28 13:15:01 +02:00
07efac1ae7 Add --verbose flag for verbose logging
Don't log HTTP payloads unless --verbose flag is given. Always logging
HTTP payloads tends to make the log unreadable.
2024-08-28 12:58:36 +02:00
a808b7d8ec Don't check if file exists in download_file
This is done outside the function.
2024-08-28 12:39:56 +02:00
936c6a9da1 Don't stop downloading if one download fails 2024-08-28 12:33:05 +02:00
7184feacee Move download_file to http 2024-08-28 11:07:25 +02:00
5679e66270 Improve naming 2024-08-28 11:02:12 +02:00
1658aba124 Embrace pathlib 2024-08-28 10:59:23 +02:00
29d3f7fec2 Extract file naming 2024-08-28 10:32:40 +02:00
91e0447681 Remove unnecessary braces 2024-08-28 09:53:06 +02:00
f9e60082ba Improve downloading logic
Follow redirects to avoid saving an intermediate response, and raise an
error on invalid status to avoid saving an error response.
2024-08-28 09:52:15 +02:00
141ecb7f29 Move entities to entites.py 2024-08-24 20:23:34 +02:00
52b96aab15 Simplify persisted queries 2024-08-24 20:08:01 +02:00
1bdf6a2c02 Remove unused function 2024-08-24 20:05:32 +02:00
38636e2b21 Use ansi escape code to clear line 2024-05-31 12:57:51 +02:00
3fe1faa18e Update changelog 2024-05-19 09:11:45 +02:00
a99a472ad3 add quotes to gql query 2024-05-19 09:09:02 +02:00
47d62bc471 Improve types 2024-04-29 09:08:19 +02:00
de95384e6b Fix tests 2024-04-28 10:16:33 +02:00
aac450a5bc Calculate progress only when printing progress 2024-04-28 10:13:47 +02:00
9549679679 Make Progress not a dataclass 2024-04-28 10:09:53 +02:00
35e974bb45 Upgrade m3u8 dependency 2024-04-28 09:30:24 +02:00
b8e3809810 Print a note if no ids given 2024-04-28 08:02:01 +02:00
21 changed files with 512 additions and 343 deletions

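The headline commit above ("Use a queue+workers instead of semaphore") replaces a semaphore-bounded batch of download tasks with a fixed pool of workers consuming an asyncio.Queue, which is also what lets the total file count be unknown up front. The project's actual implementation is in twitchdl/http.py further down; the following is only a minimal, self-contained sketch of the pattern, with illustrative names:

    import asyncio
    from typing import Iterable, Tuple


    async def handle(source: str, target: str) -> None:
        # Stand-in for the real per-item work (an HTTP download in twitch-dl).
        await asyncio.sleep(0.01)
        print(f"done: {source} -> {target}")


    async def worker(queue: "asyncio.Queue[Tuple[str, str]]") -> None:
        # Workers loop forever; they are cancelled once the queue is drained.
        while True:
            source, target = await queue.get()
            await handle(source, target)
            queue.task_done()


    async def run_all(items: Iterable[Tuple[str, str]], worker_count: int) -> None:
        queue: "asyncio.Queue[Tuple[str, str]]" = asyncio.Queue()
        workers = [asyncio.create_task(worker(queue)) for _ in range(worker_count)]
        for item in items:
            await queue.put(item)
        await queue.join()  # wait until every queued item has been processed
        for task in workers:
            task.cancel()  # workers never exit on their own
        await asyncio.gather(*workers, return_exceptions=True)


    if __name__ == "__main__":
        pairs = ((f"https://example.com/{i:05d}.ts", f"{i:05d}.ts") for i in range(10))
        asyncio.run(run_all(pairs, worker_count=3))
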
.gitignore vendored

@@ -15,3 +15,5 @@ tmp/
/*.pyz
/pyrightconfig.json
/book
*.mp4
*.mkv

CHANGELOG.md

@@ -3,6 +3,22 @@ twitch-dl changelog
<!-- Do not edit. This file is automatically generated from changelog.yaml.-->
### [2.5.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.5.0)
* Add support for HD video qualities (#163)
### [2.4.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.4.0)
* Add `clips --target-dir` option. Use in conjunction with `--download` to
specify target directory.
* Fix a crash when downloading clips (#160)
* Handle video URLs which contain the channel name (#162)
* Don't stop downloading clips if one download fails
### [2.3.1 (2024-05-19)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.1)
* Fix fetching access token (#155, thanks @KryptonicDragon)
### [2.3.0 (2024-04-27)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.0)
* Show more playlist data when choosing quality

changelog.yaml

@@ -1,3 +1,21 @@
2.5.0:
date: 2024-08-30
changes:
- "Add support for HD video qualities (#163)"
2.4.0:
date: 2024-08-30
changes:
- "Add `clips --target-dir` option. Use in conjunction with `--download` to specify target directory."
- "Fix a crash when downloading clips (#160)"
- "Handle video URLs which contain the channel name (#162)"
- "Don't stop downloading clips if one download fails"
2.3.1:
date: 2024-05-19
changes:
- "Fix fetching access token (#155, thanks @KryptonicDragon)"
2.3.0:
date: 2024-04-27
changes:

docs/changelog.md

@@ -3,6 +3,22 @@ twitch-dl changelog
<!-- Do not edit. This file is automatically generated from changelog.yaml.-->
### [2.5.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.5.0)
* Add support for HD video qualities (#163)
### [2.4.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.4.0)
* Add `clips --target-dir` option. Use in conjunction with `--download` to
specify target directory.
* Fix a crash when downloading clips (#160)
* Handle video URLs which contain the channel name (#162)
* Don't stop downloading clips if one download fails
### [2.3.1 (2024-05-19)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.1)
* Fix fetching access token (#155, thanks @KryptonicDragon)
### [2.3.0 (2024-04-27)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.0)
* Show more playlist data when choosing quality

docs/commands/clips.md

@@ -43,6 +43,11 @@ twitch-dl clips [OPTIONS] CHANNEL_NAME
<td>Period from which to return clips. Possible values: <code>last_day</code>, <code>last_week</code>, <code>last_month</code>, <code>all_time</code>. [default: <code>all_time</code>]</td>
</tr>
<tr>
<td class="code">-t, --target-dir</td>
<td>Target directory when downloading clips [default: <code>.</code>]</td>
</tr>
<tr>
<td class="code">--json</td>
<td>Print data as JSON rather than human readable text</td>

pyproject.toml

@@ -22,7 +22,7 @@ classifiers = [
dependencies = [
"click>=8.0.0,<9.0.0",
"httpx>=0.17.0,<1.0.0",
"m3u8>=1.0.0,<4.0.0",
"m3u8>=3.0.0,<7.0.0",
]
[tool.setuptools]

tests/test_utils.py

@@ -7,6 +7,7 @@ TEST_VIDEO_PATTERNS = [
("702689313", "https://twitch.tv/videos/702689313"),
("702689313", "https://www.twitch.tv/videos/702689313"),
("702689313", "https://m.twitch.tv/videos/702689313"),
("2223719525", "https://www.twitch.tv/r0dn3y/video/2223719525"),
]
TEST_CLIP_PATTERNS = {

tests/test_progress.py

@@ -8,8 +8,8 @@ def test_initial_values():
assert progress.progress_perc == 0
assert progress.remaining_time is None
assert progress.speed is None
assert progress.vod_count == 10
assert progress.vod_downloaded_count == 0
assert progress.file_count == 10
assert progress.downloaded_count == 0
def test_downloaded():
@@ -23,26 +23,31 @@ def test_downloaded():
assert progress.progress_perc == 0
progress.advance(1, 100)
progress._recalculate()
assert progress.downloaded == 100
assert progress.progress_bytes == 100
assert progress.progress_perc == 11
progress.advance(2, 200)
progress._recalculate()
assert progress.downloaded == 300
assert progress.progress_bytes == 300
assert progress.progress_perc == 33
progress.advance(3, 150)
progress._recalculate()
assert progress.downloaded == 450
assert progress.progress_bytes == 450
assert progress.progress_perc == 50
progress.advance(1, 50)
progress._recalculate()
assert progress.downloaded == 500
assert progress.progress_bytes == 500
assert progress.progress_perc == 55
progress.abort(2)
progress._recalculate()
assert progress.downloaded == 500
assert progress.progress_bytes == 300
assert progress.progress_perc == 33
@@ -52,6 +57,7 @@ def test_downloaded():
progress.advance(1, 150)
progress.advance(2, 300)
progress.advance(3, 150)
progress._recalculate()
assert progress.downloaded == 1100
assert progress.progress_bytes == 900
@@ -71,12 +77,15 @@ def test_estimated_total():
assert progress.estimated_total is None
progress.start(1, 12000)
progress._recalculate()
assert progress.estimated_total == 12000 * 3
progress.start(2, 11000)
progress._recalculate()
assert progress.estimated_total == 11500 * 3
progress.start(3, 10000)
progress._recalculate()
assert progress.estimated_total == 11000 * 3
@@ -87,16 +96,16 @@ def test_vod_downloaded_count():
progress.start(2, 100)
progress.start(3, 100)
assert progress.vod_downloaded_count == 0
assert progress.downloaded_count == 0
progress.advance(1, 100)
progress.end(1)
assert progress.vod_downloaded_count == 1
assert progress.downloaded_count == 1
progress.advance(2, 100)
progress.end(2)
assert progress.vod_downloaded_count == 2
assert progress.downloaded_count == 2
progress.advance(3, 100)
progress.end(3)
assert progress.vod_downloaded_count == 3
assert progress.downloaded_count == 3

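The expected values in test_estimated_total follow from the new _recalculate logic (see twitchdl/progress.py below): the estimate is the mean size of the files started so far multiplied by file_count, which the assertions imply is 3 in that test. A quick check of the figures above:

    from statistics import mean

    file_count = 3  # implied by the test's expected totals
    print(mean([12000]) * file_count)                # 36000 (= 12000 * 3)
    print(mean([12000, 11000]) * file_count)         # 34500 (= 11500 * 3)
    print(mean([12000, 11000, 10000]) * file_count)  # 33000 (= 11000 * 3)
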
twitchdl/cli.py

@@ -2,12 +2,14 @@ import logging
import platform
import re
import sys
from pathlib import Path
from typing import Optional, Tuple
import click
from twitchdl import __version__
from twitchdl.entities import DownloadOptions
from twitchdl.naming import DEFAULT_OUTPUT_TEMPLATE
from twitchdl.twitch import ClipsPeriod, VideosSort, VideosType
# Tweak the Click context
@@ -79,11 +81,12 @@ def validate_rate(_ctx: click.Context, _param: click.Parameter, value: str) -> O
@click.group(context_settings=CONTEXT)
@click.option("--debug/--no-debug", default=False, help="Log debug info to stderr")
@click.option("--debug/--no-debug", default=False, help="Enable debug logging to stderr")
@click.option("--verbose/--no-verbose", default=False, help="More verbose debug logging")
@click.option("--color/--no-color", default=sys.stdout.isatty(), help="Use ANSI color in output")
@click.version_option(package_name="twitch-dl")
@click.pass_context
def cli(ctx: click.Context, color: bool, debug: bool):
def cli(ctx: click.Context, color: bool, debug: bool, verbose: bool):
"""twitch-dl - twitch.tv downloader
https://twitch-dl.bezdomni.net/
@@ -91,7 +94,7 @@ def cli(ctx: click.Context, color: bool, debug: bool):
ctx.color = color
if debug:
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
logging.getLogger("httpx").setLevel(logging.WARN)
logging.getLogger("httpcore").setLevel(logging.WARN)
@@ -139,6 +142,18 @@ def cli(ctx: click.Context, color: bool, debug: bool):
default="all_time",
type=click.Choice(["last_day", "last_week", "last_month", "all_time"]),
)
@click.option(
"-t",
"--target-dir",
help="Target directory when downloading clips",
type=click.Path(
file_okay=False,
readable=False,
writable=True,
path_type=Path,
),
default=Path(),
)
@json_option
def clips(
channel_name: str,
@@ -149,10 +164,14 @@
limit: Optional[int],
pager: Optional[int],
period: ClipsPeriod,
target_dir: Path,
):
"""List or download clips for given CHANNEL_NAME."""
from twitchdl.commands.clips import clips
if not target_dir.exists():
target_dir.mkdir(parents=True, exist_ok=True)
clips(
channel_name,
all=all,
@@ -162,6 +181,7 @@ def clips(
limit=limit,
pager=pager,
period=period,
target_dir=target_dir,
)
@@ -229,7 +249,7 @@
"-o",
"--output",
help="Output file name template. See docs for details.",
default="{date}_{id}_{channel_login}_{title_slug}.{format}",
default=DEFAULT_OUTPUT_TEMPLATE,
)
@click.option(
"-q",

twitchdl/commands/clips.py

@@ -1,13 +1,15 @@
import re
import sys
from os import path
from typing import Callable, Generator, Optional
from pathlib import Path
from typing import Callable, Generator, List, Optional
import click
from twitchdl import twitch, utils
from twitchdl.commands.download import get_clip_authenticated_url
from twitchdl.download import download_file
from twitchdl.entities import VideoQuality
from twitchdl.http import download_file
from twitchdl.output import green, print_clip, print_clip_compact, print_json, print_paged, yellow
from twitchdl.twitch import Clip, ClipsPeriod
@@ -22,6 +24,7 @@ def clips(
limit: Optional[int] = None,
pager: Optional[int] = None,
period: ClipsPeriod = "all_time",
target_dir: Path = Path(),
):
# Set different defaults for limit for compact display
default_limit = 40 if compact else 10
@@ -35,7 +38,7 @@
return print_json(list(generator))
if download:
return _download_clips(generator)
return _download_clips(target_dir, generator)
print_fn = print_clip_compact if compact else print_clip
@@ -45,8 +48,8 @@
return _print_all(generator, print_fn, all)
def _target_filename(clip: Clip):
url = clip["videoQualities"][0]["sourceURL"]
def _target_filename(clip: Clip, video_qualities: List[VideoQuality]):
url = video_qualities[0]["sourceURL"]
_, ext = path.splitext(url)
ext = ext.lstrip(".")
@@ -67,16 +70,27 @@ def _target_filename(clip: Clip):
return f"{name}.{ext}"
def _download_clips(generator: Generator[Clip, None, None]):
for clip in generator:
target = _target_filename(clip)
def _download_clips(target_dir: Path, generator: Generator[Clip, None, None]):
if not target_dir.exists():
target_dir.mkdir(parents=True, exist_ok=True)
if path.exists(target):
for clip in generator:
# videoQualities can be null in some circumstances, see:
# https://github.com/ihabunek/twitch-dl/issues/160
if not clip["videoQualities"]:
continue
target = target_dir / _target_filename(clip, clip["videoQualities"])
if target.exists():
click.echo(f"Already downloaded: {green(target)}")
else:
url = get_clip_authenticated_url(clip["slug"], "source")
click.echo(f"Downloading: {yellow(target)}")
download_file(url, target)
try:
url = get_clip_authenticated_url(clip["slug"], "source")
click.echo(f"Downloading: {yellow(target)}")
download_file(url, target)
except Exception as ex:
click.secho(ex, err=True, fg="red")
def _print_all(

twitchdl/commands/download.py

@@ -1,35 +1,39 @@
import asyncio
import os
import platform
import re
import shlex
import shutil
import subprocess
import tempfile
from os import path
from pathlib import Path
from typing import Dict, List
from typing import List, Optional
from urllib.parse import urlencode, urlparse
import click
import httpx
from twitchdl import twitch, utils
from twitchdl.download import download_file
from twitchdl.entities import DownloadOptions
from twitchdl.exceptions import ConsoleError
from twitchdl.http import download_all
from twitchdl.http import download_all, download_file
from twitchdl.naming import clip_filename, video_filename
from twitchdl.output import blue, bold, green, print_log, yellow
from twitchdl.playlists import (
enumerate_vods,
get_init_sections,
load_m3u8,
make_join_playlist,
parse_playlists,
select_playlist,
)
from twitchdl.twitch import Chapter, Clip, ClipAccessToken, Video
from twitchdl.twitch import Chapter, ClipAccessToken, Video
def download(ids: List[str], args: DownloadOptions):
if not ids:
print_log("No IDs to downlad given")
return
for video_id in ids:
download_one(video_id, args)
@@ -46,14 +50,14 @@ def download_one(video: str, args: DownloadOptions):
raise ConsoleError(f"Invalid input: {video}")
def _join_vods(playlist_path: str, target: str, overwrite: bool, video: Video):
def _join_vods(playlist_path: Path, target: Path, overwrite: bool, video: Video):
description = video["description"] or ""
description = description.strip()
command = [
command: List[str] = [
"ffmpeg",
"-i",
playlist_path,
str(playlist_path),
"-c",
"copy",
"-metadata",
@@ -73,15 +77,15 @@ def _join_vods(playlist_path: str, target: str, overwrite: bool, video: Video):
if overwrite:
command.append("-y")
click.secho(f"{' '.join(command)}", dim=True)
click.secho(f"{shlex.join(command)}", dim=True)
result = subprocess.run(command)
if result.returncode != 0:
raise ConsoleError("Joining files failed")
def _concat_vods(vod_paths: List[str], target: str):
def _concat_vods(vod_paths: List[Path], target: Path):
tool = "type" if platform.system() == "Windows" else "cat"
command = [tool] + vod_paths
command = [tool] + [str(p) for p in vod_paths]
with open(target, "wb") as target_file:
result = subprocess.run(command, stdout=target_file)
@@ -89,74 +93,15 @@ def _concat_vods(vod_paths: List[str], target: str):
raise ConsoleError(f"Joining files failed: {result.stderr}")
def get_video_placeholders(video: Video, format: str) -> Dict[str, str]:
date, time = video["publishedAt"].split("T")
game = video["game"]["name"] if video["game"] else "Unknown"
return {
"channel": video["creator"]["displayName"],
"channel_login": video["creator"]["login"],
"date": date,
"datetime": video["publishedAt"],
"format": format,
"game": game,
"game_slug": utils.slugify(game),
"id": video["id"],
"time": time,
"title": utils.titlify(video["title"]),
"title_slug": utils.slugify(video["title"]),
}
def _video_target_filename(video: Video, args: DownloadOptions):
subs = get_video_placeholders(video, args.format)
try:
return args.output.format(**subs)
except KeyError as e:
supported = ", ".join(subs.keys())
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
def _clip_target_filename(clip: Clip, args: DownloadOptions):
date, time = clip["createdAt"].split("T")
game = clip["game"]["name"] if clip["game"] else "Unknown"
url = clip["videoQualities"][0]["sourceURL"]
_, ext = path.splitext(url)
ext = ext.lstrip(".")
subs = {
"channel": clip["broadcaster"]["displayName"],
"channel_login": clip["broadcaster"]["login"],
"date": date,
"datetime": clip["createdAt"],
"format": ext,
"game": game,
"game_slug": utils.slugify(game),
"id": clip["id"],
"slug": clip["slug"],
"time": time,
"title": utils.titlify(clip["title"]),
"title_slug": utils.slugify(clip["title"]),
}
try:
return args.output.format(**subs)
except KeyError as e:
supported = ", ".join(subs.keys())
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
def _crete_temp_dir(base_uri: str) -> str:
def _crete_temp_dir(base_uri: str) -> Path:
"""Create a temp dir to store downloads if it doesn't exist."""
path = urlparse(base_uri).path.lstrip("/")
temp_dir = Path(tempfile.gettempdir(), "twitch-dl", path)
temp_dir.mkdir(parents=True, exist_ok=True)
return str(temp_dir)
return temp_dir
def _get_clip_url(access_token: ClipAccessToken, quality: str) -> str:
def _get_clip_url(access_token: ClipAccessToken, quality: Optional[str]) -> str:
qualities = access_token["videoQualities"]
# Quality given as an argument
@@ -184,7 +129,7 @@ def _get_clip_url(access_token: ClipAccessToken, quality: str) -> str:
return selected_quality["sourceURL"]
def get_clip_authenticated_url(slug: str, quality: str):
def get_clip_authenticated_url(slug: str, quality: Optional[str]):
print_log("Fetching access token...")
access_token = twitch.get_clip_access_token(slug)
@@ -216,10 +161,10 @@ def _download_clip(slug: str, args: DownloadOptions) -> None:
duration = utils.format_duration(clip["durationSeconds"])
click.echo(f"Found: {green(title)} by {yellow(user)}, playing {blue(game)} ({duration})")
target = _clip_target_filename(clip, args)
target = Path(clip_filename(clip, args.output))
click.echo(f"Target: {blue(target)}")
if not args.overwrite and path.exists(target):
if not args.overwrite and target.exists():
response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False)
if response.lower().strip() != "y":
raise click.Abort()
@@ -248,10 +193,10 @@ def _download_video(video_id: str, args: DownloadOptions) -> None:
click.echo(f"Found: {blue(video['title'])} by {yellow(video['creator']['displayName'])}")
target = _video_target_filename(video, args)
target = Path(video_filename(video, args.format, args.output))
click.echo(f"Output: {blue(target)}")
if not args.overwrite and path.exists(target):
if not args.overwrite and target.exists():
response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False)
if response.lower().strip() != "y":
raise click.Abort()
@@ -281,19 +226,32 @@ def _download_video(video_id: str, args: DownloadOptions) -> None:
target_dir = _crete_temp_dir(base_uri)
# Save playlists for debugging purposes
with open(path.join(target_dir, "playlists.m3u8"), "w") as f:
with open(target_dir / "playlists.m3u8", "w") as f:
f.write(playlists_text)
with open(path.join(target_dir, "playlist.m3u8"), "w") as f:
with open(target_dir / "playlist.m3u8", "w") as f:
f.write(vods_text)
click.echo(f"\nDownloading {len(vods)} VODs using {args.max_workers} workers to {target_dir}")
init_sections = get_init_sections(vods_m3u8)
for uri in init_sections:
print_log(f"Downloading init section {uri}...")
download_file(f"{base_uri}{uri}", target_dir / uri)
print_log(f"Downloading {len(vods)} VODs using {args.max_workers} workers to {target_dir}")
sources = [base_uri + vod.path for vod in vods]
targets = [os.path.join(target_dir, f"{vod.index:05d}.ts") for vod in vods]
asyncio.run(download_all(sources, targets, args.max_workers, rate_limit=args.rate_limit))
targets = [target_dir / f"{vod.index:05d}.ts" for vod in vods]
asyncio.run(
download_all(
zip(sources, targets),
args.max_workers,
rate_limit=args.rate_limit,
count=len(vods),
)
)
join_playlist = make_join_playlist(vods_m3u8, vods, targets)
join_playlist_path = path.join(target_dir, "playlist_downloaded.m3u8")
join_playlist_path = target_dir / "playlist_downloaded.m3u8"
join_playlist.dump(join_playlist_path) # type: ignore
click.echo()
@@ -312,12 +270,12 @@ def _download_video(video_id: str, args: DownloadOptions) -> None:
click.echo()
if args.keep:
click.echo(f"Temporary files not deleted: {target_dir}")
click.echo(f"Temporary files not deleted: {yellow(target_dir)}")
else:
print_log("Deleting temporary files...")
shutil.rmtree(target_dir)
click.echo(f"\nDownloaded: {green(target)}")
click.echo(f"Downloaded: {green(target)}")
def http_get(url: str) -> str:

twitchdl/commands/info.py

@@ -4,9 +4,9 @@ import click
import m3u8
from twitchdl import twitch, utils
from twitchdl.commands.download import get_video_placeholders
from twitchdl.exceptions import ConsoleError
from twitchdl.output import bold, print_clip, print_json, print_log, print_table, print_video
from twitchdl.naming import video_placeholders
from twitchdl.output import bold, dim, print_clip, print_json, print_log, print_table, print_video
from twitchdl.playlists import parse_playlists
from twitchdl.twitch import Chapter, Clip, Video
@@ -55,9 +55,19 @@ def video_info(video: Video, playlists: str, chapters: List[Chapter]):
click.echo()
print_video(video)
click.echo("Playlists:")
for p in parse_playlists(playlists):
click.echo(f"{bold(p.name)} {p.url}")
click.echo("Playlists:\n")
playlist_headers = ["Name", "Group", "Resolution", "URL"]
playlist_data = [
[
f"{p.name} {dim('source')}" if p.is_source else p.name,
p.group_id,
f"{p.resolution}",
p.url,
]
for p in parse_playlists(playlists)
]
print_table(playlist_headers, playlist_data)
if chapters:
click.echo()
@@ -67,7 +77,7 @@ def video_info(video: Video, playlists: str, chapters: List[Chapter]):
duration = utils.format_time(chapter["durationMilliseconds"] // 1000)
click.echo(f'{start} {bold(chapter["description"])} ({duration})')
placeholders = get_video_placeholders(video, format="mkv")
placeholders = video_placeholders(video, format="mkv")
placeholders = [[f"{{{k}}}", v] for k, v in placeholders.items()]
click.echo("")
print_table(["Placeholder", "Value"], placeholders)
@@ -98,5 +108,8 @@ def clip_info(clip: Clip):
click.echo()
click.echo("Download links:")
for q in clip["videoQualities"]:
click.echo(f"{bold(q['quality'])} [{q['frameRate']} fps] {q['sourceURL']}")
if clip["videoQualities"]:
for q in clip["videoQualities"]:
click.echo(f"{bold(q['quality'])} [{q['frameRate']} fps] {q['sourceURL']}")
else:
click.echo("No download URLs found")

twitchdl/download.py

@@ -1,37 +0,0 @@
import os
import httpx
from twitchdl.exceptions import ConsoleError
CHUNK_SIZE = 1024
CONNECT_TIMEOUT = 5
RETRY_COUNT = 5
def _download(url: str, path: str):
tmp_path = path + ".tmp"
size = 0
with httpx.stream("GET", url, timeout=CONNECT_TIMEOUT) as response:
with open(tmp_path, "wb") as target:
for chunk in response.iter_bytes(chunk_size=CHUNK_SIZE):
target.write(chunk)
size += len(chunk)
os.rename(tmp_path, path)
return size
def download_file(url: str, path: str, retries: int = RETRY_COUNT):
if os.path.exists(path):
from_disk = True
return (os.path.getsize(path), from_disk)
from_disk = False
for _ in range(retries):
try:
return (_download(url, path), from_disk)
except httpx.RequestError:
pass
raise ConsoleError(f"Failed downloading after {retries} attempts: {url}")

twitchdl/entities.py

@@ -1,5 +1,5 @@
from dataclasses import dataclass
from typing import Any, Mapping, Optional
from typing import Any, List, Literal, Mapping, Optional, TypedDict
@dataclass
@@ -20,6 +20,73 @@ class DownloadOptions:
max_workers: int
ClipsPeriod = Literal["last_day", "last_week", "last_month", "all_time"]
VideosSort = Literal["views", "time"]
VideosType = Literal["archive", "highlight", "upload"]
class AccessToken(TypedDict):
signature: str
value: str
class User(TypedDict):
login: str
displayName: str
class Game(TypedDict):
id: str
name: str
class VideoQuality(TypedDict):
frameRate: str
quality: str
sourceURL: str
class ClipAccessToken(TypedDict):
id: str
playbackAccessToken: AccessToken
videoQualities: List[VideoQuality]
class Clip(TypedDict):
id: str
slug: str
title: str
createdAt: str
viewCount: int
durationSeconds: int
url: str
videoQualities: Optional[List[VideoQuality]]
game: Game
broadcaster: User
class Video(TypedDict):
id: str
title: str
description: str
publishedAt: str
broadcastType: str
lengthSeconds: int
game: Game
creator: User
class Chapter(TypedDict):
id: str
durationMilliseconds: int
positionMilliseconds: int
type: str
description: str
subDescription: str
thumbnailURL: str
game: Game
# Type for annotating decoded JSON
# TODO: make data classes for common structs
Data = Mapping[str, Any]

twitchdl/http.py

@@ -3,10 +3,12 @@ import logging
import os
import time
from abc import ABC, abstractmethod
from typing import List, Optional
from pathlib import Path
from typing import Iterable, NamedTuple, Optional, Tuple
import httpx
from twitchdl.exceptions import ConsoleError
from twitchdl.progress import Progress
logger = logging.getLogger(__name__)
@@ -71,7 +73,7 @@ async def download(
client: httpx.AsyncClient,
task_id: int,
source: str,
target: str,
target: Path,
progress: Progress,
token_bucket: TokenBucket,
):
@@ -93,52 +95,108 @@
async def download_with_retries(
client: httpx.AsyncClient,
semaphore: asyncio.Semaphore,
task_id: int,
source: str,
target: str,
target: Path,
progress: Progress,
token_bucket: TokenBucket,
):
async with semaphore:
if os.path.exists(target):
size = os.path.getsize(target)
progress.already_downloaded(task_id, size)
return
if target.exists():
size = os.path.getsize(target)
progress.already_downloaded(task_id, size)
return
for n in range(RETRY_COUNT):
try:
return await download(client, task_id, source, target, progress, token_bucket)
except httpx.RequestError:
logger.exception("Task {task_id} failed. Retrying. Maybe.")
progress.abort(task_id)
if n + 1 >= RETRY_COUNT:
raise
for n in range(RETRY_COUNT):
try:
return await download(client, task_id, source, target, progress, token_bucket)
except httpx.RequestError:
logger.exception("Task {task_id} failed. Retrying. Maybe.")
progress.abort(task_id)
if n + 1 >= RETRY_COUNT:
raise
raise Exception("Should not happen")
raise Exception("Should not happen")
class QueueItem(NamedTuple):
task_id: int
url: str
target: Path
async def download_worker(
queue: asyncio.Queue[QueueItem],
client: httpx.AsyncClient,
progress: Progress,
token_bucket: TokenBucket,
):
while True:
item = await queue.get()
await download_with_retries(
client,
item.task_id,
item.url,
item.target,
progress,
token_bucket,
)
queue.task_done()
async def download_all(
sources: List[str],
targets: List[str],
workers: int,
source_targets: Iterable[Tuple[str, Path]],
worker_count: int,
*,
count: Optional[int] = None,
rate_limit: Optional[int] = None,
):
progress = Progress(len(sources))
progress = Progress(count)
token_bucket = LimitingTokenBucket(rate_limit) if rate_limit else EndlessTokenBucket()
queue: asyncio.Queue[QueueItem] = asyncio.Queue()
async with httpx.AsyncClient(timeout=TIMEOUT) as client:
semaphore = asyncio.Semaphore(workers)
tasks = [
download_with_retries(
client,
semaphore,
task_id,
source,
target,
progress,
token_bucket,
)
for task_id, (source, target) in enumerate(zip(sources, targets))
asyncio.create_task(download_worker(queue, client, progress, token_bucket))
for _ in range(worker_count)
]
await asyncio.gather(*tasks)
for index, (source, target) in enumerate(source_targets):
await queue.put(QueueItem(index, source, target))
# Wait for queue to deplete
await queue.join()
# Cancel tasks and wait until they are cancelled
for task in tasks:
task.cancel()
await asyncio.gather(*tasks, return_exceptions=True)
def download_file(url: str, target: Path, retries: int = RETRY_COUNT) -> None:
"""Download URL to given target path with retries"""
error_message = ""
for r in range(retries):
try:
retry_info = f" (retry {r})" if r > 0 else ""
logger.info(f"Downloading {url} to {target}{retry_info}")
return _do_download_file(url, target)
except httpx.HTTPStatusError as ex:
logger.error(ex)
error_message = f"Server responded with HTTP {ex.response.status_code}"
except httpx.RequestError as ex:
logger.error(ex)
error_message = str(ex)
raise ConsoleError(f"Failed downloading after {retries} attempts: {error_message}")
def _do_download_file(url: str, target: Path) -> None:
tmp_path = Path(str(target) + ".tmp")
with httpx.stream("GET", url, timeout=TIMEOUT, follow_redirects=True) as response:
response.raise_for_status()
with open(tmp_path, "wb") as f:
for chunk in response.iter_bytes(chunk_size=CHUNK_SIZE):
f.write(chunk)
os.rename(tmp_path, target)

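The reworked download_all now takes any iterable of (source, target) pairs plus a worker count, with the total passed separately via count (or omitted when it is unknown, e.g. when the pairs come from a generator). A minimal calling sketch, with made-up URLs and paths standing in for the real VOD segment list:

    import asyncio
    from pathlib import Path

    from twitchdl.http import download_all  # new signature per the diff above

    # Illustrative inputs only; in twitch-dl the sources come from the VOD playlist.
    target_dir = Path("/tmp/twitch-dl-example")
    target_dir.mkdir(parents=True, exist_ok=True)
    sources = [f"https://example.com/vod/{i:05d}.ts" for i in range(4)]
    targets = [target_dir / f"{i:05d}.ts" for i in range(4)]

    asyncio.run(
        download_all(
            zip(sources, targets),
            worker_count=2,
            count=len(sources),  # pass None (or omit) when the total is not known
            rate_limit=None,
        )
    )
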
twitchdl/naming.py Normal file

@@ -0,0 +1,72 @@
import os
from typing import Dict
from twitchdl import utils
from twitchdl.entities import Clip, Video
from twitchdl.exceptions import ConsoleError
DEFAULT_OUTPUT_TEMPLATE = "{date}_{id}_{channel_login}_{title_slug}.{format}"
def video_filename(video: Video, format: str, output: str) -> str:
subs = video_placeholders(video, format)
return _format(output, subs)
def video_placeholders(video: Video, format: str) -> Dict[str, str]:
date, time = video["publishedAt"].split("T")
game = video["game"]["name"] if video["game"] else "Unknown"
return {
"channel": video["creator"]["displayName"],
"channel_login": video["creator"]["login"],
"date": date,
"datetime": video["publishedAt"],
"format": format,
"game": game,
"game_slug": utils.slugify(game),
"id": video["id"],
"time": time,
"title": utils.titlify(video["title"]),
"title_slug": utils.slugify(video["title"]),
}
def clip_filename(clip: Clip, output: str):
subs = clip_placeholders(clip)
return _format(output, subs)
def clip_placeholders(clip: Clip) -> Dict[str, str]:
date, time = clip["createdAt"].split("T")
game = clip["game"]["name"] if clip["game"] else "Unknown"
if clip["videoQualities"]:
url = clip["videoQualities"][0]["sourceURL"]
_, ext = os.path.splitext(url)
ext = ext.lstrip(".")
else:
ext = "mp4"
return {
"channel": clip["broadcaster"]["displayName"],
"channel_login": clip["broadcaster"]["login"],
"date": date,
"datetime": clip["createdAt"],
"format": ext,
"game": game,
"game_slug": utils.slugify(game),
"id": clip["id"],
"slug": clip["slug"],
"time": time,
"title": utils.titlify(clip["title"]),
"title_slug": utils.slugify(clip["title"]),
}
def _format(output: str, subs: Dict[str, str]) -> str:
try:
return output.format(**subs)
except KeyError as e:
supported = ", ".join(subs.keys())
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")

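Since the naming helpers are now free of CLI concerns, they can be exercised on their own. A small sketch with an entirely fabricated Video dict (field names taken from the entities.py diff above), showing how the default template expands:

    from twitchdl.naming import DEFAULT_OUTPUT_TEMPLATE, video_filename

    # Fabricated example data, shaped like the Video TypedDict in twitchdl/entities.py.
    video = {
        "id": "702689313",
        "title": "Some Stream Title!",
        "description": "",
        "publishedAt": "2024-08-30T11:44:07Z",
        "broadcastType": "ARCHIVE",
        "lengthSeconds": 3600,
        "game": {"id": "1", "name": "Just Chatting"},
        "creator": {"login": "somechannel", "displayName": "SomeChannel"},
    }

    print(video_filename(video, "mkv", DEFAULT_OUTPUT_TEMPLATE))
    # -> something like 2024-08-30_702689313_somechannel_some_stream_title.mkv
    # (the exact title_slug depends on utils.slugify)
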
twitchdl/output.py

@@ -1,15 +1,21 @@
import json
import sys
from itertools import islice
from typing import Any, Callable, Generator, List, Optional, TypeVar
import click
from twitchdl import utils
from twitchdl.twitch import Clip, Video
from twitchdl.entities import Clip, Video
T = TypeVar("T")
def clear_line():
sys.stdout.write("\033[1K")
sys.stdout.write("\r")
def truncate(string: str, length: int) -> str:
if len(string) > length:
return string[: length - 1] + "…"
@@ -40,11 +46,8 @@ def print_table(headers: List[str], data: List[List[str]]):
underlines = ["-" * width for width in widths]
def print_row(row: List[str]):
for idx, cell in enumerate(row):
width = widths[idx]
click.echo(ljust(cell, width), nl=False)
click.echo(" ", nl=False)
click.echo()
parts = (ljust(cell, widths[idx]) for idx, cell in enumerate(row))
click.echo(" ".join(parts).strip())
print_row(headers)
print_row(underlines)
@@ -102,11 +105,12 @@ def print_video(video: Video):
if channel or playing:
click.echo(" ".join([channel, playing]))
if video["description"]:
click.echo(f"Description: {video['description']}")
click.echo(f"Published {blue(published_at)} Length: {blue(length)} ")
click.secho(url, italic=True)
if video["description"]:
click.echo(f"\nDescription:\n{video['description']}")
click.echo()

twitchdl/playlists.py

@@ -3,7 +3,8 @@ Parse and manipulate m3u8 playlists.
"""
from dataclasses import dataclass
from typing import Generator, List, Optional, OrderedDict
from pathlib import Path
from typing import Generator, List, Optional, OrderedDict, Set
import click
import m3u8
@@ -81,7 +82,7 @@ def enumerate_vods(
def make_join_playlist(
playlist: m3u8.M3U8,
vods: List[Vod],
targets: List[str],
targets: List[Path],
) -> m3u8.Playlist:
"""
Make a modified playlist which references downloaded VODs
@@ -93,7 +94,7 @@
playlist.segments.clear()
for segment in org_segments:
if segment.uri in path_map:
segment.uri = path_map[segment.uri]
segment.uri = str(path_map[segment.uri].name)
playlist.segments.append(segment)
return playlist
@@ -168,3 +169,12 @@ def _playlist_key(playlist: Playlist) -> int:
pass
return MAX
def get_init_sections(playlist: m3u8.M3U8) -> Set[str]:
# TODO: we're ignoring init_section.base_uri and bytes
return set(
segment.init_section.uri
for segment in playlist.segments
if segment.init_section is not None
)

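get_init_sections collects the distinct init-section (EXT-X-MAP) URIs that segments reference, so the downloader can fetch them alongside the .ts parts; these show up in HD renditions such as 1440p. A small sketch of the same set-building on a hand-written playlist, using the m3u8 library (the playlist text here is illustrative only):

    import m3u8

    # A tiny hand-written media playlist with an init section, for illustration.
    PLAYLIST = """#EXTM3U
    #EXT-X-VERSION:7
    #EXT-X-TARGETDURATION:10
    #EXT-X-MAP:URI="init-0.mp4"
    #EXTINF:10.0,
    00000.ts
    #EXTINF:10.0,
    00001.ts
    #EXT-X-ENDLIST
    """

    playlist = m3u8.loads(PLAYLIST)
    init_uris = {
        segment.init_section.uri
        for segment in playlist.segments
        if segment.init_section is not None
    }
    print(init_uris)  # {'init-0.mp4'}
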
twitchdl/progress.py

@@ -1,13 +1,13 @@
import logging
import time
from collections import deque
from dataclasses import dataclass, field
from dataclasses import dataclass
from statistics import mean
from typing import Deque, Dict, NamedTuple, Optional
import click
from twitchdl.output import blue
from twitchdl.output import blue, clear_line
from twitchdl.utils import format_size, format_time
logger = logging.getLogger(__name__)
@@ -31,28 +31,25 @@ class Sample(NamedTuple):
timestamp: float
@dataclass
class Progress:
vod_count: int
downloaded: int = 0
estimated_total: Optional[int] = None
last_printed: float = field(default_factory=time.time)
progress_bytes: int = 0
progress_perc: int = 0
remaining_time: Optional[int] = None
speed: Optional[float] = None
start_time: float = field(default_factory=time.time)
tasks: Dict[TaskId, Task] = field(default_factory=dict)
vod_downloaded_count: int = 0
samples: Deque[Sample] = field(default_factory=lambda: deque(maxlen=100))
def __init__(self, file_count: Optional[int] = None):
self.downloaded: int = 0
self.estimated_total: Optional[int] = None
self.last_printed: Optional[float] = None
self.progress_bytes: int = 0
self.progress_perc: int = 0
self.remaining_time: Optional[int] = None
self.samples: Deque[Sample] = deque(maxlen=1000)
self.speed: Optional[float] = None
self.tasks: Dict[TaskId, Task] = {}
self.file_count = file_count
self.downloaded_count: int = 0
def start(self, task_id: int, size: int):
if task_id in self.tasks:
raise ValueError(f"Task {task_id}: cannot start, already started")
self.tasks[task_id] = Task(task_id, size)
self._calculate_total()
self._calculate_progress()
self.print()
def advance(self, task_id: int, size: int):
@@ -63,7 +60,6 @@ class Progress:
self.progress_bytes += size
self.tasks[task_id].advance(size)
self.samples.append(Sample(self.downloaded, time.time()))
self._calculate_progress()
self.print()
def already_downloaded(self, task_id: int, size: int):
@@ -72,9 +68,7 @@
self.tasks[task_id] = Task(task_id, size)
self.progress_bytes += size
self.vod_downloaded_count += 1
self._calculate_total()
self._calculate_progress()
self.downloaded_count += 1
self.print()
def abort(self, task_id: int):
@@ -83,9 +77,6 @@
del self.tasks[task_id]
self.progress_bytes = sum(t.downloaded for t in self.tasks.values())
self._calculate_total()
self._calculate_progress()
self.print()
def end(self, task_id: int):
@@ -98,15 +89,15 @@
f"Taks {task_id} ended with {task.downloaded}b downloaded, expected {task.size}b."
)
self.vod_downloaded_count += 1
self.downloaded_count += 1
self.print()
def _calculate_total(self):
self.estimated_total = (
int(mean(t.size for t in self.tasks.values()) * self.vod_count) if self.tasks else None
)
def _recalculate(self):
if self.tasks and self.file_count:
self.estimated_total = int(mean(t.size for t in self.tasks.values()) * self.file_count)
else:
self.estimated_total = None
def _calculate_progress(self):
self.speed = self._calculate_speed()
self.progress_perc = (
int(100 * self.progress_bytes / self.estimated_total) if self.estimated_total else 0
@@ -133,10 +124,14 @@
now = time.time()
# Don't print more often than 10 times per second
if now - self.last_printed < 0.1:
if self.last_printed and now - self.last_printed < 0.1:
return
click.echo(f"\rDownloaded {self.vod_downloaded_count}/{self.vod_count} VODs", nl=False)
self._recalculate()
clear_line()
total_label = f"/{self.file_count}" if self.file_count else ""
click.echo(f"Downloaded {self.downloaded_count}{total_label} VODs", nl=False)
click.secho(f" {self.progress_perc}%", fg="blue", nl=False)
if self.estimated_total is not None:
@@ -150,6 +145,4 @@
if self.remaining_time is not None:
click.echo(f" ETA {blue(format_time(self.remaining_time))}", nl=False)
click.echo(" ", nl=False)
self.last_printed = now

twitchdl/twitch.py

@@ -2,83 +2,28 @@
Twitch API access.
"""
import json
import logging
import random
import time
from typing import Any, Dict, Generator, List, Literal, Mapping, Optional, Tuple, TypedDict, Union
from typing import Any, Dict, Generator, List, Mapping, Optional, Tuple, Union
import click
import httpx
from twitchdl import CLIENT_ID
from twitchdl.entities import Data
from twitchdl.entities import (
AccessToken,
Chapter,
Clip,
ClipAccessToken,
ClipsPeriod,
Data,
Video,
VideosSort,
VideosType,
)
from twitchdl.exceptions import ConsoleError
ClipsPeriod = Literal["last_day", "last_week", "last_month", "all_time"]
VideosSort = Literal["views", "time"]
VideosType = Literal["archive", "highlight", "upload"]
class AccessToken(TypedDict):
signature: str
value: str
class User(TypedDict):
login: str
displayName: str
class Game(TypedDict):
id: str
name: str
class VideoQuality(TypedDict):
frameRate: str
quality: str
sourceURL: str
class ClipAccessToken(TypedDict):
id: str
playbackAccessToken: AccessToken
videoQualities: List[VideoQuality]
class Clip(TypedDict):
id: str
slug: str
title: str
createdAt: str
viewCount: int
durationSeconds: int
url: str
videoQualities: List[VideoQuality]
game: Game
broadcaster: User
class Video(TypedDict):
id: str
title: str
description: str
publishedAt: str
broadcastType: str
lengthSeconds: int
game: Game
creator: User
class Chapter(TypedDict):
id: str
durationMilliseconds: int
positionMilliseconds: int
type: str
description: str
subDescription: str
thumbnailURL: str
game: Game
from twitchdl.utils import format_size
class GQLError(click.ClickException):
@@ -135,22 +80,23 @@ logger = logging.getLogger(__name__)
def log_request(request: httpx.Request):
logger.debug(f"--> {request.method} {request.url}")
logger.info(f"--> {request.method} {request.url}")
if request.content:
logger.debug(f"--> {request.content}")
def log_response(response: httpx.Response, duration: float):
def log_response(response: httpx.Response, duration_seconds: float):
request = response.request
duration_ms = int(1000 * duration)
logger.debug(f"<-- {request.method} {request.url} HTTP {response.status_code} {duration_ms}ms")
duration = f"{int(1000 * duration_seconds)}ms"
size = format_size(len(response.content))
logger.info(f"<-- {request.method} {request.url} HTTP {response.status_code} {duration} {size}")
if response.content:
logger.debug(f"<-- {response.content}")
def gql_post(query: str):
def gql_persisted_query(query: Data):
url = "https://gql.twitch.tv/gql"
response = authenticated_post(url, content=query)
response = authenticated_post(url, json=query)
gql_raise_on_error(response)
return response.json()
@@ -238,22 +184,18 @@ def get_clip(slug: str) -> Optional[Clip]:
def get_clip_access_token(slug: str) -> ClipAccessToken:
query = f"""
{{
query = {
"operationName": "VideoAccessToken_Clip",
"variables": {{
"slug": "{slug}"
}},
"extensions": {{
"persistedQuery": {{
"variables": {"slug": slug},
"extensions": {
"persistedQuery": {
"version": 1,
"sha256Hash": "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11"
}}
}}
}}
"""
"sha256Hash": "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11",
}
},
}
response = gql_post(query.strip())
response = gql_persisted_query(query)
return response["data"]["clip"]
@@ -325,23 +267,6 @@ def channel_clips_generator(
return _generator(clips, limit)
def channel_clips_generator_old(channel_id: str, period: ClipsPeriod, limit: int):
cursor = ""
while True:
clips = get_channel_clips(channel_id, period, limit, after=cursor)
if not clips["edges"]:
break
has_next = clips["pageInfo"]["hasNextPage"]
cursor = clips["edges"][-1]["cursor"] if has_next else None
yield clips, has_next
if not cursor:
break
def get_channel_videos(
channel_id: str,
limit: int,
@@ -422,7 +347,7 @@ def get_access_token(video_id: str, auth_token: Optional[str] = None) -> AccessT
query = f"""
{{
videoPlaybackAccessToken(
id: {video_id},
id: "{video_id}",
params: {{
platform: "web",
playerBackend: "mediaplayer",
@@ -467,8 +392,12 @@ def get_playlists(video_id: str, access_token: AccessToken) -> str:
"allow_audio_only": "true",
"allow_source": "true",
"player": "twitchweb",
"platform": "web",
"supported_codecs": "av1,h265,h264",
"p": random.randint(1000000, 10000000),
},
)
response.raise_for_status()
return response.content.decode("utf-8")
@@ -503,7 +432,7 @@ def get_video_chapters(video_id: str) -> List[Chapter]:
},
}
response = gql_post(json.dumps(query))
response = gql_persisted_query(query)
return list(_chapter_nodes(response["data"]["video"]["moments"]))

twitchdl/utils.py

@@ -85,6 +85,7 @@ def titlify(value: str) -> str:
VIDEO_PATTERNS = [
r"^(?P<id>\d+)?$",
r"^https://(www\.|m\.)?twitch\.tv/videos/(?P<id>\d+)(\?.+)?$",
r"^https://(www\.|m\.)?twitch\.tv/\w+/video/(?P<id>\d+)(\?.+)?$",
]
CLIP_PATTERNS = [