Compare commits

24 Commits
2.3.1 ... 2.4.0

SHA1 Message Date
d85aeade86 Update changelog 2024-08-30 08:25:52 +02:00
4db4285d9a Handle videoQualities being null 2024-08-30 08:06:31 +02:00
707b2da934 Improve types 2024-08-30 08:05:16 +02:00
2846925c91 Allow newer versions of m3u8 2024-08-30 08:05:03 +02:00
8f5d7f022f Reinstate "VODs" in progress output 2024-08-30 07:59:51 +02:00
ff4a514b8a Handle video URLs containing account name
fixes #162
2024-08-30 07:55:58 +02:00
f57612ffcc Refactor download_all to work when count is unknown 2024-08-30 07:37:50 +02:00
5be97b9e13 Fix tests 2024-08-28 15:21:22 +02:00
b6e7f8b36c Make Progress work when file count is not known 2024-08-28 15:18:43 +02:00
42f7a9a1a5 Fix a crash when downloading clips 2024-08-28 14:21:22 +02:00
789d3d1939 Add --target-dir option to clips command 2024-08-28 13:15:01 +02:00
07efac1ae7 Add --verbose flag for verbose logging
Don't log HTTP payloads unless --verbose flag is given. Always logging
HTTP payloads tends to make the log unreadable.
2024-08-28 12:58:36 +02:00
a808b7d8ec Don't check if file exists in download_file
This is done outside the function.
2024-08-28 12:39:56 +02:00
936c6a9da1 Don't stop downloading if one download fails 2024-08-28 12:33:05 +02:00
7184feacee Move download_file to http 2024-08-28 11:07:25 +02:00
5679e66270 Improve naming 2024-08-28 11:02:12 +02:00
1658aba124 Embrace pathlib 2024-08-28 10:59:23 +02:00
29d3f7fec2 Extract file naming 2024-08-28 10:32:40 +02:00
91e0447681 Remove unnecessary braces 2024-08-28 09:53:06 +02:00
f9e60082ba Improve downloading logic
Follow redirects to avoid saving an intermediate response, and raise an
error on invalid status to avoid saving an error response.
2024-08-28 09:52:15 +02:00
141ecb7f29 Move entities to entites.py 2024-08-24 20:23:34 +02:00
52b96aab15 Simplify persisted queries 2024-08-24 20:08:01 +02:00
1bdf6a2c02 Remove unused function 2024-08-24 20:05:32 +02:00
38636e2b21 Use ansi escape code to clear line 2024-05-31 12:57:51 +02:00
20 changed files with 357 additions and 275 deletions

View File

@ -3,6 +3,14 @@ twitch-dl changelog
<!-- Do not edit. This file is automatically generated from changelog.yaml.-->
### [2.4.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.4.0)
* Add `clips --target-dir` option. Use in conjunction with `--download` to
specify target directory.
* Fix a crash when downloading clips (#160)
* Handle video URLs which contain the channel name (#162)
* Don't stop downloading clips if one download fails
### [2.3.1 (2024-05-19)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.1)
* Fix fetching access token (#155, thanks @KryptonicDragon)

View File

@ -1,3 +1,11 @@
2.4.0:
date: 2024-08-30
changes:
- "Add `clips --target-dir` option. Use in conjunction with `--download` to specify target directory."
- "Fix a crash when downloading clips (#160)"
- "Handle video URLs which contain the channel name (#162)"
- "Don't stop downloading clips if one download fails"
2.3.1:
date: 2024-05-19
changes:
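
The markdown changelog shown above is generated from this YAML file (hence the "Do not edit" comment). The project's actual generator script is not part of this diff; the following is only an illustrative sketch of the mapping between the two formats, assuming PyYAML is available and that the render_changelog name is invented here.

# Illustrative sketch only: renders changelog.yaml entries into the markdown format shown above.
# The real generator used by twitch-dl is not shown in this diff and may differ.
import yaml  # PyYAML, assumed available

def render_changelog(path: str = "changelog.yaml") -> str:
    with open(path) as f:
        releases = yaml.safe_load(f)  # mapping of version -> {date, changes}
    lines = ["twitch-dl changelog", ""]
    for version, release in releases.items():
        url = f"https://github.com/ihabunek/twitch-dl/releases/tag/{version}"
        lines.append(f"### [{version} ({release['date']})]({url})")
        lines.extend(f"* {change}" for change in release["changes"])
        lines.append("")
    return "\n".join(lines)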

View File

@ -3,6 +3,14 @@ twitch-dl changelog
<!-- Do not edit. This file is automatically generated from changelog.yaml.-->
### [2.4.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.4.0)
* Add `clips --target-dir` option. Use in conjunction with `--download` to
specify target directory.
* Fix a crash when downloading clips (#160)
* Handle video URLs which contain the channel name (#162)
* Don't stop downloading clips if one download fails
### [2.3.1 (2024-05-19)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.1)
* Fix fetching access token (#155, thanks @KryptonicDragon)

View File

@ -43,6 +43,11 @@ twitch-dl clips [OPTIONS] CHANNEL_NAME
<td>Period from which to return clips Possible values: <code>last_day</code>, <code>last_week</code>, <code>last_month</code>, <code>all_time</code>. [default: <code>all_time</code>]</td>
</tr>
<tr>
<td class="code">-t, --target-dir</td>
<td>Target directory when downloading clips [default: <code>.</code>]</td>
</tr>
<tr>
<td class="code">--json</td>
<td>Print data as JSON rather than human readable text</td>

View File

@ -22,7 +22,7 @@ classifiers = [
dependencies = [
"click>=8.0.0,<9.0.0",
"httpx>=0.17.0,<1.0.0",
"m3u8>=3.0.0,<5.0.0",
"m3u8>=3.0.0,<7.0.0",
]
[tool.setuptools]
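
This hunk only widens the allowed version range of the m3u8 dependency; the parsing API used by playlists.py later in this diff is assumed to stay compatible across that range. For orientation, a rough sketch of the kind of m3u8 usage involved (not taken from the project):

# Orientation sketch of basic m3u8 parsing; the project's real logic lives in twitchdl/playlists.py.
import m3u8

playlist = m3u8.loads("#EXTM3U\n#EXTINF:10.0,\nsegment-00000.ts\n")
for segment in playlist.segments:
    print(segment.uri, segment.duration)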

View File

@ -7,6 +7,7 @@ TEST_VIDEO_PATTERNS = [
("702689313", "https://twitch.tv/videos/702689313"),
("702689313", "https://www.twitch.tv/videos/702689313"),
("702689313", "https://m.twitch.tv/videos/702689313"),
("2223719525", "https://www.twitch.tv/r0dn3y/video/2223719525"),
]
TEST_CLIP_PATTERNS = {
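
The new test case exercises the channel-scoped video URL format added to VIDEO_PATTERNS in utils.py (see the last file in this diff). A small standalone check of those patterns; the parse_video_id helper here is hypothetical and only illustrates how the patterns resolve an ID:

# Standalone illustration of the video URL patterns shown in twitchdl/utils.py below.
import re

VIDEO_PATTERNS = [
    r"^(?P<id>\d+)?$",
    r"^https://(www\.|m\.)?twitch\.tv/videos/(?P<id>\d+)(\?.+)?$",
    r"^https://(www\.|m\.)?twitch\.tv/\w+/video/(?P<id>\d+)(\?.+)?$",  # added for #162
]

def parse_video_id(url: str):
    for pattern in VIDEO_PATTERNS:
        match = re.match(pattern, url)
        if match:
            return match.group("id")
    return None

assert parse_video_id("702689313") == "702689313"
assert parse_video_id("https://www.twitch.tv/videos/702689313") == "702689313"
assert parse_video_id("https://www.twitch.tv/r0dn3y/video/2223719525") == "2223719525"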

View File

@ -8,8 +8,8 @@ def test_initial_values():
assert progress.progress_perc == 0
assert progress.remaining_time is None
assert progress.speed is None
assert progress.vod_count == 10
assert progress.vod_downloaded_count == 0
assert progress.file_count == 10
assert progress.downloaded_count == 0
def test_downloaded():
@ -96,16 +96,16 @@ def test_vod_downloaded_count():
progress.start(2, 100)
progress.start(3, 100)
assert progress.vod_downloaded_count == 0
assert progress.downloaded_count == 0
progress.advance(1, 100)
progress.end(1)
assert progress.vod_downloaded_count == 1
assert progress.downloaded_count == 1
progress.advance(2, 100)
progress.end(2)
assert progress.vod_downloaded_count == 2
assert progress.downloaded_count == 2
progress.advance(3, 100)
progress.end(3)
assert progress.vod_downloaded_count == 3
assert progress.downloaded_count == 3

View File

@ -2,12 +2,14 @@ import logging
import platform
import re
import sys
from pathlib import Path
from typing import Optional, Tuple
import click
from twitchdl import __version__
from twitchdl.entities import DownloadOptions
from twitchdl.naming import DEFAULT_OUTPUT_TEMPLATE
from twitchdl.twitch import ClipsPeriod, VideosSort, VideosType
# Tweak the Click context
@ -79,11 +81,12 @@ def validate_rate(_ctx: click.Context, _param: click.Parameter, value: str) -> O
@click.group(context_settings=CONTEXT)
@click.option("--debug/--no-debug", default=False, help="Log debug info to stderr")
@click.option("--debug/--no-debug", default=False, help="Enable debug logging to stderr")
@click.option("--verbose/--no-verbose", default=False, help="More verbose debug logging")
@click.option("--color/--no-color", default=sys.stdout.isatty(), help="Use ANSI color in output")
@click.version_option(package_name="twitch-dl")
@click.pass_context
def cli(ctx: click.Context, color: bool, debug: bool):
def cli(ctx: click.Context, color: bool, debug: bool, verbose: bool):
"""twitch-dl - twitch.tv downloader
https://twitch-dl.bezdomni.net/
@ -91,7 +94,7 @@ def cli(ctx: click.Context, color: bool, debug: bool):
ctx.color = color
if debug:
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
logging.getLogger("httpx").setLevel(logging.WARN)
logging.getLogger("httpcore").setLevel(logging.WARN)
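
In summary: --debug turns on INFO-level logging (request/response lines), adding --verbose raises it to DEBUG so HTTP payloads are included, and the httpx/httpcore loggers are kept at WARN either way. A condensed sketch of that logic, mirroring the hunk above (the configure_logging name is invented here):

# Condensed restatement of the logging setup added in cli.py; function name is hypothetical.
import logging

def configure_logging(debug: bool, verbose: bool) -> None:
    if debug:
        logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO)
        logging.getLogger("httpx").setLevel(logging.WARN)
        logging.getLogger("httpcore").setLevel(logging.WARN)
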
@ -139,6 +142,18 @@ def cli(ctx: click.Context, color: bool, debug: bool):
default="all_time",
type=click.Choice(["last_day", "last_week", "last_month", "all_time"]),
)
@click.option(
"-t",
"--target-dir",
help="Target directory when downloading clips",
type=click.Path(
file_okay=False,
readable=False,
writable=True,
path_type=Path,
),
default=Path(),
)
@json_option
def clips(
channel_name: str,
@ -149,10 +164,14 @@ def clips(
limit: Optional[int],
pager: Optional[int],
period: ClipsPeriod,
target_dir: Path,
):
"""List or download clips for given CHANNEL_NAME."""
from twitchdl.commands.clips import clips
if not target_dir.exists():
target_dir.mkdir(parents=True, exist_ok=True)
clips(
channel_name,
all=all,
@ -162,6 +181,7 @@ def clips(
limit=limit,
pager=pager,
period=period,
target_dir=target_dir,
)
@ -229,7 +249,7 @@ def clips(
"-o",
"--output",
help="Output file name template. See docs for details.",
default="{date}_{id}_{channel_login}_{title_slug}.{format}",
default=DEFAULT_OUTPUT_TEMPLATE,
)
@click.option(
"-q",

View File

@ -1,13 +1,15 @@
import re
import sys
from os import path
from typing import Callable, Generator, Optional
from pathlib import Path
from typing import Callable, Generator, List, Optional
import click
from twitchdl import twitch, utils
from twitchdl.commands.download import get_clip_authenticated_url
from twitchdl.download import download_file
from twitchdl.entities import VideoQuality
from twitchdl.http import download_file
from twitchdl.output import green, print_clip, print_clip_compact, print_json, print_paged, yellow
from twitchdl.twitch import Clip, ClipsPeriod
@ -22,6 +24,7 @@ def clips(
limit: Optional[int] = None,
pager: Optional[int] = None,
period: ClipsPeriod = "all_time",
target_dir: Path = Path(),
):
# Set different defaults for limit for compact display
default_limit = 40 if compact else 10
@ -35,7 +38,7 @@ def clips(
return print_json(list(generator))
if download:
return _download_clips(generator)
return _download_clips(target_dir, generator)
print_fn = print_clip_compact if compact else print_clip
@ -45,8 +48,8 @@ def clips(
return _print_all(generator, print_fn, all)
def _target_filename(clip: Clip):
url = clip["videoQualities"][0]["sourceURL"]
def _target_filename(clip: Clip, video_qualities: List[VideoQuality]):
url = video_qualities[0]["sourceURL"]
_, ext = path.splitext(url)
ext = ext.lstrip(".")
@ -67,16 +70,27 @@ def _target_filename(clip: Clip):
return f"{name}.{ext}"
def _download_clips(generator: Generator[Clip, None, None]):
for clip in generator:
target = _target_filename(clip)
def _download_clips(target_dir: Path, generator: Generator[Clip, None, None]):
if not target_dir.exists():
target_dir.mkdir(parents=True, exist_ok=True)
if path.exists(target):
for clip in generator:
# videoQualities can be null in some circumstances, see:
# https://github.com/ihabunek/twitch-dl/issues/160
if not clip["videoQualities"]:
continue
target = target_dir / _target_filename(clip, clip["videoQualities"])
if target.exists():
click.echo(f"Already downloaded: {green(target)}")
else:
url = get_clip_authenticated_url(clip["slug"], "source")
click.echo(f"Downloading: {yellow(target)}")
download_file(url, target)
try:
url = get_clip_authenticated_url(clip["slug"], "source")
click.echo(f"Downloading: {yellow(target)}")
download_file(url, target)
except Exception as ex:
click.secho(ex, err=True, fg="red")
def _print_all(

View File

@ -1,23 +1,21 @@
import asyncio
import os
import platform
import re
import shutil
import subprocess
import tempfile
from os import path
from pathlib import Path
from typing import Dict, List, Optional
from typing import List, Optional
from urllib.parse import urlencode, urlparse
import click
import httpx
from twitchdl import twitch, utils
from twitchdl.download import download_file
from twitchdl.entities import DownloadOptions
from twitchdl.exceptions import ConsoleError
from twitchdl.http import download_all
from twitchdl.http import download_all, download_file
from twitchdl.naming import clip_filename, video_filename
from twitchdl.output import blue, bold, green, print_log, yellow
from twitchdl.playlists import (
enumerate_vods,
@ -26,7 +24,7 @@ from twitchdl.playlists import (
parse_playlists,
select_playlist,
)
from twitchdl.twitch import Chapter, Clip, ClipAccessToken, Video
from twitchdl.twitch import Chapter, ClipAccessToken, Video
def download(ids: List[str], args: DownloadOptions):
@ -50,14 +48,14 @@ def download_one(video: str, args: DownloadOptions):
raise ConsoleError(f"Invalid input: {video}")
def _join_vods(playlist_path: str, target: str, overwrite: bool, video: Video):
def _join_vods(playlist_path: Path, target: Path, overwrite: bool, video: Video):
description = video["description"] or ""
description = description.strip()
command = [
command: List[str] = [
"ffmpeg",
"-i",
playlist_path,
str(playlist_path),
"-c",
"copy",
"-metadata",
@ -83,9 +81,9 @@ def _join_vods(playlist_path: str, target: str, overwrite: bool, video: Video):
raise ConsoleError("Joining files failed")
def _concat_vods(vod_paths: List[str], target: str):
def _concat_vods(vod_paths: List[Path], target: Path):
tool = "type" if platform.system() == "Windows" else "cat"
command = [tool] + vod_paths
command = [tool] + [str(p) for p in vod_paths]
with open(target, "wb") as target_file:
result = subprocess.run(command, stdout=target_file)
@ -93,71 +91,12 @@ def _concat_vods(vod_paths: List[str], target: str):
raise ConsoleError(f"Joining files failed: {result.stderr}")
def get_video_placeholders(video: Video, format: str) -> Dict[str, str]:
date, time = video["publishedAt"].split("T")
game = video["game"]["name"] if video["game"] else "Unknown"
return {
"channel": video["creator"]["displayName"],
"channel_login": video["creator"]["login"],
"date": date,
"datetime": video["publishedAt"],
"format": format,
"game": game,
"game_slug": utils.slugify(game),
"id": video["id"],
"time": time,
"title": utils.titlify(video["title"]),
"title_slug": utils.slugify(video["title"]),
}
def _video_target_filename(video: Video, args: DownloadOptions):
subs = get_video_placeholders(video, args.format)
try:
return args.output.format(**subs)
except KeyError as e:
supported = ", ".join(subs.keys())
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
def _clip_target_filename(clip: Clip, args: DownloadOptions):
date, time = clip["createdAt"].split("T")
game = clip["game"]["name"] if clip["game"] else "Unknown"
url = clip["videoQualities"][0]["sourceURL"]
_, ext = path.splitext(url)
ext = ext.lstrip(".")
subs = {
"channel": clip["broadcaster"]["displayName"],
"channel_login": clip["broadcaster"]["login"],
"date": date,
"datetime": clip["createdAt"],
"format": ext,
"game": game,
"game_slug": utils.slugify(game),
"id": clip["id"],
"slug": clip["slug"],
"time": time,
"title": utils.titlify(clip["title"]),
"title_slug": utils.slugify(clip["title"]),
}
try:
return args.output.format(**subs)
except KeyError as e:
supported = ", ".join(subs.keys())
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
def _crete_temp_dir(base_uri: str) -> str:
def _crete_temp_dir(base_uri: str) -> Path:
"""Create a temp dir to store downloads if it doesn't exist."""
path = urlparse(base_uri).path.lstrip("/")
temp_dir = Path(tempfile.gettempdir(), "twitch-dl", path)
temp_dir.mkdir(parents=True, exist_ok=True)
return str(temp_dir)
return temp_dir
def _get_clip_url(access_token: ClipAccessToken, quality: Optional[str]) -> str:
@ -220,10 +159,10 @@ def _download_clip(slug: str, args: DownloadOptions) -> None:
duration = utils.format_duration(clip["durationSeconds"])
click.echo(f"Found: {green(title)} by {yellow(user)}, playing {blue(game)} ({duration})")
target = _clip_target_filename(clip, args)
target = Path(clip_filename(clip, args.output))
click.echo(f"Target: {blue(target)}")
if not args.overwrite and path.exists(target):
if not args.overwrite and target.exists():
response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False)
if response.lower().strip() != "y":
raise click.Abort()
@ -252,10 +191,10 @@ def _download_video(video_id: str, args: DownloadOptions) -> None:
click.echo(f"Found: {blue(video['title'])} by {yellow(video['creator']['displayName'])}")
target = _video_target_filename(video, args)
target = Path(video_filename(video, args.format, args.output))
click.echo(f"Output: {blue(target)}")
if not args.overwrite and path.exists(target):
if not args.overwrite and target.exists():
response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False)
if response.lower().strip() != "y":
raise click.Abort()
@ -285,19 +224,27 @@ def _download_video(video_id: str, args: DownloadOptions) -> None:
target_dir = _crete_temp_dir(base_uri)
# Save playlists for debugging purposes
with open(path.join(target_dir, "playlists.m3u8"), "w") as f:
with open(target_dir / "playlists.m3u8", "w") as f:
f.write(playlists_text)
with open(path.join(target_dir, "playlist.m3u8"), "w") as f:
with open(target_dir / "playlist.m3u8", "w") as f:
f.write(vods_text)
click.echo(f"\nDownloading {len(vods)} VODs using {args.max_workers} workers to {target_dir}")
sources = [base_uri + vod.path for vod in vods]
targets = [os.path.join(target_dir, f"{vod.index:05d}.ts") for vod in vods]
asyncio.run(download_all(sources, targets, args.max_workers, rate_limit=args.rate_limit))
targets = [target_dir / f"{vod.index:05d}.ts" for vod in vods]
asyncio.run(
download_all(
zip(sources, targets),
args.max_workers,
rate_limit=args.rate_limit,
count=len(vods),
)
)
join_playlist = make_join_playlist(vods_m3u8, vods, targets)
join_playlist_path = path.join(target_dir, "playlist_downloaded.m3u8")
join_playlist_path = target_dir / "playlist_downloaded.m3u8"
join_playlist.dump(join_playlist_path) # type: ignore
click.echo()

View File

@ -4,8 +4,8 @@ import click
import m3u8
from twitchdl import twitch, utils
from twitchdl.commands.download import get_video_placeholders
from twitchdl.exceptions import ConsoleError
from twitchdl.naming import video_placeholders
from twitchdl.output import bold, print_clip, print_json, print_log, print_table, print_video
from twitchdl.playlists import parse_playlists
from twitchdl.twitch import Chapter, Clip, Video
@ -67,7 +67,7 @@ def video_info(video: Video, playlists: str, chapters: List[Chapter]):
duration = utils.format_time(chapter["durationMilliseconds"] // 1000)
click.echo(f'{start} {bold(chapter["description"])} ({duration})')
placeholders = get_video_placeholders(video, format="mkv")
placeholders = video_placeholders(video, format="mkv")
placeholders = [[f"{{{k}}}", v] for k, v in placeholders.items()]
click.echo("")
print_table(["Placeholder", "Value"], placeholders)
@ -98,5 +98,8 @@ def clip_info(clip: Clip):
click.echo()
click.echo("Download links:")
for q in clip["videoQualities"]:
click.echo(f"{bold(q['quality'])} [{q['frameRate']} fps] {q['sourceURL']}")
if clip["videoQualities"]:
for q in clip["videoQualities"]:
click.echo(f"{bold(q['quality'])} [{q['frameRate']} fps] {q['sourceURL']}")
else:
click.echo("No download URLs found")

View File

@ -1,37 +0,0 @@
import os
import httpx
from twitchdl.exceptions import ConsoleError
CHUNK_SIZE = 1024
CONNECT_TIMEOUT = 5
RETRY_COUNT = 5
def _download(url: str, path: str):
tmp_path = path + ".tmp"
size = 0
with httpx.stream("GET", url, timeout=CONNECT_TIMEOUT) as response:
with open(tmp_path, "wb") as target:
for chunk in response.iter_bytes(chunk_size=CHUNK_SIZE):
target.write(chunk)
size += len(chunk)
os.rename(tmp_path, path)
return size
def download_file(url: str, path: str, retries: int = RETRY_COUNT):
if os.path.exists(path):
from_disk = True
return (os.path.getsize(path), from_disk)
from_disk = False
for _ in range(retries):
try:
return (_download(url, path), from_disk)
except httpx.RequestError:
pass
raise ConsoleError(f"Failed downloading after {retries} attempts: {url}")

View File

@ -1,5 +1,5 @@
from dataclasses import dataclass
from typing import Any, Mapping, Optional
from typing import Any, List, Literal, Mapping, Optional, TypedDict
@dataclass
@ -20,6 +20,73 @@ class DownloadOptions:
max_workers: int
ClipsPeriod = Literal["last_day", "last_week", "last_month", "all_time"]
VideosSort = Literal["views", "time"]
VideosType = Literal["archive", "highlight", "upload"]
class AccessToken(TypedDict):
signature: str
value: str
class User(TypedDict):
login: str
displayName: str
class Game(TypedDict):
id: str
name: str
class VideoQuality(TypedDict):
frameRate: str
quality: str
sourceURL: str
class ClipAccessToken(TypedDict):
id: str
playbackAccessToken: AccessToken
videoQualities: List[VideoQuality]
class Clip(TypedDict):
id: str
slug: str
title: str
createdAt: str
viewCount: int
durationSeconds: int
url: str
videoQualities: Optional[List[VideoQuality]]
game: Game
broadcaster: User
class Video(TypedDict):
id: str
title: str
description: str
publishedAt: str
broadcastType: str
lengthSeconds: int
game: Game
creator: User
class Chapter(TypedDict):
id: str
durationMilliseconds: int
positionMilliseconds: int
type: str
description: str
subDescription: str
thumbnailURL: str
game: Game
# Type for annotating decoded JSON
# TODO: make data classes for common structs
Data = Mapping[str, Any]
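
Since Clip["videoQualities"] is now Optional, callers must guard against None, as the clips and info commands earlier in this diff do. A minimal illustration with invented placeholder values:

# Minimal illustration of the now-Optional videoQualities field; all values are made up.
from typing import List, Optional, TypedDict

class VideoQuality(TypedDict):
    frameRate: str
    quality: str
    sourceURL: str

class PartialClip(TypedDict):  # subset of the Clip TypedDict above, for brevity
    slug: str
    videoQualities: Optional[List[VideoQuality]]

clip: PartialClip = {"slug": "ExampleSlug", "videoQualities": None}

if clip["videoQualities"]:
    print(clip["videoQualities"][0]["sourceURL"])
else:
    print("No download URLs found")  # mirrors the fallback added in the info command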

View File

@ -3,10 +3,12 @@ import logging
import os
import time
from abc import ABC, abstractmethod
from typing import List, Optional
from pathlib import Path
from typing import Iterable, Optional, Tuple
import httpx
from twitchdl.exceptions import ConsoleError
from twitchdl.progress import Progress
logger = logging.getLogger(__name__)
@ -71,7 +73,7 @@ async def download(
client: httpx.AsyncClient,
task_id: int,
source: str,
target: str,
target: Path,
progress: Progress,
token_bucket: TokenBucket,
):
@ -96,12 +98,12 @@ async def download_with_retries(
semaphore: asyncio.Semaphore,
task_id: int,
source: str,
target: str,
target: Path,
progress: Progress,
token_bucket: TokenBucket,
):
async with semaphore:
if os.path.exists(target):
if target.exists():
size = os.path.getsize(target)
progress.already_downloaded(task_id, size)
return
@ -119,13 +121,13 @@ async def download_with_retries(
async def download_all(
sources: List[str],
targets: List[str],
source_targets: Iterable[Tuple[str, Path]],
workers: int,
*,
count: Optional[int] = None,
rate_limit: Optional[int] = None,
):
progress = Progress(len(sources))
progress = Progress(count)
token_bucket = LimitingTokenBucket(rate_limit) if rate_limit else EndlessTokenBucket()
async with httpx.AsyncClient(timeout=TIMEOUT) as client:
semaphore = asyncio.Semaphore(workers)
@ -139,6 +141,36 @@ async def download_all(
progress,
token_bucket,
)
for task_id, (source, target) in enumerate(zip(sources, targets))
for task_id, (source, target) in enumerate(source_targets)
]
await asyncio.gather(*tasks)
def download_file(url: str, target: Path, retries: int = RETRY_COUNT) -> None:
"""Download URL to given target path with retries"""
error_message = ""
for r in range(retries):
try:
retry_info = f" (retry {r})" if r > 0 else ""
logger.info(f"Downloading {url} to {target}{retry_info}")
return _do_download_file(url, target)
except httpx.HTTPStatusError as ex:
logger.error(ex)
error_message = f"Server responded with HTTP {ex.response.status_code}"
except httpx.RequestError as ex:
logger.error(ex)
error_message = str(ex)
raise ConsoleError(f"Failed downloading after {retries} attempts: {error_message}")
def _do_download_file(url: str, target: Path) -> None:
tmp_path = Path(str(target) + ".tmp")
with httpx.stream("GET", url, timeout=TIMEOUT, follow_redirects=True) as response:
response.raise_for_status()
with open(tmp_path, "wb") as f:
for chunk in response.iter_bytes(chunk_size=CHUNK_SIZE):
f.write(chunk)
os.rename(tmp_path, target)
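
Callers of the relocated download_file now pass a Path target and, once the retries are exhausted, get a ConsoleError describing the last failure. A short usage sketch; the URL and file names are placeholders:

# Usage sketch for twitchdl.http.download_file; URL and paths are placeholders.
from pathlib import Path

from twitchdl.exceptions import ConsoleError
from twitchdl.http import download_file

target = Path("clips") / "example.mp4"
target.parent.mkdir(parents=True, exist_ok=True)

try:
    download_file("https://example.com/clip.mp4", target)
except ConsoleError as ex:
    print(f"Download failed: {ex}")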

twitchdl/naming.py (new file, 72 lines)
View File

@ -0,0 +1,72 @@
import os
from typing import Dict
from twitchdl import utils
from twitchdl.entities import Clip, Video
from twitchdl.exceptions import ConsoleError
DEFAULT_OUTPUT_TEMPLATE = "{date}_{id}_{channel_login}_{title_slug}.{format}"
def video_filename(video: Video, format: str, output: str) -> str:
subs = video_placeholders(video, format)
return _format(output, subs)
def video_placeholders(video: Video, format: str) -> Dict[str, str]:
date, time = video["publishedAt"].split("T")
game = video["game"]["name"] if video["game"] else "Unknown"
return {
"channel": video["creator"]["displayName"],
"channel_login": video["creator"]["login"],
"date": date,
"datetime": video["publishedAt"],
"format": format,
"game": game,
"game_slug": utils.slugify(game),
"id": video["id"],
"time": time,
"title": utils.titlify(video["title"]),
"title_slug": utils.slugify(video["title"]),
}
def clip_filename(clip: Clip, output: str):
subs = clip_placeholders(clip)
return _format(output, subs)
def clip_placeholders(clip: Clip) -> Dict[str, str]:
date, time = clip["createdAt"].split("T")
game = clip["game"]["name"] if clip["game"] else "Unknown"
if clip["videoQualities"]:
url = clip["videoQualities"][0]["sourceURL"]
_, ext = os.path.splitext(url)
ext = ext.lstrip(".")
else:
ext = "mp4"
return {
"channel": clip["broadcaster"]["displayName"],
"channel_login": clip["broadcaster"]["login"],
"date": date,
"datetime": clip["createdAt"],
"format": ext,
"game": game,
"game_slug": utils.slugify(game),
"id": clip["id"],
"slug": clip["slug"],
"time": time,
"title": utils.titlify(clip["title"]),
"title_slug": utils.slugify(clip["title"]),
}
def _format(output: str, subs: Dict[str, str]) -> str:
try:
return output.format(**subs)
except KeyError as e:
supported = ", ".join(subs.keys())
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
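
The extracted naming helpers can be exercised on their own. For example, with the default template and a hypothetical video record (all field values below are invented for illustration):

# Hypothetical example of the new naming helpers; field values are invented.
from twitchdl.naming import DEFAULT_OUTPUT_TEMPLATE, video_filename

video = {
    "id": "702689313",
    "title": "Example stream title",
    "description": "",
    "publishedAt": "2024-08-30T08:00:00Z",
    "broadcastType": "ARCHIVE",
    "lengthSeconds": 3600,
    "game": {"id": "1", "name": "Example Game"},
    "creator": {"login": "examplechannel", "displayName": "ExampleChannel"},
}

print(video_filename(video, "mkv", DEFAULT_OUTPUT_TEMPLATE))
# -> 2024-08-30_702689313_examplechannel_<slugified title>.mkv
# (the exact slug depends on utils.slugify)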

View File

@ -1,15 +1,21 @@
import json
import sys
from itertools import islice
from typing import Any, Callable, Generator, List, Optional, TypeVar
import click
from twitchdl import utils
from twitchdl.twitch import Clip, Video
from twitchdl.entities import Clip, Video
T = TypeVar("T")
def clear_line():
sys.stdout.write("\033[1K")
sys.stdout.write("\r")
def truncate(string: str, length: int) -> str:
if len(string) > length:
return string[: length - 1] + "…"

View File

@ -3,6 +3,7 @@ Parse and manipulate m3u8 playlists.
"""
from dataclasses import dataclass
from pathlib import Path
from typing import Generator, List, Optional, OrderedDict
import click
@ -81,7 +82,7 @@ def enumerate_vods(
def make_join_playlist(
playlist: m3u8.M3U8,
vods: List[Vod],
targets: List[str],
targets: List[Path],
) -> m3u8.Playlist:
"""
Make a modified playlist which references downloaded VODs
@ -93,7 +94,7 @@ def make_join_playlist(
playlist.segments.clear()
for segment in org_segments:
if segment.uri in path_map:
segment.uri = path_map[segment.uri]
segment.uri = str(path_map[segment.uri])
playlist.segments.append(segment)
return playlist

View File

@ -7,7 +7,7 @@ from typing import Deque, Dict, NamedTuple, Optional
import click
from twitchdl.output import blue
from twitchdl.output import blue, clear_line
from twitchdl.utils import format_size, format_time
logger = logging.getLogger(__name__)
@ -32,7 +32,7 @@ class Sample(NamedTuple):
class Progress:
def __init__(self, vod_count: int):
def __init__(self, file_count: Optional[int] = None):
self.downloaded: int = 0
self.estimated_total: Optional[int] = None
self.last_printed: Optional[float] = None
@ -42,8 +42,8 @@ class Progress:
self.samples: Deque[Sample] = deque(maxlen=1000)
self.speed: Optional[float] = None
self.tasks: Dict[TaskId, Task] = {}
self.vod_count = vod_count
self.vod_downloaded_count: int = 0
self.file_count = file_count
self.downloaded_count: int = 0
def start(self, task_id: int, size: int):
if task_id in self.tasks:
@ -68,7 +68,7 @@ class Progress:
self.tasks[task_id] = Task(task_id, size)
self.progress_bytes += size
self.vod_downloaded_count += 1
self.downloaded_count += 1
self.print()
def abort(self, task_id: int):
@ -89,13 +89,15 @@ class Progress:
f"Taks {task_id} ended with {task.downloaded}b downloaded, expected {task.size}b."
)
self.vod_downloaded_count += 1
self.downloaded_count += 1
self.print()
def _recalculate(self):
self.estimated_total = (
int(mean(t.size for t in self.tasks.values()) * self.vod_count) if self.tasks else None
)
if self.tasks and self.file_count:
self.estimated_total = int(mean(t.size for t in self.tasks.values()) * self.file_count)
else:
self.estimated_total = None
self.speed = self._calculate_speed()
self.progress_perc = (
int(100 * self.progress_bytes / self.estimated_total) if self.estimated_total else 0
@ -127,7 +129,9 @@ class Progress:
self._recalculate()
click.echo(f"\rDownloaded {self.vod_downloaded_count}/{self.vod_count} VODs", nl=False)
clear_line()
total_label = f"/{self.file_count}" if self.file_count else ""
click.echo(f"Downloaded {self.downloaded_count}{total_label} VODs", nl=False)
click.secho(f" {self.progress_perc}%", fg="blue", nl=False)
if self.estimated_total is not None:
@ -141,6 +145,4 @@ class Progress:
if self.remaining_time is not None:
click.echo(f" ETA {blue(format_time(self.remaining_time))}", nl=False)
click.echo(" ", nl=False)
self.last_printed = now
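
With the count now optional, Progress can track downloads whose total number is not known up front (for example, clips coming from a paginated generator): the "/total" part is simply dropped from the status line and no overall size estimate is computed. A short sketch using the same API as the tests earlier in this diff; task sizes are invented:

# Sketch of driving Progress without a known file count; sizes are invented.
from twitchdl.progress import Progress

progress = Progress()  # file_count defaults to None

for task_id, size in enumerate([100, 250, 175]):
    progress.start(task_id, size)
    progress.advance(task_id, size)
    progress.end(task_id)

assert progress.downloaded_count == 3
assert progress.file_count is None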

View File

@ -2,83 +2,27 @@
Twitch API access.
"""
import json
import logging
import time
from typing import Any, Dict, Generator, List, Literal, Mapping, Optional, Tuple, TypedDict, Union
from typing import Any, Dict, Generator, List, Mapping, Optional, Tuple, Union
import click
import httpx
from twitchdl import CLIENT_ID
from twitchdl.entities import Data
from twitchdl.entities import (
AccessToken,
Chapter,
Clip,
ClipAccessToken,
ClipsPeriod,
Data,
Video,
VideosSort,
VideosType,
)
from twitchdl.exceptions import ConsoleError
ClipsPeriod = Literal["last_day", "last_week", "last_month", "all_time"]
VideosSort = Literal["views", "time"]
VideosType = Literal["archive", "highlight", "upload"]
class AccessToken(TypedDict):
signature: str
value: str
class User(TypedDict):
login: str
displayName: str
class Game(TypedDict):
id: str
name: str
class VideoQuality(TypedDict):
frameRate: str
quality: str
sourceURL: str
class ClipAccessToken(TypedDict):
id: str
playbackAccessToken: AccessToken
videoQualities: List[VideoQuality]
class Clip(TypedDict):
id: str
slug: str
title: str
createdAt: str
viewCount: int
durationSeconds: int
url: str
videoQualities: List[VideoQuality]
game: Game
broadcaster: User
class Video(TypedDict):
id: str
title: str
description: str
publishedAt: str
broadcastType: str
lengthSeconds: int
game: Game
creator: User
class Chapter(TypedDict):
id: str
durationMilliseconds: int
positionMilliseconds: int
type: str
description: str
subDescription: str
thumbnailURL: str
game: Game
from twitchdl.utils import format_size
class GQLError(click.ClickException):
@ -135,22 +79,23 @@ logger = logging.getLogger(__name__)
def log_request(request: httpx.Request):
logger.debug(f"--> {request.method} {request.url}")
logger.info(f"--> {request.method} {request.url}")
if request.content:
logger.debug(f"--> {request.content}")
def log_response(response: httpx.Response, duration: float):
def log_response(response: httpx.Response, duration_seconds: float):
request = response.request
duration_ms = int(1000 * duration)
logger.debug(f"<-- {request.method} {request.url} HTTP {response.status_code} {duration_ms}ms")
duration = f"{int(1000 * duration_seconds)}ms"
size = format_size(len(response.content))
logger.info(f"<-- {request.method} {request.url} HTTP {response.status_code} {duration} {size}")
if response.content:
logger.debug(f"<-- {response.content}")
def gql_post(query: str):
def gql_persisted_query(query: Data):
url = "https://gql.twitch.tv/gql"
response = authenticated_post(url, content=query)
response = authenticated_post(url, json=query)
gql_raise_on_error(response)
return response.json()
@ -238,22 +183,18 @@ def get_clip(slug: str) -> Optional[Clip]:
def get_clip_access_token(slug: str) -> ClipAccessToken:
query = f"""
{{
query = {
"operationName": "VideoAccessToken_Clip",
"variables": {{
"slug": "{slug}"
}},
"extensions": {{
"persistedQuery": {{
"variables": {"slug": slug},
"extensions": {
"persistedQuery": {
"version": 1,
"sha256Hash": "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11"
}}
}}
}}
"""
"sha256Hash": "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11",
}
},
}
response = gql_post(query.strip())
response = gql_persisted_query(query)
return response["data"]["clip"]
@ -325,23 +266,6 @@ def channel_clips_generator(
return _generator(clips, limit)
def channel_clips_generator_old(channel_id: str, period: ClipsPeriod, limit: int):
cursor = ""
while True:
clips = get_channel_clips(channel_id, period, limit, after=cursor)
if not clips["edges"]:
break
has_next = clips["pageInfo"]["hasNextPage"]
cursor = clips["edges"][-1]["cursor"] if has_next else None
yield clips, has_next
if not cursor:
break
def get_channel_videos(
channel_id: str,
limit: int,
@ -503,7 +427,7 @@ def get_video_chapters(video_id: str) -> List[Chapter]:
},
}
response = gql_post(json.dumps(query))
response = gql_persisted_query(query)
return list(_chapter_nodes(response["data"]["video"]["moments"]))

View File

@ -85,6 +85,7 @@ def titlify(value: str) -> str:
VIDEO_PATTERNS = [
r"^(?P<id>\d+)?$",
r"^https://(www\.|m\.)?twitch\.tv/videos/(?P<id>\d+)(\?.+)?$",
r"^https://(www\.|m\.)?twitch\.tv/\w+/video/(?P<id>\d+)(\?.+)?$",
]
CLIP_PATTERNS = [