Compare commits


No commits in common. "master" and "2.2.3" have entirely different histories.

22 changed files with 344 additions and 569 deletions

.gitignore

@ -15,5 +15,3 @@ tmp/
/*.pyz /*.pyz
/pyrightconfig.json /pyrightconfig.json
/book /book
*.mp4
*.mkv


@ -3,32 +3,6 @@ twitch-dl changelog
<!-- Do not edit. This file is automatically generated from changelog.yaml.--> <!-- Do not edit. This file is automatically generated from changelog.yaml.-->
### [2.5.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.5.0)
* Add support for HD video qualities (#163)
### [2.4.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.4.0)
* Add `clips --target-dir` option. Use in conjunction with `--download` to
specify target directory.
* Fix a crash when downloading clips (#160)
* Handle video URLs which contain the channel name (#162)
* Don't stop downloading clips if one download fails
### [2.3.1 (2024-05-19)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.1)
* Fix fetching access token (#155, thanks @KryptonicDragon)
### [2.3.0 (2024-04-27)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.0)
* Show more playlist data when choosing quality
* Improve detection of 'source' quality for Twitch Enhanced Broadcast Streams
(#154)
### [2.2.4 (2024-04-25)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.4)
* Add m dot url support to video and clip regexes (thanks @localnerve)
### [2.2.3 (2024-04-24)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.3) ### [2.2.3 (2024-04-24)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.3)
* Respect --dry-run option when downloading videos * Respect --dry-run option when downloading videos
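
As a quick illustration of the `clips --target-dir` entry above (2.4.0), a hedged in-process invocation using Click's test runner could look like the following; the channel name and directory are placeholders, and this assumes a twitch-dl version that ships the option.

from click.testing import CliRunner
from twitchdl.cli import cli

runner = CliRunner()
# Download every clip for the (made-up) channel into ./downloaded_clips
result = runner.invoke(
    cli, ["clips", "somechannel", "--download", "--target-dir", "downloaded_clips"]
)
print(result.output)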


@ -7,7 +7,7 @@ dist:
clean : clean :
find . -name "*pyc" | xargs rm -rf $1 find . -name "*pyc" | xargs rm -rf $1
rm -rf build dist book bundle MANIFEST htmlcov deb_dist twitch-dl.*.pyz twitch-dl.1.man twitch_dl.egg-info rm -rf build dist bundle MANIFEST htmlcov deb_dist twitch-dl.*.pyz twitch-dl.1.man twitch_dl.egg-info
bundle: bundle:
mkdir bundle mkdir bundle


@ -1,32 +1,3 @@
2.5.0:
date: 2024-08-30
changes:
- "Add support for HD video qualities (#163)"
2.4.0:
date: 2024-08-30
changes:
- "Add `clips --target-dir` option. Use in conjunction with `--download` to specify target directory."
- "Fix a crash when downloading clips (#160)"
- "Handle video URLs which contain the channel name (#162)"
- "Don't stop downloading clips if one download fails"
2.3.1:
date: 2024-05-19
changes:
- "Fix fetching access token (#155, thanks @KryptonicDragon)"
2.3.0:
date: 2024-04-27
changes:
- "Show more playlist data when choosing quality"
- "Improve detection of 'source' quality for Twitch Enhanced Broadcast Streams (#154)"
2.2.4:
date: 2024-04-25
changes:
- "Add m dot url support to video and clip regexes (thanks @localnerve)"
2.2.3: 2.2.3:
date: 2024-04-24 date: 2024-04-24
changes: changes:


@ -3,32 +3,6 @@ twitch-dl changelog
<!-- Do not edit. This file is automatically generated from changelog.yaml.--> <!-- Do not edit. This file is automatically generated from changelog.yaml.-->
### [2.5.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.5.0)
* Add support for HD video qualities (#163)
### [2.4.0 (2024-08-30)](https://github.com/ihabunek/twitch-dl/releases/tag/2.4.0)
* Add `clips --target-dir` option. Use in conjunction with `--download` to
specify target directory.
* Fix a crash when downloading clips (#160)
* Handle video URLs which contain the channel name (#162)
* Don't stop downloading clips if one download fails
### [2.3.1 (2024-05-19)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.1)
* Fix fetching access token (#155, thanks @KryptonicDragon)
### [2.3.0 (2024-04-27)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.0)
* Show more playlist data when choosing quality
* Improve detection of 'source' quality for Twitch Enhanced Broadcast Streams
(#154)
### [2.2.4 (2024-04-25)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.4)
* Add m dot url support to video and clip regexes (thanks @localnerve)
### [2.2.3 (2024-04-24)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.3) ### [2.2.3 (2024-04-24)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.3)
* Respect --dry-run option when downloading videos * Respect --dry-run option when downloading videos


@ -43,11 +43,6 @@ twitch-dl clips [OPTIONS] CHANNEL_NAME
<td>Period from which to return clips. Possible values: <code>last_day</code>, <code>last_week</code>, <code>last_month</code>, <code>all_time</code>. [default: <code>all_time</code>]</td> <td>Period from which to return clips. Possible values: <code>last_day</code>, <code>last_week</code>, <code>last_month</code>, <code>all_time</code>. [default: <code>all_time</code>]</td>
</tr> </tr>
<tr>
<td class="code">-t, --target-dir</td>
<td>Target directory when downloading clips [default: <code>.</code>]</td>
</tr>
<tr> <tr>
<td class="code">--json</td> <td class="code">--json</td>
<td>Print data as JSON rather than human readable text</td> <td>Print data as JSON rather than human readable text</td>


@ -22,7 +22,7 @@ classifiers = [
dependencies = [ dependencies = [
"click>=8.0.0,<9.0.0", "click>=8.0.0,<9.0.0",
"httpx>=0.17.0,<1.0.0", "httpx>=0.17.0,<1.0.0",
"m3u8>=3.0.0,<7.0.0", "m3u8>=1.0.0,<4.0.0",
] ]
[tool.setuptools] [tool.setuptools]


@ -1,39 +1,35 @@
import pytest import pytest
from twitchdl.utils import parse_clip_identifier, parse_video_identifier from twitchdl.utils import parse_video_identifier, parse_clip_identifier
TEST_VIDEO_PATTERNS = [ TEST_VIDEO_PATTERNS = [
("702689313", "702689313"), ("702689313", "702689313"),
("702689313", "https://twitch.tv/videos/702689313"), ("702689313", "https://twitch.tv/videos/702689313"),
("702689313", "https://www.twitch.tv/videos/702689313"), ("702689313", "https://www.twitch.tv/videos/702689313"),
("702689313", "https://m.twitch.tv/videos/702689313"),
("2223719525", "https://www.twitch.tv/r0dn3y/video/2223719525"),
] ]
TEST_CLIP_PATTERNS = { TEST_CLIP_PATTERNS = {
("AbrasivePlayfulMangoMau5", "AbrasivePlayfulMangoMau5"), ("AbrasivePlayfulMangoMau5", "AbrasivePlayfulMangoMau5"),
("AbrasivePlayfulMangoMau5", "https://clips.twitch.tv/AbrasivePlayfulMangoMau5"), ("AbrasivePlayfulMangoMau5", "https://clips.twitch.tv/AbrasivePlayfulMangoMau5"),
("AbrasivePlayfulMangoMau5", "https://www.twitch.tv/dracul1nx/clip/AbrasivePlayfulMangoMau5"), ("AbrasivePlayfulMangoMau5", "https://www.twitch.tv/dracul1nx/clip/AbrasivePlayfulMangoMau5"),
("AbrasivePlayfulMangoMau5", "https://m.twitch.tv/dracul1nx/clip/AbrasivePlayfulMangoMau5"),
("AbrasivePlayfulMangoMau5", "https://twitch.tv/dracul1nx/clip/AbrasivePlayfulMangoMau5"), ("AbrasivePlayfulMangoMau5", "https://twitch.tv/dracul1nx/clip/AbrasivePlayfulMangoMau5"),
("HungryProudRadicchioDoggo", "HungryProudRadicchioDoggo"), ("HungryProudRadicchioDoggo", "HungryProudRadicchioDoggo"),
("HungryProudRadicchioDoggo", "https://clips.twitch.tv/HungryProudRadicchioDoggo"), ("HungryProudRadicchioDoggo", "https://clips.twitch.tv/HungryProudRadicchioDoggo"),
("HungryProudRadicchioDoggo", "https://www.twitch.tv/bananasaurus_rex/clip/HungryProudRadicchioDoggo?filter=clips&range=7d&sort=time"), ("HungryProudRadicchioDoggo", "https://www.twitch.tv/bananasaurus_rex/clip/HungryProudRadicchioDoggo?filter=clips&range=7d&sort=time"),
("HungryProudRadicchioDoggo", "https://m.twitch.tv/bananasaurus_rex/clip/HungryProudRadicchioDoggo?filter=clips&range=7d&sort=time"),
("HungryProudRadicchioDoggo", "https://twitch.tv/bananasaurus_rex/clip/HungryProudRadicchioDoggo?filter=clips&range=7d&sort=time"), ("HungryProudRadicchioDoggo", "https://twitch.tv/bananasaurus_rex/clip/HungryProudRadicchioDoggo?filter=clips&range=7d&sort=time"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ"), ("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ"), ("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ?filter=clips&range=7d&sort=time"), ("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ?filter=clips&range=7d&sort=time"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://www.twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ?filter=clips&range=7d&sort=time"), ("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://www.twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ?filter=clips&range=7d&sort=time"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://m.twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ?filter=clips&range=7d&sort=time"),
} }
@pytest.mark.parametrize("expected,input", TEST_VIDEO_PATTERNS) @pytest.mark.parametrize("expected,input", TEST_VIDEO_PATTERNS)
def test_video_patterns(expected: str, input: str): def test_video_patterns(expected, input):
assert parse_video_identifier(input) == expected assert parse_video_identifier(input) == expected
@pytest.mark.parametrize("expected,input", TEST_CLIP_PATTERNS) @pytest.mark.parametrize("expected,input", TEST_CLIP_PATTERNS)
def test_clip_patterns(expected: str, input: str): def test_clip_patterns(expected, input):
assert parse_clip_identifier(input) == expected assert parse_clip_identifier(input) == expected


@ -8,8 +8,8 @@ def test_initial_values():
assert progress.progress_perc == 0 assert progress.progress_perc == 0
assert progress.remaining_time is None assert progress.remaining_time is None
assert progress.speed is None assert progress.speed is None
assert progress.file_count == 10 assert progress.vod_count == 10
assert progress.downloaded_count == 0 assert progress.vod_downloaded_count == 0
def test_downloaded(): def test_downloaded():
@ -23,31 +23,26 @@ def test_downloaded():
assert progress.progress_perc == 0 assert progress.progress_perc == 0
progress.advance(1, 100) progress.advance(1, 100)
progress._recalculate()
assert progress.downloaded == 100 assert progress.downloaded == 100
assert progress.progress_bytes == 100 assert progress.progress_bytes == 100
assert progress.progress_perc == 11 assert progress.progress_perc == 11
progress.advance(2, 200) progress.advance(2, 200)
progress._recalculate()
assert progress.downloaded == 300 assert progress.downloaded == 300
assert progress.progress_bytes == 300 assert progress.progress_bytes == 300
assert progress.progress_perc == 33 assert progress.progress_perc == 33
progress.advance(3, 150) progress.advance(3, 150)
progress._recalculate()
assert progress.downloaded == 450 assert progress.downloaded == 450
assert progress.progress_bytes == 450 assert progress.progress_bytes == 450
assert progress.progress_perc == 50 assert progress.progress_perc == 50
progress.advance(1, 50) progress.advance(1, 50)
progress._recalculate()
assert progress.downloaded == 500 assert progress.downloaded == 500
assert progress.progress_bytes == 500 assert progress.progress_bytes == 500
assert progress.progress_perc == 55 assert progress.progress_perc == 55
progress.abort(2) progress.abort(2)
progress._recalculate()
assert progress.downloaded == 500 assert progress.downloaded == 500
assert progress.progress_bytes == 300 assert progress.progress_bytes == 300
assert progress.progress_perc == 33 assert progress.progress_perc == 33
@ -57,7 +52,6 @@ def test_downloaded():
progress.advance(1, 150) progress.advance(1, 150)
progress.advance(2, 300) progress.advance(2, 300)
progress.advance(3, 150) progress.advance(3, 150)
progress._recalculate()
assert progress.downloaded == 1100 assert progress.downloaded == 1100
assert progress.progress_bytes == 900 assert progress.progress_bytes == 900
@ -77,15 +71,12 @@ def test_estimated_total():
assert progress.estimated_total is None assert progress.estimated_total is None
progress.start(1, 12000) progress.start(1, 12000)
progress._recalculate()
assert progress.estimated_total == 12000 * 3 assert progress.estimated_total == 12000 * 3
progress.start(2, 11000) progress.start(2, 11000)
progress._recalculate()
assert progress.estimated_total == 11500 * 3 assert progress.estimated_total == 11500 * 3
progress.start(3, 10000) progress.start(3, 10000)
progress._recalculate()
assert progress.estimated_total == 11000 * 3 assert progress.estimated_total == 11000 * 3
@ -96,16 +87,16 @@ def test_vod_downloaded_count():
progress.start(2, 100) progress.start(2, 100)
progress.start(3, 100) progress.start(3, 100)
assert progress.downloaded_count == 0 assert progress.vod_downloaded_count == 0
progress.advance(1, 100) progress.advance(1, 100)
progress.end(1) progress.end(1)
assert progress.downloaded_count == 1 assert progress.vod_downloaded_count == 1
progress.advance(2, 100) progress.advance(2, 100)
progress.end(2) progress.end(2)
assert progress.downloaded_count == 2 assert progress.vod_downloaded_count == 2
progress.advance(3, 100) progress.advance(3, 100)
progress.end(3) progress.end(3)
assert progress.downloaded_count == 3 assert progress.vod_downloaded_count == 3
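
The expected values in test_estimated_total above follow from how Progress estimates the total: the mean size of the VODs started so far multiplied by the total VOD count (3 in that test). A small standalone check of that arithmetic:

from statistics import mean

vod_count = 3
started_sizes = [12000]
assert int(mean(started_sizes) * vod_count) == 12000 * 3  # 36000
started_sizes.append(11000)
assert int(mean(started_sizes) * vod_count) == 11500 * 3  # 34500
started_sizes.append(10000)
assert int(mean(started_sizes) * vod_count) == 11000 * 3  # 33000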


@ -2,14 +2,12 @@ import logging
import platform import platform
import re import re
import sys import sys
from pathlib import Path
from typing import Optional, Tuple from typing import Optional, Tuple
import click import click
from twitchdl import __version__ from twitchdl import __version__
from twitchdl.entities import DownloadOptions from twitchdl.entities import DownloadOptions
from twitchdl.naming import DEFAULT_OUTPUT_TEMPLATE
from twitchdl.twitch import ClipsPeriod, VideosSort, VideosType from twitchdl.twitch import ClipsPeriod, VideosSort, VideosType
# Tweak the Click context # Tweak the Click context
@ -81,12 +79,11 @@ def validate_rate(_ctx: click.Context, _param: click.Parameter, value: str) -> O
@click.group(context_settings=CONTEXT) @click.group(context_settings=CONTEXT)
@click.option("--debug/--no-debug", default=False, help="Enable debug logging to stderr") @click.option("--debug/--no-debug", default=False, help="Log debug info to stderr")
@click.option("--verbose/--no-verbose", default=False, help="More verbose debug logging")
@click.option("--color/--no-color", default=sys.stdout.isatty(), help="Use ANSI color in output") @click.option("--color/--no-color", default=sys.stdout.isatty(), help="Use ANSI color in output")
@click.version_option(package_name="twitch-dl") @click.version_option(package_name="twitch-dl")
@click.pass_context @click.pass_context
def cli(ctx: click.Context, color: bool, debug: bool, verbose: bool): def cli(ctx: click.Context, color: bool, debug: bool):
"""twitch-dl - twitch.tv downloader """twitch-dl - twitch.tv downloader
https://twitch-dl.bezdomni.net/ https://twitch-dl.bezdomni.net/
@ -94,7 +91,7 @@ def cli(ctx: click.Context, color: bool, debug: bool, verbose: bool):
ctx.color = color ctx.color = color
if debug: if debug:
logging.basicConfig(level=logging.DEBUG if verbose else logging.INFO) logging.basicConfig(level=logging.DEBUG)
logging.getLogger("httpx").setLevel(logging.WARN) logging.getLogger("httpx").setLevel(logging.WARN)
logging.getLogger("httpcore").setLevel(logging.WARN) logging.getLogger("httpcore").setLevel(logging.WARN)
@ -142,18 +139,6 @@ def cli(ctx: click.Context, color: bool, debug: bool, verbose: bool):
default="all_time", default="all_time",
type=click.Choice(["last_day", "last_week", "last_month", "all_time"]), type=click.Choice(["last_day", "last_week", "last_month", "all_time"]),
) )
@click.option(
"-t",
"--target-dir",
help="Target directory when downloading clips",
type=click.Path(
file_okay=False,
readable=False,
writable=True,
path_type=Path,
),
default=Path(),
)
@json_option @json_option
def clips( def clips(
channel_name: str, channel_name: str,
@ -164,14 +149,10 @@ def clips(
limit: Optional[int], limit: Optional[int],
pager: Optional[int], pager: Optional[int],
period: ClipsPeriod, period: ClipsPeriod,
target_dir: Path,
): ):
"""List or download clips for given CHANNEL_NAME.""" """List or download clips for given CHANNEL_NAME."""
from twitchdl.commands.clips import clips from twitchdl.commands.clips import clips
if not target_dir.exists():
target_dir.mkdir(parents=True, exist_ok=True)
clips( clips(
channel_name, channel_name,
all=all, all=all,
@ -181,7 +162,6 @@ def clips(
limit=limit, limit=limit,
pager=pager, pager=pager,
period=period, period=period,
target_dir=target_dir,
) )
@ -249,7 +229,7 @@ def clips(
"-o", "-o",
"--output", "--output",
help="Output file name template. See docs for details.", help="Output file name template. See docs for details.",
default=DEFAULT_OUTPUT_TEMPLATE, default="{date}_{id}_{channel_login}_{title_slug}.{format}",
) )
@click.option( @click.option(
"-q", "-q",


@ -1,15 +1,13 @@
import re import re
import sys import sys
from os import path from os import path
from pathlib import Path from typing import Callable, Generator, Optional
from typing import Callable, Generator, List, Optional
import click import click
from twitchdl import twitch, utils from twitchdl import twitch, utils
from twitchdl.commands.download import get_clip_authenticated_url from twitchdl.commands.download import get_clip_authenticated_url
from twitchdl.entities import VideoQuality from twitchdl.download import download_file
from twitchdl.http import download_file
from twitchdl.output import green, print_clip, print_clip_compact, print_json, print_paged, yellow from twitchdl.output import green, print_clip, print_clip_compact, print_json, print_paged, yellow
from twitchdl.twitch import Clip, ClipsPeriod from twitchdl.twitch import Clip, ClipsPeriod
@ -24,7 +22,6 @@ def clips(
limit: Optional[int] = None, limit: Optional[int] = None,
pager: Optional[int] = None, pager: Optional[int] = None,
period: ClipsPeriod = "all_time", period: ClipsPeriod = "all_time",
target_dir: Path = Path(),
): ):
# Set different defaults for limit for compact display # Set different defaults for limit for compact display
default_limit = 40 if compact else 10 default_limit = 40 if compact else 10
@ -38,7 +35,7 @@ def clips(
return print_json(list(generator)) return print_json(list(generator))
if download: if download:
return _download_clips(target_dir, generator) return _download_clips(generator)
print_fn = print_clip_compact if compact else print_clip print_fn = print_clip_compact if compact else print_clip
@ -48,8 +45,8 @@ def clips(
return _print_all(generator, print_fn, all) return _print_all(generator, print_fn, all)
def _target_filename(clip: Clip, video_qualities: List[VideoQuality]): def _target_filename(clip: Clip):
url = video_qualities[0]["sourceURL"] url = clip["videoQualities"][0]["sourceURL"]
_, ext = path.splitext(url) _, ext = path.splitext(url)
ext = ext.lstrip(".") ext = ext.lstrip(".")
@ -70,27 +67,16 @@ def _target_filename(clip: Clip, video_qualities: List[VideoQuality]):
return f"{name}.{ext}" return f"{name}.{ext}"
def _download_clips(target_dir: Path, generator: Generator[Clip, None, None]): def _download_clips(generator: Generator[Clip, None, None]):
if not target_dir.exists():
target_dir.mkdir(parents=True, exist_ok=True)
for clip in generator: for clip in generator:
# videoQualities can be null in some circumstances, see: target = _target_filename(clip)
# https://github.com/ihabunek/twitch-dl/issues/160
if not clip["videoQualities"]:
continue
target = target_dir / _target_filename(clip, clip["videoQualities"]) if path.exists(target):
if target.exists():
click.echo(f"Already downloaded: {green(target)}") click.echo(f"Already downloaded: {green(target)}")
else: else:
try: url = get_clip_authenticated_url(clip["slug"], "source")
url = get_clip_authenticated_url(clip["slug"], "source") click.echo(f"Downloading: {yellow(target)}")
click.echo(f"Downloading: {yellow(target)}") download_file(url, target)
download_file(url, target)
except Exception as ex:
click.secho(ex, err=True, fg="red")
def _print_all( def _print_all(


@ -1,39 +1,35 @@
import asyncio import asyncio
import os
import platform import platform
import re import re
import shlex
import shutil import shutil
import subprocess import subprocess
import tempfile import tempfile
from os import path
from pathlib import Path from pathlib import Path
from typing import List, Optional from typing import Dict, List
from urllib.parse import urlencode, urlparse from urllib.parse import urlencode, urlparse
import click import click
import httpx import httpx
from twitchdl import twitch, utils from twitchdl import twitch, utils
from twitchdl.download import download_file
from twitchdl.entities import DownloadOptions from twitchdl.entities import DownloadOptions
from twitchdl.exceptions import ConsoleError from twitchdl.exceptions import ConsoleError
from twitchdl.http import download_all, download_file from twitchdl.http import download_all
from twitchdl.naming import clip_filename, video_filename
from twitchdl.output import blue, bold, green, print_log, yellow from twitchdl.output import blue, bold, green, print_log, yellow
from twitchdl.playlists import ( from twitchdl.playlists import (
enumerate_vods, enumerate_vods,
get_init_sections,
load_m3u8, load_m3u8,
make_join_playlist, make_join_playlist,
parse_playlists, parse_playlists,
select_playlist, select_playlist,
) )
from twitchdl.twitch import Chapter, ClipAccessToken, Video from twitchdl.twitch import Chapter, Clip, ClipAccessToken, Video
def download(ids: List[str], args: DownloadOptions): def download(ids: List[str], args: DownloadOptions):
if not ids:
print_log("No IDs to downlad given")
return
for video_id in ids: for video_id in ids:
download_one(video_id, args) download_one(video_id, args)
@ -50,14 +46,14 @@ def download_one(video: str, args: DownloadOptions):
raise ConsoleError(f"Invalid input: {video}") raise ConsoleError(f"Invalid input: {video}")
def _join_vods(playlist_path: Path, target: Path, overwrite: bool, video: Video): def _join_vods(playlist_path: str, target: str, overwrite: bool, video: Video):
description = video["description"] or "" description = video["description"] or ""
description = description.strip() description = description.strip()
command: List[str] = [ command = [
"ffmpeg", "ffmpeg",
"-i", "-i",
str(playlist_path), playlist_path,
"-c", "-c",
"copy", "copy",
"-metadata", "-metadata",
@ -77,15 +73,15 @@ def _join_vods(playlist_path: Path, target: Path, overwrite: bool, video: Video)
if overwrite: if overwrite:
command.append("-y") command.append("-y")
click.secho(f"{shlex.join(command)}", dim=True) click.secho(f"{' '.join(command)}", dim=True)
result = subprocess.run(command) result = subprocess.run(command)
if result.returncode != 0: if result.returncode != 0:
raise ConsoleError("Joining files failed") raise ConsoleError("Joining files failed")
def _concat_vods(vod_paths: List[Path], target: Path): def _concat_vods(vod_paths: List[str], target: str):
tool = "type" if platform.system() == "Windows" else "cat" tool = "type" if platform.system() == "Windows" else "cat"
command = [tool] + [str(p) for p in vod_paths] command = [tool] + vod_paths
with open(target, "wb") as target_file: with open(target, "wb") as target_file:
result = subprocess.run(command, stdout=target_file) result = subprocess.run(command, stdout=target_file)
@ -93,15 +89,74 @@ def _concat_vods(vod_paths: List[Path], target: Path):
raise ConsoleError(f"Joining files failed: {result.stderr}") raise ConsoleError(f"Joining files failed: {result.stderr}")
def _crete_temp_dir(base_uri: str) -> Path: def get_video_placeholders(video: Video, format: str) -> Dict[str, str]:
date, time = video["publishedAt"].split("T")
game = video["game"]["name"] if video["game"] else "Unknown"
return {
"channel": video["creator"]["displayName"],
"channel_login": video["creator"]["login"],
"date": date,
"datetime": video["publishedAt"],
"format": format,
"game": game,
"game_slug": utils.slugify(game),
"id": video["id"],
"time": time,
"title": utils.titlify(video["title"]),
"title_slug": utils.slugify(video["title"]),
}
def _video_target_filename(video: Video, args: DownloadOptions):
subs = get_video_placeholders(video, args.format)
try:
return args.output.format(**subs)
except KeyError as e:
supported = ", ".join(subs.keys())
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
def _clip_target_filename(clip: Clip, args: DownloadOptions):
date, time = clip["createdAt"].split("T")
game = clip["game"]["name"] if clip["game"] else "Unknown"
url = clip["videoQualities"][0]["sourceURL"]
_, ext = path.splitext(url)
ext = ext.lstrip(".")
subs = {
"channel": clip["broadcaster"]["displayName"],
"channel_login": clip["broadcaster"]["login"],
"date": date,
"datetime": clip["createdAt"],
"format": ext,
"game": game,
"game_slug": utils.slugify(game),
"id": clip["id"],
"slug": clip["slug"],
"time": time,
"title": utils.titlify(clip["title"]),
"title_slug": utils.slugify(clip["title"]),
}
try:
return args.output.format(**subs)
except KeyError as e:
supported = ", ".join(subs.keys())
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
def _crete_temp_dir(base_uri: str) -> str:
"""Create a temp dir to store downloads if it doesn't exist.""" """Create a temp dir to store downloads if it doesn't exist."""
path = urlparse(base_uri).path.lstrip("/") path = urlparse(base_uri).path.lstrip("/")
temp_dir = Path(tempfile.gettempdir(), "twitch-dl", path) temp_dir = Path(tempfile.gettempdir(), "twitch-dl", path)
temp_dir.mkdir(parents=True, exist_ok=True) temp_dir.mkdir(parents=True, exist_ok=True)
return temp_dir return str(temp_dir)
def _get_clip_url(access_token: ClipAccessToken, quality: Optional[str]) -> str: def _get_clip_url(access_token: ClipAccessToken, quality: str) -> str:
qualities = access_token["videoQualities"] qualities = access_token["videoQualities"]
# Quality given as an argument # Quality given as an argument
@ -129,7 +184,7 @@ def _get_clip_url(access_token: ClipAccessToken, quality: Optional[str]) -> str:
return selected_quality["sourceURL"] return selected_quality["sourceURL"]
def get_clip_authenticated_url(slug: str, quality: Optional[str]): def get_clip_authenticated_url(slug: str, quality: str):
print_log("Fetching access token...") print_log("Fetching access token...")
access_token = twitch.get_clip_access_token(slug) access_token = twitch.get_clip_access_token(slug)
@ -161,10 +216,10 @@ def _download_clip(slug: str, args: DownloadOptions) -> None:
duration = utils.format_duration(clip["durationSeconds"]) duration = utils.format_duration(clip["durationSeconds"])
click.echo(f"Found: {green(title)} by {yellow(user)}, playing {blue(game)} ({duration})") click.echo(f"Found: {green(title)} by {yellow(user)}, playing {blue(game)} ({duration})")
target = Path(clip_filename(clip, args.output)) target = _clip_target_filename(clip, args)
click.echo(f"Target: {blue(target)}") click.echo(f"Target: {blue(target)}")
if not args.overwrite and target.exists(): if not args.overwrite and path.exists(target):
response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False) response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False)
if response.lower().strip() != "y": if response.lower().strip() != "y":
raise click.Abort() raise click.Abort()
@ -193,10 +248,10 @@ def _download_video(video_id: str, args: DownloadOptions) -> None:
click.echo(f"Found: {blue(video['title'])} by {yellow(video['creator']['displayName'])}") click.echo(f"Found: {blue(video['title'])} by {yellow(video['creator']['displayName'])}")
target = Path(video_filename(video, args.format, args.output)) target = _video_target_filename(video, args)
click.echo(f"Output: {blue(target)}") click.echo(f"Output: {blue(target)}")
if not args.overwrite and target.exists(): if not args.overwrite and path.exists(target):
response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False) response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False)
if response.lower().strip() != "y": if response.lower().strip() != "y":
raise click.Abort() raise click.Abort()
@ -226,32 +281,19 @@ def _download_video(video_id: str, args: DownloadOptions) -> None:
target_dir = _crete_temp_dir(base_uri) target_dir = _crete_temp_dir(base_uri)
# Save playlists for debugging purposes # Save playlists for debugging purposes
with open(target_dir / "playlists.m3u8", "w") as f: with open(path.join(target_dir, "playlists.m3u8"), "w") as f:
f.write(playlists_text) f.write(playlists_text)
with open(target_dir / "playlist.m3u8", "w") as f: with open(path.join(target_dir, "playlist.m3u8"), "w") as f:
f.write(vods_text) f.write(vods_text)
init_sections = get_init_sections(vods_m3u8) click.echo(f"\nDownloading {len(vods)} VODs using {args.max_workers} workers to {target_dir}")
for uri in init_sections:
print_log(f"Downloading init section {uri}...")
download_file(f"{base_uri}{uri}", target_dir / uri)
print_log(f"Downloading {len(vods)} VODs using {args.max_workers} workers to {target_dir}")
sources = [base_uri + vod.path for vod in vods] sources = [base_uri + vod.path for vod in vods]
targets = [target_dir / f"{vod.index:05d}.ts" for vod in vods] targets = [os.path.join(target_dir, f"{vod.index:05d}.ts") for vod in vods]
asyncio.run(download_all(sources, targets, args.max_workers, rate_limit=args.rate_limit))
asyncio.run(
download_all(
zip(sources, targets),
args.max_workers,
rate_limit=args.rate_limit,
count=len(vods),
)
)
join_playlist = make_join_playlist(vods_m3u8, vods, targets) join_playlist = make_join_playlist(vods_m3u8, vods, targets)
join_playlist_path = target_dir / "playlist_downloaded.m3u8" join_playlist_path = path.join(target_dir, "playlist_downloaded.m3u8")
join_playlist.dump(join_playlist_path) # type: ignore join_playlist.dump(join_playlist_path) # type: ignore
click.echo() click.echo()
@ -270,12 +312,12 @@ def _download_video(video_id: str, args: DownloadOptions) -> None:
click.echo() click.echo()
if args.keep: if args.keep:
click.echo(f"Temporary files not deleted: {yellow(target_dir)}") click.echo(f"Temporary files not deleted: {target_dir}")
else: else:
print_log("Deleting temporary files...") print_log("Deleting temporary files...")
shutil.rmtree(target_dir) shutil.rmtree(target_dir)
click.echo(f"Downloaded: {green(target)}") click.echo(f"\nDownloaded: {green(target)}")
def http_get(url: str) -> str: def http_get(url: str) -> str:


@ -4,9 +4,9 @@ import click
import m3u8 import m3u8
from twitchdl import twitch, utils from twitchdl import twitch, utils
from twitchdl.commands.download import get_video_placeholders
from twitchdl.exceptions import ConsoleError from twitchdl.exceptions import ConsoleError
from twitchdl.naming import video_placeholders from twitchdl.output import bold, print_clip, print_json, print_log, print_table, print_video
from twitchdl.output import bold, dim, print_clip, print_json, print_log, print_table, print_video
from twitchdl.playlists import parse_playlists from twitchdl.playlists import parse_playlists
from twitchdl.twitch import Chapter, Clip, Video from twitchdl.twitch import Chapter, Clip, Video
@ -55,19 +55,9 @@ def video_info(video: Video, playlists: str, chapters: List[Chapter]):
click.echo() click.echo()
print_video(video) print_video(video)
click.echo("Playlists:\n") click.echo("Playlists:")
for p in parse_playlists(playlists):
playlist_headers = ["Name", "Group", "Resolution", "URL"] click.echo(f"{bold(p.name)} {p.url}")
playlist_data = [
[
f"{p.name} {dim('source')}" if p.is_source else p.name,
p.group_id,
f"{p.resolution}",
p.url,
]
for p in parse_playlists(playlists)
]
print_table(playlist_headers, playlist_data)
if chapters: if chapters:
click.echo() click.echo()
@ -77,7 +67,7 @@ def video_info(video: Video, playlists: str, chapters: List[Chapter]):
duration = utils.format_time(chapter["durationMilliseconds"] // 1000) duration = utils.format_time(chapter["durationMilliseconds"] // 1000)
click.echo(f'{start} {bold(chapter["description"])} ({duration})') click.echo(f'{start} {bold(chapter["description"])} ({duration})')
placeholders = video_placeholders(video, format="mkv") placeholders = get_video_placeholders(video, format="mkv")
placeholders = [[f"{{{k}}}", v] for k, v in placeholders.items()] placeholders = [[f"{{{k}}}", v] for k, v in placeholders.items()]
click.echo("") click.echo("")
print_table(["Placeholder", "Value"], placeholders) print_table(["Placeholder", "Value"], placeholders)
@ -108,8 +98,5 @@ def clip_info(clip: Clip):
click.echo() click.echo()
click.echo("Download links:") click.echo("Download links:")
if clip["videoQualities"]: for q in clip["videoQualities"]:
for q in clip["videoQualities"]: click.echo(f"{bold(q['quality'])} [{q['frameRate']} fps] {q['sourceURL']}")
click.echo(f"{bold(q['quality'])} [{q['frameRate']} fps] {q['sourceURL']}")
else:
click.echo("No download URLs found")

twitchdl/download.py

@ -0,0 +1,37 @@
import os
import httpx
from twitchdl.exceptions import ConsoleError
CHUNK_SIZE = 1024
CONNECT_TIMEOUT = 5
RETRY_COUNT = 5
def _download(url: str, path: str):
tmp_path = path + ".tmp"
size = 0
with httpx.stream("GET", url, timeout=CONNECT_TIMEOUT) as response:
with open(tmp_path, "wb") as target:
for chunk in response.iter_bytes(chunk_size=CHUNK_SIZE):
target.write(chunk)
size += len(chunk)
os.rename(tmp_path, path)
return size
def download_file(url: str, path: str, retries: int = RETRY_COUNT):
if os.path.exists(path):
from_disk = True
return (os.path.getsize(path), from_disk)
from_disk = False
for _ in range(retries):
try:
return (_download(url, path), from_disk)
except httpx.RequestError:
pass
raise ConsoleError(f"Failed downloading after {retries} attempts: {url}")


@ -1,5 +1,5 @@
from dataclasses import dataclass from dataclasses import dataclass
from typing import Any, List, Literal, Mapping, Optional, TypedDict from typing import Any, Mapping, Optional
@dataclass @dataclass
@ -20,73 +20,6 @@ class DownloadOptions:
max_workers: int max_workers: int
ClipsPeriod = Literal["last_day", "last_week", "last_month", "all_time"]
VideosSort = Literal["views", "time"]
VideosType = Literal["archive", "highlight", "upload"]
class AccessToken(TypedDict):
signature: str
value: str
class User(TypedDict):
login: str
displayName: str
class Game(TypedDict):
id: str
name: str
class VideoQuality(TypedDict):
frameRate: str
quality: str
sourceURL: str
class ClipAccessToken(TypedDict):
id: str
playbackAccessToken: AccessToken
videoQualities: List[VideoQuality]
class Clip(TypedDict):
id: str
slug: str
title: str
createdAt: str
viewCount: int
durationSeconds: int
url: str
videoQualities: Optional[List[VideoQuality]]
game: Game
broadcaster: User
class Video(TypedDict):
id: str
title: str
description: str
publishedAt: str
broadcastType: str
lengthSeconds: int
game: Game
creator: User
class Chapter(TypedDict):
id: str
durationMilliseconds: int
positionMilliseconds: int
type: str
description: str
subDescription: str
thumbnailURL: str
game: Game
# Type for annotating decoded JSON # Type for annotating decoded JSON
# TODO: make data classes for common structs # TODO: make data classes for common structs
Data = Mapping[str, Any] Data = Mapping[str, Any]


@ -3,12 +3,10 @@ import logging
import os import os
import time import time
from abc import ABC, abstractmethod from abc import ABC, abstractmethod
from pathlib import Path from typing import List, Optional
from typing import Iterable, Optional, Tuple
import httpx import httpx
from twitchdl.exceptions import ConsoleError
from twitchdl.progress import Progress from twitchdl.progress import Progress
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -73,7 +71,7 @@ async def download(
client: httpx.AsyncClient, client: httpx.AsyncClient,
task_id: int, task_id: int,
source: str, source: str,
target: Path, target: str,
progress: Progress, progress: Progress,
token_bucket: TokenBucket, token_bucket: TokenBucket,
): ):
@ -98,12 +96,12 @@ async def download_with_retries(
semaphore: asyncio.Semaphore, semaphore: asyncio.Semaphore,
task_id: int, task_id: int,
source: str, source: str,
target: Path, target: str,
progress: Progress, progress: Progress,
token_bucket: TokenBucket, token_bucket: TokenBucket,
): ):
async with semaphore: async with semaphore:
if target.exists(): if os.path.exists(target):
size = os.path.getsize(target) size = os.path.getsize(target)
progress.already_downloaded(task_id, size) progress.already_downloaded(task_id, size)
return return
@ -121,13 +119,13 @@ async def download_with_retries(
async def download_all( async def download_all(
source_targets: Iterable[Tuple[str, Path]], sources: List[str],
targets: List[str],
workers: int, workers: int,
*, *,
count: Optional[int] = None,
rate_limit: Optional[int] = None, rate_limit: Optional[int] = None,
): ):
progress = Progress(count) progress = Progress(len(sources))
token_bucket = LimitingTokenBucket(rate_limit) if rate_limit else EndlessTokenBucket() token_bucket = LimitingTokenBucket(rate_limit) if rate_limit else EndlessTokenBucket()
async with httpx.AsyncClient(timeout=TIMEOUT) as client: async with httpx.AsyncClient(timeout=TIMEOUT) as client:
semaphore = asyncio.Semaphore(workers) semaphore = asyncio.Semaphore(workers)
@ -141,36 +139,6 @@ async def download_all(
progress, progress,
token_bucket, token_bucket,
) )
for task_id, (source, target) in enumerate(source_targets) for task_id, (source, target) in enumerate(zip(sources, targets))
] ]
await asyncio.gather(*tasks) await asyncio.gather(*tasks)
def download_file(url: str, target: Path, retries: int = RETRY_COUNT) -> None:
"""Download URL to given target path with retries"""
error_message = ""
for r in range(retries):
try:
retry_info = f" (retry {r})" if r > 0 else ""
logger.info(f"Downloading {url} to {target}{retry_info}")
return _do_download_file(url, target)
except httpx.HTTPStatusError as ex:
logger.error(ex)
error_message = f"Server responded with HTTP {ex.response.status_code}"
except httpx.RequestError as ex:
logger.error(ex)
error_message = str(ex)
raise ConsoleError(f"Failed downloading after {retries} attempts: {error_message}")
def _do_download_file(url: str, target: Path) -> None:
tmp_path = Path(str(target) + ".tmp")
with httpx.stream("GET", url, timeout=TIMEOUT, follow_redirects=True) as response:
response.raise_for_status()
with open(tmp_path, "wb") as f:
for chunk in response.iter_bytes(chunk_size=CHUNK_SIZE):
f.write(chunk)
os.rename(tmp_path, target)
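
For the master-branch download_all signature above, which takes (source, target) pairs plus an optional count, a hedged call might look like this; the URLs and file names are placeholders.

import asyncio
from pathlib import Path

from twitchdl.http import download_all

sources = ["https://example.com/vod/00000.ts", "https://example.com/vod/00001.ts"]
targets = [Path("00000.ts"), Path("00001.ts")]

asyncio.run(
    download_all(
        zip(sources, targets),
        workers=4,
        rate_limit=None,
        count=len(sources),
    )
)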


@ -1,72 +0,0 @@
import os
from typing import Dict
from twitchdl import utils
from twitchdl.entities import Clip, Video
from twitchdl.exceptions import ConsoleError
DEFAULT_OUTPUT_TEMPLATE = "{date}_{id}_{channel_login}_{title_slug}.{format}"
def video_filename(video: Video, format: str, output: str) -> str:
subs = video_placeholders(video, format)
return _format(output, subs)
def video_placeholders(video: Video, format: str) -> Dict[str, str]:
date, time = video["publishedAt"].split("T")
game = video["game"]["name"] if video["game"] else "Unknown"
return {
"channel": video["creator"]["displayName"],
"channel_login": video["creator"]["login"],
"date": date,
"datetime": video["publishedAt"],
"format": format,
"game": game,
"game_slug": utils.slugify(game),
"id": video["id"],
"time": time,
"title": utils.titlify(video["title"]),
"title_slug": utils.slugify(video["title"]),
}
def clip_filename(clip: Clip, output: str):
subs = clip_placeholders(clip)
return _format(output, subs)
def clip_placeholders(clip: Clip) -> Dict[str, str]:
date, time = clip["createdAt"].split("T")
game = clip["game"]["name"] if clip["game"] else "Unknown"
if clip["videoQualities"]:
url = clip["videoQualities"][0]["sourceURL"]
_, ext = os.path.splitext(url)
ext = ext.lstrip(".")
else:
ext = "mp4"
return {
"channel": clip["broadcaster"]["displayName"],
"channel_login": clip["broadcaster"]["login"],
"date": date,
"datetime": clip["createdAt"],
"format": ext,
"game": game,
"game_slug": utils.slugify(game),
"id": clip["id"],
"slug": clip["slug"],
"time": time,
"title": utils.titlify(clip["title"]),
"title_slug": utils.slugify(clip["title"]),
}
def _format(output: str, subs: Dict[str, str]) -> str:
try:
return output.format(**subs)
except KeyError as e:
supported = ", ".join(subs.keys())
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")


@ -1,21 +1,15 @@
import json import json
import sys
from itertools import islice from itertools import islice
from typing import Any, Callable, Generator, List, Optional, TypeVar from typing import Any, Callable, Generator, List, Optional, TypeVar
import click import click
from twitchdl import utils from twitchdl import utils
from twitchdl.entities import Clip, Video from twitchdl.twitch import Clip, Video
T = TypeVar("T") T = TypeVar("T")
def clear_line():
sys.stdout.write("\033[1K")
sys.stdout.write("\r")
def truncate(string: str, length: int) -> str: def truncate(string: str, length: int) -> str:
if len(string) > length: if len(string) > length:
return string[: length - 1] + "…" return string[: length - 1] + "…"
@ -31,23 +25,17 @@ def print_log(message: Any):
click.secho(message, err=True, dim=True) click.secho(message, err=True, dim=True)
def visual_len(text: str):
return len(click.unstyle(text))
def ljust(text: str, width: int):
diff = width - visual_len(text)
return text + (" " * diff) if diff > 0 else text
def print_table(headers: List[str], data: List[List[str]]): def print_table(headers: List[str], data: List[List[str]]):
widths = [[visual_len(cell) for cell in row] for row in data + [headers]] widths = [[len(cell) for cell in row] for row in data + [headers]]
widths = [max(width) for width in zip(*widths)] widths = [max(width) for width in zip(*widths)]
underlines = ["-" * width for width in widths] underlines = ["-" * width for width in widths]
def print_row(row: List[str]): def print_row(row: List[str]):
parts = (ljust(cell, widths[idx]) for idx, cell in enumerate(row)) for idx, cell in enumerate(row):
click.echo(" ".join(parts).strip()) width = widths[idx]
click.echo(cell.ljust(width), nl=False)
click.echo(" ", nl=False)
click.echo()
print_row(headers) print_row(headers)
print_row(underlines) print_row(underlines)
@ -105,12 +93,11 @@ def print_video(video: Video):
if channel or playing: if channel or playing:
click.echo(" ".join([channel, playing])) click.echo(" ".join([channel, playing]))
if video["description"]:
click.echo(f"Description: {video['description']}")
click.echo(f"Published {blue(published_at)} Length: {blue(length)} ") click.echo(f"Published {blue(published_at)} Length: {blue(length)} ")
click.secho(url, italic=True) click.secho(url, italic=True)
if video["description"]:
click.echo(f"\nDescription:\n{video['description']}")
click.echo() click.echo()


@ -3,23 +3,20 @@ Parse and manipulate m3u8 playlists.
""" """
from dataclasses import dataclass from dataclasses import dataclass
from pathlib import Path from typing import Generator, List, Optional, OrderedDict
from typing import Generator, List, Optional, OrderedDict, Set
import click import click
import m3u8 import m3u8
from twitchdl import utils from twitchdl import utils
from twitchdl.output import bold, dim, print_table from twitchdl.output import bold, dim
@dataclass @dataclass
class Playlist: class Playlist:
name: str name: str
group_id: str
resolution: Optional[str] resolution: Optional[str]
url: str url: str
is_source: bool
@dataclass @dataclass
@ -37,17 +34,17 @@ def parse_playlists(playlists_m3u8: str) -> List[Playlist]:
document = load_m3u8(source) document = load_m3u8(source)
for p in document.playlists: for p in document.playlists:
resolution = ( if p.stream_info.resolution:
"x".join(str(r) for r in p.stream_info.resolution) name = p.media[0].name
if p.stream_info.resolution resolution = "x".join(str(r) for r in p.stream_info.resolution)
else None else:
) name = p.media[0].group_id
resolution = None
media = p.media[0] yield Playlist(name, resolution, p.uri)
is_source = media.group_id == "chunked"
yield Playlist(media.name, media.group_id, resolution, p.uri, is_source)
return list(_parse(playlists_m3u8)) # Move audio to bottom, it has no resolution
return sorted(_parse(playlists_m3u8), key=lambda p: p.resolution is None)
def load_m3u8(playlist_m3u8: str) -> m3u8.M3U8: def load_m3u8(playlist_m3u8: str) -> m3u8.M3U8:
@ -82,7 +79,7 @@ def enumerate_vods(
def make_join_playlist( def make_join_playlist(
playlist: m3u8.M3U8, playlist: m3u8.M3U8,
vods: List[Vod], vods: List[Vod],
targets: List[Path], targets: List[str],
) -> m3u8.Playlist: ) -> m3u8.Playlist:
""" """
Make a modified playlist which references downloaded VODs Make a modified playlist which references downloaded VODs
@ -94,7 +91,7 @@ def make_join_playlist(
playlist.segments.clear() playlist.segments.clear()
for segment in org_segments: for segment in org_segments:
if segment.uri in path_map: if segment.uri in path_map:
segment.uri = str(path_map[segment.uri].name) segment.uri = path_map[segment.uri]
playlist.segments.append(segment) playlist.segments.append(segment)
return playlist return playlist
@ -110,13 +107,10 @@ def select_playlist(playlists: List[Playlist], quality: Optional[str]) -> Playli
def select_playlist_by_name(playlists: List[Playlist], quality: str) -> Playlist: def select_playlist_by_name(playlists: List[Playlist], quality: str) -> Playlist:
if quality == "source": if quality == "source":
for playlist in playlists: return playlists[0]
if playlist.is_source:
return playlist
raise click.ClickException("Source quality not found, please report an issue on github.")
for playlist in playlists: for playlist in playlists:
if playlist.name == quality or playlist.group_id == quality: if playlist.name == quality:
return playlist return playlist
available = ", ".join([p.name for p in playlists]) available = ", ".join([p.name for p in playlists])
@ -125,56 +119,13 @@ def select_playlist_by_name(playlists: List[Playlist], quality: str) -> Playlist
def select_playlist_interactive(playlists: List[Playlist]) -> Playlist: def select_playlist_interactive(playlists: List[Playlist]) -> Playlist:
playlists = sorted(playlists, key=_playlist_key) click.echo("\nAvailable qualities:")
headers = ["#", "Name", "Group ID", "Resolution"] for n, playlist in enumerate(playlists):
if playlist.resolution:
click.echo(f"{n + 1}) {bold(playlist.name)} {dim(f'({playlist.resolution})')}")
else:
click.echo(f"{n + 1}) {bold(playlist.name)}")
rows = [ no = utils.read_int("Choose quality", min=1, max=len(playlists) + 1, default=1)
[
f"{n + 1})",
bold(playlist.name),
dim(playlist.group_id),
dim(playlist.resolution or ""),
]
for n, playlist in enumerate(playlists)
]
click.echo()
print_table(headers, rows)
default = 1
for index, playlist in enumerate(playlists):
if playlist.is_source:
default = index + 1
no = utils.read_int("\nChoose quality", min=1, max=len(playlists) + 1, default=default)
playlist = playlists[no - 1] playlist = playlists[no - 1]
return playlist return playlist
MAX = 1_000_000
def _playlist_key(playlist: Playlist) -> int:
"""Attempt to sort playlists so that source quality is on top, audio only
is on bottom and others are sorted descending by resolution."""
if playlist.is_source:
return 0
if playlist.group_id == "audio_only":
return MAX
try:
return MAX - int(playlist.name.split("p")[0])
except Exception:
pass
return MAX
def get_init_sections(playlist: m3u8.M3U8) -> Set[str]:
# TODO: we're ignoring init_section.base_uri and bytes
return set(
segment.init_section.uri
for segment in playlist.segments
if segment.init_section is not None
)
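
A worked illustration of the ordering the master-branch _playlist_key above aims for: source quality first, audio-only last, the rest by descending resolution. The playlist names and group IDs below are invented.

MAX = 1_000_000

def playlist_key(name: str, group_id: str, is_source: bool) -> int:
    if is_source:
        return 0
    if group_id == "audio_only":
        return MAX
    try:
        return MAX - int(name.split("p")[0])
    except Exception:
        return MAX

playlists = [
    ("audio_only", "audio_only", False),
    ("480p30", "480p30", False),
    ("1080p60", "chunked", True),
    ("720p60", "720p60", False),
]
for name, group_id, is_source in sorted(playlists, key=lambda p: playlist_key(*p)):
    print(name)
# 1080p60, 720p60, 480p30, audio_only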


@ -1,13 +1,13 @@
import logging import logging
import time import time
from collections import deque from collections import deque
from dataclasses import dataclass from dataclasses import dataclass, field
from statistics import mean from statistics import mean
from typing import Deque, Dict, NamedTuple, Optional from typing import Deque, Dict, NamedTuple, Optional
import click import click
from twitchdl.output import blue, clear_line from twitchdl.output import blue
from twitchdl.utils import format_size, format_time from twitchdl.utils import format_size, format_time
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -31,25 +31,28 @@ class Sample(NamedTuple):
timestamp: float timestamp: float
@dataclass
class Progress: class Progress:
def __init__(self, file_count: Optional[int] = None): vod_count: int
self.downloaded: int = 0 downloaded: int = 0
self.estimated_total: Optional[int] = None estimated_total: Optional[int] = None
self.last_printed: Optional[float] = None last_printed: float = field(default_factory=time.time)
self.progress_bytes: int = 0 progress_bytes: int = 0
self.progress_perc: int = 0 progress_perc: int = 0
self.remaining_time: Optional[int] = None remaining_time: Optional[int] = None
self.samples: Deque[Sample] = deque(maxlen=1000) speed: Optional[float] = None
self.speed: Optional[float] = None start_time: float = field(default_factory=time.time)
self.tasks: Dict[TaskId, Task] = {} tasks: Dict[TaskId, Task] = field(default_factory=dict)
self.file_count = file_count vod_downloaded_count: int = 0
self.downloaded_count: int = 0 samples: Deque[Sample] = field(default_factory=lambda: deque(maxlen=100))
def start(self, task_id: int, size: int): def start(self, task_id: int, size: int):
if task_id in self.tasks: if task_id in self.tasks:
raise ValueError(f"Task {task_id}: cannot start, already started") raise ValueError(f"Task {task_id}: cannot start, already started")
self.tasks[task_id] = Task(task_id, size) self.tasks[task_id] = Task(task_id, size)
self._calculate_total()
self._calculate_progress()
self.print() self.print()
def advance(self, task_id: int, size: int): def advance(self, task_id: int, size: int):
@ -60,6 +63,7 @@ class Progress:
self.progress_bytes += size self.progress_bytes += size
self.tasks[task_id].advance(size) self.tasks[task_id].advance(size)
self.samples.append(Sample(self.downloaded, time.time())) self.samples.append(Sample(self.downloaded, time.time()))
self._calculate_progress()
self.print() self.print()
def already_downloaded(self, task_id: int, size: int): def already_downloaded(self, task_id: int, size: int):
@ -68,7 +72,9 @@ class Progress:
self.tasks[task_id] = Task(task_id, size) self.tasks[task_id] = Task(task_id, size)
self.progress_bytes += size self.progress_bytes += size
self.downloaded_count += 1 self.vod_downloaded_count += 1
self._calculate_total()
self._calculate_progress()
self.print() self.print()
def abort(self, task_id: int): def abort(self, task_id: int):
@ -77,6 +83,9 @@ class Progress:
del self.tasks[task_id] del self.tasks[task_id]
self.progress_bytes = sum(t.downloaded for t in self.tasks.values()) self.progress_bytes = sum(t.downloaded for t in self.tasks.values())
self._calculate_total()
self._calculate_progress()
self.print() self.print()
def end(self, task_id: int): def end(self, task_id: int):
@ -89,15 +98,15 @@ class Progress:
f"Taks {task_id} ended with {task.downloaded}b downloaded, expected {task.size}b." f"Taks {task_id} ended with {task.downloaded}b downloaded, expected {task.size}b."
) )
self.downloaded_count += 1 self.vod_downloaded_count += 1
self.print() self.print()
def _recalculate(self): def _calculate_total(self):
if self.tasks and self.file_count: self.estimated_total = (
self.estimated_total = int(mean(t.size for t in self.tasks.values()) * self.file_count) int(mean(t.size for t in self.tasks.values()) * self.vod_count) if self.tasks else None
else: )
self.estimated_total = None
def _calculate_progress(self):
self.speed = self._calculate_speed() self.speed = self._calculate_speed()
self.progress_perc = ( self.progress_perc = (
int(100 * self.progress_bytes / self.estimated_total) if self.estimated_total else 0 int(100 * self.progress_bytes / self.estimated_total) if self.estimated_total else 0
@ -124,14 +133,10 @@ class Progress:
now = time.time() now = time.time()
# Don't print more often than 10 times per second # Don't print more often than 10 times per second
if self.last_printed and now - self.last_printed < 0.1: if now - self.last_printed < 0.1:
return return
self._recalculate() click.echo(f"\rDownloaded {self.vod_downloaded_count}/{self.vod_count} VODs", nl=False)
clear_line()
total_label = f"/{self.file_count}" if self.file_count else ""
click.echo(f"Downloaded {self.downloaded_count}{total_label} VODs", nl=False)
click.secho(f" {self.progress_perc}%", fg="blue", nl=False) click.secho(f" {self.progress_perc}%", fg="blue", nl=False)
if self.estimated_total is not None: if self.estimated_total is not None:
@ -145,4 +150,6 @@ class Progress:
if self.remaining_time is not None: if self.remaining_time is not None:
click.echo(f" ETA {blue(format_time(self.remaining_time))}", nl=False) click.echo(f" ETA {blue(format_time(self.remaining_time))}", nl=False)
click.echo(" ", nl=False)
self.last_printed = now self.last_printed = now


@ -2,28 +2,83 @@
Twitch API access. Twitch API access.
""" """
import json
import logging import logging
import random
import time import time
from typing import Any, Dict, Generator, List, Mapping, Optional, Tuple, Union from typing import Any, Dict, Generator, List, Literal, Mapping, Optional, Tuple, TypedDict, Union
import click import click
import httpx import httpx
from twitchdl import CLIENT_ID from twitchdl import CLIENT_ID
from twitchdl.entities import ( from twitchdl.entities import Data
AccessToken,
Chapter,
Clip,
ClipAccessToken,
ClipsPeriod,
Data,
Video,
VideosSort,
VideosType,
)
from twitchdl.exceptions import ConsoleError from twitchdl.exceptions import ConsoleError
from twitchdl.utils import format_size
ClipsPeriod = Literal["last_day", "last_week", "last_month", "all_time"]
VideosSort = Literal["views", "time"]
VideosType = Literal["archive", "highlight", "upload"]
class AccessToken(TypedDict):
signature: str
value: str
class User(TypedDict):
login: str
displayName: str
class Game(TypedDict):
id: str
name: str
class VideoQuality(TypedDict):
frameRate: str
quality: str
sourceURL: str
class ClipAccessToken(TypedDict):
id: str
playbackAccessToken: AccessToken
videoQualities: List[VideoQuality]
class Clip(TypedDict):
id: str
slug: str
title: str
createdAt: str
viewCount: int
durationSeconds: int
url: str
videoQualities: List[VideoQuality]
game: Game
broadcaster: User
class Video(TypedDict):
id: str
title: str
description: str
publishedAt: str
broadcastType: str
lengthSeconds: int
game: Game
creator: User
class Chapter(TypedDict):
id: str
durationMilliseconds: int
positionMilliseconds: int
type: str
description: str
subDescription: str
thumbnailURL: str
game: Game
class GQLError(click.ClickException): class GQLError(click.ClickException):
@ -80,23 +135,22 @@ logger = logging.getLogger(__name__)
def log_request(request: httpx.Request): def log_request(request: httpx.Request):
logger.info(f"--> {request.method} {request.url}") logger.debug(f"--> {request.method} {request.url}")
if request.content: if request.content:
logger.debug(f"--> {request.content}") logger.debug(f"--> {request.content}")
def log_response(response: httpx.Response, duration_seconds: float): def log_response(response: httpx.Response, duration: float):
request = response.request request = response.request
duration = f"{int(1000 * duration_seconds)}ms" duration_ms = int(1000 * duration)
size = format_size(len(response.content)) logger.debug(f"<-- {request.method} {request.url} HTTP {response.status_code} {duration_ms}ms")
logger.info(f"<-- {request.method} {request.url} HTTP {response.status_code} {duration} {size}")
if response.content: if response.content:
logger.debug(f"<-- {response.content}") logger.debug(f"<-- {response.content}")
def gql_persisted_query(query: Data): def gql_post(query: str):
url = "https://gql.twitch.tv/gql" url = "https://gql.twitch.tv/gql"
response = authenticated_post(url, json=query) response = authenticated_post(url, content=query)
gql_raise_on_error(response) gql_raise_on_error(response)
return response.json() return response.json()
@ -184,18 +238,22 @@ def get_clip(slug: str) -> Optional[Clip]:
def get_clip_access_token(slug: str) -> ClipAccessToken: def get_clip_access_token(slug: str) -> ClipAccessToken:
query = { query = f"""
{{
"operationName": "VideoAccessToken_Clip", "operationName": "VideoAccessToken_Clip",
"variables": {"slug": slug}, "variables": {{
"extensions": { "slug": "{slug}"
"persistedQuery": { }},
"extensions": {{
"persistedQuery": {{
"version": 1, "version": 1,
"sha256Hash": "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11", "sha256Hash": "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11"
} }}
}, }}
} }}
"""
response = gql_persisted_query(query) response = gql_post(query.strip())
return response["data"]["clip"] return response["data"]["clip"]
@ -267,6 +325,23 @@ def channel_clips_generator(
return _generator(clips, limit) return _generator(clips, limit)
def channel_clips_generator_old(channel_id: str, period: ClipsPeriod, limit: int):
cursor = ""
while True:
clips = get_channel_clips(channel_id, period, limit, after=cursor)
if not clips["edges"]:
break
has_next = clips["pageInfo"]["hasNextPage"]
cursor = clips["edges"][-1]["cursor"] if has_next else None
yield clips, has_next
if not cursor:
break
def get_channel_videos( def get_channel_videos(
channel_id: str, channel_id: str,
limit: int, limit: int,
@ -347,7 +422,7 @@ def get_access_token(video_id: str, auth_token: Optional[str] = None) -> AccessT
query = f""" query = f"""
{{ {{
videoPlaybackAccessToken( videoPlaybackAccessToken(
id: "{video_id}", id: {video_id},
params: {{ params: {{
platform: "web", platform: "web",
playerBackend: "mediaplayer", playerBackend: "mediaplayer",
@ -392,12 +467,8 @@ def get_playlists(video_id: str, access_token: AccessToken) -> str:
"allow_audio_only": "true", "allow_audio_only": "true",
"allow_source": "true", "allow_source": "true",
"player": "twitchweb", "player": "twitchweb",
"platform": "web",
"supported_codecs": "av1,h265,h264",
"p": random.randint(1000000, 10000000),
}, },
) )
response.raise_for_status() response.raise_for_status()
return response.content.decode("utf-8") return response.content.decode("utf-8")
@ -432,7 +503,7 @@ def get_video_chapters(video_id: str) -> List[Chapter]:
}, },
} }
response = gql_persisted_query(query) response = gql_post(json.dumps(query))
return list(_chapter_nodes(response["data"]["video"]["moments"])) return list(_chapter_nodes(response["data"]["video"]["moments"]))
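
For context on the persisted-query shape master sends above, a hedged standalone request could look like this. The operation name, hash, and endpoint are taken from the diff; the slug is a placeholder and the Client-ID header name is an assumption, not something shown here.

import httpx

from twitchdl import CLIENT_ID

query = {
    "operationName": "VideoAccessToken_Clip",
    "variables": {"slug": "AbrasivePlayfulMangoMau5"},  # placeholder slug
    "extensions": {
        "persistedQuery": {
            "version": 1,
            "sha256Hash": "36b89d2507fce29e5ca551df756d27c1cfe079e2609642b4390aa4c35796eb11",
        }
    },
}

response = httpx.post(
    "https://gql.twitch.tv/gql",
    json=query,
    headers={"Client-ID": CLIENT_ID},  # assumed header name, not shown in the diff
)
print(response.json()["data"]["clip"])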


@ -84,14 +84,13 @@ def titlify(value: str) -> str:
VIDEO_PATTERNS = [ VIDEO_PATTERNS = [
r"^(?P<id>\d+)?$", r"^(?P<id>\d+)?$",
r"^https://(www\.|m\.)?twitch\.tv/videos/(?P<id>\d+)(\?.+)?$", r"^https://(www.)?twitch.tv/videos/(?P<id>\d+)(\?.+)?$",
r"^https://(www\.|m\.)?twitch\.tv/\w+/video/(?P<id>\d+)(\?.+)?$",
] ]
CLIP_PATTERNS = [ CLIP_PATTERNS = [
r"^(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)$", r"^(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)$",
r"^https://(www\.|m\.)?twitch\.tv/\w+/clip/(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)(\?.+)?$", r"^https://(www.)?twitch.tv/\w+/clip/(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)(\?.+)?$",
r"^https://clips\.twitch\.tv/(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)(\?.+)?$", r"^https://clips.twitch.tv/(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)(\?.+)?$",
] ]
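
A hedged sketch of how the master-branch patterns above resolve a URL to an ID; the real parse_video_identifier in twitchdl.utils works along these lines but may differ in detail.

import re

VIDEO_PATTERNS = [
    r"^(?P<id>\d+)?$",
    r"^https://(www\.|m\.)?twitch\.tv/videos/(?P<id>\d+)(\?.+)?$",
    r"^https://(www\.|m\.)?twitch\.tv/\w+/video/(?P<id>\d+)(\?.+)?$",
]

def parse_video_identifier(text: str):
    """Return the video ID for the first matching pattern, or None."""
    for pattern in VIDEO_PATTERNS:
        match = re.match(pattern, text)
        if match:
            return match.group("id")
    return None

print(parse_video_identifier("https://m.twitch.tv/videos/702689313"))           # 702689313
print(parse_video_identifier("https://www.twitch.tv/r0dn3y/video/2223719525"))  # 2223719525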