Compare commits


1 Commits
2.3.0 ... foo

Author SHA1 Message Date
a21e965e04 wip 2024-03-29 09:58:36 +01:00
26 changed files with 574 additions and 1166 deletions

View File

@ -1,27 +0,0 @@
name: Run tests
on: [push, pull_request]
jobs:
test:
runs-on: ubuntu-22.04
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
steps:
- uses: actions/checkout@v4
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -e ".[test]"
- name: Run tests
run: |
pytest
- name: Validate minimum required version
run: |
vermin --no-tips twitchdl

View File

@ -3,43 +3,11 @@ twitch-dl changelog
<!-- Do not edit. This file is automatically generated from changelog.yaml.-->
### [2.3.0 (2024-04-27)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.0)
### [2.2.0 (TBA)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.0)
* Show more playlist data when choosing quality
* Improve detection of 'source' quality for Twitch Enhanced Broadcast Streams
(#154)
### [2.2.4 (2024-04-25)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.4)
* Add m dot url support to video and clip regexes (thanks @localnerve)
### [2.2.3 (2024-04-24)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.3)
* Respect --dry-run option when downloading videos
* Add automated tests on github actions
### [2.2.2 (2024-04-23)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.2)
* Fix more compat issues Python < 3.10 (#152)
### [2.2.1 (2024-04-23)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.1)
* Fix compat with Python < 3.10 (#152)
* Fix division by zero in progress calculation when video duration is reported
as 0
### [2.2.0 (2024-04-10)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.0)
* **Requires Python 3.8+**
* Migrated to Click library for generating the commandline interface
* Add shell auto completion, see 'Shell completion' in docs.
* Add setting defaults via environment variables, see 'Environment variables' in
docs
* Add `download --concat` option to avoid using ffmpeg for joining vods and concat
them instead. This will produce a `.ts` file by default.
* Add `download --dry-run` option to skip actual download (thanks @metacoma)
* Add video description to metadata (#129)
* Add `clips --compact` option for listing clips in one-per-line mode
* **Requires python 3.8 or later**
* Migrated to click lib for cli parsing
* Add shell auto completion
### [2.1.4 (2024-01-06)](https://github.com/ihabunek/twitch-dl/releases/tag/2.1.4)

View File

@ -7,7 +7,7 @@ dist:
clean :
find . -name "*pyc" | xargs rm -rf $1
rm -rf build dist book bundle MANIFEST htmlcov deb_dist twitch-dl.*.pyz twitch-dl.1.man twitch_dl.egg-info
rm -rf build dist bundle MANIFEST htmlcov deb_dist twitch-dl.*.pyz twitch-dl.1.man twitch_dl.egg-info
bundle:
mkdir bundle
@ -24,7 +24,7 @@ publish :
twine upload dist/*.tar.gz dist/*.whl
coverage:
pytest --cov=twitchdl --cov-report html tests/
py.test --cov=toot --cov-report html tests/
man:
scdoc < twitch-dl.1.scd > twitch-dl.1.man

View File

@ -1,42 +1,12 @@
2.3.0:
date: 2024-04-27
changes:
- "Show more playlist data when choosing quality"
- "Improve detection of 'source' quality for Twitch Enhanced Broadcast Streams (#154)"
2.2.4:
date: 2024-04-25
changes:
- "Add m dot url support to video and clip regexes (thanks @localnerve)"
2.2.3:
date: 2024-04-24
changes:
- "Respect --dry-run option when downloading videos"
- "Add automated tests on github actions"
2.2.2:
date: 2024-04-23
changes:
- "Fix more compat issues Python < 3.10 (#152)"
2.2.1:
date: 2024-04-23
changes:
- "Fix compat with Python < 3.10 (#152)"
- "Fix division by zero in progress calculation when video duration is reported as 0"
2.2.0:
date: 2024-04-10
date: TBA
changes:
- "**Requires Python 3.8+**"
- "**Requires python 3.8 or later**"
- "Migrated to Click library for generating the commandline interface"
- "Add shell auto completion, see 'Shell completion' in docs."
- "Add setting defaults via environment variables, see 'Environment variables' in docs"
- "Add shell auto completion, see: https://twitch-dl.bezdomni.net/shell_completion.html"
- "Add setting defaults via environment variables, see: https://twitch-dl.bezdomni.net/environment_variables.html"
- "Add `download --concat` option to avoid using ffmeg for joinig vods and concat them instead. This will produce a `.ts` file by default."
- "Add `download --dry-run` option to skip actual download (thanks @metacoma)"
- "Add video description to metadata (#129)"
- "Add `clips --compact` option for listing clips in one-per-line mode"
2.1.4:
date: 2024-01-06

View File

@ -3,43 +3,11 @@ twitch-dl changelog
<!-- Do not edit. This file is automatically generated from changelog.yaml.-->
### [2.3.0 (2024-04-27)](https://github.com/ihabunek/twitch-dl/releases/tag/2.3.0)
### [2.2.0 (TBA)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.0)
* Show more playlist data when choosing quality
* Improve detection of 'source' quality for Twitch Enhanced Broadcast Streams
(#154)
### [2.2.4 (2024-04-25)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.4)
* Add m dot url support to video and clip regexes (thanks @localnerve)
### [2.2.3 (2024-04-24)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.3)
* Respect --dry-run option when downloading videos
* Add automated tests on github actions
### [2.2.2 (2024-04-23)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.2)
* Fix more compat issues Python < 3.10 (#152)
### [2.2.1 (2024-04-23)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.1)
* Fix compat with Python < 3.10 (#152)
* Fix division by zero in progress calculation when video duration is reported
as 0
### [2.2.0 (2024-04-10)](https://github.com/ihabunek/twitch-dl/releases/tag/2.2.0)
* **Requires Python 3.8+**
* Migrated to Click library for generating the commandline interface
* Add shell auto completion, see 'Shell completion' in docs.
* Add setting defaults via environment variables, see 'Environment variables' in
docs
* Add `download --concat` option to avoid using ffmpeg for joining vods and concat
them instead. This will produce a `.ts` file by default.
* Add `download --dry-run` option to skip actual download (thanks @metacoma)
* Add video description to metadata (#129)
* Add `clips --compact` option for listing clips in one-per-line mode
* **Requires python 3.8 or later**
* Migrated to click lib for cli parsing
* Add shell auto completion
### [2.1.4 (2024-01-06)](https://github.com/ihabunek/twitch-dl/releases/tag/2.1.4)

View File

@ -18,11 +18,6 @@ twitch-dl clips [OPTIONS] CHANNEL_NAME
<td>Fetch all clips, overrides --limit</td>
</tr>
<tr>
<td class="code">-c, --compact</td>
<td>Show clips in compact mode, one line per video</td>
</tr>
<tr>
<td class="code">-d, --download</td>
<td>Download clips in given period (in source quality)</td>
@ -30,7 +25,7 @@ twitch-dl clips [OPTIONS] CHANNEL_NAME
<tr>
<td class="code">-l, --limit INTEGER</td>
<td>Number of clips to fetch. Defaults to 40 in compact mode, 10 otherwise.</td>
<td>Number of clips to fetch [max: 100] [default: <code>10</code>]</td>
</tr>
<tr>

View File

@ -28,7 +28,7 @@ twitch-dl download [OPTIONS] [IDS]...
<tr>
<td class="code">--concat</td>
<td>Do not use ffmpeg to join files, concat them instead. This will produce a .ts file by default.</td>
<td>Do not use ffmpeg to join files, concat them instead</td>
</tr>
<tr>

View File

@ -43,11 +43,6 @@ dev = [
"vermin",
]
test = [
"pytest",
"vermin",
]
[project.urls]
"Homepage" = "https://twitch-dl.bezdomni.net/"
"Source" = "https://github.com/ihabunek/twitch-dl"
@ -56,9 +51,8 @@ test = [
twitch-dl = "twitchdl.cli:cli"
[tool.pyright]
include = ["twitchdl"]
typeCheckingMode = "strict"
pythonVersion = "3.8"
[tool.ruff]
line-length = 100
target-version = "py38"

View File

@ -11,10 +11,12 @@ Usage: tag_version [version]
import subprocess
import sys
import textwrap
import yaml
import twitchdl
from datetime import date
from os import path
import yaml
from pkg_resources import get_distribution
path = path.join(path.dirname(path.dirname(path.abspath(__file__))), "changelog.yaml")
with open(path, "r") as f:
@ -31,6 +33,15 @@ if not changelog_item:
print(f"Version `{version}` not found in changelog.", file=sys.stderr)
sys.exit(1)
if twitchdl.__version__ != version:
print(f"twitchdl.__version__ is `{twitchdl.__version__}`, expected {version}.", file=sys.stderr)
sys.exit(1)
dist_version = get_distribution('twitch-dl').version
if dist_version != version:
print(f"Version in setup.py is `{dist_version}`, expected {version}.", file=sys.stderr)
sys.exit(1)
release_date = changelog_item["date"]
changes = changelog_item["changes"]
description = changelog_item["description"] if "description" in changelog_item else None

View File

@ -3,13 +3,9 @@ These tests depend on the channel having some videos and clips published.
"""
import httpx
import pytest
import m3u8
from twitchdl import twitch
from twitchdl.commands.download import get_clip_authenticated_url
from twitchdl.commands.videos import get_game_ids
from twitchdl.exceptions import ConsoleError
from twitchdl.playlists import enumerate_vods, load_m3u8, parse_playlists
from twitchdl.commands.download import _parse_playlists, get_clip_authenticated_url
TEST_CHANNEL = "bananasaurus_rex"
@ -21,25 +17,22 @@ def test_get_videos():
video_id = videos["edges"][0]["node"]["id"]
video = twitch.get_video(video_id)
assert video is not None
assert video["id"] == video_id
access_token = twitch.get_access_token(video_id)
assert "signature" in access_token
assert "value" in access_token
playlists_txt = twitch.get_playlists(video_id, access_token)
assert playlists_txt.startswith("#EXTM3U")
playlists = twitch.get_playlists(video_id, access_token)
assert playlists.startswith("#EXTM3U")
playlists = parse_playlists(playlists_txt)
playlist_url = playlists[0].url
name, res, url = next(_parse_playlists(playlists))
playlist = httpx.get(url).text
assert playlist.startswith("#EXTM3U")
playlist_txt = httpx.get(playlist_url).text
assert playlist_txt.startswith("#EXTM3U")
playlist_m3u8 = load_m3u8(playlist_txt)
vods = enumerate_vods(playlist_m3u8)
assert vods[0].path == "0.ts"
playlist = m3u8.loads(playlist)
vod_path = playlist.segments[0].uri
assert vod_path == "0.ts"
def test_get_clips():
@ -52,19 +45,6 @@ def test_get_clips():
slug = clips["edges"][0]["node"]["slug"]
clip = twitch.get_clip(slug)
assert clip is not None
assert clip["slug"] == slug
assert get_clip_authenticated_url(slug, "source")
def test_get_games():
assert get_game_ids([]) == []
assert get_game_ids(["Bioshock"]) == ["15866"]
assert get_game_ids(["Bioshock", "Portal"]) == ["15866", "6187"]
def test_get_games_not_found():
with pytest.raises(ConsoleError) as ex:
get_game_ids(["the game which does not exist"])
assert str(ex.value) == "Game 'the game which does not exist' not found"

View File

@ -1,156 +0,0 @@
import json
import pytest
from click.testing import CliRunner, Result
from twitchdl import cli
@pytest.fixture(scope="session")
def runner():
return CliRunner(mix_stderr=False)
def assert_ok(result: Result):
if result.exit_code != 0:
raise AssertionError(
f"Command failed with exit code {result.exit_code}\nStderr: {result.stderr}"
)
def test_info_video(runner: CliRunner):
result = runner.invoke(cli.info, ["2090131595"])
assert_ok(result)
assert "Frost Fatales 2024 Day 1" in result.stdout
assert "frozenflygone playing Tomb Raider" in result.stdout
def test_info_video_json(runner: CliRunner):
result = runner.invoke(cli.info, ["2090131595", "--json"])
assert_ok(result)
video = json.loads(result.stdout)
assert video["title"] == "Frost Fatales 2024 Day 1"
assert video["game"] == {"id": "2770", "name": "Tomb Raider"}
assert video["creator"] == {"login": "frozenflygone", "displayName": "frozenflygone"}
def test_info_clip(runner: CliRunner):
result = runner.invoke(cli.info, ["PoisedTalentedPuddingChefFrank"])
assert_ok(result)
assert "AGDQ Crashes during Bioshock run" in result.stdout
assert "GamesDoneQuick playing BioShock" in result.stdout
def test_info_clip_json(runner: CliRunner):
result = runner.invoke(cli.info, ["PoisedTalentedPuddingChefFrank", "--json"])
assert_ok(result)
clip = json.loads(result.stdout)
assert clip["slug"] == "PoisedTalentedPuddingChefFrank"
assert clip["title"] == "AGDQ Crashes during Bioshock run"
assert clip["game"] == {"id": "15866", "name": "BioShock"}
assert clip["broadcaster"] == {"displayName": "GamesDoneQuick", "login": "gamesdonequick"}
def test_info_not_found(runner: CliRunner):
result = runner.invoke(cli.info, ["banana"])
assert result.exit_code == 1
assert "Clip banana not found" in result.stderr
result = runner.invoke(cli.info, ["12345"])
assert result.exit_code == 1
assert "Video 12345 not found" in result.stderr
result = runner.invoke(cli.info, [""])
assert result.exit_code == 1
assert "Invalid input" in result.stderr
def test_download_clip(runner: CliRunner):
result = runner.invoke(
cli.download,
[
"PoisedTalentedPuddingChefFrank",
"-q",
"source",
"--dry-run",
],
)
assert_ok(result)
assert (
"Found: AGDQ Crashes during Bioshock run by GamesDoneQuick, playing BioShock (30 sec)"
in result.stdout
)
assert (
"Target: 2020-01-10_3099545841_gamesdonequick_agdq_crashes_during_bioshock_run.mp4"
in result.stdout
)
assert "Dry run, clip not downloaded." in result.stdout
def test_download_video(runner: CliRunner):
result = runner.invoke(
cli.download,
[
"2090131595",
"-q",
"source",
"--dry-run",
],
)
assert_ok(result)
assert "Found: Frost Fatales 2024 Day 1 by frozenflygone" in result.stdout
assert (
"Output: 2024-03-14_2090131595_frozenflygone_frost_fatales_2024_day_1.mkv" in result.stdout
)
assert "Dry run, video not downloaded." in result.stdout
def test_videos(runner: CliRunner):
result = runner.invoke(cli.videos, ["gamesdonequick", "--json"])
assert_ok(result)
videos = json.loads(result.stdout)
assert videos["count"] == 10
assert videos["totalCount"] > 0
video = videos["videos"][0]
result = runner.invoke(cli.videos, "gamesdonequick")
assert_ok(result)
assert f"Video {video['id']}" in result.stdout
assert video["title"] in result.stdout
result = runner.invoke(cli.videos, ["gamesdonequick", "--compact"])
assert_ok(result)
assert video["id"] in result.stdout
assert video["title"][:60] in result.stdout
def test_videos_channel_not_found(runner: CliRunner):
result = runner.invoke(cli.videos, ["doesnotexisthopefully"])
assert result.exit_code == 1
assert result.stderr.strip() == "Error: Channel doesnotexisthopefully not found"
def test_clips(runner: CliRunner):
result = runner.invoke(cli.clips, ["gamesdonequick", "--json"])
assert_ok(result)
clips = json.loads(result.stdout)
clip = clips[0]
result = runner.invoke(cli.clips, "gamesdonequick")
assert_ok(result)
assert f"Clip {clip['slug']}" in result.stdout
assert clip["title"] in result.stdout
result = runner.invoke(cli.clips, ["gamesdonequick", "--compact"])
assert_ok(result)
assert clip["slug"] in result.stdout
assert clip["title"][:60] in result.stdout

View File

@ -1,38 +1,35 @@
import pytest
from twitchdl.utils import parse_clip_identifier, parse_video_identifier
from twitchdl.utils import parse_video_identifier, parse_clip_identifier
TEST_VIDEO_PATTERNS = [
("702689313", "702689313"),
("702689313", "https://twitch.tv/videos/702689313"),
("702689313", "https://www.twitch.tv/videos/702689313"),
("702689313", "https://m.twitch.tv/videos/702689313"),
]
TEST_CLIP_PATTERNS = {
("AbrasivePlayfulMangoMau5", "AbrasivePlayfulMangoMau5"),
("AbrasivePlayfulMangoMau5", "https://clips.twitch.tv/AbrasivePlayfulMangoMau5"),
("AbrasivePlayfulMangoMau5", "https://www.twitch.tv/dracul1nx/clip/AbrasivePlayfulMangoMau5"),
("AbrasivePlayfulMangoMau5", "https://m.twitch.tv/dracul1nx/clip/AbrasivePlayfulMangoMau5"),
("AbrasivePlayfulMangoMau5", "https://twitch.tv/dracul1nx/clip/AbrasivePlayfulMangoMau5"),
("HungryProudRadicchioDoggo", "HungryProudRadicchioDoggo"),
("HungryProudRadicchioDoggo", "https://clips.twitch.tv/HungryProudRadicchioDoggo"),
("HungryProudRadicchioDoggo", "https://www.twitch.tv/bananasaurus_rex/clip/HungryProudRadicchioDoggo?filter=clips&range=7d&sort=time"),
("HungryProudRadicchioDoggo", "https://m.twitch.tv/bananasaurus_rex/clip/HungryProudRadicchioDoggo?filter=clips&range=7d&sort=time"),
("HungryProudRadicchioDoggo", "https://twitch.tv/bananasaurus_rex/clip/HungryProudRadicchioDoggo?filter=clips&range=7d&sort=time"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ?filter=clips&range=7d&sort=time"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://www.twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ?filter=clips&range=7d&sort=time"),
("GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ", "https://m.twitch.tv/dracul1nx/clip/GloriousColdbloodedTortoiseRuleFive-E017utJ4DZmHVpfQ?filter=clips&range=7d&sort=time"),
}
@pytest.mark.parametrize("expected,input", TEST_VIDEO_PATTERNS)
def test_video_patterns(expected: str, input: str):
def test_video_patterns(expected, input):
assert parse_video_identifier(input) == expected
@pytest.mark.parametrize("expected,input", TEST_CLIP_PATTERNS)
def test_clip_patterns(expected: str, input: str):
def test_clip_patterns(expected, input):
assert parse_clip_identifier(input) == expected

View File

@ -1,14 +1,12 @@
import click
import logging
import platform
import re
import sys
from typing import Optional, Tuple
import click
from twitchdl import __version__
from twitchdl.commands.clips import ClipsPeriod
from twitchdl.entities import DownloadOptions
from twitchdl.twitch import ClipsPeriod, VideosSort, VideosType
# Tweak the Click context
# https://click.palletsprojects.com/en/8.1.x/api/#context
@ -31,13 +29,13 @@ json_option = click.option(
)
def validate_positive(_ctx: click.Context, _param: click.Parameter, value: Optional[int]):
def validate_positive(_ctx: click.Context, _param: click.Parameter, value: int | None):
if value is not None and value <= 0:
raise click.BadParameter("must be greater than 0")
return value
def validate_time(_ctx: click.Context, _param: click.Parameter, value: str) -> Optional[int]:
def validate_time(_ctx: click.Context, _param: click.Parameter, value: str) -> int | None:
"""Parse a time string (hh:mm or hh:mm:ss) to number of seconds."""
if not value:
return None
@ -57,7 +55,7 @@ def validate_time(_ctx: click.Context, _param: click.Parameter, value: str) -> O
return hours * 3600 + minutes * 60 + seconds
def validate_rate(_ctx: click.Context, _param: click.Parameter, value: str) -> Optional[int]:
def validate_rate(_ctx: click.Context, _param: click.Parameter, value: str) -> int | None:
if not value:
return None
@ -86,14 +84,12 @@ def validate_rate(_ctx: click.Context, _param: click.Parameter, value: str) -> O
def cli(ctx: click.Context, color: bool, debug: bool):
"""twitch-dl - twitch.tv downloader
https://twitch-dl.bezdomni.net/
https://toot.bezdomni.net/
"""
ctx.color = color
if debug:
logging.basicConfig(level=logging.DEBUG)
logging.getLogger("httpx").setLevel(logging.WARN)
logging.getLogger("httpcore").setLevel(logging.WARN)
logging.basicConfig(level=logging.INFO)
@cli.command()
@ -104,12 +100,6 @@ def cli(ctx: click.Context, color: bool, debug: bool):
help="Fetch all clips, overrides --limit",
is_flag=True,
)
@click.option(
"-c",
"--compact",
help="Show clips in compact mode, one line per video",
is_flag=True,
)
@click.option(
"-d",
"--download",
@ -119,8 +109,9 @@ def cli(ctx: click.Context, color: bool, debug: bool):
@click.option(
"-l",
"--limit",
help="Number of clips to fetch. Defaults to 40 in compact mode, 10 otherwise.",
help="Number of clips to fetch [max: 100]",
type=int,
default=10,
callback=validate_positive,
)
@click.option(
@ -143,11 +134,10 @@ def cli(ctx: click.Context, color: bool, debug: bool):
def clips(
channel_name: str,
all: bool,
compact: bool,
download: bool,
json: bool,
limit: Optional[int],
pager: Optional[int],
limit: int,
pager: int | None,
period: ClipsPeriod,
):
"""List or download clips for given CHANNEL_NAME."""
@ -156,7 +146,6 @@ def clips(
clips(
channel_name,
all=all,
compact=compact,
download=download,
json=json,
limit=limit,
@ -257,20 +246,20 @@ def clips(
default=5,
)
def download(
ids: Tuple[str, ...],
auth_token: Optional[str],
chapter: Optional[int],
ids: tuple[str, ...],
auth_token: str | None,
chapter: int | None,
concat: bool,
dry_run: bool,
end: Optional[int],
end: int | None,
format: str,
keep: bool,
no_join: bool,
overwrite: bool,
output: str,
quality: Optional[str],
rate_limit: Optional[int],
start: Optional[int],
quality: str | None,
rate_limit: str | None,
start: int | None,
max_workers: int,
):
"""Download videos or clips.
@ -376,12 +365,12 @@ def videos(
channel_name: str,
all: bool,
compact: bool,
games_tuple: Tuple[str, ...],
games_tuple: tuple[str, ...],
json: bool,
limit: Optional[int],
pager: Optional[int],
sort: VideosSort,
type: VideosType,
limit: int | None,
pager: int | None,
sort: str,
type: str,
):
"""List or download clips for given CHANNEL_NAME."""
from twitchdl.commands.videos import videos
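The validate_time callback shown in this file converts "hh:mm" or "hh:mm:ss" strings into a number of seconds for the --start/--end options. A minimal standalone sketch of that conversion, assuming the same input format; the function name and error message here are illustrative, not the project's API:
import re
from typing import Optional
def parse_time(value: str) -> Optional[int]:
    """Convert "hh:mm" or "hh:mm:ss" into a number of seconds."""
    if not value:
        return None
    match = re.match(r"^(\d+):(\d{2})(?::(\d{2}))?$", value)
    if not match:
        raise ValueError("Expected time formatted as hh:mm or hh:mm:ss")
    hours, minutes, seconds = (int(group) if group else 0 for group in match.groups())
    return hours * 3600 + minutes * 60 + seconds
assert parse_time("01:02:03") == 3723  # 1 h 2 min 3 s
assert parse_time("10:30") == 37800    # 10 h 30 min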

View File

@ -1,33 +1,30 @@
import re
import sys
from os import path
from typing import Callable, Generator, Optional
import click
from typing import Literal
from itertools import islice
from os import path
from twitchdl import twitch, utils
from twitchdl.commands.download import get_clip_authenticated_url
from twitchdl.download import download_file
from twitchdl.output import green, print_clip, print_clip_compact, print_json, print_paged, yellow
from twitchdl.twitch import Clip, ClipsPeriod
from twitchdl.output import print_out, print_clip, print_json
ClipsPeriod = Literal["last_day", "last_week", "last_month", "all_time"]
def clips(
channel_name: str,
*,
all: bool = False,
compact: bool = False,
download: bool = False,
json: bool = False,
limit: Optional[int] = None,
pager: Optional[int] = None,
limit: int = 10,
pager: int | None = None,
period: ClipsPeriod = "all_time",
):
# Set different defaults for limit for compact display
default_limit = 40 if compact else 10
# Ignore --limit if --pager or --all are given
limit = sys.maxsize if all or pager else (limit or default_limit)
limit = sys.maxsize if all or pager else limit
generator = twitch.channel_clips_generator(channel_name, period, limit)
@ -37,15 +34,24 @@ def clips(
if download:
return _download_clips(generator)
print_fn = print_clip_compact if compact else print_clip
if pager:
return print_paged("Clips", generator, print_fn, pager)
return _print_paged(generator, pager)
return _print_all(generator, print_fn, all)
return _print_all(generator, all)
def _target_filename(clip: Clip):
def _continue():
print_out("Press <green><b>Enter</green> to continue, <yellow><b>Ctrl+C</yellow> to break.")
try:
input()
except KeyboardInterrupt:
return False
return True
def _target_filename(clip):
url = clip["videoQualities"][0]["sourceURL"]
_, ext = path.splitext(url)
ext = ext.lstrip(".")
@ -55,41 +61,63 @@ def _target_filename(clip: Clip):
raise ValueError(f"Failed parsing date from: {clip['createdAt']}")
date = "".join(match.groups())
name = "_".join(
[
date,
clip["id"],
clip["broadcaster"]["login"],
utils.slugify(clip["title"]),
]
)
name = "_".join([
date,
clip["id"],
clip["broadcaster"]["login"],
utils.slugify(clip["title"]),
])
return f"{name}.{ext}"
def _download_clips(generator: Generator[Clip, None, None]):
def _download_clips(generator):
for clip in generator:
target = _target_filename(clip)
if path.exists(target):
click.echo(f"Already downloaded: {green(target)}")
print_out(f"Already downloaded: <green>{target}</green>")
else:
url = get_clip_authenticated_url(clip["slug"], "source")
click.echo(f"Downloading: {yellow(target)}")
print_out(f"Downloading: <yellow>{target}</yellow>")
download_file(url, target)
def _print_all(
generator: Generator[Clip, None, None],
print_fn: Callable[[Clip], None],
all: bool,
):
def _print_all(generator, all: bool):
for clip in generator:
print_fn(clip)
print_out()
print_clip(clip)
if not all:
click.secho(
"\nThere may be more clips. "
+ "Increase the --limit, use --all or --pager to see the rest.",
dim=True,
print_out(
"\n<dim>There may be more clips. " +
"Increase the --limit, use --all or --pager to see the rest.</dim>"
)
def _print_paged(generator, page_size):
iterator = iter(generator)
page = list(islice(iterator, page_size))
first = 1
last = first + len(page) - 1
while True:
print_out("-" * 80)
print_out()
for clip in page:
print_clip(clip)
print_out()
last = first + len(page) - 1
print_out("-" * 80)
print_out(f"<yellow>Clips {first}-{last}</yellow>")
first = first + len(page)
last = first + 1
page = list(islice(iterator, page_size))
if not page or not _continue():
break
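Both _print_paged above and print_paged_videos in output.py page through a result generator by slicing it with itertools.islice and prompting between pages. A minimal sketch of that pagination pattern on its own, without the printing and prompting; the names are illustrative:
from itertools import islice
from typing import Iterable, Iterator, List, TypeVar
T = TypeVar("T")
def paginate(items: Iterable[T], page_size: int) -> Iterator[List[T]]:
    """Yield consecutive pages of up to page_size items from any iterable."""
    iterator = iter(items)
    while True:
        page = list(islice(iterator, page_size))
        if not page:
            return
        yield page
assert list(paginate(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]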

View File

@ -1,35 +1,27 @@
import asyncio
import os
import platform
import httpx
import m3u8
import os
import re
import shutil
import subprocess
import tempfile
from os import path
from pathlib import Path
from typing import Dict, List
from urllib.parse import urlencode, urlparse
import click
import httpx
from typing import List, Optional, OrderedDict
from urllib.parse import urlparse, urlencode
from twitchdl import twitch, utils
from twitchdl.download import download_file
from twitchdl.entities import DownloadOptions
from twitchdl.entities import Data, DownloadOptions
from twitchdl.exceptions import ConsoleError
from twitchdl.http import download_all
from twitchdl.output import blue, bold, green, print_log, yellow
from twitchdl.playlists import (
enumerate_vods,
load_m3u8,
make_join_playlist,
parse_playlists,
select_playlist,
)
from twitchdl.twitch import Chapter, Clip, ClipAccessToken, Video
from twitchdl.output import print_out
def download(ids: List[str], args: DownloadOptions):
def download(ids: list[str], args: DownloadOptions):
for video_id in ids:
download_one(video_id, args)
@ -46,40 +38,73 @@ def download_one(video: str, args: DownloadOptions):
raise ConsoleError(f"Invalid input: {video}")
def _join_vods(playlist_path: str, target: str, overwrite: bool, video: Video):
def _parse_playlists(playlists_m3u8):
playlists = m3u8.loads(playlists_m3u8)
for p in sorted(playlists.playlists, key=lambda p: p.stream_info.resolution is None):
if p.stream_info.resolution:
name = p.media[0].name
description = "x".join(str(r) for r in p.stream_info.resolution)
else:
name = p.media[0].group_id
description = None
yield name, description, p.uri
def _get_playlist_by_name(playlists, quality):
if quality == "source":
_, _, uri = playlists[0]
return uri
for name, _, uri in playlists:
if name == quality:
return uri
available = ", ".join([name for (name, _, _) in playlists])
msg = f"Quality '{quality}' not found. Available qualities are: {available}"
raise ConsoleError(msg)
def _select_playlist_interactive(playlists):
print_out("\nAvailable qualities:")
for n, (name, resolution, uri) in enumerate(playlists):
if resolution:
print_out(f"{n + 1}) <b>{name}</b> <dim>({resolution})</dim>")
else:
print_out(f"{n + 1}) <b>{name}</b>")
no = utils.read_int("Choose quality", min=1, max=len(playlists) + 1, default=1)
_, _, uri = playlists[no - 1]
return uri
def _join_vods(playlist_path: str, target: str, overwrite: bool, video):
description = video["description"] or ""
description = description.strip()
command = [
"ffmpeg",
"-i",
playlist_path,
"-c",
"copy",
"-metadata",
f"artist={video['creator']['displayName']}",
"-metadata",
f"title={video['title']}",
"-metadata",
f"description={description}",
"-metadata",
"encoded_by=twitch-dl",
"-i", playlist_path,
"-c", "copy",
"-metadata", f"artist={video['creator']['displayName']}",
"-metadata", f"title={video['title']}",
"-metadata", f"description={description}",
"-metadata", "encoded_by=twitch-dl",
"-stats",
"-loglevel",
"warning",
"-loglevel", "warning",
f"file:{target}",
]
if overwrite:
command.append("-y")
click.secho(f"{' '.join(command)}", dim=True)
print_out(f"<dim>{' '.join(command)}</dim>")
result = subprocess.run(command)
if result.returncode != 0:
raise ConsoleError("Joining files failed")
def _concat_vods(vod_paths: List[str], target: str):
def _concat_vods(vod_paths: list[str], target: str):
tool = "type" if platform.system() == "Windows" else "cat"
command = [tool] + vod_paths
@ -89,8 +114,8 @@ def _concat_vods(vod_paths: List[str], target: str):
raise ConsoleError(f"Joining files failed: {result.stderr}")
def get_video_placeholders(video: Video, format: str) -> Dict[str, str]:
date, time = video["publishedAt"].split("T")
def get_video_substitutions(video: Data, format: str) -> Data:
date, time = video['publishedAt'].split("T")
game = video["game"]["name"] if video["game"] else "Unknown"
return {
@ -108,8 +133,8 @@ def get_video_placeholders(video: Video, format: str) -> Dict[str, str]:
}
def _video_target_filename(video: Video, args: DownloadOptions):
subs = get_video_placeholders(video, args.format)
def _video_target_filename(video: Data, args: DownloadOptions):
subs = get_video_substitutions(video, args.format)
try:
return args.output.format(**subs)
@ -118,7 +143,7 @@ def _video_target_filename(video: Video, args: DownloadOptions):
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
def _clip_target_filename(clip: Clip, args: DownloadOptions):
def _clip_target_filename(clip, args: DownloadOptions):
date, time = clip["createdAt"].split("T")
game = clip["game"]["name"] if clip["game"] else "Unknown"
@ -148,6 +173,26 @@ def _clip_target_filename(clip: Clip, args: DownloadOptions):
raise ConsoleError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
def _get_vod_paths(playlist, start: Optional[int], end: Optional[int]) -> List[str]:
"""Extract unique VOD paths for download from playlist."""
files = []
vod_start = 0
for segment in playlist.segments:
vod_end = vod_start + segment.duration
# `vod_end > start` is used here because it's better to download a bit
# more than a bit less, similar for the end condition
start_condition = not start or vod_end > start
end_condition = not end or vod_start < end
if start_condition and end_condition and segment.uri not in files:
files.append(segment.uri)
vod_start = vod_end
return files
def _crete_temp_dir(base_uri: str) -> str:
"""Create a temp dir to store downloads if it doesn't exist."""
path = urlparse(base_uri).path.lstrip("/")
@ -156,8 +201,8 @@ def _crete_temp_dir(base_uri: str) -> str:
return str(temp_dir)
def _get_clip_url(access_token: ClipAccessToken, quality: str) -> str:
qualities = access_token["videoQualities"]
def _get_clip_url(clip, quality):
qualities = clip["videoQualities"]
# Quality given as an argument
if quality:
@ -174,18 +219,18 @@ def _get_clip_url(access_token: ClipAccessToken, quality: str) -> str:
raise ConsoleError(msg)
# Ask user to select quality
click.echo("\nAvailable qualities:")
print_out("\nAvailable qualities:")
for n, q in enumerate(qualities):
click.echo(f"{n + 1}) {bold(q['quality'])} [{q['frameRate']} fps]")
click.echo()
print_out(f"{n + 1}) {q['quality']} [{q['frameRate']} fps]")
print_out()
no = utils.read_int("Choose quality", min=1, max=len(qualities), default=1)
selected_quality = qualities[no - 1]
return selected_quality["sourceURL"]
def get_clip_authenticated_url(slug: str, quality: str):
print_log("Fetching access token...")
def get_clip_authenticated_url(slug, quality):
print_out("<dim>Fetching access token...</dim>")
access_token = twitch.get_clip_access_token(slug)
if not access_token:
@ -193,145 +238,150 @@ def get_clip_authenticated_url(slug: str, quality: str):
url = _get_clip_url(access_token, quality)
query = urlencode(
{
"sig": access_token["playbackAccessToken"]["signature"],
"token": access_token["playbackAccessToken"]["value"],
}
)
query = urlencode({
"sig": access_token["playbackAccessToken"]["signature"],
"token": access_token["playbackAccessToken"]["value"],
})
return f"{url}?{query}"
def _download_clip(slug: str, args: DownloadOptions) -> None:
print_log("Looking up clip...")
print_out("<dim>Looking up clip...</dim>")
clip = twitch.get_clip(slug)
if not clip:
raise ConsoleError(f"Clip '{slug}' not found")
title = clip["title"]
user = clip["broadcaster"]["displayName"]
game = clip["game"]["name"] if clip["game"] else "Unknown"
duration = utils.format_duration(clip["durationSeconds"])
click.echo(f"Found: {green(title)} by {yellow(user)}, playing {blue(game)} ({duration})")
print_out(
f"Found: <green>{title}</green> by <yellow>{user}</yellow>, "+
f"playing <blue>{game}</blue> ({duration})"
)
target = _clip_target_filename(clip, args)
click.echo(f"Target: {blue(target)}")
print_out(f"Target: <blue>{target}</blue>")
if not args.overwrite and path.exists(target):
response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False)
if response.lower().strip() != "y":
raise click.Abort()
response = input("File exists. Overwrite? [Y/n]: ")
if response.lower().strip() not in ["", "y"]:
raise ConsoleError("Aborted")
args.overwrite = True
url = get_clip_authenticated_url(slug, args.quality)
print_log(f"Selected URL: {url}")
print_out(f"<dim>Selected URL: {url}</dim>")
if args.dry_run:
click.echo("Dry run, clip not downloaded.")
else:
print_log("Downloading clip...")
print_out("<dim>Downloading clip...</dim>")
if (args.dry_run is False):
download_file(url, target)
click.echo(f"Downloaded: {blue(target)}")
print_out(f"Downloaded: <blue>{target}</blue>")
def _download_video(video_id: str, args: DownloadOptions) -> None:
def _download_video(video_id, args: DownloadOptions) -> None:
if args.start and args.end and args.end <= args.start:
raise ConsoleError("End time must be greater than start time")
print_log("Looking up video...")
print_out("<dim>Looking up video...</dim>")
video = twitch.get_video(video_id)
if not video:
raise ConsoleError(f"Video {video_id} not found")
click.echo(f"Found: {blue(video['title'])} by {yellow(video['creator']['displayName'])}")
title = video['title']
user = video['creator']['displayName']
print_out(f"Found: <blue>{title}</blue> by <yellow>{user}</yellow>")
target = _video_target_filename(video, args)
click.echo(f"Output: {blue(target)}")
print_out(f"Output: <blue>{target}</blue>")
if not args.overwrite and path.exists(target):
response = click.prompt("File exists. Overwrite? [Y/n]", default="Y", show_default=False)
if response.lower().strip() != "y":
raise click.Abort()
response = input("File exists. Overwrite? [Y/n]: ")
if response.lower().strip() not in ["", "y"]:
raise ConsoleError("Aborted")
args.overwrite = True
# Chapter select or manual offset
start, end = _determine_time_range(video_id, args)
print_log("Fetching access token...")
print_out("<dim>Fetching access token...</dim>")
access_token = twitch.get_access_token(video_id, auth_token=args.auth_token)
print_log("Fetching playlists...")
playlists_text = twitch.get_playlists(video_id, access_token)
playlists = parse_playlists(playlists_text)
playlist = select_playlist(playlists, args.quality)
print_out("<dim>Fetching playlists...</dim>")
playlists_m3u8 = twitch.get_playlists(video_id, access_token)
playlists = list(_parse_playlists(playlists_m3u8))
playlist_uri = (_get_playlist_by_name(playlists, args.quality) if args.quality
else _select_playlist_interactive(playlists))
print_log("Fetching playlist...")
vods_text = http_get(playlist.url)
vods_m3u8 = load_m3u8(vods_text)
vods = enumerate_vods(vods_m3u8, start, end)
print_out("<dim>Fetching playlist...</dim>")
response = httpx.get(playlist_uri)
response.raise_for_status()
playlist = m3u8.loads(response.text)
if args.dry_run:
click.echo("Dry run, video not downloaded.")
return
base_uri = re.sub("/[^/]+$", "/", playlist.url)
base_uri = re.sub("/[^/]+$", "/", playlist_uri)
target_dir = _crete_temp_dir(base_uri)
vod_paths = _get_vod_paths(playlist, start, end)
# Save playlists for debugging purposes
with open(path.join(target_dir, "playlists.m3u8"), "w") as f:
f.write(playlists_text)
f.write(playlists_m3u8)
with open(path.join(target_dir, "playlist.m3u8"), "w") as f:
f.write(vods_text)
f.write(response.text)
click.echo(f"\nDownloading {len(vods)} VODs using {args.max_workers} workers to {target_dir}")
sources = [base_uri + vod.path for vod in vods]
targets = [os.path.join(target_dir, f"{vod.index:05d}.ts") for vod in vods]
print_out(f"\nDownloading {len(vod_paths)} VODs using {args.max_workers} workers to {target_dir}")
sources = [base_uri + path for path in vod_paths]
targets = [os.path.join(target_dir, f"{k:05d}.ts") for k, _ in enumerate(vod_paths)]
asyncio.run(download_all(sources, targets, args.max_workers, rate_limit=args.rate_limit))
join_playlist = make_join_playlist(vods_m3u8, vods, targets)
join_playlist_path = path.join(target_dir, "playlist_downloaded.m3u8")
join_playlist.dump(join_playlist_path) # type: ignore
click.echo()
# Make a modified playlist which references downloaded VODs
# Keep only the downloaded segments and skip the rest
org_segments = playlist.segments.copy()
path_map = OrderedDict(zip(vod_paths, targets))
playlist.segments.clear()
for segment in org_segments:
if segment.uri in path_map:
segment.uri = path_map[segment.uri]
playlist.segments.append(segment)
playlist_path = path.join(target_dir, "playlist_downloaded.m3u8")
playlist.dump(playlist_path)
print_out("")
if args.no_join:
print_log("Skipping joining files...")
click.echo(f"VODs downloaded to:\n{blue(target_dir)}")
print_out("<dim>Skipping joining files...</dim>")
print_out(f"VODs downloaded to:\n<blue>{target_dir}</blue>")
return
if args.concat:
print_log("Concating files...")
print_out("<dim>Concating files...</dim>")
_concat_vods(targets, target)
else:
print_log("Joining files...")
_join_vods(join_playlist_path, target, args.overwrite, video)
click.echo()
print_out("<dim>Joining files...</dim>")
_join_vods(playlist_path, target, args.overwrite, video)
if args.keep:
click.echo(f"Temporary files not deleted: {target_dir}")
print_out(f"\n<dim>Temporary files not deleted: {target_dir}</dim>")
else:
print_log("Deleting temporary files...")
print_out("\n<dim>Deleting temporary files...</dim>")
shutil.rmtree(target_dir)
click.echo(f"\nDownloaded: {green(target)}")
print_out(f"\nDownloaded: <green>{target}</green>")
def http_get(url: str) -> str:
response = httpx.get(url)
response.raise_for_status()
return response.text
def _determine_time_range(video_id: str, args: DownloadOptions):
def _determine_time_range(video_id, args: DownloadOptions):
if args.start or args.end:
return args.start, args.end
if args.chapter is not None:
print_log("Fetching chapters...")
print_out("<dim>Fetching chapters...</dim>")
chapters = twitch.get_video_chapters(video_id)
if not chapters:
@ -343,11 +393,9 @@ def _determine_time_range(video_id: str, args: DownloadOptions):
try:
chapter = chapters[args.chapter - 1]
except IndexError:
raise ConsoleError(
f"Chapter {args.chapter} does not exist. This video has {len(chapters)} chapters."
)
raise ConsoleError(f"Chapter {args.chapter} does not exist. This video has {len(chapters)} chapters.")
click.echo(f'Chapter selected: {blue(chapter["description"])}\n')
print_out(f'Chapter selected: <blue>{chapter["description"]}</blue>\n')
start = chapter["positionMilliseconds"] // 1000
duration = chapter["durationMilliseconds"] // 1000
return start, start + duration
@ -355,11 +403,11 @@ def _determine_time_range(video_id: str, args: DownloadOptions):
return None, None
def _choose_chapter_interactive(chapters: List[Chapter]):
click.echo("\nChapters:")
def _choose_chapter_interactive(chapters):
print_out("\nChapters:")
for index, chapter in enumerate(chapters):
duration = utils.format_time(chapter["durationMilliseconds"] // 1000)
click.echo(f'{index + 1}) {bold(chapter["description"])} ({duration})')
print_out(f'{index + 1}) <b>{chapter["description"]}</b> <dim>({duration})</dim>')
index = utils.read_int("Select a chapter", 1, len(chapters))
chapter = chapters[index - 1]
return chapter
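The --output handling in this file builds the target filename by calling str.format(**substitutions) on the template and turning a KeyError into a friendly error listing the supported placeholders. A small sketch of that mechanism; the substitution values below are made up and the key set is only a subset of what the diff shows:
def render_filename(template: str, substitutions: dict) -> str:
    """Fill an --output style template, reporting unknown placeholders."""
    try:
        return template.format(**substitutions)
    except KeyError as e:
        supported = ", ".join(sorted(substitutions))
        raise ValueError(f"Invalid key {e} used in --output. Supported keys are: {supported}")
subs = {"date": "2024-03-14", "id": "2090131595", "login": "frozenflygone", "title_slug": "frost_fatales_2024_day_1", "format": "mkv"}
print(render_filename("{date}_{id}_{login}_{title_slug}.{format}", subs))
# 2024-03-14_2090131595_frozenflygone_frost_fatales_2024_day_1.mkv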

View File

@ -1,17 +1,11 @@
from typing import List
import click
import m3u8
from twitchdl import twitch, utils
from twitchdl.commands.download import get_video_placeholders
from twitchdl import utils, twitch
from twitchdl.commands.download import get_video_substitutions
from twitchdl.exceptions import ConsoleError
from twitchdl.output import bold, print_clip, print_json, print_log, print_table, print_video
from twitchdl.playlists import parse_playlists
from twitchdl.twitch import Chapter, Clip, Video
from twitchdl.output import print_video, print_clip, print_json, print_out, print_log
def info(id: str, *, json: bool = False):
def info(id: str, *, json: bool = False, format="mkv"):
video_id = utils.parse_video_identifier(id)
if video_id:
print_log("Fetching video...")
@ -29,10 +23,16 @@ def info(id: str, *, json: bool = False):
print_log("Fetching chapters...")
chapters = twitch.get_video_chapters(video_id)
substitutions = get_video_substitutions(video, format)
if json:
video_json(video, playlists, chapters)
else:
video_info(video, playlists, chapters)
print_out("\nOutput format placeholders:")
for k, v in substitutions.items():
print(f" * {k} = {v}")
return
clip_slug = utils.parse_clip_identifier(id)
@ -51,29 +51,25 @@ def info(id: str, *, json: bool = False):
raise ConsoleError(f"Invalid input: {id}")
def video_info(video: Video, playlists: str, chapters: List[Chapter]):
click.echo()
def video_info(video, playlists, chapters):
print_out()
print_video(video)
click.echo("Playlists:")
for p in parse_playlists(playlists):
click.echo(f"{bold(p.name)} {p.url}")
print_out()
print_out("Playlists:")
for p in m3u8.loads(playlists).playlists:
print_out(f"<b>{p.stream_info.video}</b> {p.uri}")
if chapters:
click.echo()
click.echo("Chapters:")
print_out()
print_out("Chapters:")
for chapter in chapters:
start = utils.format_time(chapter["positionMilliseconds"] // 1000, force_hours=True)
duration = utils.format_time(chapter["durationMilliseconds"] // 1000)
click.echo(f'{start} {bold(chapter["description"])} ({duration})')
placeholders = get_video_placeholders(video, format="mkv")
placeholders = [[f"{{{k}}}", v] for k, v in placeholders.items()]
click.echo("")
print_table(["Placeholder", "Value"], placeholders)
print_out(f'{start} <b>{chapter["description"]}</b> ({duration})')
def video_json(video: Video, playlists: str, chapters: List[Chapter]):
def video_json(video, playlists, chapters):
playlists = m3u8.loads(playlists).playlists
video["playlists"] = [
@ -82,9 +78,8 @@ def video_json(video: Video, playlists: str, chapters: List[Chapter]):
"resolution": p.stream_info.resolution,
"codecs": p.stream_info.codecs,
"video": p.stream_info.video,
"uri": p.uri,
}
for p in playlists
"uri": p.uri
} for p in playlists
]
video["chapters"] = chapters
@ -92,11 +87,11 @@ def video_json(video: Video, playlists: str, chapters: List[Chapter]):
print_json(video)
def clip_info(clip: Clip):
click.echo()
def clip_info(clip):
print_out()
print_clip(clip)
click.echo()
click.echo("Download links:")
print_out()
print_out("Download links:")
for q in clip["videoQualities"]:
click.echo(f"{bold(q['quality'])} [{q['frameRate']} fps] {q['sourceURL']}")
print_out("<b>{quality}p{frameRate}</b> {sourceURL}".format(**q))

View File

@ -1,11 +1,8 @@
import sys
from typing import List, Optional
import click
from twitchdl import twitch
from twitchdl.exceptions import ConsoleError
from twitchdl.output import print_json, print_log, print_paged, print_video, print_video_compact
from twitchdl.output import print_out, print_paged_videos, print_video, print_json, print_video_compact
def videos(
@ -13,14 +10,14 @@ def videos(
*,
all: bool,
compact: bool,
games: List[str],
games: list[str],
json: bool,
limit: Optional[int],
pager: Optional[int],
sort: twitch.VideosSort,
type: twitch.VideosType,
limit: int | None,
pager: int | None,
sort: str,
type: str,
):
game_ids = get_game_ids(games)
game_ids = _get_game_ids(games)
# Set different defaults for limit for compact display
limit = limit or (40 if compact else 10)
@ -29,21 +26,23 @@ def videos(
max_videos = sys.maxsize if all or pager else limit
total_count, generator = twitch.channel_videos_generator(
channel_name, max_videos, sort, type, game_ids=game_ids
)
channel_name, max_videos, sort, type, game_ids=game_ids)
if json:
videos = list(generator)
print_json({"count": len(videos), "totalCount": total_count, "videos": videos})
print_json({
"count": len(videos),
"totalCount": total_count,
"videos": videos
})
return
if total_count == 0:
click.echo("No videos found")
print_out("<yellow>No videos found</yellow>")
return
if pager:
print_fn = print_video_compact if compact else print_video
print_paged("Videos", generator, print_fn, pager, total_count)
print_paged_videos(generator, pager, total_count)
return
count = 0
@ -51,29 +50,31 @@ def videos(
if compact:
print_video_compact(video)
else:
click.echo()
print_out()
print_video(video)
count += 1
click.echo()
click.echo("-" * 80)
click.echo(f"Videos 1-{count} of {total_count}")
print_out()
print_out("-" * 80)
print_out(f"<yellow>Videos 1-{count} of {total_count}</yellow>")
if total_count > count:
click.secho(
"\nThere are more videos. "
+ "Increase the --limit, use --all or --pager to see the rest.",
dim=True,
print_out()
print_out(
"<dim>There are more videos. Increase the --limit, use --all or --pager to see the rest.</dim>"
)
def get_game_ids(names: List[str]) -> List[str]:
return [get_game_id(name) for name in names]
def _get_game_ids(names):
if not names:
return []
game_ids = []
for name in names:
print_out(f"<dim>Looking up game '{name}'...</dim>")
game_id = twitch.get_game_id(name)
if not game_id:
raise ConsoleError(f"Game '{name}' not found")
game_ids.append(int(game_id))
def get_game_id(name: str) -> str:
print_log(f"Looking up game '{name}'...")
game_id = twitch.get_game_id(name)
if not game_id:
raise ConsoleError(f"Game '{name}' not found")
return game_id
return game_ids

View File

@ -1,5 +1,4 @@
import os
import httpx
from twitchdl.exceptions import ConsoleError

View File

@ -1,25 +1,25 @@
from dataclasses import dataclass
from typing import Any, Mapping, Optional
from typing import Any
@dataclass
class DownloadOptions:
auth_token: Optional[str]
chapter: Optional[int]
auth_token: str | None
chapter: int | None
concat: bool
dry_run: bool
end: Optional[int]
end: int | None
format: str
keep: bool
no_join: bool
overwrite: bool
output: str
quality: Optional[str]
rate_limit: Optional[int]
start: Optional[int]
quality: str | None
rate_limit: str | None
start: int | None
max_workers: int
# Type for annotating decoded JSON
# TODO: make data classes for common structs
Data = Mapping[str, Any]
Data = dict[str, Any]

View File

@ -1,7 +1,5 @@
import click
class ConsoleError(click.ClickException):
"""Raised when an error occurs and script exectuion should halt."""
pass

View File

@ -1,12 +1,12 @@
import asyncio
import httpx
import logging
import os
import time
from abc import ABC, abstractmethod
from typing import List, Optional
import httpx
from twitchdl.progress import Progress
logger = logging.getLogger(__name__)
@ -62,7 +62,6 @@ class LimitingTokenBucket(TokenBucket):
class EndlessTokenBucket(TokenBucket):
"""Used when download speed is not limited."""
def advance(self, size: int):
pass
@ -123,22 +122,12 @@ async def download_all(
targets: List[str],
workers: int,
*,
rate_limit: Optional[int] = None,
rate_limit: Optional[int] = None
):
progress = Progress(len(sources))
token_bucket = LimitingTokenBucket(rate_limit) if rate_limit else EndlessTokenBucket()
async with httpx.AsyncClient(timeout=TIMEOUT) as client:
semaphore = asyncio.Semaphore(workers)
tasks = [
download_with_retries(
client,
semaphore,
task_id,
source,
target,
progress,
token_bucket,
)
for task_id, (source, target) in enumerate(zip(sources, targets))
]
tasks = [download_with_retries(client, semaphore, task_id, source, target, progress, token_bucket)
for task_id, (source, target) in enumerate(zip(sources, targets))]
await asyncio.gather(*tasks)
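download_all above creates one download task per VOD but bounds concurrency with an asyncio.Semaphore shared by all tasks. A minimal sketch of the same pattern using httpx, with retries, progress reporting and rate limiting left out; the URLs and worker count are placeholders:
import asyncio
from typing import List
import httpx
async def fetch_all(urls: List[str], workers: int = 5) -> List[str]:
    """Fetch many URLs concurrently, never more than `workers` at a time."""
    semaphore = asyncio.Semaphore(workers)
    async with httpx.AsyncClient() as client:
        async def fetch(url: str) -> str:
            async with semaphore:
                response = await client.get(url)
                response.raise_for_status()
                return response.text
        return await asyncio.gather(*(fetch(url) for url in urls))
# bodies = asyncio.run(fetch_all(["https://example.com"] * 3, workers=2))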

View File

@ -1,65 +1,112 @@
import json
import sys
import re
from itertools import islice
from typing import Any, Callable, Generator, List, Optional, TypeVar
import click
from twitchdl import utils
from twitchdl.twitch import Clip, Video
from typing import Any, Match
T = TypeVar("T")
START_CODES = {
'b': '\033[1m',
'dim': '\033[2m',
'i': '\033[3m',
'u': '\033[4m',
'red': '\033[91m',
'green': '\033[92m',
'yellow': '\033[93m',
'blue': '\033[94m',
'magenta': '\033[95m',
'cyan': '\033[96m',
}
END_CODE = '\033[0m'
START_PATTERN = "<(" + "|".join(START_CODES.keys()) + ")>"
END_PATTERN = "</(" + "|".join(START_CODES.keys()) + ")>"
USE_ANSI_COLOR = "--no-color" not in sys.argv
def start_code(match: Match[str]) -> str:
name = match.group(1)
return START_CODES[name]
def colorize(text: str) -> str:
text = re.sub(START_PATTERN, start_code, text)
text = re.sub(END_PATTERN, END_CODE, text)
return text
def strip_tags(text: str) -> str:
text = re.sub(START_PATTERN, '', text)
text = re.sub(END_PATTERN, '', text)
return text
def truncate(string: str, length: int) -> str:
if len(string) > length:
return string[: length - 1] + "…"
return string[:length - 1] + "…"
return string
def print_out(*args, **kwargs):
args = [colorize(a) if USE_ANSI_COLOR else strip_tags(a) for a in args]
print(*args, **kwargs)
def print_json(data: Any):
click.echo(json.dumps(data))
print(json.dumps(data))
def print_log(message: Any):
click.secho(message, err=True, dim=True)
def print_err(*args, **kwargs):
args = [f"<red>{arg}</red>" for arg in args]
args = [colorize(a) if USE_ANSI_COLOR else strip_tags(a) for a in args]
print(*args, file=sys.stderr, **kwargs)
def visual_len(text: str):
return len(click.unstyle(text))
def print_log(*args, **kwargs):
args = [f"<dim>{a}</dim>" for a in args]
args = [colorize(a) if USE_ANSI_COLOR else strip_tags(a) for a in args]
print(*args, file=sys.stderr, **kwargs)
def ljust(text: str, width: int):
diff = width - visual_len(text)
return text + (" " * diff) if diff > 0 else text
def print_video(video):
published_at = video["publishedAt"].replace("T", " @ ").replace("Z", "")
length = utils.format_duration(video["lengthSeconds"])
channel = f"<blue>{video['creator']['displayName']}</blue>" if video["creator"] else ""
playing = f"playing <blue>{video['game']['name']}</blue>" if video["game"] else ""
# Can't find URL in video object, strange
url = f"https://www.twitch.tv/videos/{video['id']}"
print_out(f"<b>Video {video['id']}</b>")
print_out(f"<green>{video['title']}</green>")
if channel or playing:
print_out(" ".join([channel, playing]))
if video["description"]:
print_out(f"Description: {video['description']}")
print_out(f"Published <blue>{published_at}</blue> Length: <blue>{length}</blue> ")
print_out(f"<i>{url}</i>")
def print_table(headers: List[str], data: List[List[str]]):
widths = [[visual_len(cell) for cell in row] for row in data + [headers]]
widths = [max(width) for width in zip(*widths)]
underlines = ["-" * width for width in widths]
def print_row(row: List[str]):
for idx, cell in enumerate(row):
width = widths[idx]
click.echo(ljust(cell, width), nl=False)
click.echo(" ", nl=False)
click.echo()
print_row(headers)
print_row(underlines)
for row in data:
print_row(row)
def print_video_compact(video):
id = video["id"]
date = video["publishedAt"][:10]
game = video["game"]["name"] if video["game"] else ""
title = truncate(video["title"], 80).ljust(80)
print_out(f'<b>{id}</b> {date} <green>{title}</green> <blue>{game}</blue>')
def print_paged(
label: str,
generator: Generator[T, Any, Any],
print_fn: Callable[[T], None],
page_size: int,
total_count: Optional[int] = None,
):
def print_paged_videos(generator, page_size, total_count):
iterator = iter(generator)
page = list(islice(iterator, page_size))
@ -67,89 +114,48 @@ def print_paged(
last = first + len(page) - 1
while True:
click.echo("-" * 80)
print_out("-" * 80)
click.echo()
for item in page:
print_fn(item)
print_out()
for video in page:
print_video(video)
print_out()
last = first + len(page) - 1
click.echo("-" * 80)
click.echo(f"{label} {first}-{last} of {total_count or '???'}")
print_out("-" * 80)
print_out(f"<yellow>Videos {first}-{last} of {total_count}</yellow>")
first = first + len(page)
last = first + 1
page = list(islice(iterator, page_size))
if not page or not prompt_continue():
if not page or not _continue():
break
def print_video(video: Video):
published_at = video["publishedAt"].replace("T", " @ ").replace("Z", "")
length = utils.format_duration(video["lengthSeconds"])
channel = blue(video["creator"]["displayName"]) if video["creator"] else ""
playing = f"playing {blue(video['game']['name'])}" if video["game"] else ""
# Can't find URL in video object, strange
url = f"https://www.twitch.tv/videos/{video['id']}"
click.secho(f"Video {video['id']}", bold=True)
click.secho(video["title"], fg="green")
if channel or playing:
click.echo(" ".join([channel, playing]))
if video["description"]:
click.echo(f"Description: {video['description']}")
click.echo(f"Published {blue(published_at)} Length: {blue(length)} ")
click.secho(url, italic=True)
click.echo()
def print_video_compact(video: Video):
id = video["id"]
date = video["publishedAt"][:10]
game = video["game"]["name"] if video["game"] else ""
title = truncate(video["title"], 80).ljust(80)
click.echo(f"{bold(id)} {date} {green(title)} {blue(game)}")
def print_clip(clip: Clip):
def print_clip(clip):
published_at = clip["createdAt"].replace("T", " @ ").replace("Z", "")
length = utils.format_duration(clip["durationSeconds"])
channel = clip["broadcaster"]["displayName"]
playing = f"playing {blue(clip['game']['name'])}" if clip["game"] else ""
click.echo(f"Clip {bold(clip['slug'])}")
click.secho(clip["title"], fg="green")
click.echo(f"{blue(channel)} {playing}")
click.echo(
f"Published {blue(published_at)}"
+ f" Length: {blue(length)}"
+ f" Views: {blue(clip['viewCount'])}"
playing = (
f"playing <blue>{clip['game']['name']}</blue>"
if clip["game"] else ""
)
click.secho(clip["url"], italic=True)
click.echo()
print_out(f"Clip <b>{clip['slug']}</b>")
print_out(f"<green>{clip['title']}</green>")
print_out(f"<blue>{channel}</blue> {playing}")
print_out(
f"Published <blue>{published_at}</blue>" +
f" Length: <blue>{length}</blue>" +
f" Views: <blue>{clip["viewCount"]}</blue>"
)
print_out(f"<i>{clip['url']}</i>")
def print_clip_compact(clip: Clip):
slug = clip["slug"]
date = clip["createdAt"][:10]
title = truncate(clip["title"], 50).ljust(50)
game = clip["game"]["name"] if clip["game"] else ""
game = truncate(game, 30).ljust(30)
click.echo(f"{date} {green(title)} {blue(game)} {bold(slug)}")
def prompt_continue():
enter = click.style("Enter", bold=True, fg="green")
ctrl_c = click.style("Ctrl+C", bold=True, fg="yellow")
click.echo(f"Press {enter} to continue, {ctrl_c} to break.")
def _continue():
print_out("Press <green><b>Enter</green> to continue, <yellow><b>Ctrl+C</yellow> to break.")
try:
input()
@ -157,30 +163,3 @@ def prompt_continue():
return False
return True
# Shorthand functions for coloring output
def blue(text: Any) -> str:
return click.style(text, fg="blue")
def cyan(text: Any) -> str:
return click.style(text, fg="cyan")
def green(text: Any) -> str:
return click.style(text, fg="green")
def yellow(text: Any) -> str:
return click.style(text, fg="yellow")
def bold(text: Any) -> str:
return click.style(text, bold=True)
def dim(text: Any) -> str:
return click.style(text, dim=True)
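One side of this diff colorizes console output by translating pseudo-HTML tags such as <green>…</green> into ANSI escape codes (print_out and friends), while the other relies on click.style and click.secho. A standalone sketch of the tag-based approach, with the tag set trimmed for brevity:
import re
CODES = {"b": "\033[1m", "dim": "\033[2m", "green": "\033[92m", "yellow": "\033[93m", "blue": "\033[94m"}
RESET = "\033[0m"
TAG_PATTERN = re.compile(r"</?(%s)>" % "|".join(CODES))
def colorize(text: str) -> str:
    """Replace <tag>...</tag> markers with ANSI start codes and a reset."""
    return TAG_PATTERN.sub(
        lambda match: RESET if match.group(0).startswith("</") else CODES[match.group(1)],
        text,
    )
print(colorize("Downloading: <yellow>clip.mp4</yellow>"))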

View File

@ -1,170 +0,0 @@
"""
Parse and manipulate m3u8 playlists.
"""
from dataclasses import dataclass
from typing import Generator, List, Optional, OrderedDict
import click
import m3u8
from twitchdl import utils
from twitchdl.output import bold, dim, print_table
@dataclass
class Playlist:
name: str
group_id: str
resolution: Optional[str]
url: str
is_source: bool
@dataclass
class Vod:
index: int
"""Ordinal number of the VOD in the playlist"""
path: str
"""Path part of the VOD URL"""
duration: int
"""Segment duration in seconds"""
def parse_playlists(playlists_m3u8: str) -> List[Playlist]:
def _parse(source: str) -> Generator[Playlist, None, None]:
document = load_m3u8(source)
for p in document.playlists:
resolution = (
"x".join(str(r) for r in p.stream_info.resolution)
if p.stream_info.resolution
else None
)
media = p.media[0]
is_source = media.group_id == "chunked"
yield Playlist(media.name, media.group_id, resolution, p.uri, is_source)
return list(_parse(playlists_m3u8))
def load_m3u8(playlist_m3u8: str) -> m3u8.M3U8:
return m3u8.loads(playlist_m3u8)
def enumerate_vods(
document: m3u8.M3U8,
start: Optional[int] = None,
end: Optional[int] = None,
) -> List[Vod]:
"""Extract VODs for download from document."""
vods = []
vod_start = 0
for index, segment in enumerate(document.segments):
vod_end = vod_start + segment.duration
# `vod_end > start` is used here because it's better to download a bit
# more than a bit less; similarly for the end condition
start_condition = not start or vod_end > start
end_condition = not end or vod_start < end
if start_condition and end_condition:
vods.append(Vod(index, segment.uri, segment.duration))
vod_start = vod_end
return vods
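A small illustration of the start/end overlap rule above, using a hypothetical three-segment playlist (10 seconds each):

# Hypothetical sample playlist, only for illustrating enumerate_vods.
_sample = m3u8.loads(
    "#EXTM3U\n"
    "#EXT-X-TARGETDURATION:10\n"
    "#EXTINF:10.0,\n0.ts\n"
    "#EXTINF:10.0,\n1.ts\n"
    "#EXTINF:10.0,\n2.ts\n"
    "#EXT-X-ENDLIST\n"
)
# Seconds 5..25 overlap all three segments, so all are kept (better to download
# a bit more than to cut the requested range short).
assert [v.index for v in enumerate_vods(_sample, start=5, end=25)] == [0, 1, 2]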
def make_join_playlist(
playlist: m3u8.M3U8,
vods: List[Vod],
targets: List[str],
) -> m3u8.Playlist:
"""
Make a modified playlist which references the downloaded VODs.
Keep only the downloaded segments and skip the rest.
"""
org_segments = playlist.segments.copy()
path_map = OrderedDict(zip([v.path for v in vods], targets))
playlist.segments.clear()
for segment in org_segments:
if segment.uri in path_map:
segment.uri = path_map[segment.uri]
playlist.segments.append(segment)
return playlist
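A sketch of how a downloader might use this, assuming `vod_playlist_text` holds the fetched media playlist and that the local file names follow a simple numbering scheme (both are assumptions for illustration, not taken from this diff):

document = load_m3u8(vod_playlist_text)             # assumed: playlist text fetched earlier
vods = enumerate_vods(document, start=0, end=20)
targets = [f"{vod.index:05d}.ts" for vod in vods]   # assumed local file names
join_playlist = make_join_playlist(document, vods, targets)
join_playlist.dump("join.m3u8")                     # ffmpeg can then concatenate from this file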
def select_playlist(playlists: List[Playlist], quality: Optional[str]) -> Playlist:
return (
select_playlist_by_name(playlists, quality)
if quality is not None
else select_playlist_interactive(playlists)
)
def select_playlist_by_name(playlists: List[Playlist], quality: str) -> Playlist:
if quality == "source":
for playlist in playlists:
if playlist.is_source:
return playlist
raise click.ClickException("Source quality not found, please report an issue on github.")
for playlist in playlists:
if playlist.name == quality or playlist.group_id == quality:
return playlist
available = ", ".join([p.name for p in playlists])
msg = f"Quality '{quality}' not found. Available qualities are: {available}"
raise click.ClickException(msg)
def select_playlist_interactive(playlists: List[Playlist]) -> Playlist:
playlists = sorted(playlists, key=_playlist_key)
headers = ["#", "Name", "Group ID", "Resolution"]
rows = [
[
f"{n + 1})",
bold(playlist.name),
dim(playlist.group_id),
dim(playlist.resolution or ""),
]
for n, playlist in enumerate(playlists)
]
click.echo()
print_table(headers, rows)
default = 1
for index, playlist in enumerate(playlists):
if playlist.is_source:
default = index + 1
no = utils.read_int("\nChoose quality", min=1, max=len(playlists) + 1, default=default)
playlist = playlists[no - 1]
return playlist
MAX = 1_000_000
def _playlist_key(playlist: Playlist) -> int:
"""Attempt to sort playlists so that source quality is on top, audio only
is on bottom and others are sorted descending by resolution."""
if playlist.is_source:
return 0
if playlist.group_id == "audio_only":
return MAX
try:
return MAX - int(playlist.name.split("p")[0])
except Exception:
pass
return MAX
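For example, sorting a made-up set of playlists with this key puts source quality first and audio only last:

_example = [
    Playlist("audio_only", "audio_only", None, "https://example.invalid/audio.m3u8", False),
    Playlist("720p60", "720p60", "1280x720", "https://example.invalid/720p60.m3u8", False),
    Playlist("1080p60", "chunked", "1920x1080", "https://example.invalid/chunked.m3u8", True),
]
assert [p.name for p in sorted(_example, key=_playlist_key)] == ["1080p60", "720p60", "audio_only"]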

View File

@ -1,13 +1,12 @@
import logging
import time
from collections import deque
from dataclasses import dataclass, field
from statistics import mean
from typing import Deque, Dict, NamedTuple, Optional
from typing import Dict, NamedTuple, Optional, Deque
import click
from twitchdl.output import blue
from twitchdl.output import print_out
from twitchdl.utils import format_size, format_time
logger = logging.getLogger(__name__)
@ -94,28 +93,18 @@ class Progress:
task = self.tasks[task_id]
if task.size != task.downloaded:
logger.warn(
f"Taks {task_id} ended with {task.downloaded}b downloaded, expected {task.size}b."
)
logger.warn(f"Taks {task_id} ended with {task.downloaded}b downloaded, expected {task.size}b.")
self.vod_downloaded_count += 1
self.print()
def _calculate_total(self):
self.estimated_total = (
int(mean(t.size for t in self.tasks.values()) * self.vod_count) if self.tasks else None
)
self.estimated_total = int(mean(t.size for t in self.tasks.values()) * self.vod_count) if self.tasks else None
def _calculate_progress(self):
self.speed = self._calculate_speed()
self.progress_perc = (
int(100 * self.progress_bytes / self.estimated_total) if self.estimated_total else 0
)
self.remaining_time = (
int((self.estimated_total - self.progress_bytes) / self.speed)
if self.estimated_total and self.speed
else None
)
self.progress_perc = int(100 * self.progress_bytes / self.estimated_total) if self.estimated_total else 0
self.remaining_time = int((self.estimated_total - self.progress_bytes) / self.speed) if self.estimated_total and self.speed else None
def _calculate_speed(self):
if len(self.samples) < 2:
@ -127,7 +116,7 @@ class Progress:
size = last_sample.downloaded - first_sample.downloaded
duration = last_sample.timestamp - first_sample.timestamp
return size / duration if duration > 0 else None
return size / duration
def print(self):
now = time.time()
@ -136,20 +125,13 @@ class Progress:
if now - self.last_printed < 0.1:
return
click.echo(f"\rDownloaded {self.vod_downloaded_count}/{self.vod_count} VODs", nl=False)
click.secho(f" {self.progress_perc}%", fg="blue", nl=False)
if self.estimated_total is not None:
total = f"~{format_size(self.estimated_total)}"
click.echo(f" of {blue(total)}", nl=False)
if self.speed is not None:
speed = f"{format_size(self.speed)}/s"
click.echo(f" at {blue(speed)}", nl=False)
if self.remaining_time is not None:
click.echo(f" ETA {blue(format_time(self.remaining_time))}", nl=False)
click.echo(" ", nl=False)
progress = " ".join([
f"Downloaded {self.vod_downloaded_count}/{self.vod_count} VODs",
f"<blue>{self.progress_perc}%</blue>",
f"of <blue>~{format_size(self.estimated_total)}</blue>" if self.estimated_total else "",
f"at <blue>{format_size(self.speed)}/s</blue>" if self.speed else "",
f"ETA <blue>{format_time(self.remaining_time)}</blue>" if self.remaining_time is not None else "",
])
print_out(f"\r{progress} ", end="")
self.last_printed = now

View File

@ -2,109 +2,27 @@
Twitch API access.
"""
import json
import logging
import time
from typing import Any, Dict, Generator, List, Literal, Mapping, Optional, Tuple, TypedDict, Union
import click
import httpx
import json
import click
from typing import Dict
from twitchdl import CLIENT_ID
from twitchdl.entities import Data
from twitchdl.exceptions import ConsoleError
ClipsPeriod = Literal["last_day", "last_week", "last_month", "all_time"]
VideosSort = Literal["views", "time"]
VideosType = Literal["archive", "highlight", "upload"]
class AccessToken(TypedDict):
signature: str
value: str
class User(TypedDict):
login: str
displayName: str
class Game(TypedDict):
id: str
name: str
class VideoQuality(TypedDict):
frameRate: str
quality: str
sourceURL: str
class ClipAccessToken(TypedDict):
id: str
playbackAccessToken: AccessToken
videoQualities: List[VideoQuality]
class Clip(TypedDict):
id: str
slug: str
title: str
createdAt: str
viewCount: int
durationSeconds: int
url: str
videoQualities: List[VideoQuality]
game: Game
broadcaster: User
class Video(TypedDict):
id: str
title: str
description: str
publishedAt: str
broadcastType: str
lengthSeconds: int
game: Game
creator: User
class Chapter(TypedDict):
id: str
durationMilliseconds: int
positionMilliseconds: int
type: str
description: str
subDescription: str
thumbnailURL: str
game: Game
class GQLError(click.ClickException):
def __init__(self, errors: List[str]):
def __init__(self, errors: list[str]):
message = "GraphQL query failed."
for error in errors:
message += f"\n* {error}"
super().__init__(message)
Content = Union[str, bytes]
Headers = Dict[str, str]
def authenticated_post(url, data=None, json=None, headers={}):
headers['Client-ID'] = CLIENT_ID
def authenticated_post(
url: str,
*,
json: Any = None,
content: Optional[Content] = None,
auth_token: Optional[str] = None,
):
headers = {"Client-ID": CLIENT_ID}
if auth_token is not None:
headers["authorization"] = f"OAuth {auth_token}"
response = request("POST", url, content=content, json=json, headers=headers)
response = httpx.post(url, data=data, json=json, headers=headers)
if response.status_code == 400:
data = response.json()
raise ConsoleError(data["message"])
@ -114,50 +32,16 @@ def authenticated_post(
return response
def request(
method: str,
url: str,
json: Any = None,
content: Optional[Content] = None,
headers: Optional[Mapping[str, str]] = None,
):
with httpx.Client() as client:
request = client.build_request(method, url, json=json, content=content, headers=headers)
log_request(request)
start = time.time()
response = client.send(request)
duration = time.time() - start
log_response(response, duration)
return response
logger = logging.getLogger(__name__)
def log_request(request: httpx.Request):
logger.debug(f"--> {request.method} {request.url}")
if request.content:
logger.debug(f"--> {request.content}")
def log_response(response: httpx.Response, duration: float):
request = response.request
duration_ms = int(1000 * duration)
logger.debug(f"<-- {request.method} {request.url} HTTP {response.status_code} {duration_ms}ms")
if response.content:
logger.debug(f"<-- {response.content}")
def gql_post(query: str):
url = "https://gql.twitch.tv/gql"
response = authenticated_post(url, content=query)
response = authenticated_post(url, data=query)
gql_raise_on_error(response)
return response.json()
def gql_query(query: str, auth_token: Optional[str] = None):
def gql_query(query: str, headers: Dict[str, str] = {}):
url = "https://gql.twitch.tv/gql"
response = authenticated_post(url, json={"query": query}, auth_token=auth_token)
response = authenticated_post(url, json={"query": query}, headers=headers)
gql_raise_on_error(response)
return response.json()
@ -177,7 +61,6 @@ VIDEO_FIELDS = """
broadcastType
lengthSeconds
game {
id
name
}
creator {
@ -211,7 +94,7 @@ CLIP_FIELDS = """
"""
def get_video(video_id: str) -> Optional[Video]:
def get_video(video_id: str):
query = f"""
{{
video(id: "{video_id}") {{
@ -224,7 +107,7 @@ def get_video(video_id: str) -> Optional[Video]:
return response["data"]["video"]
def get_clip(slug: str) -> Optional[Clip]:
def get_clip(slug: str):
query = f"""
{{
clip(slug: "{slug}") {{
@ -237,7 +120,7 @@ def get_clip(slug: str) -> Optional[Clip]:
return response["data"]["clip"]
def get_clip_access_token(slug: str) -> ClipAccessToken:
def get_clip_access_token(slug: str):
query = f"""
{{
"operationName": "VideoAccessToken_Clip",
@ -257,12 +140,7 @@ def get_clip_access_token(slug: str) -> ClipAccessToken:
return response["data"]["clip"]
def get_channel_clips(
channel_id: str,
period: ClipsPeriod,
limit: int,
after: Optional[str] = None,
):
def get_channel_clips(channel_id: str, period: str, limit: int, after: str | None = None):
"""
List channel clips.
@ -299,12 +177,8 @@ def get_channel_clips(
return response["data"]["user"]["clips"]
def channel_clips_generator(
channel_id: str,
period: ClipsPeriod,
limit: int,
) -> Generator[Clip, None, None]:
def _generator(clips: Data, limit: int) -> Generator[Clip, None, None]:
def channel_clips_generator(channel_id, period, limit):
def _generator(clips, limit):
for clip in clips["edges"]:
if limit < 1:
return
@ -325,10 +199,11 @@ def channel_clips_generator(
return _generator(clips, limit)
def channel_clips_generator_old(channel_id: str, period: ClipsPeriod, limit: int):
def channel_clips_generator_old(channel_id, period, limit):
cursor = ""
while True:
clips = get_channel_clips(channel_id, period, limit, after=cursor)
clips = get_channel_clips(
channel_id, period, limit, after=cursor)
if not clips["edges"]:
break
@ -347,11 +222,10 @@ def get_channel_videos(
limit: int,
sort: str,
type: str = "archive",
game_ids: Optional[List[str]] = None,
after: Optional[str] = None,
game_ids: list[str] | None = None,
after: str | None = None
):
game_ids = game_ids or []
game_ids_str = f"[{','.join(game_ids)}]"
query = f"""
{{
@ -362,7 +236,7 @@ def get_channel_videos(
sort: {sort.upper()},
after: "{after or ''}",
options: {{
gameIDs: {game_ids_str}
gameIDs: {game_ids}
}}
) {{
totalCount
@ -388,16 +262,8 @@ def get_channel_videos(
return response["data"]["user"]["videos"]
def channel_videos_generator(
channel_id: str,
max_videos: int,
sort: VideosSort,
type: VideosType,
game_ids: Optional[List[str]] = None,
) -> Tuple[int, Generator[Video, None, None]]:
game_ids = game_ids or []
def _generator(videos: Data, max_videos: int) -> Generator[Video, None, None]:
def channel_videos_generator(channel_id, max_videos, sort, type, game_ids=[]):
def _generator(videos, max_videos):
for video in videos["edges"]:
if max_videos < 1:
return
@ -418,7 +284,7 @@ def channel_videos_generator(
return videos["totalCount"], _generator(videos, max_videos)
def get_access_token(video_id: str, auth_token: Optional[str] = None) -> AccessToken:
def get_access_token(video_id, auth_token=None):
query = f"""
{{
videoPlaybackAccessToken(
@ -435,8 +301,12 @@ def get_access_token(video_id: str, auth_token: Optional[str] = None) -> AccessT
}}
"""
headers = {}
if auth_token is not None:
headers['authorization'] = f'OAuth {auth_token}'
try:
response = gql_query(query, auth_token=auth_token)
response = gql_query(query, headers=headers)
return response["data"]["videoPlaybackAccessToken"]
except httpx.HTTPStatusError as error:
# Provide a more useful error message when server returns HTTP 401
@ -453,27 +323,24 @@ def get_access_token(video_id: str, auth_token: Optional[str] = None) -> AccessT
raise
def get_playlists(video_id: str, access_token: AccessToken) -> str:
def get_playlists(video_id, access_token):
"""
For a given video return a playlist which contains possible video qualities.
"""
url = f"https://usher.ttvnw.net/vod/{video_id}"
response = httpx.get(
url,
params={
"nauth": access_token["value"],
"nauthsig": access_token["signature"],
"allow_audio_only": "true",
"allow_source": "true",
"player": "twitchweb",
},
)
response = httpx.get(url, params={
"nauth": access_token['value'],
"nauthsig": access_token['signature'],
"allow_audio_only": "true",
"allow_source": "true",
"player": "twitchweb",
})
response.raise_for_status()
return response.content.decode("utf-8")
return response.content.decode('utf-8')
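Sketch of the expected call sequence (the video ID is made up); the returned text is the multi-variant playlist, which the caller parses, e.g. with the m3u8 library, to list available qualities:

access_token = get_access_token("1234567890")            # hypothetical video ID
playlists_text = get_playlists("1234567890", access_token)
for line in playlists_text.splitlines():
    if not line.startswith("#"):
        print(line)   # URLs of the individual quality playlists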
def get_game_id(name: str):
def get_game_id(name):
query = f"""
{{
game(name: "{name.strip()}") {{
@ -488,29 +355,30 @@ def get_game_id(name: str):
return game["id"]
def get_video_chapters(video_id: str) -> List[Chapter]:
def get_video_chapters(video_id: str):
query = {
"operationName": "VideoPlayer_ChapterSelectButtonVideo",
"variables": {
"variables":
{
"includePrivate": False,
"videoID": video_id,
"videoID": video_id
},
"extensions": {
"persistedQuery": {
"extensions":
{
"persistedQuery":
{
"version": 1,
"sha256Hash": "8d2793384aac3773beab5e59bd5d6f585aedb923d292800119e03d40cd0f9b41",
"sha256Hash": "8d2793384aac3773beab5e59bd5d6f585aedb923d292800119e03d40cd0f9b41"
}
},
}
}
response = gql_post(json.dumps(query))
return list(_chapter_nodes(response["data"]["video"]["moments"]))
def _chapter_nodes(moments: Data) -> Generator[Chapter, None, None]:
for edge in moments["edges"]:
def _chapter_nodes(collection):
for edge in collection["edges"]:
node = edge["node"]
node["game"] = node["details"]["game"]
del node["details"]
del node["moments"]
yield node
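Sketch of the expected result shape (the video ID is made up); after `_chapter_nodes` flattens the GraphQL response, each chapter carries its offset, description and game:

for chapter in get_video_chapters("1234567890"):      # hypothetical video ID
    offset = chapter["positionMilliseconds"] // 1000
    print(f"{offset}s  {chapter['description']}  ({chapter['game']['name']})")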

View File

@ -1,8 +1,5 @@
import re
import unicodedata
from typing import Optional, Union
import click
def _format_size(value: float, digits: int, unit: str):
@ -12,7 +9,7 @@ def _format_size(value: float, digits: int, unit: str):
return f"{int(value)}{unit}"
def format_size(bytes_: Union[int, float], digits: int = 1):
def format_size(bytes_: int, digits: int = 1):
if bytes_ < 1024:
return _format_size(bytes_, digits, "B")
@ -27,7 +24,7 @@ def format_size(bytes_: Union[int, float], digits: int = 1):
return _format_size(mega / 1024, digits, "GB")
def format_duration(total_seconds: Union[int, float]) -> str:
def format_duration(total_seconds: int | float) -> str:
total_seconds = int(total_seconds)
hours = total_seconds // 3600
remainder = total_seconds % 3600
@ -43,7 +40,7 @@ def format_duration(total_seconds: Union[int, float]) -> str:
return f"{seconds} sec"
def format_time(total_seconds: Union[int, float], force_hours: bool = False) -> str:
def format_time(total_seconds: int | float, force_hours: bool = False) -> str:
total_seconds = int(total_seconds)
hours = total_seconds // 3600
remainder = total_seconds % 3600
@ -56,10 +53,15 @@ def format_time(total_seconds: Union[int, float], force_hours: bool = False) ->
return f"{minutes:02}:{seconds:02}"
def read_int(msg: str, min: int, max: int, default: Optional[int] = None) -> int:
def read_int(msg: str, min: int, max: int, default: int | None = None) -> int:
if default:
msg = msg + f" [default {default}]"
msg += ": "
while True:
try:
val = click.prompt(msg, default=default, type=int)
val = input(msg)
if default and not val:
return default
if min <= int(val) <= max:
@ -69,32 +71,32 @@ def read_int(msg: str, min: int, max: int, default: Optional[int] = None) -> int
def slugify(value: str) -> str:
value = unicodedata.normalize("NFKC", str(value))
value = re.sub(r"[^\w\s_-]", "", value)
value = re.sub(r"[\s_-]+", "_", value)
value = unicodedata.normalize('NFKC', str(value))
value = re.sub(r'[^\w\s_-]', '', value)
value = re.sub(r'[\s_-]+', '_', value)
return value.strip("_").lower()
def titlify(value: str) -> str:
value = unicodedata.normalize("NFKC", str(value))
value = re.sub(r"[^\w\s\[\]().-]", "", value)
value = re.sub(r"\s+", " ", value)
value = unicodedata.normalize('NFKC', str(value))
value = re.sub(r'[^\w\s\[\]().-]', '', value)
value = re.sub(r'\s+', ' ', value)
return value.strip()
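Illustrative behaviour of the two normalizers on a made-up title:

print(slugify("Speedrun: Any% [PB attempt]!"))    # -> "speedrun_any_pb_attempt"
print(titlify("Speedrun:  Any% [PB attempt]!"))   # -> "Speedrun Any [PB attempt]"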
VIDEO_PATTERNS = [
r"^(?P<id>\d+)?$",
r"^https://(www\.|m\.)?twitch\.tv/videos/(?P<id>\d+)(\?.+)?$",
r"^https://(www.)?twitch.tv/videos/(?P<id>\d+)(\?.+)?$",
]
CLIP_PATTERNS = [
r"^(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)$",
r"^https://(www\.|m\.)?twitch\.tv/\w+/clip/(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)(\?.+)?$",
r"^https://clips\.twitch\.tv/(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)(\?.+)?$",
r"^https://(www.)?twitch.tv/\w+/clip/(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)(\?.+)?$",
r"^https://clips.twitch.tv/(?P<slug>[A-Za-z0-9]+(?:-[A-Za-z0-9_-]{16})?)(\?.+)?$",
]
def parse_video_identifier(identifier: str) -> Optional[str]:
def parse_video_identifier(identifier: str) -> str | None:
"""Given a video ID or URL returns the video ID, or null if not matched"""
for pattern in VIDEO_PATTERNS:
match = re.match(pattern, identifier)
@ -102,7 +104,7 @@ def parse_video_identifier(identifier: str) -> Optional[str]:
return match.group("id")
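For example, both a bare ID and a video URL resolve to the same ID (values are made up):

assert parse_video_identifier("1234567890") == "1234567890"
assert parse_video_identifier("https://www.twitch.tv/videos/1234567890") == "1234567890"
assert parse_video_identifier("https://example.com/not-twitch") is None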
def parse_clip_identifier(identifier: str) -> Optional[str]:
def parse_clip_identifier(identifier: str) -> str | None:
"""Given a clip slug or URL returns the clip slug, or null if not matched"""
for pattern in CLIP_PATTERNS:
match = re.match(pattern, identifier)