diff --git a/bbbdl/__main__.py b/bbbdl/__main__.py
index accc52a..6bb96c0 100644
--- a/bbbdl/__main__.py
+++ b/bbbdl/__main__.py
@@ -30,14 +30,14 @@ def download(input_url, output_file, overwrite=False, verbose_ffmpeg=False, debu
     meeting = Meeting.from_url(input_url)
 
     click.secho(f"Downloading: {input_url} -> {output_file}", err=True, fg="green")
 
-    streams = compose_lesson(meeting)
+    tracks = compose_lesson(meeting, 1280, 720)
-    output = ffmpeg.output(*streams, output_file)
+    output = ffmpeg.output(tracks.video, tracks.audio, output_file)
 
     if debug:
         click.echo(" ".join(output.compile()))
-    else:
-        output.run(quiet=not verbose_ffmpeg, overwrite_output=True)
+
+    output.run(quiet=not verbose_ffmpeg, overwrite_output=True)
 
 
 @main.command()
diff --git a/bbbdl/composer.py b/bbbdl/composer.py
index 1fc2a3c..1a0801f 100644
--- a/bbbdl/composer.py
+++ b/bbbdl/composer.py
@@ -1,24 +1,30 @@
 from typing import *
 import ffmpeg
 from .resources import Meeting
+from .tracks import Tracks
 
 
-def compose_screensharing(meeting: Meeting) -> Tuple[ffmpeg.Stream, ffmpeg.Stream]:
-    """Keep the deskshare video and the webcam audio, while discarding the rest."""
+def compose_screensharing(meeting: Meeting, width: int, height: int) -> Tracks:
+    tracks = Tracks()
 
-    return (
-        meeting.deskshare.as_stream().video,
-        meeting.webcams.as_stream().audio,
-    )
+    if meeting.webcams:
+        tracks.overlay(meeting.webcams.get_video().filter("scale", width, height).filter("setsar", 1, 1))
+        tracks.amerge(meeting.webcams.get_audio())
+
+    if meeting.deskshare:
+        tracks.overlay(meeting.deskshare.get_video().filter("scale", width, height).filter("setsar", 1, 1))
+
+    return tracks
 
 
-def compose_lesson(meeting: Meeting) -> Tuple[ffmpeg.Stream, ffmpeg.Stream]:
-    """Keep slides, deskshare video and webcam audio, while discarding the rest."""
-
-    video_stream, audio_stream = compose_screensharing(meeting)
+def compose_lesson(meeting: Meeting, width: int, height: int) -> Tracks:
+    tracks = compose_screensharing(meeting, width, height)
 
     for shape in meeting.shapes:
-        video_stream = ffmpeg.overlay(video_stream, shape.resource.as_stream().video.filter("scale", 1280, 720),
-                                      enable=f"between(t, {shape.start}, {shape.end})")
+        scaled_split_shape = shape.resource.get_video().filter("scale", width, height).filter("setsar", 1, 1).split()
+        count = 0
+        for enable in shape.enables:
+            count += 1
+            tracks.overlay(scaled_split_shape.stream(count), enable=f"between(t, {enable[0]}, {enable[1]})")
 
-    return video_stream, audio_stream
+    return tracks
diff --git a/bbbdl/resources.py b/bbbdl/resources.py
index f2410fe..abd5252 100644
--- a/bbbdl/resources.py
+++ b/bbbdl/resources.py
@@ -7,35 +7,42 @@ import ffmpeg
 from .urlhandler import playback_to_data
 
 
-@dataclasses.dataclass()
 class Resource:
-    href: str
+    def __init__(self, href: str):
+        self.href: str = href
+        self._video: Optional[ffmpeg.Stream] = None
+        self._video_count: int = 0
+        self._audio: Optional[ffmpeg.Stream] = None
+        self._audio_count: int = 0
 
-    def as_stream(self, **kwargs) -> ffmpeg.Stream:
-        return ffmpeg.input(self.href, **kwargs)
+    def __repr__(self):
+        return f"<{self.__class__.__qualname__} href={self.href}>"
+
+    @classmethod
+    def check_and_create(cls, href: str) -> Optional[Resource]:
+        """Check if the resource exists, and create it if it does."""
+        r = requests.head(href)
+        if not (200 <= r.status_code < 400):
+            return None
+        return cls(href=href)
+
+    def get_audio(self) -> ffmpeg.nodes.FilterableStream:
+        if self._audio is None:
+            self._audio = ffmpeg.input(self.href).audio.asplit()
+        self._audio_count += 1
+        return self._audio.stream(self._audio_count)
+
+    def get_video(self) -> ffmpeg.nodes.FilterableStream:
+        if self._video is None:
+            self._video = ffmpeg.input(self.href).video.split()
+        self._video_count += 1
+        return self._video.stream(self._video_count)
 
 
 @dataclasses.dataclass()
 class Shape:
     resource: Resource
-    start: float
-    end: float
-
-    @classmethod
-    def from_tag(cls, tag: bs4.Tag, *, base_url: str) -> Shape:
-        # No, `"in" not in tag` does not work
-        if not tag["in"]:
-            raise ValueError("Tag has no 'in' parameter")
-        if not tag["out"]:
-            raise ValueError("Tag has no 'out' parameter")
-        if not tag["xlink:href"]:
-            raise ValueError("Tag has no 'xlink:href' parameter")
-
-        return cls(
-            resource=Resource(href=f"{base_url}/{tag['xlink:href']}"),
-            start=float(tag["in"]),
-            end=float(tag["out"]),
-        )
+    enables: List[Tuple[float, float]]
 
 
 @dataclasses.dataclass()
@@ -49,22 +56,30 @@ class Meeting:
         r = requests.get(f"{base_url}/presentation/{meeting_id}/metadata.xml")
         r.raise_for_status()
 
-        deskshare = Resource(href=f"{base_url}/presentation/{meeting_id}/deskshare/deskshare.webm")
-        webcams = Resource(href=f"{base_url}/presentation/{meeting_id}/video/webcams.mp4")
+        deskshare = Resource.check_and_create(href=f"{base_url}/presentation/{meeting_id}/deskshare/deskshare.mp4")
+        webcams = Resource.check_and_create(href=f"{base_url}/presentation/{meeting_id}/video/webcams.mp4")
 
         shape_soup = bs4.BeautifulSoup(requests.get(f"{base_url}/presentation/{meeting_id}/shapes.svg").text, "lxml")
 
-        shapes: List[Shape] = []
+
+        shapes: Dict[str, Shape] = {}
         for tag in shape_soup.find_all("image"):
-            try:
-                shapes.append(Shape.from_tag(tag, base_url=f"{base_url}/presentation/{meeting_id}"))
-            except ValueError:
-                continue
+            if not tag["in"]:
+                raise ValueError("Tag has no 'in' parameter")
+            if not tag["out"]:
+                raise ValueError("Tag has no 'out' parameter")
+            if not tag["xlink:href"]:
+                raise ValueError("Tag has no 'xlink:href' parameter")
+
+            url = tag["xlink:href"]
+            if url not in shapes:
+                shapes[url] = Shape(resource=Resource(f"{base_url}/presentation/{meeting_id}/{url}"), enables=[])
+            shapes[url].enables.append((tag["in"], tag["out"]))
 
         return cls(
             deskshare=deskshare,
             webcams=webcams,
-            shapes=shapes
+            shapes=list(shapes.values())
         )
 
     @classmethod
diff --git a/bbbdl/tracks.py b/bbbdl/tracks.py
new file mode 100644
index 0000000..42a0929
--- /dev/null
+++ b/bbbdl/tracks.py
@@ -0,0 +1,20 @@
+from typing import Optional
+import ffmpeg.nodes
+
+
+class Tracks:
+    def __init__(self):
+        self.video: Optional[ffmpeg.nodes.FilterableStream] = None
+        self.audio: Optional[ffmpeg.nodes.FilterableStream] = None
+
+    def overlay(self, other: ffmpeg.nodes.FilterableStream, **kwargs):
+        if self.video is None:
+            self.video = other
+        else:
+            self.video = self.video.overlay(other, **kwargs)
+
+    def amerge(self, other: ffmpeg.nodes.FilterableStream, **kwargs):
+        if self.audio is None:
+            self.audio = other
+        else:
+            self.audio = ffmpeg.filter((self.audio, other), "amerge", **kwargs)
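For reference, a minimal sketch of how the new Tracks-based pipeline is driven end to end, mirroring the updated download command in bbbdl/__main__.py. The save_lesson wrapper and the placeholder URL are made up for illustration; the sketch assumes ffmpeg-python is installed, the BigBlueButton playback URL is reachable, and the meeting exposes at least one audio source so that tracks.audio is not None.

import ffmpeg

from bbbdl.composer import compose_lesson
from bbbdl.resources import Meeting


def save_lesson(playback_url: str, output_file: str) -> None:
    # Resolve deskshare, webcams and slide shapes from the playback URL.
    meeting = Meeting.from_url(playback_url)
    # Build the combined filter graph at the requested resolution (1280x720 here).
    tracks = compose_lesson(meeting, 1280, 720)
    # Feed the composed video and audio streams into a single ffmpeg output and render it.
    output = ffmpeg.output(tracks.video, tracks.audio, output_file)
    output.run(overwrite_output=True)


save_lesson("<BBB playback URL>", "lesson.mp4")  # hypothetical invocation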