/*
    Copyright (C) 2013-2021 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/
#include "atmos_decoder.h"
#include "atmos_metadata.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "dcp_content.h"
#include "dcpomatic_log.h"
#include "piece_video.h"
#include "player_video.h"
#include "resampler.h"
#include "text_content.h"
#include "text_decoder.h"
#include "video_content.h"
#include "video_decoder.h"
#include <algorithm>
#include <iterator>


using std::dynamic_pointer_cast;
using std::make_shared;
using std::shared_ptr;
using boost::optional;
using namespace dcpomatic;
52 Piece::Piece (weak_ptr<const Film> film, vector<Pair> content, FrameRateChange frc, bool fast)
54 , _content (std::move(content))
58 auto const rep = representative();
59 if (rep.content->audio) {
60 int const streams = rep.content->audio->streams().size();
61 for (int i = 0; i < streams; ++i) {
62 _audio_streams.push_back(Stream(rep.content->position(), rep.content->audio->mapping()));
66 for (auto const& i: _content) {
67 if (i.decoder->video) {
68 i.decoder->video->Data.connect (boost::bind(&Piece::video, this, i.content, _1, _2, _3, _4));
71 if (i.decoder->audio) {
72 i.decoder->audio->Data.connect (boost::bind(&Piece::audio, this, i.content, _1, _2, _3));
75 for (auto j: i.decoder->text) {
76 j->BitmapStart.connect (boost::bind(&Piece::bitmap_start, this, i.content, j->content(), _1, _2, _3));
77 j->StringStart.connect (boost::bind(&Piece::string_start, this, i.content, j->content(), _1, _2));
78 j->Stop.connect (boost::bind(&Piece::stop, this, i.content, j->content(), _1));
81 if (i.decoder->atmos) {
82 i.decoder->atmos->Data.connect (boost::bind(&Piece::atmos, this, _1, _2, _3));
85 i.decoder->Flush.connect (boost::bind(&Piece::flush, this));
91 Piece::video (shared_ptr<const Content> content, shared_ptr<const ImageProxy> image, Frame frame, Eyes eyes, Part part)
97 if (_frc.skip && (frame % 2) == 1) {
101 auto const time = content_video_to_dcp (content, frame);
103 if (_ignore_video && _ignore_video->contains(time)) {
107 Video (PieceVideo(image, frame, time, eyes, part));
111 /** @param frame Frame position in the content, as if there were no trim */
113 Piece::audio (shared_ptr<const Content> content, AudioStreamPtr stream_ptr, shared_ptr<const AudioBuffers> audio, Frame frame)
115 auto film = _film.lock ();
116 DCPOMATIC_ASSERT (film);
118 /* Here we have the frame index into the content, and shortly we're going to lose that information and
119 * start thinking about frame indices into the piece. Here's the last chance for us to apply this content's
120 * trim, so we'll take it.
122 auto const start_trim = content->trim_start().frames_round(stream_ptr->frame_rate());
123 auto const remove_from_start = std::max(Frame(0), start_trim - frame);
124 if (remove_from_start > 0) {
125 auto trimmed = make_shared<AudioBuffers>(audio);
126 trimmed->trim_start (remove_from_start);
127 frame += remove_from_start;
130 auto const end_trim = content->trim_send().frames_round(stream_ptr->frame_rate());
133 auto content_streams = content->audio->streams();
134 auto content_stream_iter = std::find(content_streams.begin(), content_streams.end(), stream_ptr);
135 DCPOMATIC_ASSERT (content_stream_iter != content_streams.end());
136 int index = std::distance(content_streams.begin(), content_stream_iter);
137 DCPOMATIC_ASSERT (index >= 0 && index < static_cast<int>(_audio_streams.size()));
138 auto& stream = _audio_streams[index];
140 int const resampled_rate = representative().content->audio->resampled_frame_rate(film);
142 auto resampler = stream.resampler;
143 if (!resampler && stream_ptr->frame_rate() != resampled_rate) {
145 "Creating new resampler from %1 to %2 with %3 channels",
146 stream_ptr->frame_rate(),
148 stream_ptr->channels()
151 resampler = make_shared<Resampler>(stream_ptr->frame_rate(), resampled_rate, stream_ptr->channels());
153 resampler->set_fast ();
155 stream.resampler = resampler;
159 auto ro = resampler->run (audio);
160 if (ro->frames() == 0) {
166 if (stream.position == 0) {
167 stream.position = frame;
170 std::cout << "piece sends frame " << stream.position << " " << to_string(resampled_audio_to_dcp(stream.position) << "\n";
171 Audio (PieceAudio(index, audio, resampled_audio_to_dcp(stream.position), stream_ptr->mapping()));
172 stream.position += audio->frames();
177 Piece::bitmap_start (weak_ptr<const Content> content, weak_ptr<const TextContent> text, dcpomatic::ContentTime time, shared_ptr<Image> image, dcpomatic::Rect<double> area)
179 BitmapTextStart (PieceBitmapTextStart(content, text, time, image, area));
184 Piece::string_start (weak_ptr<const Content> content, weak_ptr<const TextContent> text, dcpomatic::ContentTime time, list<dcp::SubtitleString> subs)
186 StringTextStart (PieceStringTextStart(content, text, time, subs));
191 Piece::stop (weak_ptr<const Content> content, weak_ptr<const TextContent> text, dcpomatic::ContentTime time)
193 TextStop (PieceTextStop(content, text, time));
198 Piece::atmos (shared_ptr<const dcp::AtmosFrame> data, Frame frame, AtmosMetadata metadata)
200 Atmos (PieceAtmos(data, frame, metadata));
205 Piece::update_pull_to (DCPTime& pull_to) const
207 if (done() || _audio_streams.empty()) {
211 for (auto const& i: _audio_streams) {
212 pull_to = std::min(pull_to, i.last_push_end);
218 Piece::set_last_push_end (int stream, DCPTime end)
220 DCPOMATIC_ASSERT (stream >= 0 && stream < static_cast<int>(_audio_streams.size()));
221 _audio_streams[stream].last_push_end = end;
226 Piece::content_video_to_dcp (shared_ptr<const Content> content, Frame f) const
228 /* It might seem more logical here to convert s to a ContentTime (using the FrameRateChange)
229 then convert that ContentTime to frames at the content's rate. However this fails for
230 situations like content at 29.9978733fps, DCP at 30fps. The accuracy of the Time type is not
231 enough to distinguish between the two with low values of time (e.g. 3200 in Time units).
233 Instead we convert the DCPTime using the DCP video rate then account for any skip/repeat.
235 auto const d = DCPTime::from_frames(f * _frc.factor(), _frc.dcp) - DCPTime(content->trim_start(), _frc);
236 return d + content->position();
240 /** @param f Frame offset from start of the piece */
242 Piece::resampled_audio_to_dcp (Frame f) const
244 auto film = _film.lock ();
245 DCPOMATIC_ASSERT (film);
247 return DCPTime::from_frames(f, film->audio_frame_rate()) + position();
252 Piece::dcp_to_content_time (shared_ptr<const Content> content, DCPTime t) const
254 auto film = _film.lock ();
255 DCPOMATIC_ASSERT (film);
257 auto s = t - content->position ();
258 s = min (content->length_after_trim(film), s);
259 return max (ContentTime(), ContentTime(s, _frc) + content->trim_start());
264 Piece::content_time_to_dcp (shared_ptr<const Content> content, ContentTime t) const
266 if (std::find_if(_content.begin(), _content.end(), [content](Pair const& p) { return p.content == content; }) == _content.end()) {
270 return max (DCPTime(), DCPTime(t - content->trim_start(), _frc) + content->position());
275 Piece::use_video () const
277 for (auto const& i: _content) {
278 if (i.content->video && i.content->video->use()) {
288 Piece::video_frame_type () const
290 DCPOMATIC_ASSERT (representative().content->video);
291 return representative().content->video->frame_type();
296 Piece::position () const
298 return _content.front().content->position();
305 auto film = _film.lock ();
306 DCPOMATIC_ASSERT (film);
307 return _content.back().content->end(film);
311 shared_ptr<PlayerVideo>
312 Piece::player_video (PieceVideo video, dcp::Size container_size) const
314 auto film = _film.lock ();
315 DCPOMATIC_ASSERT (film);
317 auto const rep = representative().content;
319 return std::make_shared<PlayerVideo>(
322 rep->video->fade(film, video.frame),
323 scale_for_display(rep->video->scaled_size(film->frame_size()), container_size, film->frame_size()),
327 rep->video->colour_conversion(),
329 /* XXX: this isn't really right, but it's just for doing the reset_metadata() tricks so it's
330 * OK that it won't work for sound-only content.
340 Piece::resampled_audio_frame_rate () const
342 auto film = _film.lock ();
343 DCPOMATIC_ASSERT (film);
345 DCPOMATIC_ASSERT (representative().content->audio);
346 return representative().content->audio->resampled_frame_rate(film);
351 Piece::audio_gain () const
353 DCPOMATIC_ASSERT (representative().content->audio);
354 return representative().content->audio->gain();
359 Piece::decoder_for (shared_ptr<Content> content) const
361 for (auto const& i: _content) {
362 if (i.content == content) {
374 auto film = _film.lock ();
375 DCPOMATIC_ASSERT (film);
377 for (auto& i: _content) {
378 if (i.decoder->done()) {
381 LOG_DEBUG_PLAYER ("Calling pass() on %1", i.content->path(0));
389 Piece::reference_dcp_audio () const
391 auto dcp = dynamic_pointer_cast<DCPContent>(representative().content);
392 return dcp && dcp->reference_audio();
397 Piece::seek (DCPTime time, bool accurate)
399 for (auto& i: _audio_streams) {
401 i.resampler->flush ();
402 i.resampler->reset ();
407 for (auto& i: _content) {
408 if (time < i.content->position()) {
409 /* Before; seek to the start of the content. Even if this request is for an inaccurate seek
410 we must seek this (following) content accurately, otherwise when we come to the end of the current
411 content we may not start right at the beginning of the next, causing a gap (if the next content has
412 been trimmed to a point between keyframes, or something).
414 i.decoder->seek (i.content->trim_start(), true);
415 } else if (position() <= time && time < end()) {
416 /* During; seek to position */
417 i.decoder->seek (dcp_to_content_time(i.content, time), accurate);
423 optional<dcpomatic::DCPTime>
424 Piece::decoder_before(optional<dcpomatic::DCPTime> time)
426 auto film = _film.lock ();
427 DCPOMATIC_ASSERT (film);
429 for (auto const& i: _content) {
430 if (!i.decoder->done()) {
431 auto t = content_time_to_dcp(i.content, std::max(i.decoder->position(), i.content->trim_start()));
432 DCPOMATIC_ASSERT (t);
433 /* This is the first unfinished decoder we have, so we'll make a decision with it.
434 Given two choices at the same time, pick the one with texts so we see it before
437 if (!time || t < *time || (t == *time && !i.decoder->text.empty())) {
449 vector<dcpomatic::FontData>
450 Piece::fonts () const
452 vector<dcpomatic::FontData> data;
453 for (auto const& i: _content) {
454 auto f = i.decoder->fonts();
455 std::copy (f.begin(), f.end(), std::back_inserter(data));
462 Piece::period () const
464 return DCPTimePeriod(position(), end());
472 for (auto& i: _audio_streams) {
474 auto ro = i.resampler->flush ();
475 if (ro->frames() > 0) {
476 Audio (PieceAudio(index, ro, resampled_audio_to_dcp(i.position), i.mapping));
477 i.position += ro->frames();
488 for (auto const& i: _content) {
489 if (!i.decoder->done()) {