wip: Error when failing to read MXF frame.
dcpomatic.git: src/lib/dcp_decoder.cc
/*
    Copyright (C) 2014-2022 Carl Hetherington <cth@carlh.net>

    This file is part of DCP-o-matic.

    DCP-o-matic is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    DCP-o-matic is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.

*/


#include "atmos_decoder.h"
#include "audio_content.h"
#include "audio_decoder.h"
#include "config.h"
#include "dcp_content.h"
#include "dcp_decoder.h"
#include "digester.h"
#include "ffmpeg_image_proxy.h"
#include "frame_interval_checker.h"
#include "image.h"
#include "j2k_image_proxy.h"
#include "text_decoder.h"
#include "video_decoder.h"
#include <dcp/cpl.h>
#include <dcp/dcp.h>
#include <dcp/decrypted_kdm.h>
#include <dcp/mono_picture_asset.h>
#include <dcp/mono_picture_asset_reader.h>
#include <dcp/mono_picture_frame.h>
#include <dcp/reel.h>
#include <dcp/reel_atmos_asset.h>
#include <dcp/reel_closed_caption_asset.h>
#include <dcp/reel_picture_asset.h>
#include <dcp/reel_sound_asset.h>
#include <dcp/reel_subtitle_asset.h>
#include <dcp/search.h>
#include <dcp/sound_asset_reader.h>
#include <dcp/sound_frame.h>
#include <dcp/stereo_picture_asset.h>
#include <dcp/stereo_picture_asset_reader.h>
#include <dcp/stereo_picture_frame.h>
#include <dcp/subtitle_image.h>
#include <iostream>

#include "i18n.h"


using std::cout;
using std::dynamic_pointer_cast;
using std::list;
using std::make_shared;
using std::map;
using std::shared_ptr;
using std::string;
using std::vector;
using boost::optional;
using namespace dcpomatic;


DCPDecoder::DCPDecoder (shared_ptr<const Film> film, shared_ptr<const DCPContent> content, bool fast, bool tolerant, shared_ptr<DCPDecoder> old)
        : Decoder (film)
        , _dcp_content (content)
{
        if (content->can_be_played()) {
                if (content->video) {
                        video = make_shared<VideoDecoder>(this, content);
                }
                if (content->audio) {
                        audio = make_shared<AudioDecoder>(this, content->audio, fast);
                }
                for (auto i: content->text) {
                        text.push_back (make_shared<TextDecoder>(this, i));
                        /* We should really call maybe_set_position() on this TextDecoder to set the time
                         * of the first subtitle, but it probably doesn't matter since we'll always
                         * have regularly occurring video (and maybe audio) content.
                         */
                }
                if (content->atmos) {
                        atmos = make_shared<AtmosDecoder>(this, content);
                }
        }

        /* We try to avoid re-scanning the DCP's files every time we make a new DCPDecoder, by re-using
           the old decoder's _reels list.  Before doing so we must check that nothing too serious has
           changed in the DCPContent: we store a digest of its important bits and only re-use _reels
           if that digest is unchanged.
        */

        _lazy_digest = calculate_lazy_digest (content);

        if (old && old->lazy_digest() == _lazy_digest) {
                _reels = old->_reels;
        } else {

                auto cpl_list = dcp::find_and_resolve_cpls(content->directories(), tolerant);

                if (cpl_list.empty()) {
                        throw DCPError (_("No CPLs found in DCP."));
                }

                shared_ptr<dcp::CPL> cpl;
                for (auto i: cpl_list) {
                        if (_dcp_content->cpl() && i->id() == _dcp_content->cpl().get()) {
                                cpl = i;
                        }
                }

                if (!cpl) {
                        /* No CPL found; probably an old file that doesn't specify it;
                           just use the first one.
                        */
                        cpl = cpl_list.front();
                }

                if (content->kdm()) {
                        cpl->add (decrypt_kdm_with_helpful_error(content->kdm().get()));
                }

                _reels = cpl->reels ();
        }

        set_decode_referenced (false);

        _reel = _reels.begin ();
        get_readers ();
}


void
DCPDecoder::pass_video(Frame frame, dcp::Size size)
{
        auto const entry_point = (*_reel)->main_picture()->entry_point().get_value_or(0);

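        /* For 2D (mono) assets we emit a single image per frame; for 3D (stereo) assets the same
           frame is fetched twice and emitted once for each eye.
        */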
        if (_mono_reader) {
                video->emit (
                        film(),
                        std::make_shared<J2KImageProxy>(
                                _mono_reader->get_frame (entry_point + frame),
                                size,
                                AV_PIX_FMT_XYZ12LE,
                                _forced_reduction
                                ),
                        _offset + frame
                        );
        } else {
                video->emit (
                        film(),
                        std::make_shared<J2KImageProxy>(
                                _stereo_reader->get_frame (entry_point + frame),
                                size,
                                dcp::Eye::LEFT,
                                AV_PIX_FMT_XYZ12LE,
                                _forced_reduction
                                ),
                        _offset + frame
                        );

                video->emit (
                        film(),
                        std::make_shared<J2KImageProxy>(
                                _stereo_reader->get_frame (entry_point + frame),
                                size,
                                dcp::Eye::RIGHT,
                                AV_PIX_FMT_XYZ12LE,
                                _forced_reduction
                                ),
                        _offset + frame
                        );
        }
}


void
DCPDecoder::pass_audio(Frame frame, double video_frame_rate)
{
        auto const entry_point = (*_reel)->main_sound()->entry_point().get_value_or(0);
        auto sf = _sound_reader->get_frame (entry_point + frame);
        auto from = sf->data ();

        int const channels = _dcp_content->audio->stream()->channels ();
        int const frames = sf->size() / (3 * channels);
        auto data = make_shared<AudioBuffers>(channels, frames);
        auto data_data = data->data();
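        /* The sound frame contains channel-interleaved 24-bit PCM, 3 bytes per sample with the
           least-significant byte first.  Each sample is packed into the top 24 bits of a signed
           32-bit int and then scaled to a float in roughly [-1, 1).
        */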
        for (int i = 0; i < frames; ++i) {
                for (int j = 0; j < channels; ++j) {
                        data_data[j][i] = static_cast<int> ((from[0] << 8) | (from[1] << 16) | (from[2] << 24)) / static_cast<float> (INT_MAX - 256);
                        from += 3;
                }
        }

        audio->emit(film(), _dcp_content->audio->stream(), data, ContentTime::from_frames(_offset, video_frame_rate) + _next);
}


void
DCPDecoder::pass_atmos(Frame frame)
{
        DCPOMATIC_ASSERT (_atmos_metadata);
        auto const entry_point = (*_reel)->atmos()->entry_point().get_value_or(0);
        atmos->emit (film(), _atmos_reader->get_frame(entry_point + frame), _offset + frame, *_atmos_metadata);
}


Decoder::PassResult
DCPDecoder::pass ()
{
        if (!_dcp_content->can_be_played()) {
                return PassResult::finished();
        }

        if (_reel == _reels.end()) {
                if (audio) {
                        audio->flush ();
                }
                return PassResult::finished();
        }

        PassResult result = PassResult::ok();

        auto const vfr = _dcp_content->active_video_frame_rate (film());

        /* Frame within the (played part of the) reel that is coming up next */
        auto const frame = _next.frames_round (vfr);

        auto picture_asset = (*_reel)->main_picture()->asset();
        DCPOMATIC_ASSERT (picture_asset);

        /* We must emit texts first because, when we emit the video for this frame,
           it will expect the texts to be there already.
        */
        pass_texts (_next, picture_asset->size());

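        /* A failure to read a video, audio or Atmos frame (for example a corrupt or truncated MXF)
           is reported via PassResult::error() rather than thrown, so that the rest of this pass can
           still be attempted.
        */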
        if ((_mono_reader || _stereo_reader) && (_decode_referenced || !_dcp_content->reference_video())) {
                try {
                        pass_video(frame, picture_asset->size());
                } catch (dcp::ReadError const &e) {
                        result = PassResult::error(e.what());
                }
        }

        if (_sound_reader && (_decode_referenced || !_dcp_content->reference_audio())) {
                try {
                        pass_audio(frame, vfr);
                } catch (dcp::ReadError const &e) {
                        result = PassResult::error(e.what());
                }
        }

        if (_atmos_reader) {
                try {
                        pass_atmos(frame);
                } catch (dcp::ReadError const &e) {
                        result = PassResult::error(e.what());
                }
        }

        _next += ContentTime::from_frames (1, vfr);

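        /* If we have now gone past the end of this reel's picture, move on to the next reel and
           reset our position within it.
        */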
        if ((*_reel)->main_picture ()) {
                if (_next.frames_round (vfr) >= (*_reel)->main_picture()->duration()) {
                        next_reel ();
                        _next = ContentTime ();
                }
        }

        return result;
}


void
DCPDecoder::pass_texts (ContentTime next, dcp::Size size)
{
        auto decoder = text.begin ();
        if (decoder == text.end()) {
                /* It's possible that there is now a main subtitle but no TextDecoders, for example if
                   the CPL has just changed but the TextContent's texts have not been recreated yet.
                */
                return;
        }

        if ((*_reel)->main_subtitle()) {
                pass_texts (
                        next,
                        (*_reel)->main_subtitle()->asset(),
                        _dcp_content->reference_text(TextType::OPEN_SUBTITLE),
                        (*_reel)->main_subtitle()->entry_point().get_value_or(0),
                        *decoder,
                        size
                        );
                ++decoder;
        }

        for (auto i: (*_reel)->closed_captions()) {
                pass_texts (
                        next, i->asset(), _dcp_content->reference_text(TextType::CLOSED_CAPTION), i->entry_point().get_value_or(0), *decoder, size
                        );
                ++decoder;
        }
}

void
DCPDecoder::pass_texts (
        ContentTime next, shared_ptr<dcp::SubtitleAsset> asset, bool reference, int64_t entry_point, shared_ptr<TextDecoder> decoder, dcp::Size size
        )
{
        auto const vfr = _dcp_content->active_video_frame_rate (film());
        /* Frame within the (played part of the) reel that is coming up next */
        auto const frame = next.frames_round (vfr);

        if (_decode_referenced || !reference) {
                auto subs = asset->subtitles_during (
                        dcp::Time (entry_point + frame, vfr, vfr),
                        dcp::Time (entry_point + frame + 1, vfr, vfr),
                        true
                        );

                vector<dcp::SubtitleString> strings;

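                /* Consecutive SubtitleStrings which share the same in/out times are collected into
                   `strings' and emitted together as one block; the pending block is flushed whenever
                   the timing changes, and again after the loop.
                */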
                for (auto i: subs) {
                        auto is = dynamic_pointer_cast<const dcp::SubtitleString>(i);
                        if (is) {
                                if (!strings.empty() && (strings.back().in() != is->in() || strings.back().out() != is->out())) {
                                        auto b = strings.back();
                                        decoder->emit_plain (
                                                ContentTimePeriod (
                                                        ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
                                                        ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
                                                        ),
                                                strings,
                                                _dcp_content->standard()
                                                );
                                        strings.clear ();
                                }

                                dcp::SubtitleString is_copy = *is;
                                is_copy.set_font(id_for_font_in_reel(is_copy.font().get_value_or(""), _reel - _reels.begin()));
                                strings.push_back(is_copy);
                        }

                        /* XXX: perhaps these image subs should also be collected together like the string ones are;
                           this would need to be done both here and in DCPSubtitleDecoder.
                        */

                        auto ii = dynamic_pointer_cast<const dcp::SubtitleImage>(i);
                        if (ii) {
                                emit_subtitle_image (
                                        ContentTimePeriod (
                                                ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->in().as_seconds ()),
                                                ContentTime::from_frames (_offset - entry_point, vfr) + ContentTime::from_seconds (i->out().as_seconds ())
                                                ),
                                        *ii,
                                        size,
                                        decoder
                                        );
                        }
                }

                if (!strings.empty()) {
                        auto b = strings.back();
                        decoder->emit_plain (
                                ContentTimePeriod (
                                        ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.in().as_seconds()),
                                        ContentTime::from_frames(_offset - entry_point, vfr) + ContentTime::from_seconds(b.out().as_seconds())
                                        ),
                                strings,
                                _dcp_content->standard()
                                );
                        strings.clear ();
                }
        }
}


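/* Move on to the next reel, first adding the current reel's picture duration to _offset so that
   the frame and time values we emit remain continuous across reel boundaries.
*/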
void
DCPDecoder::next_reel ()
{
        _offset += (*_reel)->main_picture()->actual_duration();
        ++_reel;
        get_readers ();
}


void
DCPDecoder::get_readers ()
{
        if (_reel == _reels.end() || !_dcp_content->can_be_played ()) {
                _mono_reader.reset ();
                _stereo_reader.reset ();
                _sound_reader.reset ();
                _atmos_reader.reset ();
                return;
        }

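        /* Open readers for whichever assets the current reel contains.  HMAC checking is switched
           off on each reader, presumably to keep playback fast; frame integrity is not verified here.
        */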
        if ((*_reel)->main_picture()) {
                auto asset = (*_reel)->main_picture()->asset ();
                auto mono = dynamic_pointer_cast<dcp::MonoPictureAsset> (asset);
                auto stereo = dynamic_pointer_cast<dcp::StereoPictureAsset> (asset);
                DCPOMATIC_ASSERT (mono || stereo);
                if (mono) {
                        _mono_reader = mono->start_read ();
                        _mono_reader->set_check_hmac (false);
                        _stereo_reader.reset ();
                } else {
                        _stereo_reader = stereo->start_read ();
                        _stereo_reader->set_check_hmac (false);
                        _mono_reader.reset ();
                }
        } else {
                _mono_reader.reset ();
                _stereo_reader.reset ();
        }

        if ((*_reel)->main_sound()) {
                _sound_reader = (*_reel)->main_sound()->asset()->start_read ();
                _sound_reader->set_check_hmac (false);
        } else {
                _sound_reader.reset ();
        }

        if ((*_reel)->atmos()) {
                auto asset = (*_reel)->atmos()->asset();
                _atmos_reader = asset->start_read();
                _atmos_reader->set_check_hmac (false);
                _atmos_metadata = AtmosMetadata (asset);
        } else {
                _atmos_reader.reset ();
                _atmos_metadata = boost::none;
        }
}


void
DCPDecoder::seek (ContentTime t, bool accurate)
{
        if (!_dcp_content->can_be_played ()) {
                return;
        }

        Decoder::seek (t, accurate);

        _reel = _reels.begin ();
        _offset = 0;
        get_readers ();

        int const pre_roll_seconds = 2;
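        /* We pass texts over a couple of seconds before the seek point, so that subtitles which
           start before the seek but are still on screen at it get emitted.
        */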

        /* Pre-roll for subs */

        auto pre = t - ContentTime::from_seconds (pre_roll_seconds);
        if (pre < ContentTime()) {
                pre = ContentTime ();
        }

        /* Seek to pre-roll position */

        while (
                _reel != _reels.end() &&
                pre >= ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()))
                ) {

                auto rd = ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()));
                pre -= rd;
                t -= rd;
                next_reel ();
        }

        /* Pass texts in the pre-roll */

        auto const vfr = _dcp_content->active_video_frame_rate (film());
        for (int i = 0; i < pre_roll_seconds * vfr; ++i) {
                pass_texts (pre, (*_reel)->main_picture()->asset()->size());
                pre += ContentTime::from_frames (1, vfr);
        }

        /* Seek to correct position */

        while (
                _reel != _reels.end() &&
                t >= ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()))
                ) {

                t -= ContentTime::from_frames ((*_reel)->main_picture()->actual_duration(), _dcp_content->active_video_frame_rate(film()));
                next_reel ();
        }

        _next = t;
}


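/* When this DCP is referenced from the project rather than re-encoded, its video and audio are
   normally ignored here; setting r to true forces them to be decoded anyway.
*/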
void
DCPDecoder::set_decode_referenced (bool r)
{
        _decode_referenced = r;

        if (video) {
                video->set_ignore (_dcp_content->reference_video() && !_decode_referenced);
        }
        if (audio) {
                audio->set_ignore (_dcp_content->reference_audio() && !_decode_referenced);
        }
}


void
DCPDecoder::set_forced_reduction (optional<int> reduction)
{
        _forced_reduction = reduction;
}


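/* The lazy digest summarises the DCPContent's paths, KDM and chosen CPL.  The constructor uses it
   to decide whether an old DCPDecoder's _reels can safely be re-used.
*/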
string
DCPDecoder::calculate_lazy_digest (shared_ptr<const DCPContent> c) const
{
        Digester d;
        for (auto i: c->paths()) {
                d.add (i.string());
        }
        if (_dcp_content->kdm()) {
                d.add(_dcp_content->kdm()->id());
        }
        d.add (static_cast<bool>(c->cpl()));
        if (c->cpl()) {
                d.add (c->cpl().get());
        }
        return d.get ();
}


ContentTime
DCPDecoder::position () const
{
        return ContentTime::from_frames(_offset, _dcp_content->active_video_frame_rate(film())) + _next;
}
