Rename piece_audio -> audio.
[dcpomatic.git] / src / lib / player.cc
index f14f4d0679bc2c7e818f85683cf382c7d20e3335..ee0a07411d157d39c7c956081c3febe3c2452706 100644 (file)
@@ -94,6 +94,10 @@ int const PlayerProperty::DCP_DECODE_REDUCTION = 704;
 int const PlayerProperty::PLAYBACK_LENGTH = 705;
 
 
+/** About 0.01dB */
+#define AUDIO_GAIN_EPSILON 0.001
+
+
 Player::Player (shared_ptr<const Film> film)
        : _film (film)
        , _suspended (0)
@@ -155,6 +159,66 @@ have_audio (shared_ptr<const Content> content)
 }
 
 
+vector<vector<shared_ptr<Content>>>
+collect (shared_ptr<const Film> film, ContentList content)
+{
+       vector<shared_ptr<Content>> ungrouped;
+       vector<vector<shared_ptr<Content>>> grouped;
+
+       auto same_settings = [](shared_ptr<const Film> film, shared_ptr<const AudioContent> a, shared_ptr<const AudioContent> b) {
+
+               auto a_streams = a->streams();
+               auto b_streams = b->streams();
+
+               if (a_streams.size() != b_streams.size()) {
+                       return false;
+               }
+
+               for (size_t i = 0; i < a_streams.size(); ++i) {
+                       auto a_stream = a_streams[i];
+                       auto b_stream = b_streams[i];
+                       if (
+                               !a_stream->mapping().equals(b_stream->mapping(), AUDIO_GAIN_EPSILON) ||
+                               a_stream->frame_rate() != b_stream->frame_rate() ||
+                               a_stream->channels() != b_stream->channels()) {
+                               return false;
+                       }
+               }
+
+               return (
+                       fabs(a->gain() - b->gain()) < AUDIO_GAIN_EPSILON &&
+                       a->delay() == b->delay() &&
+                       a->language() == b->language() &&
+                       a->resampled_frame_rate(film) == b->resampled_frame_rate(film) &&
+                       a->channel_names() == b->channel_names()
+                      );
+       };
+
+       for (auto i: content) {
+               if (i->video || !i->audio || !i->text.empty()) {
+                       ungrouped.push_back (i);
+               } else {
+                       bool done = false;  /* true once i has been appended to a group */
+                       for (auto& g: grouped) {
+                               if (!done && same_settings(film, g.front()->audio, i->audio) && i->position() == g.back()->end(film)) {
+                                       g.push_back (i);
+                                       done = true;
+                               }
+                       }
+                       if (!done) {
+                               grouped.push_back ({i});
+                       }
+               }
+       }
+
+       for (auto i: ungrouped) {
+               grouped.push_back({i});
+       }
+
+       return grouped;
+}
+
+
 void
 Player::setup_pieces_unlocked ()
 {
@@ -229,25 +293,10 @@ Player::setup_pieces_unlocked ()
                        piece->Audio.connect (bind(&Player::audio, this, weak_ptr<Piece>(piece), _1));
                }
 
-               auto j = decoder->text.begin();
-
-               while (j != decoder->text.end()) {
-                       (*j)->BitmapStart.connect (
-                               bind(&Player::bitmap_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
-                               );
-                       (*j)->PlainStart.connect (
-                               bind(&Player::plain_text_start, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
-                               );
-                       (*j)->Stop.connect (
-                               bind(&Player::subtitle_stop, this, weak_ptr<Piece>(piece), i, weak_ptr<const TextContent>((*j)->content()), _1)
-                               );
-
-                       ++j;
-               }
-
-               if (decoder->atmos) {
-                       decoder->atmos->Data.connect (bind(&Player::atmos, this, weak_ptr<Piece>(piece), _1));
-               }
+               piece->BitmapTextStart.connect (bind(&Player::bitmap_text_start, this, piece, _1));
+               piece->StringTextStart.connect (bind(&Player::string_text_start, this, piece, _1));
+               piece->TextStop.connect (bind(&Player::subtitle_stop, this, piece, _1));
+               piece->Atmos.connect (bind(&Player::atmos, this, piece, _1));
        }
 
        for (auto i = _pieces.begin(); i != _pieces.end(); ++i) {
@@ -772,26 +821,24 @@ Player::video (weak_ptr<Piece> wp, PieceVideo video)
                return;
        }
 
-       /* Time of the first frame we will emit */
-       DCPTime const time = piece->content_video_to_dcp (video.frame);
-       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
+       LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(video.time));
 
        /* Discard if it's before the content's period or the last accurate seek.  We can't discard
           if it's after the content's period here as in that case we still need to fill any gap between
           `now' and the end of the content's period.
        */
-       if (time < piece->position() || (_last_video_time && time < *_last_video_time)) {
+       if (video.time < piece->position() || (_last_video_time && video.time < *_last_video_time)) {
                return;
        }
 
-       if (piece->ignore_video_at(time)) {
+       if (piece->ignore_video_at(video.time)) {
                return;
        }
 
        /* Fill gaps that we discover now that we have some video which needs to be emitted.
           This is where we need to fill to.
        */
-       DCPTime fill_to = min (time, piece->end());
+       DCPTime fill_to = min (video.time, piece->end());
 
        if (_last_video_time) {
                DCPTime fill_from = max (*_last_video_time, piece->position());
@@ -842,7 +889,7 @@ Player::video (weak_ptr<Piece> wp, PieceVideo video)
 
        _last_video[wp] = piece->player_video (video, _video_container_size);
 
-       DCPTime t = time;
+       DCPTime t = video.time;
        for (int i = 0; i < frc.repeat; ++i) {
                if (t < piece->end()) {
                        emit_video (_last_video[wp], t);
@@ -853,9 +900,9 @@ Player::video (weak_ptr<Piece> wp, PieceVideo video)
 
 
 void
-Player::audio (weak_ptr<Piece> wp, PieceAudio piece_audio)
+Player::audio (weak_ptr<Piece> wp, PieceAudio audio)
 {
-       DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);
+       DCPOMATIC_ASSERT (audio.audio->frames() > 0);
 
        auto piece = wp.lock ();
        if (!piece) {
@@ -865,20 +912,20 @@ Player::audio (weak_ptr<Piece> wp, PieceAudio piece_audio)
        int const rfr = piece->resampled_audio_frame_rate ();
 
        /* Compute time in the DCP */
-       auto time = piece->resampled_audio_to_dcp (piece_audio.frame);
-       LOG_DEBUG_PLAYER("Received audio frame %1 at %2", piece_audio.frame, to_string(time));
+       auto time = piece->resampled_audio_to_dcp (audio.frame);
+       LOG_DEBUG_PLAYER("Received audio frame %1 at %2", audio.frame, to_string(time));
 
        /* And the end of this block in the DCP */
-       auto end = time + DCPTime::from_frames(piece_audio.audio->frames(), rfr);
+       auto end = time + DCPTime::from_frames(audio.audio->frames(), rfr);
 
        /* Remove anything that comes before the start or after the end of the content */
        if (time < piece->position()) {
-               auto cut = discard_audio (piece_audio.audio, time, piece->position());
+               auto cut = discard_audio (audio.audio, time, piece->position());
                if (!cut.first) {
                        /* This audio is entirely discarded */
                        return;
                }
-               piece_audio.audio = cut.first;
+               audio.audio = cut.first;
                time = cut.second;
        } else if (time > piece->end()) {
                /* Discard it all */
@@ -888,42 +935,42 @@ Player::audio (weak_ptr<Piece> wp, PieceAudio piece_audio)
                if (remaining_frames == 0) {
                        return;
                }
-               piece_audio.audio = make_shared<AudioBuffers>(piece_audio.audio, remaining_frames, 0);
+               audio.audio = make_shared<AudioBuffers>(audio.audio, remaining_frames, 0);
        }
 
-       DCPOMATIC_ASSERT (piece_audio.audio->frames() > 0);
+       DCPOMATIC_ASSERT (audio.audio->frames() > 0);
 
        /* Gain */
 
        if (piece->audio_gain() != 0) {
-               auto gain = make_shared<AudioBuffers>(piece_audio.audio);
+               auto gain = make_shared<AudioBuffers>(audio.audio);
                gain->apply_gain (piece->audio_gain());
-               piece_audio.audio = gain;
+               audio.audio = gain;
        }
 
        /* Remap */
 
-       piece_audio.audio = remap (piece_audio.audio, _film->audio_channels(), piece_audio.stream->mapping());
+       audio.audio = remap (audio.audio, _film->audio_channels(), audio.stream->mapping());
 
        /* Process */
 
        if (_audio_processor) {
-               piece_audio.audio = _audio_processor->run (piece_audio.audio, _film->audio_channels ());
+               audio.audio = _audio_processor->run (audio.audio, _film->audio_channels ());
        }
 
        /* Push */
 
-       _audio_merger.push (piece_audio.audio, time);
-       piece->set_last_push_end (piece_audio.stream, time + DCPTime::from_frames(piece_audio.audio->frames(), _film->audio_frame_rate()));
+       _audio_merger.push (audio.audio, time);
+       piece->set_last_push_end (audio.stream, time + DCPTime::from_frames(audio.audio->frames(), _film->audio_frame_rate()));
 }
 
 
 void
-Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentBitmapText subtitle)
+Player::bitmap_text_start (weak_ptr<Piece> wp, PieceBitmapTextStart subtitle)
 {
        auto piece = wp.lock ();
-       auto content = wc.lock ();
-       auto text = wt.lock ();
+       auto content = subtitle.content().lock();
+       auto text = subtitle.text().lock();
        if (!piece || !content || !text) {
                return;
        }
@@ -952,25 +999,25 @@ Player::bitmap_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_
 
        dcp::Size scaled_size (width, height);
        ps.bitmap.push_back (BitmapText(image->scale(scaled_size, dcp::YUVToRGB::REC601, image->pixel_format(), true, _fast), subtitle.sub.rectangle));
-       auto from = piece->content_time_to_dcp(content, subtitle.from());
+       auto from = piece->content_time_to_dcp(content, subtitle.time());
        DCPOMATIC_ASSERT (from);
 
-       _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
+       _active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
 }
 
 
 void
-Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentStringText subtitle)
+Player::string_text_start (weak_ptr<Piece> wp, PieceStringTextStart subtitle)
 {
        auto piece = wp.lock ();
-       auto content = wc.lock ();
-       auto text = wt.lock ();
+       auto content = subtitle.content().lock();
+       auto text = subtitle.text().lock();
        if (!piece || !content || !text) {
                return;
        }
 
        PlayerText ps;
-       auto const from = piece->content_time_to_dcp(content, subtitle.from());
+       auto const from = piece->content_time_to_dcp(content, subtitle.time());
        DCPOMATIC_ASSERT (from);
 
        if (from > piece->end()) {
@@ -978,8 +1025,8 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_p
        }
 
        for (auto s: subtitle.subs) {
-               s.set_h_position (s.h_position() + text->x_offset ());
-               s.set_v_position (s.v_position() + text->y_offset ());
+               s.set_h_position (s.h_position() + text->x_offset());
+               s.set_v_position (s.v_position() + text->y_offset());
                float const xs = text->x_scale();
                float const ys = text->y_scale();
                float size = s.size();
@@ -1002,20 +1049,20 @@ Player::plain_text_start (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_p
                ps.add_fonts (text->fonts ());
        }
 
-       _active_texts[static_cast<int>(text->type())].add_from (wt, ps, *from);
+       _active_texts[static_cast<int>(text->type())].add_from(text, ps, *from);
 }
 
 
 void
-Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<const TextContent> wt, ContentTime to)
+Player::subtitle_stop (weak_ptr<Piece> wp, PieceTextStop stop)
 {
-       auto content = wc.lock ();
-       auto text = wt.lock ();
+       auto content = stop.content().lock();
+       auto text = stop.text().lock();
        if (!text) {
                return;
        }
 
-       if (!_active_texts[static_cast<int>(text->type())].have(wt)) {
+       if (!_active_texts[static_cast<int>(text->type())].have(stop.text())) {
                return;
        }
 
@@ -1024,14 +1071,14 @@ Player::subtitle_stop (weak_ptr<Piece> wp, weak_ptr<const Content> wc, weak_ptr<
                return;
        }
 
-       auto const dcp_to = piece->content_time_to_dcp(content, to);
+       auto const dcp_to = piece->content_time_to_dcp(content, stop.time());
        DCPOMATIC_ASSERT (dcp_to);
 
        if (*dcp_to > piece->end()) {
                return;
        }
 
-       auto from = _active_texts[static_cast<int>(text->type())].add_to(wt, *dcp_to);
+       auto from = _active_texts[static_cast<int>(text->type())].add_to(stop.text(), *dcp_to);
 
        bool const always = (text->type() == TextType::OPEN_SUBTITLE && _always_burn_open_subtitles);
        if (text->use() && !always && !text->burn()) {
@@ -1227,7 +1274,7 @@ Player::playlist () const
 
 
 void
-Player::atmos (weak_ptr<Piece>, ContentAtmos data)
+Player::atmos (weak_ptr<Piece>, PieceAtmos data)
 {
        Atmos (data.data, DCPTime::from_frames(data.frame, _film->video_frame_rate()), data.metadata);
 }