Re-add missing audio mapping in butler for preview.
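
The "missing audio mapping" is re-introduced through the remap() call that this diff adds to Player::audio_transform(); the helper itself is added elsewhere in the commit. Below is a minimal sketch of what it presumably does, reconstructed from the inline loop removed further down — the free-function signature, location and header names are assumptions, not part of this diff:

    #include "audio_buffers.h"
    #include "audio_mapping.h"
    #include <dcp/types.h>
    #include <boost/shared_ptr.hpp>

    using boost::shared_ptr;

    /* Hypothetical sketch of the remap() helper called from Player::audio_transform().
       Mirrors the inline mapping loop that this commit removes. */
    shared_ptr<AudioBuffers>
    remap (shared_ptr<const AudioBuffers> input, int output_channels, AudioMapping map)
    {
            /* Start from silence in the output (DCP) channel layout */
            shared_ptr<AudioBuffers> mapped (new AudioBuffers (output_channels, input->frames()));
            mapped->make_silent ();

            /* Accumulate each input channel into every output channel with a non-zero gain */
            for (int i = 0; i < map.input_channels(); ++i) {
                    for (int j = 0; j < mapped->channels(); ++j) {
                            if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
                                    mapped->accumulate_channel (
                                            input.get(),
                                            i,
                                            static_cast<dcp::Channel> (j),
                                            map.get (i, static_cast<dcp::Channel> (j))
                                            );
                            }
                    }
            }

            return mapped;
    }

Factoring the mapping out of Player::audio_transform() presumably lets the butler-driven preview path and the DCP encode path share the same remapping code, which is what the commit subject refers to.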
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 0c9bcf363ca32217ad4e61f5661cdf68a1bf3310..2e47da5bfc9306c16077410be99dc7b5d00d7415 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -47,7 +47,6 @@
 #include "content_subtitle.h"
 #include "dcp_decoder.h"
 #include "image_decoder.h"
-#include "resampler.h"
 #include "compose.hpp"
 #include <dcp/reel.h>
 #include <dcp/reel_sound_asset.h>
@@ -519,7 +518,7 @@ Player::pass ()
 
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (!i->done) {
-                       DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
+                       DCPTime const t = content_time_to_dcp (i, i->decoder->position());
                        if (!earliest || t < earliest_content) {
                                earliest_content = t;
                                earliest = i;
@@ -527,21 +526,10 @@ Player::pass ()
                }
        }
 
-       if (earliest) {
-               earliest->done = earliest->decoder->pass ();
-               if (earliest->done && earliest->content->audio) {
-                       /* Flush the Player audio system for this piece */
-                       BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
-                               audio_flush (earliest, i);
-                       }
-               }
-       }
-
        /* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
           NOT to fill gaps within content (the latter is done in ::video())
        */
-       DCPTime fill_towards = earliest ? earliest_content : _playlist->length();
-       fill_towards = fill_towards.ceil (_film->video_frame_rate ());
+       DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());
 
        /* Work out where to fill video from */
        optional<DCPTime> video_fill_from;
@@ -572,17 +560,25 @@ Player::pass ()
                audio_fill_from = _last_audio_time;
        }
 
-       /* XXX: _no_audio */
-       if (audio_fill_from && audio_fill_from < fill_towards) {
-               DCPTimePeriod period (*audio_fill_from, fill_towards);
+       DCPTime audio_fill_towards = fill_towards;
+       if (earliest && earliest->content->audio) {
+               audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
+       }
+
+       if (audio_fill_from && audio_fill_from < audio_fill_towards) {
+               DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
-               list<DCPTimePeriod> p = subtract(period, _no_video);
+               list<DCPTimePeriod> p = subtract(period, _no_audio);
                if (!p.empty ()) {
                        fill_audio (p.front());
+                       filled = true;
                }
-               filled = true;
+       }
+
+       if (earliest) {
+               earliest->done = earliest->decoder->pass ();
        }
 
        /* Emit any audio that is ready */
@@ -706,32 +702,6 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
        emit_video (_last_video[wp], time);
 }
 
-void
-Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
-{
-       shared_ptr<AudioContent> content = piece->content->audio;
-       DCPOMATIC_ASSERT (content);
-
-       shared_ptr<Resampler> r = resampler (content, stream, false);
-       if (!r) {
-               return;
-       }
-
-       pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
-       if (ro.first->frames() == 0) {
-               return;
-       }
-
-       ContentAudio content_audio;
-       content_audio.audio = ro.first;
-       content_audio.frame = ro.second;
-
-       /* Compute time in the DCP */
-       DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
-
-       audio_transform (content, stream, content_audio, time);
-}
-
 /** Do our common processing on some audio */
 void
 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
@@ -748,24 +718,7 @@ Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream
 
        /* Remap */
 
-       shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), content_audio.audio->frames()));
-       dcp_mapped->make_silent ();
-
-       AudioMapping map = stream->mapping ();
-       for (int i = 0; i < map.input_channels(); ++i) {
-               for (int j = 0; j < dcp_mapped->channels(); ++j) {
-                       if (map.get (i, static_cast<dcp::Channel> (j)) > 0) {
-                               dcp_mapped->accumulate_channel (
-                                       content_audio.audio.get(),
-                                       i,
-                                       static_cast<dcp::Channel> (j),
-                                       map.get (i, static_cast<dcp::Channel> (j))
-                                       );
-                       }
-               }
-       }
-
-       content_audio.audio = dcp_mapped;
+       content_audio.audio = remap (content_audio.audio, _film->audio_channels(), stream->mapping());
 
        /* Process */
 
@@ -773,12 +726,6 @@ Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream
                content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
        }
 
-       /* Pad any gap which may be caused by audio delay */
-
-       if (_last_audio_time) {
-               fill_audio (DCPTimePeriod (*_last_audio_time, time));
-       }
-
        /* Push */
 
        _audio_merger.push (content_audio.audio, time);
@@ -799,17 +746,6 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
        shared_ptr<AudioContent> content = piece->content->audio;
        DCPOMATIC_ASSERT (content);
 
-       /* Resample */
-       if (stream->frame_rate() != content->resampled_frame_rate()) {
-               shared_ptr<Resampler> r = resampler (content, stream, true);
-               pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
-               if (ro.first->frames() == 0) {
-                       return;
-               }
-               content_audio.audio = ro.first;
-               content_audio.frame = ro.second;
-       }
-
        /* Compute time in the DCP */
        DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
        /* And the end of this block in the DCP */
@@ -934,11 +870,6 @@ Player::seek (DCPTime time, bool accurate)
                _audio_processor->flush ();
        }
 
-       for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
-               i->second->flush ();
-               i->second->reset ();
-       }
-
        _audio_merger.clear ();
        _active_subtitles.clear ();
 
@@ -966,33 +897,6 @@ Player::seek (DCPTime time, bool accurate)
        }
 }
 
-shared_ptr<Resampler>
-Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
-{
-       ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
-       if (i != _resamplers.end ()) {
-               return i->second;
-       }
-
-       if (!create) {
-               return shared_ptr<Resampler> ();
-       }
-
-       LOG_GENERAL (
-               "Creating new resampler from %1 to %2 with %3 channels",
-               stream->frame_rate(),
-               content->resampled_frame_rate(),
-               stream->channels()
-               );
-
-       shared_ptr<Resampler> r (
-               new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
-               );
-
-       _resamplers[make_pair(content, stream)] = r;
-       return r;
-}
-
 void
 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
@@ -1019,6 +923,12 @@ Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
 void
 Player::fill_audio (DCPTimePeriod period)
 {
+       if (period.from == period.to) {
+               return;
+       }
+
+       DCPOMATIC_ASSERT (period.from < period.to);
+
        BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
                DCPTime t = i.from;
                while (t < i.to) {
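
Player::pass() above now asks content_time_to_dcp() for each piece's position rather than computing it inline. That helper is not part of this hunk; based on the expression it replaces, it is presumably along these lines — the trim_start() handling and the clamp at zero are assumptions made for illustration:

    #include "player.h"

    using std::max;
    using boost::shared_ptr;

    /* Hypothetical sketch: map a time within a piece of content to a time in the DCP.
       Reconstructed from the expression this commit removes from Player::pass(). */
    DCPTime
    Player::content_time_to_dcp (shared_ptr<Piece> piece, ContentTime t)
    {
            /* Strip the trimmed start, convert through the piece's frame-rate change,
               offset by the content's position in the DCP, and never go negative. */
            return max (
                    DCPTime (),
                    DCPTime (t - piece->content->trim_start(), piece->frc) + piece->content->position()
                    );
    }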