Allow repeat-frame to work with 3D.
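
The key change is in Player::emit_video(): with 3D content every frame is emitted twice, once per eye, so _last_video_time (and the point up to which finished subtitles are discarded) now only advances after the right-eye or both-eyes frame, i.e. once per displayed frame. Below is a minimal standalone sketch of that gating, using simplified stand-ins for dcpomatic's Eyes enum and DCPTime rather than the real Player types.

```
// Sketch only: the enum, the plain-integer clock and the frame length are
// simplified stand-ins for dcpomatic's Eyes / DCPTime, not the real API.
#include <iostream>

enum class Eyes { BOTH, LEFT, RIGHT };

struct Emitter
{
	long last_video_time = 0;  // analogous to Player::_last_video_time
	long frame_length;         // length of one video frame, in arbitrary time units

	explicit Emitter (long frame_length_)
		: frame_length (frame_length_)
	{}

	void emit_video (Eyes eyes, long time)
	{
		std::cout << "emit frame at " << time << "\n";
		/* Advance the clock once per stereo pair: after the right eye,
		   or after a 2D frame, which carries both eyes. */
		if (eyes == Eyes::BOTH || eyes == Eyes::RIGHT) {
			last_video_time = time + frame_length;
		}
	}
};

int main ()
{
	Emitter e (2000);
	/* A 3D frame pair: left then right eye, both at the same DCP time */
	e.emit_video (Eyes::LEFT, 0);
	e.emit_video (Eyes::RIGHT, 0);
	std::cout << e.last_video_time << "\n";  // advanced exactly one frame for the pair
	return 0;
}
```
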
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 0c17e08d05dcd3b14024d7f92b380feca961fb23..de221fef357c17ed849725f9d57376f07786b8b0 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -47,7 +47,6 @@
 #include "content_subtitle.h"
 #include "dcp_decoder.h"
 #include "image_decoder.h"
-#include "resampler.h"
 #include "compose.hpp"
 #include <dcp/reel.h>
 #include <dcp/reel_sound_asset.h>
@@ -519,7 +518,7 @@ Player::pass ()
 
        BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
                if (!i->done) {
-                       DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
+                       DCPTime const t = content_time_to_dcp (i, i->decoder->position());
                        if (!earliest || t < earliest_content) {
                                earliest_content = t;
                                earliest = i;
@@ -527,24 +526,14 @@ Player::pass ()
                }
        }
 
-       if (earliest) {
-               earliest->done = earliest->decoder->pass ();
-               if (earliest->done && earliest->content->audio) {
-                       /* Flush the Player audio system for this piece */
-                       BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
-                               audio_flush (earliest, i);
-                       }
-               }
-       }
-
        /* Fill towards the next thing that might happen (or the end of the playlist).  This is to fill gaps between content,
           NOT to fill gaps within content (the latter is done in ::video())
        */
-       DCPTime fill_towards = earliest ? earliest_content : _playlist->length();
+       DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());
 
        /* Work out where to fill video from */
        optional<DCPTime> video_fill_from;
-       if (_last_video_time && !_playlist->video_content_at(*_last_video_time)) {
+       if (_last_video_time) {
                /* Fill from the last video or seek time */
                video_fill_from = _last_video_time;
        }
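
When there is no more content to come, fill_towards is now the playlist length rounded up to a whole video frame, so the black fill towards the end of the playlist stops on a frame boundary. A small sketch of that rounding, assuming DCPTime::ceil(rate) rounds up to the next whole frame at the given rate and using an assumed internal resolution of 96000 units per second:

```
// Sketch of rounding a time up to the next whole video frame.  The 96000
// units-per-second resolution is an assumed stand-in for DCPTime's internal
// unit, and ceil_to_frame() mimics what DCPTime::ceil() is presumed to do.
#include <iostream>

long const UNITS_PER_SECOND = 96000;

/* Round t up to the nearest multiple of one frame at frames_per_second */
long ceil_to_frame (long t, int frames_per_second)
{
	long const frame = UNITS_PER_SECOND / frames_per_second;
	return ((t + frame - 1) / frame) * frame;
}

int main ()
{
	/* A playlist 1.01 seconds long at 24fps does not end on a frame boundary... */
	long const length = UNITS_PER_SECOND + UNITS_PER_SECOND / 100;
	/* ...so round it up to the next frame boundary before filling black towards it */
	std::cout << ceil_to_frame (length, 24) << "\n";  // 100000, i.e. 25 whole 4000-unit frames
	return 0;
}
```
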
@@ -553,10 +542,12 @@ Player::pass ()
        /* Fill some black if we would emit before the earliest piece of content.  This is so we act like a phantom
           Piece which emits black in spaces (we only emit if we are the earliest thing)
        */
-       /* XXX: this should take _no_video into account */
        if (video_fill_from && (!earliest || *video_fill_from < earliest_content) && ((fill_towards - *video_fill_from)) >= one_video_frame()) {
-               emit_video (black_player_video_frame(), *video_fill_from);
-               filled = true;
+               list<DCPTimePeriod> p = subtract(DCPTimePeriod(*video_fill_from, *video_fill_from + one_video_frame()), _no_video);
+               if (!p.empty ()) {
+                       emit_video (black_player_video_frame(), p.front().from);
+                       filled = true;
+               }
        } else if (_playlist->length() == DCPTime()) {
                /* Special case of an empty Film; just give one black frame */
                emit_video (black_player_video_frame(), DCPTime());
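
Both fill paths now pass the candidate gap through subtract() against _no_video or _no_audio and only fill whatever survives, so periods where content has declared that it provides no video or audio are not padded with black or silence. The following is a self-contained sketch of the presumed semantics of that subtraction (remove a list of excluded periods from one period and return the leftover sub-periods); it is an illustration, not dcpomatic's implementation.

```
// Sketch: subtract a list of excluded periods from one period, returning
// whatever parts of the original period are not covered by any exclusion.
#include <algorithm>
#include <iostream>
#include <list>

struct Period {
	long from;
	long to;
};

std::list<Period> subtract (Period period, std::list<Period> const& excluded)
{
	std::list<Period> result = { period };
	for (Period const& e : excluded) {
		std::list<Period> next;
		for (Period const& p : result) {
			/* Keep the part of p before e, if any */
			if (p.from < std::min (e.from, p.to)) {
				next.push_back ({ p.from, std::min (e.from, p.to) });
			}
			/* Keep the part of p after e, if any */
			if (std::max (e.to, p.from) < p.to) {
				next.push_back ({ std::max (e.to, p.from), p.to });
			}
		}
		result = next;
	}
	return result;
}

int main ()
{
	/* A 10-unit gap with "no audio" declared over [4, 6): fill [0, 4) and [6, 10) only */
	for (Period const& p : subtract ({ 0, 10 }, { { 4, 6 } })) {
		std::cout << "[" << p.from << ", " << p.to << ")\n";
	}
	return 0;
}
```
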
@@ -564,19 +555,30 @@ Player::pass ()
        }
 
        optional<DCPTime> audio_fill_from;
-       if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time)) {
+       if (_last_audio_time) {
                /* Fill from the last audio or seek time */
                audio_fill_from = _last_audio_time;
        }
 
-       /* XXX: _no_audio */
-       if (audio_fill_from && audio_fill_from < fill_towards) {
-               DCPTimePeriod period (*audio_fill_from, fill_towards);
+       DCPTime audio_fill_towards = fill_towards;
+       if (earliest && earliest->content->audio) {
+               audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
+       }
+
+       if (audio_fill_from && audio_fill_from < audio_fill_towards) {
+               DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
                if (period.duration() > one_video_frame()) {
                        period.to = period.from + one_video_frame();
                }
-               fill_audio (period);
-               filled = true;
+               list<DCPTimePeriod> p = subtract(period, _no_audio);
+               if (!p.empty ()) {
+                       fill_audio (p.front());
+                       filled = true;
+               }
+       }
+
+       if (earliest) {
+               earliest->done = earliest->decoder->pass ();
        }
 
        /* Emit any audio that is ready */
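
The audio fill target now also includes the earliest content's audio delay, which is stored in milliseconds (hence the division by 1000.0), and the filled gap remains capped at one video frame per pass. A toy version of that calculation, with plain doubles in seconds standing in for DCPTime and purely illustrative numbers:

```
// Toy numbers throughout; plain doubles in seconds stand in for DCPTime.
#include <algorithm>
#include <iostream>

int main ()
{
	double const one_video_frame = 1.0 / 24;  // 24fps
	double const last_audio_time = 2.0;       // audio has been emitted up to here
	double const fill_towards = 2.5;          // next interesting time in the playlist
	int const audio_delay_ms = 85;            // the earliest content's audio delay

	/* The earliest piece's audio only starts this much later, so the silence
	   fill is allowed to reach that far... */
	double const audio_fill_towards = fill_towards + audio_delay_ms / 1000.0;

	/* ...but no more than one video frame of silence is filled on any single pass */
	double const from = last_audio_time;
	double const to = std::min (audio_fill_towards, from + one_video_frame);

	std::cout << "fill silence over [" << from << ", " << to << ")\n";
	return 0;
}
```
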
@@ -700,32 +702,6 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
        emit_video (_last_video[wp], time);
 }
 
-void
-Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
-{
-       shared_ptr<AudioContent> content = piece->content->audio;
-       DCPOMATIC_ASSERT (content);
-
-       shared_ptr<Resampler> r = resampler (content, stream, false);
-       if (!r) {
-               return;
-       }
-
-       pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
-       if (ro.first->frames() == 0) {
-               return;
-       }
-
-       ContentAudio content_audio;
-       content_audio.audio = ro.first;
-       content_audio.frame = ro.second;
-
-       /* Compute time in the DCP */
-       DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
-
-       audio_transform (content, stream, content_audio, time);
-}
-
 /** Do our common processing on some audio */
 void
 Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
@@ -787,27 +763,11 @@ Player::audio (weak_ptr<Piece> wp, AudioStreamPtr stream, ContentAudio content_a
        shared_ptr<AudioContent> content = piece->content->audio;
        DCPOMATIC_ASSERT (content);
 
-       /* Resample */
-       if (stream->frame_rate() != content->resampled_frame_rate()) {
-               shared_ptr<Resampler> r = resampler (content, stream, true);
-               pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
-               if (ro.first->frames() == 0) {
-                       return;
-               }
-               content_audio.audio = ro.first;
-               content_audio.frame = ro.second;
-       }
-
        /* Compute time in the DCP */
        DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
        /* And the end of this block in the DCP */
        DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
 
-       /* Pad any gap which may be caused by audio delay */
-       if (_last_audio_time) {
-               fill_audio (DCPTimePeriod (*_last_audio_time, time));
-       }
-
        /* Remove anything that comes before the start or after the end of the content */
        if (time < piece->content->position()) {
                pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
@@ -927,11 +887,6 @@ Player::seek (DCPTime time, bool accurate)
                _audio_processor->flush ();
        }
 
-       for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
-               i->second->flush ();
-               i->second->reset ();
-       }
-
        _audio_merger.clear ();
        _active_subtitles.clear ();
 
@@ -959,33 +914,6 @@ Player::seek (DCPTime time, bool accurate)
        }
 }
 
-shared_ptr<Resampler>
-Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
-{
-       ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
-       if (i != _resamplers.end ()) {
-               return i->second;
-       }
-
-       if (!create) {
-               return shared_ptr<Resampler> ();
-       }
-
-       LOG_GENERAL (
-               "Creating new resampler from %1 to %2 with %3 channels",
-               stream->frame_rate(),
-               content->resampled_frame_rate(),
-               stream->channels()
-               );
-
-       shared_ptr<Resampler> r (
-               new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
-               );
-
-       _resamplers[make_pair(content, stream)] = r;
-       return r;
-}
-
 void
 Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
 {
@@ -993,9 +921,13 @@ Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
        if (subtitles) {
                pv->set_subtitle (subtitles.get ());
        }
+
        Video (pv, time);
-       _last_video_time = time + one_video_frame();
-       _active_subtitles.clear_before (time);
+
+       if (pv->eyes() == EYES_BOTH || pv->eyes() == EYES_RIGHT) {
+               _last_video_time = time + one_video_frame();
+               _active_subtitles.clear_before (time);
+       }
 }
 
 void
@@ -1008,6 +940,12 @@ Player::emit_audio (shared_ptr<AudioBuffers> data, DCPTime time)
 void
 Player::fill_audio (DCPTimePeriod period)
 {
+       if (period.from == period.to) {
+               return;
+       }
+
+       DCPOMATIC_ASSERT (period.from < period.to);
+
        BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
                DCPTime t = i.from;
                while (t < i.to) {
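
fill_audio() now returns early for a zero-length period and asserts that the period is properly ordered before filling each surviving sub-period with silence. A rough standalone sketch of that kind of guarded, block-wise silence fill; the half-second block size and the "emit" step are assumptions for illustration, not the real fill_audio() internals.

```
// Sketch: fill a period with silence in fixed-size blocks, clamping the final
// block to the end of the period.
#include <algorithm>
#include <cassert>
#include <iostream>

void fill_silence (double from, double to)
{
	if (from == to) {
		return;           // nothing to fill; avoid emitting zero-length buffers
	}
	assert (from < to);       // a backwards period would be a logic error upstream

	double const block = 0.5;
	double t = from;
	while (t < to) {
		double const end = std::min (t + block, to);
		std::cout << "emit " << (end - t) << "s of silence at " << t << "\n";
		t = end;
	}
}

int main ()
{
	fill_silence (1.0, 2.2);  // three blocks: 0.5s, 0.5s and 0.2s
	fill_silence (3.0, 3.0);  // no-op
	return 0;
}
```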