#include "content_subtitle.h"
#include "dcp_decoder.h"
#include "image_decoder.h"
-#include "resampler.h"
#include "compose.hpp"
#include <dcp/reel.h>
#include <dcp/reel_sound_asset.h>
BOOST_FOREACH (shared_ptr<Piece> i, _pieces) {
if (!i->done) {
- DCPTime const t = i->content->position() + DCPTime (i->decoder->position(), i->frc);
+ DCPTime const t = content_time_to_dcp (i, i->decoder->position());
if (!earliest || t < earliest_content) {
earliest_content = t;
earliest = i;
}
}
- if (earliest) {
- earliest->done = earliest->decoder->pass ();
- if (earliest->done && earliest->content->audio) {
- /* Flush the Player audio system for this piece */
- BOOST_FOREACH (AudioStreamPtr i, earliest->content->audio->streams()) {
- audio_flush (earliest, i);
- }
- }
- }
-
/* Fill towards the next thing that might happen (or the end of the playlist). This is to fill gaps between content,
NOT to fill gaps within content (the latter is done in ::video())
*/
- DCPTime fill_towards = earliest ? earliest_content : _playlist->length();
+ DCPTime fill_towards = earliest ? earliest_content : _playlist->length().ceil(_film->video_frame_rate());
/* Work out where to fill video from */
optional<DCPTime> video_fill_from;
- if (_last_video_time && !_playlist->video_content_at(*_last_video_time)) {
+ if (_last_video_time) {
/* Fill from the last video or seek time */
video_fill_from = _last_video_time;
}
/* Fill some black if we would emit before the earliest piece of content. This is so we act like a phantom
Piece which emits black in spaces (we only emit if we are the earliest thing)
*/
- /* XXX: this should take _no_video into account */
if (video_fill_from && (!earliest || *video_fill_from < earliest_content) && ((fill_towards - *video_fill_from)) >= one_video_frame()) {
- emit_video (black_player_video_frame(), *video_fill_from);
- filled = true;
+ list<DCPTimePeriod> p = subtract(DCPTimePeriod(*video_fill_from, *video_fill_from + one_video_frame()), _no_video);
+ if (!p.empty ()) {
+ emit_video (black_player_video_frame(), p.front().from);
+ filled = true;
+ }
} else if (_playlist->length() == DCPTime()) {
/* Special case of an empty Film; just give one black frame */
emit_video (black_player_video_frame(), DCPTime());
}
optional<DCPTime> audio_fill_from;
- if (_last_audio_time && !_playlist->audio_content_at(*_last_audio_time)) {
+ if (_last_audio_time) {
/* Fill from the last audio or seek time */
audio_fill_from = _last_audio_time;
}
- /* XXX: _no_audio */
- if (audio_fill_from && audio_fill_from < fill_towards) {
- DCPTimePeriod period (*audio_fill_from, fill_towards);
+ DCPTime audio_fill_towards = fill_towards;
+ if (earliest && earliest->content->audio) {
+ audio_fill_towards += DCPTime::from_seconds (earliest->content->audio->delay() / 1000.0);
+ }
+
+ if (audio_fill_from && audio_fill_from < audio_fill_towards) {
+ DCPTimePeriod period (*audio_fill_from, audio_fill_towards);
if (period.duration() > one_video_frame()) {
period.to = period.from + one_video_frame();
}
- fill_audio (period);
- filled = true;
+ list<DCPTimePeriod> p = subtract(period, _no_audio);
+ if (!p.empty ()) {
+ fill_audio (p.front());
+ filled = true;
+ }
+ }
+
+ if (earliest) {
+ earliest->done = earliest->decoder->pass ();
}
/* Emit any audio that is ready */
emit_video (_last_video[wp], time);
}
-void
-Player::audio_flush (shared_ptr<Piece> piece, AudioStreamPtr stream)
-{
- shared_ptr<AudioContent> content = piece->content->audio;
- DCPOMATIC_ASSERT (content);
-
- shared_ptr<Resampler> r = resampler (content, stream, false);
- if (!r) {
- return;
- }
-
- pair<shared_ptr<const AudioBuffers>, Frame> ro = r->flush ();
- if (ro.first->frames() == 0) {
- return;
- }
-
- ContentAudio content_audio;
- content_audio.audio = ro.first;
- content_audio.frame = ro.second;
-
- /* Compute time in the DCP */
- DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
-
- audio_transform (content, stream, content_audio, time);
-}
-
/** Do our common processing on some audio */
void
Player::audio_transform (shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time)
content_audio.audio = _audio_processor->run (content_audio.audio, _film->audio_channels ());
}
+ /* Pad any gap which may be caused by audio delay */
+
+ if (_last_audio_time) {
+ fill_audio (DCPTimePeriod (*_last_audio_time, time));
+ }
+
/* Push */
_audio_merger.push (content_audio.audio, time);
shared_ptr<AudioContent> content = piece->content->audio;
DCPOMATIC_ASSERT (content);
- /* Resample */
- if (stream->frame_rate() != content->resampled_frame_rate()) {
- shared_ptr<Resampler> r = resampler (content, stream, true);
- pair<shared_ptr<const AudioBuffers>, Frame> ro = r->run (content_audio.audio, content_audio.frame);
- if (ro.first->frames() == 0) {
- return;
- }
- content_audio.audio = ro.first;
- content_audio.frame = ro.second;
- }
-
/* Compute time in the DCP */
DCPTime time = resampled_audio_to_dcp (piece, content_audio.frame) + DCPTime::from_seconds (content->delay() / 1000.0);
/* And the end of this block in the DCP */
DCPTime end = time + DCPTime::from_frames(content_audio.audio->frames(), content->resampled_frame_rate());
- /* Pad any gap which may be caused by audio delay */
- if (_last_audio_time) {
- fill_audio (DCPTimePeriod (*_last_audio_time, time));
- }
-
/* Remove anything that comes before the start or after the end of the content */
if (time < piece->content->position()) {
pair<shared_ptr<AudioBuffers>, DCPTime> cut = discard_audio (content_audio.audio, time, piece->content->position());
_audio_processor->flush ();
}
- for (ResamplerMap::iterator i = _resamplers.begin(); i != _resamplers.end(); ++i) {
- i->second->flush ();
- i->second->reset ();
- }
-
_audio_merger.clear ();
_active_subtitles.clear ();
}
}
-shared_ptr<Resampler>
-Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create)
-{
- ResamplerMap::const_iterator i = _resamplers.find (make_pair (content, stream));
- if (i != _resamplers.end ()) {
- return i->second;
- }
-
- if (!create) {
- return shared_ptr<Resampler> ();
- }
-
- LOG_GENERAL (
- "Creating new resampler from %1 to %2 with %3 channels",
- stream->frame_rate(),
- content->resampled_frame_rate(),
- stream->channels()
- );
-
- shared_ptr<Resampler> r (
- new Resampler (stream->frame_rate(), content->resampled_frame_rate(), stream->channels())
- );
-
- _resamplers[make_pair(content, stream)] = r;
- return r;
-}
-
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
void
Player::fill_audio (DCPTimePeriod period)
{
+ if (period.from == period.to) {
+ return;
+ }
+
BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_audio)) {
DCPTime t = i.from;
while (t < i.to) {