X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fplayer.cc;h=f90cf32f5c3f228a35726017837de3c08a40492e;hb=8353a009aae1a604251c0160193c39741c2fa27c;hp=a3d52f43ee9dfac3a85a0d643c3d78e0af1d89e6;hpb=fca4842c205bc2fa74af94955100ca873bffc5d0;p=dcpomatic.git diff --git a/src/lib/player.cc b/src/lib/player.cc index a3d52f43e..cb6d51984 100644 --- a/src/lib/player.cc +++ b/src/lib/player.cc @@ -17,34 +17,70 @@ */ +#include +#include #include "player.h" #include "film.h" #include "ffmpeg_decoder.h" #include "ffmpeg_content.h" -#include "imagemagick_decoder.h" -#include "imagemagick_content.h" +#include "image_decoder.h" +#include "image_content.h" #include "sndfile_decoder.h" #include "sndfile_content.h" +#include "subtitle_content.h" +#include "subrip_decoder.h" +#include "subrip_content.h" #include "playlist.h" #include "job.h" +#include "image.h" +#include "ratio.h" +#include "log.h" +#include "scaler.h" +#include "render_subtitles.h" using std::list; using std::cout; +using std::min; +using std::max; using std::vector; +using std::pair; +using std::map; using boost::shared_ptr; using boost::weak_ptr; using boost::dynamic_pointer_cast; +using boost::optional; + +class Piece +{ +public: + Piece (shared_ptr c, shared_ptr d, FrameRateChange f) + : content (c) + , decoder (d) + , frc (f) + {} + + shared_ptr content; + shared_ptr decoder; + FrameRateChange frc; +}; Player::Player (shared_ptr f, shared_ptr p) : _film (f) , _playlist (p) , _video (true) , _audio (true) - , _subtitles (true) - , _have_valid_decoders (false) + , _have_valid_pieces (false) + , _video_position (0) + , _audio_position (0) + , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1)) + , _last_emit_was_black (false) + , _just_did_inaccurate_seek (false) + , _approximate_size (false) { - _playlist->Changed.connect (bind (&Player::playlist_changed, this)); - _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2)); + _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this)); + _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3)); + _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1)); + set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ())); } void @@ -59,268 +95,675 @@ Player::disable_audio () _audio = false; } -void -Player::disable_subtitles () -{ - _subtitles = false; -} - bool Player::pass () { - if (!_have_valid_decoders) { - setup_decoders (); - _have_valid_decoders = true; + if (!_have_valid_pieces) { + setup_pieces (); } - - bool done = true; - - if (_video && _video_decoder < _video_decoders.size ()) { - /* Run video decoder; this may also produce audio */ - - if (_video_decoders[_video_decoder]->pass ()) { - _video_decoder++; - } + /* Interrogate all our pieces to find the one with the earliest decoded data */ + + shared_ptr earliest_piece; + shared_ptr earliest_decoded; + DCPTime earliest_time = TIME_MAX; + DCPTime earliest_audio = TIME_MAX; + + for (list >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) { + + DCPTime const offset = (*i)->content->position() - (*i)->content->trim_start(); - if (_video_decoder < _video_decoders.size ()) { - done = false; + bool done = false; + shared_ptr dec; + while (!done) { + dec = (*i)->decoder->peek (); + if (!dec) { + /* Decoder has nothing else to give us */ 
+ break; + } + + dec->set_dcp_times (_film->video_frame_rate(), _film->audio_frame_rate(), (*i)->frc, offset); + DCPTime const t = dec->dcp_time - offset; + if (t >= ((*i)->content->full_length() - (*i)->content->trim_end ())) { + /* In the end-trimmed part; decoder has nothing else to give us */ + dec.reset (); + done = true; + } else if (t >= (*i)->content->trim_start ()) { + /* Within the un-trimmed part; everything's ok */ + done = true; + } else { + /* Within the start-trimmed part; get something else */ + (*i)->decoder->consume (); + } } - - } - if (!_video && _audio && _playlist->audio_from() == Playlist::AUDIO_FFMPEG && _sequential_audio_decoder < _audio_decoders.size ()) { + if (!dec) { + continue; + } - /* We're not producing video, so we may need to run FFmpeg content to get the audio */ - - if (_audio_decoders[_sequential_audio_decoder]->pass ()) { - _sequential_audio_decoder++; + if (dec->dcp_time < earliest_time) { + earliest_piece = *i; + earliest_decoded = dec; + earliest_time = dec->dcp_time; } - - if (_sequential_audio_decoder < _audio_decoders.size ()) { - done = false; + + if (dynamic_pointer_cast (dec) && dec->dcp_time < earliest_audio) { + earliest_audio = dec->dcp_time; } + } + if (!earliest_piece) { + flush (); + return true; } - if (_audio && _playlist->audio_from() == Playlist::AUDIO_SNDFILE) { - - /* We're getting audio from SndfileContent */ - - for (vector >::iterator i = _audio_decoders.begin(); i != _audio_decoders.end(); ++i) { - if (!(*i)->pass ()) { - done = false; + if (earliest_audio != TIME_MAX) { + TimedAudioBuffers tb = _audio_merger.pull (max (int64_t (0), earliest_audio)); + Audio (tb.audio, tb.time); + /* This assumes that the audio_frames_to_time conversion is exact + so that there are no accumulated errors caused by rounding. 
+ */ + _audio_position += _film->audio_frames_to_time (tb.audio->frames ()); + } + + /* Emit the earliest thing */ + + shared_ptr dv = dynamic_pointer_cast (earliest_decoded); + shared_ptr da = dynamic_pointer_cast (earliest_decoded); + shared_ptr dis = dynamic_pointer_cast (earliest_decoded); + shared_ptr dts = dynamic_pointer_cast (earliest_decoded); + + /* Will be set to false if we shouldn't consume the peeked DecodedThing */ + bool consume = true; + + if (dv && _video) { + + if (_just_did_inaccurate_seek) { + + /* Just emit; no subtlety */ + emit_video (earliest_piece, dv); + step_video_position (dv); + + } else if (dv->dcp_time > _video_position) { + + /* Too far ahead */ + + list >::iterator i = _pieces.begin(); + while (i != _pieces.end() && ((*i)->content->position() >= _video_position || _video_position >= (*i)->content->end())) { + ++i; + } + + if (i == _pieces.end() || !_last_incoming_video.video || !_have_valid_pieces) { + /* We're outside all video content */ + emit_black (); + _statistics.video.black++; + } else { + /* We're inside some video; repeat the frame */ + _last_incoming_video.video->dcp_time = _video_position; + emit_video (_last_incoming_video.weak_piece, _last_incoming_video.video); + step_video_position (_last_incoming_video.video); + _statistics.video.repeat++; } + + consume = false; + + } else if (dv->dcp_time == _video_position) { + /* We're ok */ + emit_video (earliest_piece, dv); + step_video_position (dv); + _statistics.video.good++; + } else { + /* Too far behind: skip */ + _statistics.video.skip++; } - Audio (_audio_buffers, _audio_time.get()); - _audio_buffers.reset (); - _audio_time = boost::none; + _just_did_inaccurate_seek = false; + + } else if (da && _audio) { + + if (da->dcp_time > _audio_position) { + /* Too far ahead */ + emit_silence (da->dcp_time - _audio_position); + consume = false; + _statistics.audio.silence += (da->dcp_time - _audio_position); + } else if (da->dcp_time == _audio_position) { + /* We're ok */ + emit_audio (earliest_piece, da); + _statistics.audio.good += da->data->frames(); + } else { + /* Too far behind: skip */ + _statistics.audio.skip += da->data->frames(); + } + + } else if (dis && _video) { + _image_subtitle.piece = earliest_piece; + _image_subtitle.subtitle = dis; + update_subtitle_from_image (); + } else if (dts && _video) { + _text_subtitle.piece = earliest_piece; + _text_subtitle.subtitle = dts; + update_subtitle_from_text (); } - return done; + if (consume) { + earliest_piece->decoder->consume (); + } + + return false; } void -Player::set_progress (shared_ptr job) +Player::emit_video (weak_ptr weak_piece, shared_ptr video) { - /* Assume progress can be divined from how far through the video we are */ - - if (_video_decoder >= _video_decoders.size() || !_playlist->video_length()) { + /* Keep a note of what came in so that we can repeat it if required */ + _last_incoming_video.weak_piece = weak_piece; + _last_incoming_video.video = video; + + shared_ptr piece = weak_piece.lock (); + if (!piece) { return; } - job->set_progress ((_video_start[_video_decoder] + _video_decoders[_video_decoder]->video_frame()) / _playlist->video_length ()); + shared_ptr content = dynamic_pointer_cast (piece->content); + assert (content); + + FrameRateChange frc (content->video_frame_rate(), _film->video_frame_rate()); + + float const ratio = content->ratio() ? 
content->ratio()->ratio() : content->video_size_after_crop().ratio(); + libdcp::Size image_size = fit_ratio_within (ratio, _video_container_size); + if (_approximate_size) { + image_size.width &= ~3; + image_size.height &= ~3; + } + + shared_ptr pi ( + new PlayerImage ( + video->image, + content->crop(), + image_size, + _video_container_size, + _film->scaler() + ) + ); + + if ( + _film->with_subtitles () && + _out_subtitle.image && + video->dcp_time >= _out_subtitle.from && video->dcp_time <= _out_subtitle.to + ) { + + Position const container_offset ( + (_video_container_size.width - image_size.width) / 2, + (_video_container_size.height - image_size.height) / 2 + ); + + pi->set_subtitle (_out_subtitle.image, _out_subtitle.position + container_offset); + } + + +#ifdef DCPOMATIC_DEBUG + _last_video = piece->content; +#endif + + Video (pi, video->eyes, content->colour_conversion(), video->same, video->dcp_time); + + _last_emit_was_black = false; } void -Player::process_video (shared_ptr i, bool same, shared_ptr s, double t) +Player::step_video_position (shared_ptr video) { - Video (i, same, s, _video_start[_video_decoder] + t); + /* This is a bit of a hack; don't update _video_position if EYES_RIGHT is on its way */ + if (video->eyes != EYES_LEFT) { + /* This assumes that the video_frames_to_time conversion is exact + so that there are no accumulated errors caused by rounding. + */ + _video_position += _film->video_frames_to_time (1); + } } void -Player::process_audio (weak_ptr c, shared_ptr b, double t) +Player::emit_audio (weak_ptr weak_piece, shared_ptr audio) { - AudioMapping mapping = _film->audio_mapping (); - if (!_audio_buffers) { - _audio_buffers.reset (new AudioBuffers (mapping.dcp_channels(), b->frames ())); - _audio_buffers->make_silent (); - _audio_time = t; - if (_playlist->audio_from() == Playlist::AUDIO_FFMPEG) { - _audio_time = _audio_time.get() + _audio_start[_sequential_audio_decoder]; + shared_ptr piece = weak_piece.lock (); + if (!piece) { + return; + } + + shared_ptr content = dynamic_pointer_cast (piece->content); + assert (content); + + /* Gain */ + if (content->audio_gain() != 0) { + shared_ptr gain (new AudioBuffers (audio->data)); + gain->apply_gain (content->audio_gain ()); + audio->data = gain; + } + + /* Remap channels */ + shared_ptr dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->data->frames())); + dcp_mapped->make_silent (); + AudioMapping map = content->audio_mapping (); + for (int i = 0; i < map.content_channels(); ++i) { + for (int j = 0; j < _film->audio_channels(); ++j) { + if (map.get (i, static_cast (j)) > 0) { + dcp_mapped->accumulate_channel ( + audio->data.get(), + i, + static_cast (j), + map.get (i, static_cast (j)) + ); + } } } - for (int i = 0; i < b->channels(); ++i) { - list dcp = mapping.content_to_dcp (AudioMapping::Channel (c, i)); - for (list::iterator j = dcp.begin(); j != dcp.end(); ++j) { - _audio_buffers->accumulate (b, i, static_cast (*j)); + audio->data = dcp_mapped; + + /* Delay */ + audio->dcp_time += content->audio_delay() * TIME_HZ / 1000; + if (audio->dcp_time < 0) { + int const frames = - audio->dcp_time * _film->audio_frame_rate() / TIME_HZ; + if (frames >= audio->data->frames ()) { + return; } + + shared_ptr trimmed (new AudioBuffers (audio->data->channels(), audio->data->frames() - frames)); + trimmed->copy_from (audio->data.get(), audio->data->frames() - frames, frames, 0); + + audio->data = trimmed; + audio->dcp_time = 0; + } + + _audio_merger.push (audio->data, audio->dcp_time); +} + +void +Player::flush () 
+{ + TimedAudioBuffers tb = _audio_merger.flush (); + if (_audio && tb.audio) { + Audio (tb.audio, tb.time); + _audio_position += _film->audio_frames_to_time (tb.audio->frames ()); + } + + while (_video && _video_position < _audio_position) { + emit_black (); } - if (_playlist->audio_from() == Playlist::AUDIO_FFMPEG) { - /* We can just emit this audio now as it will all be here */ - Audio (_audio_buffers, t); - _audio_buffers.reset (); - _audio_time = boost::none; + while (_audio && _audio_position < _video_position) { + emit_silence (_video_position - _audio_position); } + } -/** @return true on error */ -bool -Player::seek (double t) +/** Seek so that the next pass() will yield (approximately) the requested frame. + * Pass accurate = true to try harder to get close to the request. + * @return true on error + */ +void +Player::seek (DCPTime t, bool accurate) { - if (!_have_valid_decoders) { - setup_decoders (); - _have_valid_decoders = true; + if (!_have_valid_pieces) { + setup_pieces (); } - if (_video_decoders.empty ()) { - return true; + if (_pieces.empty ()) { + return; + } + + for (list >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) { + /* s is the offset of t from the start position of this content */ + DCPTime s = t - (*i)->content->position (); + s = max (static_cast (0), s); + s = min ((*i)->content->length_after_trim(), s); + + /* Convert this to the content time */ + ContentTime ct = (s + (*i)->content->trim_start()) * (*i)->frc.speed_up; + + /* And seek the decoder */ + (*i)->decoder->seek (ct, accurate); + } + + _video_position = time_round_up (t, TIME_HZ / _film->video_frame_rate()); + _audio_position = time_round_up (t, TIME_HZ / _film->audio_frame_rate()); + + _audio_merger.clear (_audio_position); + + if (!accurate) { + /* We just did an inaccurate seek, so it's likely that the next thing seen + out of pass() will be a fair distance from _{video,audio}_position. Setting + this flag stops pass() from trying to fix that: we assume that if it + was an inaccurate seek then the caller does not care too much about + inserting black/silence to keep the time tidy. + */ + _just_did_inaccurate_seek = true; } +} + +void +Player::setup_pieces () +{ + list > old_pieces = _pieces; + _pieces.clear (); + + ContentList content = _playlist->content (); + + for (ContentList::iterator i = content.begin(); i != content.end(); ++i) { + + shared_ptr decoder; + optional frc; + + /* Work out a FrameRateChange for the best overlap video for this content, in case we need it below */ + DCPTime best_overlap_t = 0; + shared_ptr best_overlap; + for (ContentList::iterator j = content.begin(); j != content.end(); ++j) { + shared_ptr vc = dynamic_pointer_cast (*j); + if (!vc) { + continue; + } + + DCPTime const overlap = max (vc->position(), (*i)->position()) - min (vc->end(), (*i)->end()); + if (overlap > best_overlap_t) { + best_overlap = vc; + best_overlap_t = overlap; + } + } - /* Find the decoder that contains this position */ - _video_decoder = 0; - while (1) { - ++_video_decoder; - if (_video_decoder >= _video_decoders.size () || t < _video_start[_video_decoder]) { - --_video_decoder; - t -= _video_start[_video_decoder]; - break; + optional best_overlap_frc; + if (best_overlap) { + best_overlap_frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ()); + } else { + /* No video overlap; e.g. 
if the DCP is just audio */ + best_overlap_frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ()); } + + /* FFmpeg */ + shared_ptr fc = dynamic_pointer_cast (*i); + if (fc) { + decoder.reset (new FFmpegDecoder (_film, fc, _video, _audio)); + frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate()); + } + + /* ImageContent */ + shared_ptr ic = dynamic_pointer_cast (*i); + if (ic) { + /* See if we can re-use an old ImageDecoder */ + for (list >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) { + shared_ptr imd = dynamic_pointer_cast ((*j)->decoder); + if (imd && imd->content() == ic) { + decoder = imd; + } + } + + if (!decoder) { + decoder.reset (new ImageDecoder (_film, ic)); + } + + frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate()); + } + + /* SndfileContent */ + shared_ptr sc = dynamic_pointer_cast (*i); + if (sc) { + decoder.reset (new SndfileDecoder (_film, sc)); + frc = best_overlap_frc; + } + + /* SubRipContent */ + shared_ptr rc = dynamic_pointer_cast (*i); + if (rc) { + decoder.reset (new SubRipDecoder (_film, rc)); + frc = best_overlap_frc; + } + + ContentTime st = (*i)->trim_start() * frc->speed_up; + decoder->seek (st, true); + + _pieces.push_back (shared_ptr (new Piece (*i, decoder, frc.get ()))); } - if (_video_decoder < _video_decoders.size()) { - _video_decoders[_video_decoder]->seek (t); - } else { - return true; + _have_valid_pieces = true; + + /* The Piece for the _last_incoming_video will no longer be valid */ + _last_incoming_video.video.reset (); + + _video_position = _audio_position = 0; +} + +void +Player::content_changed (weak_ptr w, int property, bool frequent) +{ + shared_ptr c = w.lock (); + if (!c) { + return; } - /* XXX: don't seek audio because we don't need to... 
*/ + if ( + property == ContentProperty::POSITION || property == ContentProperty::LENGTH || + property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END || + property == VideoContentProperty::VIDEO_FRAME_TYPE + ) { + + _have_valid_pieces = false; + Changed (frequent); + + } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) { - return false; + update_subtitle_from_image (); + update_subtitle_from_text (); + Changed (frequent); + + } else if ( + property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_RATIO || + property == VideoContentProperty::VIDEO_FRAME_RATE + ) { + + Changed (frequent); + + } else if (property == ContentProperty::PATH) { + + Changed (frequent); + } } +void +Player::playlist_changed () +{ + _have_valid_pieces = false; + Changed (false); +} void -Player::seek_back () +Player::set_video_container_size (libdcp::Size s) { - /* XXX */ + _video_container_size = s; + + shared_ptr im (new Image (PIX_FMT_RGB24, _video_container_size, true)); + im->make_black (); + + _black_frame.reset ( + new PlayerImage ( + im, + Crop(), + _video_container_size, + _video_container_size, + Scaler::from_id ("bicubic") + ) + ); } void -Player::seek_forward () +Player::emit_black () { - /* XXX */ +#ifdef DCPOMATIC_DEBUG + _last_video.reset (); +#endif + + Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position); + _video_position += _film->video_frames_to_time (1); + _last_emit_was_black = true; } +void +Player::emit_silence (DCPTime most) +{ + if (most == 0) { + return; + } + + DCPTime t = min (most, TIME_HZ / 2); + shared_ptr silence (new AudioBuffers (_film->audio_channels(), t * _film->audio_frame_rate() / TIME_HZ)); + silence->make_silent (); + Audio (silence, _audio_position); + + _audio_position += t; +} void -Player::setup_decoders () +Player::film_changed (Film::Property p) { - _video_decoders.clear (); - _video_decoder = 0; - _audio_decoders.clear (); - _sequential_audio_decoder = 0; + /* Here we should notice Film properties that affect our output, and + alert listeners that our output now would be different to how it was + last time we were run. + */ - _video_start.clear(); - _audio_start.clear(); + if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) { + Changed (false); + } +} - double video_so_far = 0; - double audio_so_far = 0; +void +Player::update_subtitle_from_image () +{ + shared_ptr piece = _image_subtitle.piece.lock (); + if (!piece) { + return; + } - for (int l = 0; l < _playlist->loop(); ++l) { - list > vc = _playlist->video (); - for (list >::iterator i = vc.begin(); i != vc.end(); ++i) { - - shared_ptr video_content; - shared_ptr audio_content; - shared_ptr video_decoder; - shared_ptr audio_decoder; - - /* XXX: into content? 
*/ - - shared_ptr fc = dynamic_pointer_cast (*i); - if (fc) { - shared_ptr fd ( - new FFmpegDecoder ( - _film, fc, _video, - _audio && _playlist->audio_from() == Playlist::AUDIO_FFMPEG, - _subtitles - ) - ); - - video_content = fc; - audio_content = fc; - video_decoder = fd; - audio_decoder = fd; - } - - shared_ptr ic = dynamic_pointer_cast (*i); - if (ic) { - video_content = ic; - video_decoder.reset (new ImageMagickDecoder (_film, ic)); - } - - video_decoder->connect_video (shared_from_this ()); - _video_decoders.push_back (video_decoder); - _video_start.push_back (video_so_far); - video_so_far += video_content->video_length() / video_content->video_frame_rate(); - - if (audio_decoder && _playlist->audio_from() == Playlist::AUDIO_FFMPEG) { - audio_decoder->Audio.connect (bind (&Player::process_audio, this, audio_content, _1, _2)); - _audio_decoders.push_back (audio_decoder); - _audio_start.push_back (audio_so_far); - audio_so_far += double(audio_content->audio_length()) / audio_content->audio_frame_rate(); - } - } - - _video_decoder = 0; - _sequential_audio_decoder = 0; - - if (_playlist->audio_from() == Playlist::AUDIO_SNDFILE) { - - list > ac = _playlist->audio (); - for (list >::iterator i = ac.begin(); i != ac.end(); ++i) { - - shared_ptr sc = dynamic_pointer_cast (*i); - assert (sc); - - shared_ptr d (new SndfileDecoder (_film, sc)); - d->Audio.connect (bind (&Player::process_audio, this, sc, _1, _2)); - _audio_decoders.push_back (d); - _audio_start.push_back (audio_so_far); - } - } + if (!_image_subtitle.subtitle->image) { + _out_subtitle.image.reset (); + return; } + + shared_ptr sc = dynamic_pointer_cast (piece->content); + assert (sc); + + dcpomatic::Rect in_rect = _image_subtitle.subtitle->rect; + libdcp::Size scaled_size; + + in_rect.y += sc->subtitle_offset (); + + /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */ + scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale (); + scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale (); + + /* Then we need a corrective translation, consisting of two parts: + * + * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be + * rect.x * _video_container_size.width and rect.y * _video_container_size.height. + * + * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be + * (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and + * (height_before_subtitle_scale * (1 - subtitle_scale) / 2). + * + * Combining these two translations gives these expressions. + */ + + _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2))); + _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2))); + + _out_subtitle.image = _image_subtitle.subtitle->image->scale ( + scaled_size, + Scaler::from_id ("bicubic"), + _image_subtitle.subtitle->image->pixel_format (), + true + ); + + _out_subtitle.from = _image_subtitle.subtitle->dcp_time; + _out_subtitle.to = _image_subtitle.subtitle->dcp_time_to; } -double -Player::last_video_time () const +/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles. + * @return false if this could not be done. 
+ */ +bool +Player::repeat_last_video () { - if (_video_decoder >= _video_decoders.size ()) { - return 0; + if (!_last_incoming_video.video || !_have_valid_pieces) { + return false; } - - return _video_start[_video_decoder] + _video_decoders[_video_decoder]->last_content_time (); + + emit_video ( + _last_incoming_video.weak_piece, + _last_incoming_video.video + ); + + return true; } void -Player::content_changed (weak_ptr w, int p) +Player::update_subtitle_from_text () { - shared_ptr c = w.lock (); - if (!c) { + if (_text_subtitle.subtitle->subs.empty ()) { + _out_subtitle.image.reset (); return; } - if (p == VideoContentProperty::VIDEO_LENGTH) { - _have_valid_decoders = false; + render_subtitles (_text_subtitle.subtitle->subs, _video_container_size, _out_subtitle.image, _out_subtitle.position); +} + +void +Player::set_approximate_size () +{ + _approximate_size = true; +} + +PlayerImage::PlayerImage ( + shared_ptr in, + Crop crop, + libdcp::Size inter_size, + libdcp::Size out_size, + Scaler const * scaler + ) + : _in (in) + , _crop (crop) + , _inter_size (inter_size) + , _out_size (out_size) + , _scaler (scaler) +{ + +} + +void +PlayerImage::set_subtitle (shared_ptr image, Position pos) +{ + _subtitle_image = image; + _subtitle_position = pos; +} + +shared_ptr +PlayerImage::image (AVPixelFormat format, bool aligned) +{ + shared_ptr out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, format, aligned); + + Position const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.width) / 2); + + if (_subtitle_image) { + out->alpha_blend (_subtitle_image, _subtitle_position); } + + return out; } void -Player::playlist_changed () +PlayerStatistics::dump (shared_ptr log) const +{ + log->log (String::compose ("Video: %1 good %2 skipped %3 black %4 repeat", video.good, video.skip, video.black, video.repeat)); + log->log (String::compose ("Audio: %1 good %2 skipped %3 silence", audio.good, audio.skip, audio.silence)); +} + +PlayerStatistics const & +Player::statistics () const { - _have_valid_decoders = false; + return _statistics; }
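
For illustration (not part of the commit above), a minimal standalone sketch of the subtitle placement arithmetic that update_subtitle_from_image() performs: the subtitle rectangle arrives as proportions of the video container, so it is scaled by the container size and by the extra subtitle_scale, then translated so that subtitle_scale is applied about the centre of the subtitle. All of the concrete numbers (container size, rectangle, scale, offset) are hypothetical example values, and the plain structs stand in for dcpomatic's Rect and libdcp::Size.

#include <cmath>
#include <cstdio>

struct ExampleRect { double x, y, width, height; };   /* proportional, 0..1 */
struct ExampleSize { int width, height; };

int main ()
{
	ExampleRect in_rect = { 0.1, 0.8, 0.8, 0.1 };  /* hypothetical subtitle rectangle */
	ExampleSize container = { 1998, 1080 };        /* hypothetical 'flat' container */
	double const subtitle_scale = 0.9;             /* hypothetical extra scale */
	double const subtitle_offset = 0.05;           /* hypothetical vertical offset */

	in_rect.y += subtitle_offset;

	/* Scale the subtitle up to the container, then by subtitle_scale */
	int const scaled_width = int (in_rect.width * container.width * subtitle_scale);
	int const scaled_height = int (in_rect.height * container.height * subtitle_scale);

	/* Corrective translation: the scaling of the rectangle's origin by the
	   container size, plus the shift that keeps subtitle_scale centred on the
	   subtitle (width * (1 - subtitle_scale) / 2, and likewise for the height).
	*/
	int const x = int (std::rint (container.width * (in_rect.x + in_rect.width * (1 - subtitle_scale) / 2)));
	int const y = int (std::rint (container.height * (in_rect.y + in_rect.height * (1 - subtitle_scale) / 2)));

	std::printf ("subtitle scaled to %dx%d, placed at (%d, %d)\n", scaled_width, scaled_height, x, y);
	return 0;
}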
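
In the same spirit, a hedged sketch of the channel remapping idea in emit_audio(): each content channel is accumulated into every DCP channel whose mapping gain is non-zero, so one input channel may feed several outputs (for example, a mono source routed to both left and right). The vector-of-vectors buffer and the remap_example() name are stand-ins for dcpomatic's AudioBuffers and AudioMapping, not the real API.

#include <cstddef>
#include <vector>

typedef std::vector<std::vector<float> > ExampleBuffers;  /* [channel][frame] */

/* gain[i][j] is the gain to apply when mixing content channel i into DCP channel j */
ExampleBuffers
remap_example (ExampleBuffers const & in, int dcp_channels, std::vector<std::vector<float> > const & gain)
{
	std::size_t const frames = in.empty() ? 0 : in[0].size();
	ExampleBuffers out (dcp_channels, std::vector<float> (frames, 0.0f));

	for (std::size_t i = 0; i < in.size(); ++i) {
		for (int j = 0; j < dcp_channels; ++j) {
			if (gain[i][j] > 0) {
				for (std::size_t f = 0; f < frames; ++f) {
					out[j][f] += in[i][f] * gain[i][j];
				}
			}
		}
	}

	return out;
}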