X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fplayer.cc;h=c05897c23b02adfc64d233c88d900ff14264a749;hb=c226f90a2c113b8bbc270f29e6aa035ae1229d57;hp=01bdc5ee0bf03fb4b28ec9a94ea5c4859f8a39aa;hpb=23050047454f1c1f7aadad41bf7b05d00d8ffe7f;p=dcpomatic.git diff --git a/src/lib/player.cc b/src/lib/player.cc index 01bdc5ee0..a79a4fc5e 100644 --- a/src/lib/player.cc +++ b/src/lib/player.cc @@ -17,34 +17,66 @@ */ +#include #include "player.h" #include "film.h" #include "ffmpeg_decoder.h" #include "ffmpeg_content.h" -#include "imagemagick_decoder.h" -#include "imagemagick_content.h" +#include "image_decoder.h" +#include "image_content.h" #include "sndfile_decoder.h" #include "sndfile_content.h" +#include "subtitle_content.h" #include "playlist.h" #include "job.h" +#include "image.h" +#include "ratio.h" +#include "resampler.h" +#include "log.h" +#include "scaler.h" using std::list; using std::cout; +using std::min; +using std::max; using std::vector; +using std::pair; +using std::map; using boost::shared_ptr; using boost::weak_ptr; using boost::dynamic_pointer_cast; +using boost::optional; + +class Piece +{ +public: + Piece (shared_ptr c, shared_ptr d, FrameRateChange f) + : content (c) + , decoder (d) + , frc (f) + {} + + shared_ptr content; + shared_ptr decoder; + FrameRateChange frc; +}; Player::Player (shared_ptr f, shared_ptr p) : _film (f) , _playlist (p) , _video (true) , _audio (true) - , _subtitles (true) - , _have_valid_decoders (false) + , _have_valid_pieces (false) + , _video_position (0) + , _audio_position (0) + , _audio_merger (f->audio_channels(), bind (&Film::time_to_audio_frames, f.get(), _1), bind (&Film::audio_frames_to_time, f.get(), _1)) + , _last_emit_was_black (false) + , _just_did_inaccurate_seek (false) { - _playlist->Changed.connect (bind (&Player::playlist_changed, this)); - _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2)); + _playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this)); + _playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3)); + _film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1)); + set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ())); } void @@ -59,283 +91,594 @@ Player::disable_audio () _audio = false; } -void -Player::disable_subtitles () -{ - _subtitles = false; -} - bool Player::pass () { - if (!_have_valid_decoders) { - setup_decoders (); - _have_valid_decoders = true; + if (!_have_valid_pieces) { + setup_pieces (); } - - bool done = true; - - if (_video_decoder < _video_decoders.size ()) { - /* Run video decoder; this may also produce audio */ - - if (_video_decoders[_video_decoder]->pass ()) { - _video_decoder++; + /* Interrogate all our pieces to find the one with the earliest decoded data */ + + shared_ptr earliest_piece; + shared_ptr earliest_decoded; + DCPTime earliest_time = TIME_MAX; + DCPTime earliest_audio = TIME_MAX; + + for (list >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) { + + shared_ptr dec = (*i)->decoder->peek (); + + if (dec) { + dec->set_dcp_times ((*i)->frc.speed_up, (*i)->content->position()); } - - if (_video_decoder < _video_decoders.size ()) { - done = false; + + /* XXX: don't know what to do with this */ +#if 0 + if (ad->done()) { + shared_ptr ac = dynamic_pointer_cast ((*i)->content); + assert (ac); + shared_ptr re = resampler (ac, false); + if (re) { + shared_ptr b = 
re->flush (); + if (b->frames ()) { + process_audio (earliest, b, ac->audio_length ()); + } + } } - - } else if (!_video && _playlist->audio_from() == Playlist::AUDIO_FFMPEG && _sequential_audio_decoder < _audio_decoders.size ()) { +#endif - /* We're not producing video, so we may need to run FFmpeg content to get the audio */ - - if (_audio_decoders[_sequential_audio_decoder]->pass ()) { - _sequential_audio_decoder++; + if (dec && dec->dcp_time < earliest_time) { + earliest_piece = *i; + earliest_decoded = dec; + earliest_time = dec->dcp_time; } - - if (_sequential_audio_decoder < _audio_decoders.size ()) { - done = false; + + if (dynamic_pointer_cast (dec) && dec->dcp_time < earliest_audio) { + earliest_audio = dec->dcp_time; } + } - } else if (_playlist->audio_from() == Playlist::AUDIO_SNDFILE) { + if (!earliest_piece) { + flush (); + return true; + } - /* We're getting audio from SndfileContent */ - - for (vector >::iterator i = _audio_decoders.begin(); i != _audio_decoders.end(); ++i) { - if (!(*i)->pass ()) { - done = false; + if (earliest_audio != TIME_MAX) { + TimedAudioBuffers tb = _audio_merger.pull (earliest_audio); + Audio (tb.audio, tb.time); + _audio_position += _film->audio_frames_to_time (tb.audio->frames ()); + } + + /* Emit the earliest thing */ + + shared_ptr dv = dynamic_pointer_cast (earliest_decoded); + shared_ptr da = dynamic_pointer_cast (earliest_decoded); + shared_ptr ds = dynamic_pointer_cast (earliest_decoded); + + if (dv) { + if (!_just_did_inaccurate_seek && earliest_time > _video_position) { + + /* See if we're inside some video content */ + list >::iterator i = _pieces.begin(); + while (i != _pieces.end() && ((*i)->content->position() >= _video_position || _video_position >= (*i)->content->end())) { + ++i; } - } - Audio (_audio_buffers, _audio_time.get()); - _audio_buffers.reset (); - _audio_time = boost::none; + if (i == _pieces.end() || !_last_incoming_video.video || !_have_valid_pieces) { + /* We're outside all video content */ + emit_black (); + } else { + _last_incoming_video.video->dcp_time = _video_position; + emit_video (_last_incoming_video.weak_piece, _last_incoming_video.video); + } + } else { + emit_video (earliest_piece, dv); + earliest_piece->decoder->get (); + } + } else if (da) { + if (!_just_did_inaccurate_seek && earliest_time > _audio_position) { + emit_silence (earliest_time - _audio_position); + } else { + emit_audio (earliest_piece, da); + earliest_piece->decoder->get (); + } + } else if (ds) { + _in_subtitle.piece = earliest_piece; + _in_subtitle.subtitle = ds; + update_subtitle (); + earliest_piece->decoder->get (); } - return done; + _just_did_inaccurate_seek = false; + + return false; } void -Player::set_progress (shared_ptr job) +Player::emit_video (weak_ptr weak_piece, shared_ptr video) { - /* Assume progress can be divined from how far through the video we are */ + /* Keep a note of what came in so that we can repeat it if required */ + _last_incoming_video.weak_piece = weak_piece; + _last_incoming_video.video = video; + + shared_ptr piece = weak_piece.lock (); + if (!piece) { + return; + } + + shared_ptr content = dynamic_pointer_cast (piece->content); + assert (content); - if (_video_decoder >= _video_decoders.size() || !_playlist->video_length()) { + FrameRateChange frc (content->video_frame_rate(), _film->video_frame_rate()); +#if 0 + XXX + if (frc.skip && (frame % 2) == 1) { return; } +#endif - job->set_progress ((_video_start[_video_decoder] + _video_decoders[_video_decoder]->video_frame()) / _playlist->video_length ()); -} 
+	if (content->trimmed (video->dcp_time - content->position ())) {
+		return;
+	}
 
-void
-Player::process_video (shared_ptr i, bool same, shared_ptr s, double t)
-{
-	Video (i, same, s, _video_start[_video_decoder] + t);
+	float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
+	libdcp::Size const image_size = fit_ratio_within (ratio, _video_container_size);
+
+	shared_ptr<PlayerImage> pi (
+		new PlayerImage (
+			video->image,
+			content->crop(),
+			image_size,
+			_video_container_size,
+			_film->scaler()
+			)
+		);
+
+	if (
+		_film->with_subtitles () &&
+		_out_subtitle.subtitle->image &&
+		video->dcp_time >= _out_subtitle.subtitle->dcp_time && video->dcp_time <= _out_subtitle.subtitle->dcp_time_to
+		) {
+
+		Position const container_offset (
+			(_video_container_size.width - image_size.width) / 2,
+			(_video_container_size.height - image_size.height) / 2
+			);
+
+		pi->set_subtitle (_out_subtitle.subtitle->image, _out_subtitle.position + container_offset);
+	}
+
+#ifdef DCPOMATIC_DEBUG
+	_last_video = piece->content;
+#endif
+
+	Video (pi, video->eyes, content->colour_conversion(), video->same, video->dcp_time);
+
+	_last_emit_was_black = false;
+	_video_position = rint (video->dcp_time + TIME_HZ / _film->video_frame_rate());
 }
 
 void
-Player::process_audio (weak_ptr c, shared_ptr b, double t)
+Player::emit_audio (weak_ptr<Piece> weak_piece, shared_ptr<DecodedAudio> audio)
 {
-	AudioMapping mapping = _film->audio_mapping ();
-	if (!_audio_buffers) {
-		_audio_buffers.reset (new AudioBuffers (mapping.dcp_channels(), b->frames ()));
-		_audio_buffers->make_silent ();
-		_audio_time = t;
-		if (_playlist->audio_from() == Playlist::AUDIO_FFMPEG) {
-			_audio_time = _audio_time.get() + _audio_start[_sequential_audio_decoder];
-		}
+	shared_ptr<Piece> piece = weak_piece.lock ();
+	if (!piece) {
+		return;
+	}
+
+	shared_ptr<AudioContent> content = dynamic_pointer_cast<AudioContent> (piece->content);
+	assert (content);
+
+	/* Gain */
+	if (content->audio_gain() != 0) {
+		shared_ptr<AudioBuffers> gain (new AudioBuffers (audio->data));
+		gain->apply_gain (content->audio_gain ());
+		audio->data = gain;
+	}
+
+	/* Resample */
+	if (content->content_audio_frame_rate() != content->output_audio_frame_rate()) {
+		audio->data = resampler(content, true)->run (audio->data);
+	}
+
+	if (content->trimmed (audio->dcp_time - content->position ())) {
+		return;
 	}
 
-	for (int i = 0; i < b->channels(); ++i) {
-		list dcp = mapping.content_to_dcp (AudioMapping::Channel (c, i));
-		for (list::iterator j = dcp.begin(); j != dcp.end(); ++j) {
-			_audio_buffers->accumulate (b, i, static_cast (*j));
+	/* Remap channels */
+	shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->data->frames()));
+	dcp_mapped->make_silent ();
+	list<pair<int, libdcp::Channel> > map = content->audio_mapping().content_to_dcp ();
+	for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
+		if (i->first < audio->data->channels() && i->second < dcp_mapped->channels()) {
+			dcp_mapped->accumulate_channel (audio->data.get(), i->first, i->second);
 		}
 	}
 
-	if (_playlist->audio_from() == Playlist::AUDIO_FFMPEG) {
-		/* We can just emit this audio now as it will all be here */
-		Audio (_audio_buffers, t);
-		_audio_buffers.reset ();
-		_audio_time = boost::none;
+	audio->data = dcp_mapped;
+
+	/* Delay */
+	audio->dcp_time += content->audio_delay() * TIME_HZ / 1000;
+	if (audio->dcp_time < 0) {
+		int const frames = - audio->dcp_time * _film->audio_frame_rate() / TIME_HZ;
+		if (frames >= audio->data->frames ()) {
+			return;
+		}
+
+		shared_ptr<AudioBuffers> trimmed (new AudioBuffers (audio->data->channels(), audio->data->frames() - 
frames)); + trimmed->copy_from (audio->data.get(), audio->data->frames() - frames, frames, 0); + + audio->data = trimmed; + audio->dcp_time = 0; } + + _audio_merger.push (audio->data, audio->dcp_time); } -/** @return true on error */ -bool -Player::seek (double t) +void +Player::flush () { - if (!_have_valid_decoders) { - setup_decoders (); - _have_valid_decoders = true; - } - - /* Find the decoder that contains this position */ - _video_decoder = 0; - while (_video_decoder < _video_decoders.size ()) { - if (t < _video_start[_video_decoder]) { - assert (_video_decoder); - --_video_decoder; - break; - } + TimedAudioBuffers tb = _audio_merger.flush (); + if (tb.audio) { + Audio (tb.audio, tb.time); + _audio_position += _film->audio_frames_to_time (tb.audio->frames ()); + } + + while (_video_position < _audio_position) { + emit_black (); + } - t -= _video_start[_video_decoder]; - ++_video_decoder; + while (_audio_position < _video_position) { + emit_silence (_video_position - _audio_position); } - if (_video_decoder < _video_decoders.size()) { - _video_decoders[_video_decoder]->seek (t); - } else { - return true; +} + +/** Seek so that the next pass() will yield (approximately) the requested frame. + * Pass accurate = true to try harder to get close to the request. + * @return true on error + */ +void +Player::seek (DCPTime t, bool accurate) +{ + if (!_have_valid_pieces) { + setup_pieces (); } - /* XXX: don't seek audio because we don't need to... */ + if (_pieces.empty ()) { + return; + } - return false; -} + for (list >::iterator i = _pieces.begin(); i != _pieces.end(); ++i) { + /* s is the offset of t from the start position of this content */ + DCPTime s = t - (*i)->content->position (); + s = max (static_cast (0), s); + s = min ((*i)->content->length_after_trim(), s); + /* Convert this to the content time */ + ContentTime ct = (s * (*i)->frc.speed_up) + (*i)->content->trim_start (); -void -Player::seek_back () -{ - /* XXX */ + /* And seek the decoder */ + (*i)->decoder->seek (ct, accurate); + } + + _video_position = time_round_up (t, TIME_HZ / _film->video_frame_rate()); + _audio_position = time_round_up (t, TIME_HZ / _film->audio_frame_rate()); + + _audio_merger.clear (_audio_position); + + if (!accurate) { + /* We just did an inaccurate seek, so it's likely that the next thing seen + out of pass() will be a fair distance from _{video,audio}_position. Setting + this flag stops pass() from trying to fix that: we assume that if it + was an inaccurate seek then the caller does not care too much about + inserting black/silence to keep the time tidy. + */ + _just_did_inaccurate_seek = true; + } } void -Player::seek_forward () +Player::setup_pieces () { - /* XXX */ -} + list > old_pieces = _pieces; + _pieces.clear (); + ContentList content = _playlist->content (); -void -Player::setup_decoders () -{ - _video_decoders.clear (); - _video_decoder = 0; - _audio_decoders.clear (); - _sequential_audio_decoder = 0; + for (ContentList::iterator i = content.begin(); i != content.end(); ++i) { - _video_start.clear(); - _audio_start.clear(); + shared_ptr decoder; + optional frc; - double video_so_far = 0; - double audio_so_far = 0; - - if (_video) { - list > vc = _playlist->video (); - for (list >::iterator i = vc.begin(); i != vc.end(); ++i) { - - shared_ptr c; - shared_ptr d; - - /* XXX: into content? 
*/ - - shared_ptr fc = dynamic_pointer_cast (*i); - if (fc) { - shared_ptr fd ( - new FFmpegDecoder ( - _film, fc, _video, - _audio && _playlist->audio_from() == Playlist::AUDIO_FFMPEG, - _subtitles - ) - ); - - if (_playlist->audio_from() == Playlist::AUDIO_FFMPEG) { - fd->Audio.connect (bind (&Player::process_audio, this, fc, _1, _2)); + shared_ptr fc = dynamic_pointer_cast (*i); + if (fc) { + decoder.reset (new FFmpegDecoder (_film, fc, _video, _audio)); + frc = FrameRateChange (fc->video_frame_rate(), _film->video_frame_rate()); + } + + shared_ptr ic = dynamic_pointer_cast (*i); + if (ic) { + /* See if we can re-use an old ImageDecoder */ + for (list >::const_iterator j = old_pieces.begin(); j != old_pieces.end(); ++j) { + shared_ptr imd = dynamic_pointer_cast ((*j)->decoder); + if (imd && imd->content() == ic) { + decoder = imd; } + } - c = fc; - d = fd; + if (!decoder) { + decoder.reset (new ImageDecoder (_film, ic)); } - shared_ptr ic = dynamic_pointer_cast (*i); - if (ic) { - c = ic; - d.reset (new ImageMagickDecoder (_film, ic)); + frc = FrameRateChange (ic->video_frame_rate(), _film->video_frame_rate()); + } + + shared_ptr sc = dynamic_pointer_cast (*i); + if (sc) { + decoder.reset (new SndfileDecoder (_film, sc)); + + /* Working out the frc for this content is a bit tricky: what if it overlaps + two pieces of video content with different frame rates? For now, use + the one with the best overlap. + */ + + DCPTime best_overlap_t = 0; + shared_ptr best_overlap; + for (ContentList::iterator j = content.begin(); j != content.end(); ++j) { + shared_ptr vc = dynamic_pointer_cast (*j); + if (!vc) { + continue; + } + + DCPTime const overlap = max (vc->position(), sc->position()) - min (vc->end(), sc->end()); + if (overlap > best_overlap_t) { + best_overlap = vc; + best_overlap_t = overlap; + } } - d->connect_video (shared_from_this ()); - _video_decoders.push_back (d); - _video_start.push_back (video_so_far); - video_so_far += c->video_length() / c->video_frame_rate(); + if (best_overlap) { + frc = FrameRateChange (best_overlap->video_frame_rate(), _film->video_frame_rate ()); + } else { + /* No video overlap; e.g. if the DCP is just audio */ + frc = FrameRateChange (_film->video_frame_rate(), _film->video_frame_rate ()); + } } - _video_decoder = 0; + decoder->seek ((*i)->trim_start (), true); + + _pieces.push_back (shared_ptr (new Piece (*i, decoder, frc.get ()))); } - if (_playlist->audio_from() == Playlist::AUDIO_FFMPEG && !_video) { + _have_valid_pieces = true; +} - /* If we're getting audio from FFmpegContent but not the video, we need a set - of decoders for the audio. 
- */ +void +Player::content_changed (weak_ptr w, int property, bool frequent) +{ + shared_ptr c = w.lock (); + if (!c) { + return; + } + + if ( + property == ContentProperty::POSITION || property == ContentProperty::LENGTH || + property == ContentProperty::TRIM_START || property == ContentProperty::TRIM_END + ) { - list > ac = _playlist->audio (); - for (list >::iterator i = ac.begin(); i != ac.end(); ++i) { - - shared_ptr fc = dynamic_pointer_cast (*i); - assert (fc); - - shared_ptr d ( - new FFmpegDecoder ( - _film, fc, _video, - _audio && _playlist->audio_from() == Playlist::AUDIO_FFMPEG, - _subtitles - ) - ); - - d->Audio.connect (bind (&Player::process_audio, this, fc, _1, _2)); - _audio_decoders.push_back (d); - _audio_start.push_back (audio_so_far); - audio_so_far += fc->audio_length() / fc->audio_frame_rate(); - } + _have_valid_pieces = false; + Changed (frequent); - _sequential_audio_decoder = 0; - } + } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) { + + update_subtitle (); + Changed (frequent); - if (_playlist->audio_from() == Playlist::AUDIO_SNDFILE) { + } else if ( + property == VideoContentProperty::VIDEO_FRAME_TYPE || property == VideoContentProperty::VIDEO_CROP || + property == VideoContentProperty::VIDEO_RATIO + ) { - list > ac = _playlist->audio (); - for (list >::iterator i = ac.begin(); i != ac.end(); ++i) { - - shared_ptr sc = dynamic_pointer_cast (*i); - assert (sc); - - shared_ptr d (new SndfileDecoder (_film, sc)); - d->Audio.connect (bind (&Player::process_audio, this, sc, _1, _2)); - _audio_decoders.push_back (d); - _audio_start.push_back (audio_so_far); - audio_so_far += sc->audio_length () / sc->audio_frame_rate(); - } + Changed (frequent); + + } else if (property == ContentProperty::PATH) { + + Changed (frequent); } } -double -Player::last_video_time () const +void +Player::playlist_changed () { - return _video_start[_video_decoder] + _video_decoders[_video_decoder]->last_content_time (); + _have_valid_pieces = false; + Changed (false); } void -Player::content_changed (weak_ptr w, int p) +Player::set_video_container_size (libdcp::Size s) { - shared_ptr c = w.lock (); - if (!c) { + _video_container_size = s; + + shared_ptr im (new Image (PIX_FMT_RGB24, _video_container_size, true)); + im->make_black (); + + _black_frame.reset ( + new PlayerImage ( + im, + Crop(), + _video_container_size, + _video_container_size, + Scaler::from_id ("bicubic") + ) + ); +} + +shared_ptr +Player::resampler (shared_ptr c, bool create) +{ + map, shared_ptr >::iterator i = _resamplers.find (c); + if (i != _resamplers.end ()) { + return i->second; + } + + if (!create) { + return shared_ptr (); + } + + _film->log()->log ( + String::compose ( + "Creating new resampler for %1 to %2 with %3 channels", c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels() + ) + ); + + shared_ptr r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels())); + _resamplers[c] = r; + return r; +} + +void +Player::emit_black () +{ +#ifdef DCPOMATIC_DEBUG + _last_video.reset (); +#endif + + Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position); + _video_position += _film->video_frames_to_time (1); + _last_emit_was_black = true; +} + +void +Player::emit_silence (DCPTime most) +{ + if (most == 0) { return; } + + DCPTime t = min (most, TIME_HZ / 2); + shared_ptr silence (new AudioBuffers (_film->audio_channels(), t * _film->audio_frame_rate() / 
TIME_HZ));
+	silence->make_silent ();
+	Audio (silence, _audio_position);
+	_audio_position += t;
+}
 
-	if (p == VideoContentProperty::VIDEO_LENGTH) {
-		if (dynamic_pointer_cast<FFmpegContent> (c)) {
-			/* FFmpeg content length changes are serious; we need new decoders */
-			_have_valid_decoders = false;
-		}
+void
+Player::film_changed (Film::Property p)
+{
+	/* Here we should notice Film properties that affect our output, and
+	   alert listeners that our output now would be different to how it was
+	   last time we were run.
+	*/
+
+	if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER) {
+		Changed (false);
 	}
 }
 
 void
-Player::playlist_changed ()
+Player::update_subtitle ()
+{
+	shared_ptr<Piece> piece = _in_subtitle.piece.lock ();
+	if (!piece) {
+		return;
+	}
+
+	if (!_in_subtitle.subtitle->image) {
+		_out_subtitle.subtitle->image.reset ();
+		return;
+	}
+
+	shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
+	assert (sc);
+
+	dcpomatic::Rect<double> in_rect = _in_subtitle.subtitle->rect;
+	libdcp::Size scaled_size;
+
+	in_rect.y += sc->subtitle_offset ();
+
+	/* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
+	scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
+	scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();
+
+	/* Then we need a corrective translation, consisting of two parts:
+	 *
+	 * 1.  that which is the result of the scaling of the subtitle by _video_container_size; this will be
+	 *     rect.x * _video_container_size.width and rect.y * _video_container_size.height.
+	 *
+	 * 2.  that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
+	 *     (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
+	 *     (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
+	 *
+	 * Combining these two translations gives these expressions.
+	 */
+
+	_out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
+	_out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
+
+	_out_subtitle.subtitle->image = _in_subtitle.subtitle->image->scale (
+		scaled_size,
+		Scaler::from_id ("bicubic"),
+		_in_subtitle.subtitle->image->pixel_format (),
+		true
+		);
+
+	_out_subtitle.subtitle->dcp_time = _in_subtitle.subtitle->dcp_time;
+	_out_subtitle.subtitle->dcp_time_to = _in_subtitle.subtitle->dcp_time_to;
+}
+
+/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
+ *  @return false if this could not be done.
+ */ +bool +Player::repeat_last_video () +{ + if (!_last_incoming_video.video || !_have_valid_pieces) { + return false; + } + + emit_video ( + _last_incoming_video.weak_piece, + _last_incoming_video.video + ); + + return true; +} + +PlayerImage::PlayerImage ( + shared_ptr in, + Crop crop, + libdcp::Size inter_size, + libdcp::Size out_size, + Scaler const * scaler + ) + : _in (in) + , _crop (crop) + , _inter_size (inter_size) + , _out_size (out_size) + , _scaler (scaler) +{ + +} + +void +PlayerImage::set_subtitle (shared_ptr image, Position pos) { - _have_valid_decoders = false; + _subtitle_image = image; + _subtitle_position = pos; +} + +shared_ptr +PlayerImage::image () +{ + shared_ptr out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, PIX_FMT_RGB24, false); + + Position const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.width) / 2); + + if (_subtitle_image) { + out->alpha_blend (_subtitle_image, _subtitle_position); + } + + return out; }