/*
- Copyright (C) 2013 Carl Hetherington <cth@carlh.net>
+ Copyright (C) 2013-2014 Carl Hetherington <cth@carlh.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
#include "playlist.h"
#include "job.h"
#include "image.h"
+#include "image_proxy.h"
#include "ratio.h"
#include "resampler.h"
#include "log.h"
#include "scaler.h"
+#include "player_video_frame.h"
+
+#define LOG_GENERAL(...) _film->log()->log (String::compose (__VA_ARGS__), Log::TYPE_GENERAL)
using std::list;
using std::cout;
using boost::weak_ptr;
using boost::dynamic_pointer_cast;
-class Piece
-{
-public:
- Piece (shared_ptr<Content> c)
- : content (c)
- , video_position (c->position ())
- , audio_position (c->position ())
- , repeat_to_do (0)
- , repeat_done (0)
- {}
-
- Piece (shared_ptr<Content> c, shared_ptr<Decoder> d)
- : content (c)
- , decoder (d)
- , video_position (c->position ())
- , audio_position (c->position ())
- {}
-
- /** Set this piece to repeat a video frame a given number of times */
- void set_repeat (IncomingVideo video, int num)
- {
- repeat_video = video;
- repeat_to_do = num;
- repeat_done = 0;
- }
-
- void reset_repeat ()
- {
- repeat_video.image.reset ();
- repeat_to_do = 0;
- repeat_done = 0;
- }
-
- bool repeating () const
- {
- return repeat_done != repeat_to_do;
- }
-
- void repeat (Player* player)
- {
- player->process_video (
- repeat_video.weak_piece,
- repeat_video.image,
- repeat_video.eyes,
- repeat_done > 0,
- repeat_video.frame,
- (repeat_done + 1) * (TIME_HZ / player->_film->video_frame_rate ())
- );
-
- ++repeat_done;
- }
-
- shared_ptr<Content> content;
- shared_ptr<Decoder> decoder;
- /** Time of the last video we emitted relative to the start of the DCP */
- Time video_position;
- /** Time of the last audio we emitted relative to the start of the DCP */
- Time audio_position;
-
- IncomingVideo repeat_video;
- int repeat_to_do;
- int repeat_done;
-};
-
Player::Player (shared_ptr<const Film> f, shared_ptr<const Playlist> p)
: _film (f)
, _playlist (p)
_playlist_changed_connection = _playlist->Changed.connect (bind (&Player::playlist_changed, this));
_playlist_content_changed_connection = _playlist->ContentChanged.connect (bind (&Player::content_changed, this, _1, _2, _3));
_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
- set_video_container_size (fit_ratio_within (_film->container()->ratio (), _film->full_frame ()));
+ set_video_container_size (_film->frame_size ());
}
void
continue;
}
- if (dynamic_pointer_cast<AudioDecoder> ((*i)->decoder)) {
+ shared_ptr<AudioDecoder> ad = dynamic_pointer_cast<AudioDecoder> ((*i)->decoder);
+ if (ad && ad->has_audio ()) {
audio_done_up_to = min (audio_done_up_to.get_value_or (TIME_MAX), (*i)->audio_position);
}
}
/** @param extra Amount of extra time to add to the content frame's time (for repeat) */
void
-Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const Image> image, Eyes eyes, bool same, VideoContent::Frame frame, Time extra)
+Player::process_video (weak_ptr<Piece> weak_piece, shared_ptr<const ImageProxy> image, Eyes eyes, Part part, bool same, VideoContent::Frame frame, Time extra)
{
/* Keep a note of what came in so that we can repeat it if required */
_last_incoming_video.weak_piece = weak_piece;
_last_incoming_video.image = image;
_last_incoming_video.eyes = eyes;
+ _last_incoming_video.part = part;
_last_incoming_video.same = same;
_last_incoming_video.frame = frame;
_last_incoming_video.extra = extra;
}
Time const time = content->position() + relative_time + extra - content->trim_start ();
- float const ratio = content->ratio() ? content->ratio()->ratio() : content->video_size_after_crop().ratio();
- libdcp::Size const image_size = fit_ratio_within (ratio, _video_container_size);
+ libdcp::Size const image_size = content->scale().size (content, _video_container_size, _film->frame_size ());
- shared_ptr<PlayerImage> pi (
- new PlayerImage (
+ shared_ptr<PlayerVideoFrame> pi (
+ new PlayerVideoFrame (
image,
content->crop(),
image_size,
_video_container_size,
- _film->scaler()
+ _film->scaler(),
+ eyes,
+ part,
+ content->colour_conversion()
)
);
- if (_film->with_subtitles () && _out_subtitle.image && time >= _out_subtitle.from && time <= _out_subtitle.to) {
+ if (_film->with_subtitles ()) {
+ for (list<Subtitle>::const_iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
+ if (i->covers (time)) {
+ /* This may be true for more than one of _subtitles, but the last (latest-starting)
+ one is the one we want to use, so that's ok.
+ */
+ Position<int> const container_offset (
+ (_video_container_size.width - image_size.width) / 2,
+ (_video_container_size.height - image_size.height) / 2
+ );
+
+ pi->set_subtitle (i->out_image(), i->out_position() + container_offset);
+ }
+ }
+ }
- Position<int> const container_offset (
- (_video_container_size.width - image_size.width) / 2,
- (_video_container_size.height - image_size.width) / 2
- );
+ /* Clear out old subtitles */
+ for (list<Subtitle>::iterator i = _subtitles.begin(); i != _subtitles.end(); ) {
+ list<Subtitle>::iterator j = i;
+ ++j;
+
+ if (i->ends_before (time)) {
+ _subtitles.erase (i);
+ }
- pi->set_subtitle (_out_subtitle.image, _out_subtitle.position + container_offset);
+ i = j;
}
-
+
#ifdef DCPOMATIC_DEBUG
_last_video = piece->content;
#endif
- Video (pi, eyes, content->colour_conversion(), same, time);
+ Video (pi, same, time);
_last_emit_was_black = false;
_video_position = piece->video_position = (time + TIME_HZ / _film->video_frame_rate());
/* Remap channels */
shared_ptr<AudioBuffers> dcp_mapped (new AudioBuffers (_film->audio_channels(), audio->frames()));
dcp_mapped->make_silent ();
- list<pair<int, libdcp::Channel> > map = content->audio_mapping().content_to_dcp ();
- for (list<pair<int, libdcp::Channel> >::iterator i = map.begin(); i != map.end(); ++i) {
- if (i->first < audio->channels() && i->second < dcp_mapped->channels()) {
- dcp_mapped->accumulate_channel (audio.get(), i->first, i->second);
+
+ AudioMapping map = content->audio_mapping ();
+ for (int i = 0; i < map.content_channels(); ++i) {
+ for (int j = 0; j < _film->audio_channels(); ++j) {
+ if (map.get (i, static_cast<libdcp::Channel> (j)) > 0) {
+ dcp_mapped->accumulate_channel (
+ audio.get(),
+ i,
+ static_cast<libdcp::Channel> (j),
+ map.get (i, static_cast<libdcp::Channel> (j))
+ );
+ }
}
}
Player::flush ()
{
TimedAudioBuffers<Time> tb = _audio_merger.flush ();
- if (tb.audio) {
+ if (_audio && tb.audio) {
Audio (tb.audio, tb.time);
_audio_position += _film->audio_frames_to_time (tb.audio->frames ());
}
- while (_video_position < _audio_position) {
+ while (_video && _video_position < _audio_position) {
emit_black ();
}
- while (_audio_position < _video_position) {
+ while (_audio && _audio_position < _video_position) {
emit_silence (_film->time_to_audio_frames (_video_position - _audio_position));
}
for (ContentList::iterator i = content.begin(); i != content.end(); ++i) {
+ if (!(*i)->paths_valid ()) {
+ continue;
+ }
+
shared_ptr<Piece> piece (new Piece (*i));
/* XXX: into content? */
if (fc) {
shared_ptr<FFmpegDecoder> fd (new FFmpegDecoder (_film, fc, _video, _audio));
- fd->Video.connect (bind (&Player::process_video, this, weak_ptr<Piece> (piece), _1, _2, _3, _4, 0));
+ fd->Video.connect (bind (&Player::process_video, this, weak_ptr<Piece> (piece), _1, _2, _3, _4, _5, 0));
fd->Audio.connect (bind (&Player::process_audio, this, weak_ptr<Piece> (piece), _1, _2));
fd->Subtitle.connect (bind (&Player::process_subtitle, this, weak_ptr<Piece> (piece), _1, _2, _3, _4));
if (!reusing) {
shared_ptr<ImageDecoder> id (new ImageDecoder (_film, ic));
- id->Video.connect (bind (&Player::process_video, this, weak_ptr<Piece> (piece), _1, _2, _3, _4, 0));
+ id->Video.connect (bind (&Player::process_video, this, weak_ptr<Piece> (piece), _1, _2, _3, _4, _5, 0));
piece->decoder = id;
}
}
_have_valid_pieces = false;
Changed (frequent);
- } else if (property == SubtitleContentProperty::SUBTITLE_OFFSET || property == SubtitleContentProperty::SUBTITLE_SCALE) {
+ } else if (
+ property == SubtitleContentProperty::SUBTITLE_X_OFFSET ||
+ property == SubtitleContentProperty::SUBTITLE_Y_OFFSET ||
+ property == SubtitleContentProperty::SUBTITLE_SCALE
+ ) {
- update_subtitle ();
+ for (list<Subtitle>::iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
+ i->update (_film, _video_container_size);
+ }
+
Changed (frequent);
- } else if (property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_RATIO) {
+ } else if (
+ property == VideoContentProperty::VIDEO_CROP || property == VideoContentProperty::VIDEO_SCALE ||
+ property == VideoContentProperty::VIDEO_FRAME_RATE
+ ) {
Changed (frequent);
} else if (property == ContentProperty::PATH) {
+ _have_valid_pieces = false;
Changed (frequent);
}
}
im->make_black ();
_black_frame.reset (
- new PlayerImage (
- im,
+ new PlayerVideoFrame (
+ shared_ptr<ImageProxy> (new RawImageProxy (im, _film->log ())),
Crop(),
_video_container_size,
_video_container_size,
- Scaler::from_id ("bicubic")
+ Scaler::from_id ("bicubic"),
+ EYES_BOTH,
+ PART_WHOLE,
+ ColourConversion ()
)
);
}
return shared_ptr<Resampler> ();
}
- _film->log()->log (
- String::compose (
- "Creating new resampler for %1 to %2 with %3 channels", c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()
- )
+ LOG_GENERAL (
+ "Creating new resampler for %1 to %2 with %3 channels", c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()
);
shared_ptr<Resampler> r (new Resampler (c->content_audio_frame_rate(), c->output_audio_frame_rate(), c->audio_channels()));
_last_video.reset ();
#endif
- Video (_black_frame, EYES_BOTH, ColourConversion(), _last_emit_was_black, _video_position);
+ Video (_black_frame, _last_emit_was_black, _video_position);
_video_position += _film->video_frames_to_time (1);
_last_emit_was_black = true;
}
last time we were run.
*/
- if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER) {
+ if (p == Film::SCALER || p == Film::WITH_SUBTITLES || p == Film::CONTAINER || p == Film::VIDEO_FRAME_RATE) {
Changed (false);
}
}
void
Player::process_subtitle (weak_ptr<Piece> weak_piece, shared_ptr<Image> image, dcpomatic::Rect<double> rect, Time from, Time to)
{
- _in_subtitle.piece = weak_piece;
- _in_subtitle.image = image;
- _in_subtitle.rect = rect;
- _in_subtitle.from = from;
- _in_subtitle.to = to;
-
- update_subtitle ();
-}
-
-void
-Player::update_subtitle ()
-{
- shared_ptr<Piece> piece = _in_subtitle.piece.lock ();
- if (!piece) {
- return;
- }
-
- if (!_in_subtitle.image) {
- _out_subtitle.image.reset ();
- return;
+ if (!image) {
+ /* A null image means that we should stop any current subtitles at `from' */
+ for (list<Subtitle>::iterator i = _subtitles.begin(); i != _subtitles.end(); ++i) {
+ i->set_stop (from);
+ }
+ } else {
+ _subtitles.push_back (Subtitle (_film, _video_container_size, weak_piece, image, rect, from, to));
}
-
- shared_ptr<SubtitleContent> sc = dynamic_pointer_cast<SubtitleContent> (piece->content);
- assert (sc);
-
- dcpomatic::Rect<double> in_rect = _in_subtitle.rect;
- libdcp::Size scaled_size;
-
- in_rect.y += sc->subtitle_offset ();
-
- /* We will scale the subtitle up to fit _video_container_size, and also by the additional subtitle_scale */
- scaled_size.width = in_rect.width * _video_container_size.width * sc->subtitle_scale ();
- scaled_size.height = in_rect.height * _video_container_size.height * sc->subtitle_scale ();
-
- /* Then we need a corrective translation, consisting of two parts:
- *
- * 1. that which is the result of the scaling of the subtitle by _video_container_size; this will be
- * rect.x * _video_container_size.width and rect.y * _video_container_size.height.
- *
- * 2. that to shift the origin of the scale by subtitle_scale to the centre of the subtitle; this will be
- * (width_before_subtitle_scale * (1 - subtitle_scale) / 2) and
- * (height_before_subtitle_scale * (1 - subtitle_scale) / 2).
- *
- * Combining these two translations gives these expressions.
- */
-
- _out_subtitle.position.x = rint (_video_container_size.width * (in_rect.x + (in_rect.width * (1 - sc->subtitle_scale ()) / 2)));
- _out_subtitle.position.y = rint (_video_container_size.height * (in_rect.y + (in_rect.height * (1 - sc->subtitle_scale ()) / 2)));
-
- _out_subtitle.image = _in_subtitle.image->scale (
- scaled_size,
- Scaler::from_id ("bicubic"),
- _in_subtitle.image->pixel_format (),
- true
- );
- _out_subtitle.from = _in_subtitle.from + piece->content->position ();
- _out_subtitle.to = _in_subtitle.to + piece->content->position ();
}
/** Re-emit the last frame that was emitted, using current settings for crop, ratio, scaler and subtitles.
_last_incoming_video.weak_piece,
_last_incoming_video.image,
_last_incoming_video.eyes,
+ _last_incoming_video.part,
_last_incoming_video.same,
_last_incoming_video.frame,
_last_incoming_video.extra
return true;
}
-
-PlayerImage::PlayerImage (
- shared_ptr<const Image> in,
- Crop crop,
- libdcp::Size inter_size,
- libdcp::Size out_size,
- Scaler const * scaler
- )
- : _in (in)
- , _crop (crop)
- , _inter_size (inter_size)
- , _out_size (out_size)
- , _scaler (scaler)
-{
-
-}
-
-void
-PlayerImage::set_subtitle (shared_ptr<const Image> image, Position<int> pos)
-{
- _subtitle_image = image;
- _subtitle_position = pos;
-}
-
-shared_ptr<Image>
-PlayerImage::image ()
-{
- shared_ptr<Image> out = _in->crop_scale_window (_crop, _inter_size, _out_size, _scaler, PIX_FMT_RGB24, false);
-
- Position<int> const container_offset ((_out_size.width - _inter_size.width) / 2, (_out_size.height - _inter_size.width) / 2);
-
- if (_subtitle_image) {
- out->alpha_blend (_subtitle_image, _subtitle_position);
- }
-
- return out;
-}