summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorCarl Hetherington <cth@carlh.net>2017-05-08 14:39:46 +0100
committerCarl Hetherington <cth@carlh.net>2017-05-08 14:39:46 +0100
commit97632e5e91fc0e2c0a3dd84ed9a167f79e4bd14d (patch)
treeac8bc4c05ef78c4b0df460e707789ff25675548a
parentd02e03907214808ce9da9eb9a2267ff577e85559 (diff)
Fix up black-filling logic.
-rw-r--r--doc/design/Attic/who_fills_the_gaps.tex (renamed from doc/design/who_fills_the_gaps.tex)0
-rw-r--r--doc/design/gap_filling.tex56
-rw-r--r--doc/design/servers_and_ports.tex36
-rw-r--r--src/lib/decoder.cc1
-rw-r--r--src/lib/player.cc66
-rw-r--r--src/lib/player.h6
6 files changed, 129 insertions, 36 deletions
diff --git a/doc/design/who_fills_the_gaps.tex b/doc/design/Attic/who_fills_the_gaps.tex
index 00e8ac39f..00e8ac39f 100644
--- a/doc/design/who_fills_the_gaps.tex
+++ b/doc/design/Attic/who_fills_the_gaps.tex
diff --git a/doc/design/gap_filling.tex b/doc/design/gap_filling.tex
new file mode 100644
index 000000000..a6f748748
--- /dev/null
+++ b/doc/design/gap_filling.tex
@@ -0,0 +1,56 @@
+\documentclass{article}
+\renewcommand{\c}[1]{\texttt{#1}}
+\begin{document}
+
+There are two sources of video gaps:
+
+\begin{enumerate}
+\item Discontiguous outputs from \c{FFmpegDecoder} --- it makes sense
+ to fix these when the frame after the gap is seen, as that's the
+ first time we know about the gap. For example, it emits frame~1
+ then frame~3; when we see 3 we should emit 2 to fill the gap.
+\item Periods where there is no video content --- these could be long,
+ so you can't just wait for the frame after the gap.
+\end{enumerate}
+
+Two solutions suggest themselves for the period of no video content:
+
+\begin{enumerate}
+\item Create `black' \c{Piece}s to act as dummy content and emit black
+ frames as required.
+\item Fix it in \c{Player::pass()}.
+\end{enumerate}
+
+Dummy pieces feel like a nice solution, but they are quite wordy, as
+you need a hierarchy of \c{Piece}s with virtual functions and so on.
+
+If we can trust \c{Decoder::position()}, we know the earliest time at
+which a decoder will emit data when it is next \c{pass()}ed. If this
+is more than one frame after the last video frame emitted, we know we
+need to emit a black frame. This much seems straightforward.
+
+Things appear to get harder with seeking. There are two paths here:
+
+\begin{enumerate}
+\item Seeking into the middle of some video content.
+\item Seeking into some empty space.
+\end{enumerate}
+
+and also differences between accurate and inaccurate seek.
+
+Let's take them case-by-case:
+
+\begin{enumerate}
+ \item \emph{Accurate seek into content} --- we should not fill
+ anything since the correct data will be generated, in time, by
+ \c{pass()}.
+ \item \emph{Accurate seek into space} --- we should fill up to the
+ earliest decoder position.
+ \item \emph{Inaccurate seek into content} --- we should not fill
+ anything since \c{pass()} will generate something at some
+ unknown time.
+ \item \emph{Inaccurate seek into space} --- we should fill up to
+ the earliest decoder position from the seek time.
+\end{enumerate}
+
+\end{document}
diff --git a/doc/design/servers_and_ports.tex b/doc/design/servers_and_ports.tex
new file mode 100644
index 000000000..c635b3054
--- /dev/null
+++ b/doc/design/servers_and_ports.tex
@@ -0,0 +1,36 @@
+\documentclass{article}
+\usepackage[usenames]{xcolor}
+\usepackage{listings}
+\title{Servers and ports}
+\author{}
+\date{}
+\begin{document}
+\maketitle
+
+\begin{tabular}{|l|l|l|}
+ \hline
+ \textbf{Port offset} & \textbf{Component} & \textbf{Purpose} \\
+ \hline
+ 0 & \texttt{EncodeServer} & Listen for frame encoding requests \\
+ \hline
+ 1 & \texttt{EncodeServer} & Listen for \texttt{DCPOMATIC\_HELLO} from masters \\
+ \hline
+ 2 & \texttt{EncodeServerFinder} & Listen for replies to \texttt{DCPOMATIC\_HELLO} from servers \\
+ \hline
+ 3 & Batch converter & Listen for job requests \\
+ \hline
+\end{tabular}
+
+
+\texttt{EncodeServer} listens on \texttt{server\_port\_base}.
+Receives an XML \texttt{EncodingRequest} and then the video data.
+Sends back the encoded data.
+
+\texttt{EncodeServer} also listens on $\texttt{server\_port\_base} +
+1$. A main DCP-o-matic instance broadcasts \texttt{DCPOMATIC\_HELLO}
+on this port. When a server receives this it connects to the main
+DCP-o-matic on $\texttt{server\_port\_base} + 2$ and tells it what it can do.
+
+Batch Converter listens on $\texttt{server\_port\_base} + 3$ for jobs.
+
+\end{document}
diff --git a/src/lib/decoder.cc b/src/lib/decoder.cc
index fef5e2a99..1b281f718 100644
--- a/src/lib/decoder.cc
+++ b/src/lib/decoder.cc
@@ -28,6 +28,7 @@
using std::cout;
using boost::optional;
+/** @return Earliest time of content that the next pass() will emit */
ContentTime
Decoder::position () const
{
diff --git a/src/lib/player.cc b/src/lib/player.cc
index 392804ee8..11440ec5e 100644
--- a/src/lib/player.cc
+++ b/src/lib/player.cc
@@ -87,6 +87,7 @@ Player::Player (shared_ptr<const Film> film, shared_ptr<const Playlist> playlist
, _always_burn_subtitles (false)
, _fast (false)
, _play_referenced (false)
+ , _last_seek_accurate (true)
, _audio_merger (_film->audio_frame_rate())
{
_film_changed_connection = _film->Changed.connect (bind (&Player::film_changed, this, _1));
@@ -537,14 +538,24 @@ Player::pass ()
}
}
- bool filled = false;
+ DCPTime fill_towards = earliest ? earliest_content : _playlist->length();
+
+ optional<DCPTime> fill_from;
if (_last_video_time) {
- filled = fill_video (DCPTimePeriod (_last_video_time.get(), earliest ? earliest_content : _playlist->length()));
- } else if (_last_seek_time) {
- filled = fill_video (DCPTimePeriod (_last_seek_time.get(), _last_seek_time.get() + one_video_frame ()));
+ /* No seek; fill towards the next thing that might happen (or the end of the playlist) */
+ fill_from = _last_video_time;
+ } else if (_last_seek_time && !_playlist->video_content_at(_last_seek_time.get())) {
+ /* Seek into an empty area; fill from the seek time */
+ fill_from = _last_seek_time;
+ }
+
+ if (fill_from && ((fill_towards - fill_from.get())) > one_video_frame()) {
+ emit_video (black_player_video_frame(), fill_from.get());
+ } else if (_playlist->length() == DCPTime()) {
+ emit_video (black_player_video_frame(), DCPTime());
}
- if (!earliest && !filled) {
+ if (!earliest && !fill_from) {
return true;
}
@@ -624,7 +635,10 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
DCPTimePeriod const period (time, time + one_video_frame());
/* Discard if it's outside the content's period or if it's before the last accurate seek */
- if (time < piece->content->position() || time >= piece->content->end() || (_last_video_time && time < _last_video_time)) {
+ if (
+ time < piece->content->position() ||
+ time >= piece->content->end() ||
+ (_last_seek_time && _last_seek_accurate && time < _last_seek_time.get())) {
return;
}
@@ -633,7 +647,16 @@ Player::video (weak_ptr<Piece> wp, ContentVideo video)
*/
if (_last_video_time) {
- fill_video (DCPTimePeriod (_last_video_time.get(), time));
+ /* XXX: this may not work for 3D */
+ BOOST_FOREACH (DCPTimePeriod i, subtract(DCPTimePeriod (_last_video_time.get(), time), _no_video)) {
+ for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
+ if (_last_video) {
+ emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
+ } else {
+ emit_video (black_player_video_frame(), j);
+ }
+ }
+ }
}
_last_video.reset (
@@ -897,15 +920,10 @@ Player::seek (DCPTime time, bool accurate)
}
}
- if (accurate) {
- _last_video_time = time;
- _last_audio_time = time;
- } else {
- _last_video_time = optional<DCPTime> ();
- _last_audio_time = optional<DCPTime> ();
- }
-
+ _last_video_time = optional<DCPTime> ();
+ _last_audio_time = optional<DCPTime> ();
_last_seek_time = time;
+ _last_seek_accurate = accurate;
}
shared_ptr<Resampler>
@@ -935,24 +953,6 @@ Player::resampler (shared_ptr<const AudioContent> content, AudioStreamPtr stream
return r;
}
-bool
-Player::fill_video (DCPTimePeriod period)
-{
- /* XXX: this may not work for 3D */
- bool filled = false;
- BOOST_FOREACH (DCPTimePeriod i, subtract(period, _no_video)) {
- for (DCPTime j = i.from; j < i.to; j += one_video_frame()) {
- if (_playlist->video_content_at(j) && _last_video) {
- emit_video (shared_ptr<PlayerVideo> (new PlayerVideo (*_last_video)), j);
- } else {
- emit_video (black_player_video_frame(), j);
- }
- filled = true;
- }
- }
- return filled;
-}
-
void
Player::emit_video (shared_ptr<PlayerVideo> pv, DCPTime time)
{
diff --git a/src/lib/player.h b/src/lib/player.h
index 7cffa1f11..ffc578c71 100644
--- a/src/lib/player.h
+++ b/src/lib/player.h
@@ -110,7 +110,6 @@ private:
void subtitle_stop (boost::weak_ptr<Piece>, ContentTime);
boost::shared_ptr<Resampler> resampler (boost::shared_ptr<const AudioContent> content, AudioStreamPtr stream, bool create);
DCPTime one_video_frame () const;
- bool fill_video (DCPTimePeriod period);
void fill_audio (DCPTimePeriod period);
void audio_flush (boost::shared_ptr<Piece>, AudioStreamPtr stream);
void audio_transform (boost::shared_ptr<AudioContent> content, AudioStreamPtr stream, ContentAudio content_audio, DCPTime time);
@@ -146,11 +145,12 @@ private:
/** Last PlayerVideo that was emitted */
boost::shared_ptr<PlayerVideo> _last_video;
- /** Time just after the last video frame we emitted, or the last seek time */
+ /** Time just after the last video frame we emitted, or empty if we have just seeked */
boost::optional<DCPTime> _last_video_time;
- /** Time just after the last audio frame we emitted, or the last seek time */
+ /** Time just after the last audio frame we emitted, or empty if we have just seeked */
boost::optional<DCPTime> _last_audio_time;
boost::optional<DCPTime> _last_seek_time;
+ bool _last_seek_accurate;
AudioMerger _audio_merger;