Handle 2D/3D mismatches in the player (#2409).
author: Carl Hetherington <cth@carlh.net>
Wed, 11 Jan 2023 22:00:25 +0000 (23:00 +0100)
committer: Carl Hetherington <cth@carlh.net>
Wed, 1 Feb 2023 00:12:38 +0000 (01:12 +0100)
Previously if there was 2D content in a 3D project (or vice versa)
the player would emit the video anyway and the encoder would have
to deal with it.

Since previously the FFmpeg encoder did not deal with this quite
right, it seems to make sense to move the logic into the player
so that neither encoder has to worry about it.

src/lib/player.cc
src/lib/writer.cc
test/ffmpeg_encoder_test.cc
test/player_test.cc

index 3c3032bd12ed7a9162a2b7c668213139c17a3b00..510def583279819e6e4c60239dec3f7481325399 100644 (file)
@@ -970,6 +970,28 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                return;
        }
 
+       vector<Eyes> eyes_to_emit;
+
+       if (!film->three_d()) {
+               if (video.eyes == Eyes::RIGHT) {
+                       /* 2D film, 3D content: discard right */
+                       return;
+               } else if (video.eyes == Eyes::LEFT) {
+                       /* 2D film, 3D content: emit left as "both" */
+                       video.eyes = Eyes::BOTH;
+                       eyes_to_emit = { Eyes::BOTH };
+               }
+       } else {
+               if (video.eyes == Eyes::BOTH) {
+			/* 3D film, 2D content: emit "both" for left and right */
+                       eyes_to_emit = { Eyes::LEFT, Eyes::RIGHT };
+               }
+       }
+
+       if (eyes_to_emit.empty()) {
+               eyes_to_emit = { video.eyes };
+       }
+
        /* Time of the first frame we will emit */
        DCPTime const time = content_video_to_dcp (piece, video.frame);
        LOG_DEBUG_PLAYER("Received video frame %1 at %2", video.frame, to_string(time));
@@ -998,7 +1020,7 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
                if ((fill_to - fill_from) > one_video_frame() / 2) {
                        auto last = _last_video.find (weak_piece);
                        if (film->three_d()) {
-                               auto fill_to_eyes = video.eyes;
+                               auto fill_to_eyes = eyes_to_emit[0];
                                if (fill_to_eyes == Eyes::BOTH) {
                                        fill_to_eyes = Eyes::LEFT;
                                }
@@ -1040,32 +1062,34 @@ Player::video (weak_ptr<Piece> weak_piece, ContentVideo video)
 
        auto const content_video = piece->content->video;
 
-       _last_video[weak_piece] = std::make_shared<PlayerVideo>(
-               video.image,
-               content_video->actual_crop(),
-               content_video->fade(film, video.frame),
-               scale_for_display(
-                       content_video->scaled_size(film->frame_size()),
+       for (auto eyes: eyes_to_emit) {
+               _last_video[weak_piece] = std::make_shared<PlayerVideo>(
+                       video.image,
+                       content_video->actual_crop(),
+                       content_video->fade(film, video.frame),
+                       scale_for_display(
+                               content_video->scaled_size(film->frame_size()),
+                               _video_container_size,
+                               film->frame_size(),
+                               content_video->pixel_quanta()
+                               ),
                        _video_container_size,
-                       film->frame_size(),
-                       content_video->pixel_quanta()
-                       ),
-               _video_container_size,
-               video.eyes,
-               video.part,
-               content_video->colour_conversion(),
-               content_video->range(),
-               piece->content,
-               video.frame,
-               false
-               );
-
-       DCPTime t = time;
-       for (int i = 0; i < frc.repeat; ++i) {
-               if (t < piece->content->end(film)) {
-                       emit_video (_last_video[weak_piece], t);
+                       eyes,
+                       video.part,
+                       content_video->colour_conversion(),
+                       content_video->range(),
+                       piece->content,
+                       video.frame,
+                       false
+                       );
+
+               DCPTime t = time;
+               for (int i = 0; i < frc.repeat; ++i) {
+                       if (t < piece->content->end(film)) {
+                               emit_video (_last_video[weak_piece], t);
+                       }
+                       t += one_video_frame ();
                }
-               t += one_video_frame ();
        }
 }
 
index f7665a7a7011f1a01efe0a56ac2ebbe884710bb2..a369447232f5aa99aa03d70a10fa5232d090833b 100644 (file)
@@ -153,19 +153,11 @@ Writer::write (shared_ptr<const Data> encoded, Frame frame, Eyes eyes)
        qi.reel = video_reel (frame);
        qi.frame = frame - _reels[qi.reel].start ();
 
-       if (film()->three_d() && eyes == Eyes::BOTH) {
-               /* 2D material in a 3D DCP; fake the 3D */
-               qi.eyes = Eyes::LEFT;
-               _queue.push_back (qi);
-               ++_queued_full_in_memory;
-               qi.eyes = Eyes::RIGHT;
-               _queue.push_back (qi);
-               ++_queued_full_in_memory;
-       } else {
-               qi.eyes = eyes;
-               _queue.push_back (qi);
-               ++_queued_full_in_memory;
-       }
+       DCPOMATIC_ASSERT((film()->three_d() && eyes != Eyes::BOTH) || (!film()->three_d() && eyes == Eyes::BOTH));
+
+       qi.eyes = eyes;
+       _queue.push_back(qi);
+       ++_queued_full_in_memory;
 
        /* Now there's something to do: wake anything wait()ing on _empty_condition */
        _empty_condition.notify_all ();
index 90bac56ef21e8181d23125a313c8616cac9f328f..c58689a02613765f910a8dda8c75df9514d0aaf8 100644 (file)
@@ -369,6 +369,18 @@ BOOST_AUTO_TEST_CASE (ffmpeg_encoder_h264_test7)
 }
 
 
+BOOST_AUTO_TEST_CASE(ffmpeg_encoder_2d_content_in_3d_project)
+{
+       auto content = make_shared<ImageContent>(TestPaths::private_data() / "bbc405.png");
+       auto film = new_test_film2("ffmpeg_encoder_2d_content_in_3d_project", { content });
+       film->set_three_d(true);
+
+       auto job = make_shared<TranscodeJob>(film, TranscodeJob::ChangedBehaviour::IGNORE);
+       FFmpegEncoder encoder(film, job, "build/test/ffmpeg_encoder_2d_content_in_3d_project.mp4", ExportFormat::H264_AAC, true, false, false, 23);
+       encoder.go();
+}
+
+
 /** Stereo project with mixdown-to-stereo set */
 BOOST_AUTO_TEST_CASE (ffmpeg_encoder_h264_test8)
 {
index 733883b5a5fba4b6e0b58e7127f00822bb542763..892efe5b1053d5641bd863f370c997996e63f0f8 100644 (file)
@@ -38,6 +38,7 @@
 #include "lib/ffmpeg_content.h"
 #include "lib/film.h"
 #include "lib/image_content.h"
+#include "lib/image_png.h"
 #include "lib/player.h"
 #include "lib/ratio.h"
 #include "lib/string_text_file_content.h"
@@ -596,3 +597,106 @@ BOOST_AUTO_TEST_CASE(trimmed_sound_mix_bug_13_frame_rate_change)
        check_mxf_audio_file("test/data/trimmed_sound_mix_bug_13_frame_rate_change.mxf", dcp_file(film, "pcm_"));
 }
 
+
+BOOST_AUTO_TEST_CASE(two_d_in_three_d_duplicates)
+{
+       auto A = content_factory("test/data/flat_red.png").front();
+       auto B = content_factory("test/data/flat_green.png").front();
+       auto film = new_test_film2("two_d_in_three_d_duplicates", { A, B });
+
+       film->set_three_d(true);
+       B->video->set_frame_type(VideoFrameType::THREE_D_LEFT_RIGHT);
+       B->set_position(film, DCPTime::from_seconds(10));
+       B->video->set_custom_size(dcp::Size(1998, 1080));
+
+       auto player = std::make_shared<Player>(film, film->playlist());
+
+       std::vector<uint8_t> red_line(1998 * 3);
+       for (int i = 0; i < 1998; ++i) {
+               red_line[i * 3] = 255;
+       };
+
+       std::vector<uint8_t> green_line(1998 * 3);
+       for (int i = 0; i < 1998; ++i) {
+               green_line[i * 3 + 1] = 255;
+       };
+
+       Eyes last_eyes = Eyes::RIGHT;
+       optional<DCPTime> last_time;
+       player->Video.connect([&last_eyes, &last_time, &red_line, &green_line](shared_ptr<PlayerVideo> video, dcpomatic::DCPTime time) {
+               BOOST_CHECK(last_eyes != video->eyes());
+               last_eyes = video->eyes();
+               if (video->eyes() == Eyes::LEFT) {
+                       BOOST_CHECK(!last_time || time == *last_time + DCPTime::from_frames(1, 24));
+               } else {
+                       BOOST_CHECK(time == *last_time);
+               }
+               last_time = time;
+
+               auto image = video->image([](AVPixelFormat) { return AV_PIX_FMT_RGB24; }, VideoRange::FULL, false);
+               auto const size = image->size();
+               for (int y = 0; y < size.height; ++y) {
+                       uint8_t* line = image->data()[0] + y * image->stride()[0];
+                       if (time < DCPTime::from_seconds(10)) {
+                               BOOST_REQUIRE_EQUAL(memcmp(line, red_line.data(), 1998 * 3), 0);
+                       } else {
+                               BOOST_REQUIRE_EQUAL(memcmp(line, green_line.data(), 1998 * 3), 0);
+                       }
+               }
+       });
+
+       BOOST_CHECK(film->length() == DCPTime::from_seconds(20));
+       while (!player->pass()) {}
+}
+
+
+BOOST_AUTO_TEST_CASE(three_d_in_two_d_chooses_left)
+{
+       auto left = content_factory("test/data/flat_red.png").front();
+       auto right = content_factory("test/data/flat_green.png").front();
+       auto mono = content_factory("test/data/flat_blue.png").front();
+
+       auto film = new_test_film2("three_d_in_two_d_chooses_left", { left, right, mono });
+
+       left->video->set_frame_type(VideoFrameType::THREE_D_LEFT);
+       left->set_position(film, dcpomatic::DCPTime());
+       right->video->set_frame_type(VideoFrameType::THREE_D_RIGHT);
+       right->set_position(film, dcpomatic::DCPTime());
+
+       mono->set_position(film, dcpomatic::DCPTime::from_seconds(10));
+
+       auto player = std::make_shared<Player>(film, film->playlist());
+
+       std::vector<uint8_t> red_line(1998 * 3);
+       for (int i = 0; i < 1998; ++i) {
+               red_line[i * 3] = 255;
+       };
+
+       std::vector<uint8_t> blue_line(1998 * 3);
+       for (int i = 0; i < 1998; ++i) {
+               blue_line[i * 3 + 2] = 255;
+       };
+
+       optional<DCPTime> last_time;
+       player->Video.connect([&last_time, &red_line, &blue_line](shared_ptr<PlayerVideo> video, dcpomatic::DCPTime time) {
+               BOOST_CHECK(video->eyes() == Eyes::BOTH);
+               BOOST_CHECK(!last_time || time == *last_time + DCPTime::from_frames(1, 24));
+               last_time = time;
+
+               std::cout << to_string(time) << "\n";
+               auto image = video->image([](AVPixelFormat) { return AV_PIX_FMT_RGB24; }, VideoRange::FULL, false);
+               auto const size = image->size();
+               for (int y = 0; y < size.height; ++y) {
+                       uint8_t* line = image->data()[0] + y * image->stride()[0];
+                       if (time < DCPTime::from_seconds(10)) {
+                               BOOST_REQUIRE_EQUAL(memcmp(line, red_line.data(), 1998 * 3), 0);
+                       } else {
+                               BOOST_REQUIRE_EQUAL(memcmp(line, blue_line.data(), 1998 * 3), 0);
+                       }
+               }
+       });
+
+       BOOST_CHECK(film->length() == DCPTime::from_seconds(20));
+       while (!player->pass()) {}
+}
+