X-Git-Url: https://git.carlh.net/gitweb/?a=blobdiff_plain;f=src%2Flib%2Fwriter.cc;h=d6c3370b09cdfcb2d481f8d3aa737c0f311bf03b;hb=6e6ebf3122333b38333482bce64df3e6e61e64c4;hp=064c3f0ec08db18da764b54a1b218c8f6759c598;hpb=7a6b206783fd44735679f8c7ea542b42e1a4dbd6;p=dcpomatic.git

diff --git a/src/lib/writer.cc b/src/lib/writer.cc
index 064c3f0ec..d6c3370b0 100644
--- a/src/lib/writer.cc
+++ b/src/lib/writer.cc
@@ -106,6 +106,9 @@ void
 Writer::start ()
 {
 	_thread = new boost::thread (boost::bind (&Writer::thread, this));
+#ifdef DCPOMATIC_LINUX
+	pthread_setname_np (_thread->native_handle(), "writer");
+#endif
 }
 
 Writer::~Writer ()
@@ -231,36 +234,61 @@ Writer::fake_write (Frame frame, Eyes eyes)
 }
 
 /** Write some audio frames to the DCP.
- *  @param audio Audio data or 0 if there is no audio to be written here (i.e. it is referenced).
+ *  @param audio Audio data.
+ *  @param time Time of this data within the DCP.
  *  This method is not thread safe.
  */
 void
-Writer::write (shared_ptr<const AudioBuffers> audio)
+Writer::write (shared_ptr<const AudioBuffers> audio, DCPTime const time)
 {
+	DCPOMATIC_ASSERT (audio);
+
+	int const afr = _film->audio_frame_rate();
+
+	DCPTime const end = time + DCPTime::from_frames(audio->frames(), afr);
+
 	/* The audio we get might span a reel boundary, and if so we have to write it in bits */
 
-	int32_t offset = 0;
-	while (offset < audio->frames ()) {
+	DCPTime t = time;
+	while (t < end) {
 
 		if (_audio_reel == _reels.end ()) {
 			/* This audio is off the end of the last reel; ignore it */
 			return;
 		}
 
-		int32_t const remaining = audio->frames() - offset;
-		int32_t const reel_space = _audio_reel->period().duration().frames_floor(_film->audio_frame_rate()) - _audio_reel->total_written_audio_frames();
-
-		if (remaining <= reel_space) {
+		if (end <= _audio_reel->period().to) {
 			/* Easy case: we can write all the audio to this reel */
 			_audio_reel->write (audio);
-			offset += remaining;
+			t = end;
 		} else {
-			/* Write the part we can */
-			shared_ptr<AudioBuffers> part (new AudioBuffers (audio->channels(), reel_space));
-			part->copy_from (audio.get(), reel_space, offset, 0);
-			_audio_reel->write (part);
+			/* Split the audio into two and write the first part */
+			DCPTime part_lengths[2] = {
+				_audio_reel->period().to - t,
+				end - _audio_reel->period().to
+			};
+
+			Frame part_frames[2] = {
+				part_lengths[0].frames_ceil(afr),
+				part_lengths[1].frames_ceil(afr)
+			};
+
+			if (part_frames[0]) {
+				shared_ptr<AudioBuffers> part (new AudioBuffers (audio->channels(), part_frames[0]));
+				part->copy_from (audio.get(), part_frames[0], 0, 0);
+				_audio_reel->write (part);
+			}
+
+			if (part_frames[1]) {
+				shared_ptr<AudioBuffers> part (new AudioBuffers (audio->channels(), part_frames[1]));
+				part->copy_from (audio.get(), part_frames[1], part_frames[0], 0);
+				audio = part;
+			} else {
+				audio.reset ();
+			}
+
 			++_audio_reel;
-			offset += reel_space;
+			t += part_lengths[0];
 		}
 	}
 }
@@ -554,7 +582,7 @@ Writer::write_cover_sheet ()
 	string text = Config::instance()->cover_sheet ();
 	boost::algorithm::replace_all (text, "$CPL_NAME", _film->name());
 	boost::algorithm::replace_all (text, "$TYPE", _film->dcp_content_type()->pretty_name());
-	boost::algorithm::replace_all (text, "$CONTAINER", _film->container()->nickname());
+	boost::algorithm::replace_all (text, "$CONTAINER", _film->container()->container_nickname());
 	boost::algorithm::replace_all (text, "$AUDIO_LANGUAGE", _film->isdcf_metadata().audio_language);
 	boost::algorithm::replace_all (text, "$SUBTITLE_LANGUAGE", _film->isdcf_metadata().subtitle_language);
 
@@ -563,7 +591,9 @@ Writer::write_cover_sheet ()
 		boost::filesystem::recursive_directory_iterator i = boost::filesystem::recursive_directory_iterator(_film->dir(_film->dcp_name()));
 		i != boost::filesystem::recursive_directory_iterator();
 		++i) {
-		size += boost::filesystem::file_size (i->path ());
+		if (boost::filesystem::is_regular_file (i->path ())) {
+			size += boost::filesystem::file_size (i->path ());
+		}
 	}
 
 	if (size > (1000000000L)) {
@@ -607,6 +637,11 @@ Writer::write_cover_sheet ()
 bool
 Writer::can_fake_write (Frame frame) const
 {
+	if (_film->encrypted()) {
+		/* We need to re-write the frame because the asset ID is embedded in the HMAC... I think... */
+		return false;
+	}
+
 	/* We have to do a proper write of the first frame so that we can set up the JPEG2000
 	   parameters in the asset writer.
 	*/
@@ -674,16 +709,7 @@ operator== (QueueItem const & a, QueueItem const & b)
 void
 Writer::set_encoder_threads (int threads)
 {
-	/* I think the scaling factor here should be the ratio of the longest frame
-	   encode time to the shortest; if the thread count is T, longest time is L
-	   and the shortest time S we could encode L/S frames per thread whilst waiting
-	   for the L frame to encode so we might have to store LT/S frames.
-
-	   However we don't want to use too much memory, so keep it a bit lower than we'd
-	   perhaps like.  A J2K frame is typically about 1Mb so 3 here will mean we could
-	   use about 240Mb with 72 encoding threads.
-	*/
-	_maximum_frames_in_memory = lrint (threads * 3);
+	_maximum_frames_in_memory = lrint (threads * Config::instance()->frames_in_memory_multiplier());
 }
 
 void