X-Git-Url: https://git.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fjob_manager.cc;h=9fcd86f2d38b7c034a332740a74457f672bd1b9b;hp=26e2d28970a71c0f5a9d90b7582ed2738fd2fca0;hb=a6c4b4fa16d9c6597e362044b875f3d6df80753f;hpb=4df42c81390ed61aecbcc5bf0ad380937c26eaef

diff --git a/src/lib/job_manager.cc b/src/lib/job_manager.cc
index 26e2d2897..9fcd86f2d 100644
--- a/src/lib/job_manager.cc
+++ b/src/lib/job_manager.cc
@@ -1,5 +1,5 @@
 /*
-    Copyright (C) 2012-2018 Carl Hetherington
+    Copyright (C) 2012-2021 Carl Hetherington
 
     This file is part of DCP-o-matic.
 
@@ -18,114 +18,117 @@
 
 */
 
+
 /** @file  src/job_manager.cc
  *  @brief A simple scheduler for jobs.
  */
 
-#include "job_manager.h"
-#include "job.h"
-#include "cross.h"
+
 #include "analyse_audio_job.h"
+#include "analyse_subtitles_job.h"
+#include "cross.h"
 #include "film.h"
+#include "job.h"
+#include "job_manager.h"
+#include "util.h"
 #include <boost/thread.hpp>
-#include <boost/foreach.hpp>
-#include <iostream>
 
-using std::string;
+
+using std::dynamic_pointer_cast;
+using std::function;
 using std::list;
-using std::cout;
-using boost::shared_ptr;
-using boost::weak_ptr;
-using boost::function;
-using boost::dynamic_pointer_cast;
-using boost::optional;
+using std::make_shared;
+using std::shared_ptr;
+using std::string;
+using std::weak_ptr;
 using boost::bind;
+using boost::optional;
+
+
+JobManager* JobManager::_instance = nullptr;
 
-JobManager* JobManager::_instance = 0;
 
 JobManager::JobManager ()
-	: _terminate (false)
-	, _paused (false)
-	, _scheduler (0)
 {
 
 }
 
+
 void
 JobManager::start ()
 {
-	_scheduler = new boost::thread (boost::bind (&JobManager::scheduler, this));
+	_scheduler = boost::thread (boost::bind(&JobManager::scheduler, this));
 #ifdef DCPOMATIC_LINUX
-	pthread_setname_np (_scheduler->native_handle(), "job-scheduler");
+	pthread_setname_np (_scheduler.native_handle(), "job-scheduler");
 #endif
 }
 
+
 JobManager::~JobManager ()
 {
-	BOOST_FOREACH (boost::signals2::connection& i, _connections) {
+	boost::this_thread::disable_interruption dis;
+
+	for (auto& i: _connections) {
 		i.disconnect ();
 	}
 
 	{
 		boost::mutex::scoped_lock lm (_mutex);
 		_terminate = true;
-		_empty_condition.notify_all ();
+		_schedule_condition.notify_all();
 	}
 
-	if (_scheduler) {
-		/* Ideally this would be a DCPOMATIC_ASSERT(_scheduler->joinable()) but we
-		   can't throw exceptions from a destructor.
-		*/
-		if (_scheduler->joinable ()) {
-			_scheduler->join ();
-		}
-	}
-
-	delete _scheduler;
+	try {
+		_scheduler.join();
+	} catch (...) {}
 }
 
+
 shared_ptr<Job>
 JobManager::add (shared_ptr<Job> j)
 {
 	{
 		boost::mutex::scoped_lock lm (_mutex);
 		_jobs.push_back (j);
-		_empty_condition.notify_all ();
+		_schedule_condition.notify_all();
 	}
 
-	emit (boost::bind (boost::ref (JobAdded), weak_ptr<Job> (j)));
+	emit (boost::bind(boost::ref(JobAdded), weak_ptr<Job>(j)));
 
 	return j;
 }
 
+
 shared_ptr<Job>
 JobManager::add_after (shared_ptr<Job> after, shared_ptr<Job> j)
 {
 	{
 		boost::mutex::scoped_lock lm (_mutex);
-		list<shared_ptr<Job> >::iterator i = find (_jobs.begin(), _jobs.end(), after);
+		auto i = find (_jobs.begin(), _jobs.end(), after);
 		DCPOMATIC_ASSERT (i != _jobs.end());
 		_jobs.insert (i, j);
-		_empty_condition.notify_all ();
+		_schedule_condition.notify_all();
 	}
 
-	emit (boost::bind (boost::ref (JobAdded), weak_ptr<Job> (j)));
+	emit (boost::bind(boost::ref(JobAdded), weak_ptr<Job>(j)));
 
 	return j;
 }
 
-list<shared_ptr<Job> >
+
+list<shared_ptr<Job>>
 JobManager::get () const
 {
 	boost::mutex::scoped_lock lm (_mutex);
 	return _jobs;
 }
 
+
 bool
 JobManager::work_to_do () const
 {
 	boost::mutex::scoped_lock lm (_mutex);
-	list<shared_ptr<Job> >::const_iterator i = _jobs.begin();
+	auto i = _jobs.begin();
 	while (i != _jobs.end() && (*i)->finished()) {
 		++i;
 	}
@@ -133,12 +136,13 @@ JobManager::work_to_do () const
 	return i != _jobs.end ();
 }
 
+
 bool
 JobManager::errors () const
 {
 	boost::mutex::scoped_lock lm (_mutex);
-	BOOST_FOREACH (shared_ptr<Job> i, _jobs) {
-		if (i->finished_in_error ()) {
+	for (auto i: _jobs) {
+		if (i->finished_in_error()) {
 			return true;
 		}
 	}
@@ -146,65 +150,63 @@ JobManager::errors () const
 	return false;
 }
 
+
 void
 JobManager::scheduler ()
 {
+	start_of_thread ("JobManager");
+
 	while (true) {
 
 		boost::mutex::scoped_lock lm (_mutex);
 
-		while (true) {
-			bool have_new = false;
-			bool have_running = false;
-			BOOST_FOREACH (shared_ptr<Job> i, _jobs) {
-				if (i->running()) {
-					have_running = true;
-				}
-				if (i->is_new()) {
-					have_new = true;
-				}
-			}
-
-			if ((!have_running && have_new) || _terminate) {
-				break;
-			}
-
-			_empty_condition.wait (lm);
-		}
-
 		if (_terminate) {
 			break;
 		}
 
-		BOOST_FOREACH (shared_ptr<Job> i, _jobs) {
-			if (i->is_new()) {
-				_connections.push_back (i->FinishedImmediate.connect(bind(&JobManager::job_finished, this)));
-				i->start ();
+		bool have_running = false;
+		for (auto i: _jobs) {
+			if ((have_running || _paused) && i->running()) {
+				/* We already have a running job, or are totally paused, so this job should not be running */
+				i->pause_by_priority();
+			} else if (!have_running && !_paused && (i->is_new() || i->paused_by_priority())) {
+				/* We don't have a running job, and we should have one, so start/resume this */
+				if (i->is_new()) {
+					_connections.push_back (i->FinishedImmediate.connect(bind(&JobManager::job_finished, this)));
+					i->start ();
+				} else {
+					i->resume ();
+				}
 				emit (boost::bind (boost::ref (ActiveJobsChanged), _last_active_job, i->json_name()));
 				_last_active_job = i->json_name ();
-				/* Only start one job at once */
-				break;
+				have_running = true;
+			} else if (!have_running && i->running()) {
+				have_running = true;
 			}
 		}
+
+		_schedule_condition.wait(lm);
 	}
 }
 
+
 void
 JobManager::job_finished ()
 {
 	{
 		boost::mutex::scoped_lock lm (_mutex);
-		emit (boost::bind (boost::ref (ActiveJobsChanged), _last_active_job, optional<string>()));
+		emit (boost::bind(boost::ref (ActiveJobsChanged), _last_active_job, optional<string>()));
 		_last_active_job = optional<string>();
 	}
 
-	_empty_condition.notify_all ();
+	_schedule_condition.notify_all();
}
 
+
 JobManager *
 JobManager::instance ()
 {
-	if (_instance == 0) {
+	if (!_instance) {
 		_instance = new JobManager ();
 		_instance->start ();
 	}
@@ -212,28 +214,30 @@ JobManager::instance ()
 	return _instance;
 }
 
+
 void
 JobManager::drop ()
 {
 	delete _instance;
-	_instance = 0;
+	_instance = nullptr;
 }
 
+
 void
 JobManager::analyse_audio (
 	shared_ptr<const Film> film,
 	shared_ptr<const Playlist> playlist,
 	bool from_zero,
 	boost::signals2::connection& connection,
-	function<void()> ready
+	function<void ()> ready
 	)
 {
 	{
 		boost::mutex::scoped_lock lm (_mutex);
 
-		BOOST_FOREACH (shared_ptr<Job> i, _jobs) {
-			shared_ptr<AnalyseAudioJob> a = dynamic_pointer_cast<AnalyseAudioJob> (i);
-			if (a && a->path() == film->audio_analysis_path(playlist)) {
+		for (auto i: _jobs) {
+			auto a = dynamic_pointer_cast<AnalyseAudioJob> (i);
+			if (a && a->path() == film->audio_analysis_path(playlist) && !i->finished_cancelled()) {
 				i->when_finished (connection, ready);
 				return;
 			}
@@ -245,117 +249,100 @@ JobManager::analyse_audio (
 	{
 		boost::mutex::scoped_lock lm (_mutex);
 
-		job.reset (new AnalyseAudioJob (film, playlist, from_zero));
+		job = make_shared<AnalyseAudioJob> (film, playlist, from_zero);
 		connection = job->Finished.connect (ready);
 		_jobs.push_back (job);
-		_empty_condition.notify_all ();
+		_schedule_condition.notify_all ();
 	}
 
 	emit (boost::bind (boost::ref (JobAdded), weak_ptr<Job> (job)));
 }
 
+
 void
-JobManager::increase_priority (shared_ptr<Job> job)
+JobManager::analyse_subtitles (
+	shared_ptr<const Film> film,
+	shared_ptr<const Content> content,
+	boost::signals2::connection& connection,
+	function<void ()> ready
+	)
 {
-	bool changed = false;
-
 	{
 		boost::mutex::scoped_lock lm (_mutex);
-		list<shared_ptr<Job> >::iterator last = _jobs.end ();
-		for (list<shared_ptr<Job> >::iterator i = _jobs.begin(); i != _jobs.end(); ++i) {
-			if (*i == job && last != _jobs.end()) {
-				swap (*i, *last);
-				changed = true;
-				break;
+
+		for (auto i: _jobs) {
+			auto a = dynamic_pointer_cast<AnalyseSubtitlesJob> (i);
+			if (a && a->path() == film->subtitle_analysis_path(content)) {
+				i->when_finished (connection, ready);
+				return;
 			}
-			last = i;
 		}
 	}
 
-	if (changed) {
-		priority_changed ();
+	shared_ptr<AnalyseSubtitlesJob> job;
+
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+
+		job = make_shared<AnalyseSubtitlesJob>(film, content);
+		connection = job->Finished.connect (ready);
+		_jobs.push_back (job);
+		_schedule_condition.notify_all ();
 	}
+
+	emit (boost::bind(boost::ref(JobAdded), weak_ptr<Job>(job)));
 }
 
+
 void
-JobManager::priority_changed ()
+JobManager::increase_priority (shared_ptr<Job> job)
 {
 	{
 		boost::mutex::scoped_lock lm (_mutex);
-
-		bool first = true;
-		BOOST_FOREACH (shared_ptr<Job> i, _jobs) {
-			if (first) {
-				if (i->is_new ()) {
-					i->start ();
-				} else if (i->paused_by_priority ()) {
-					i->resume ();
-				}
-				first = false;
-			} else {
-				if (i->running ()) {
-					i->pause_by_priority ();
-				}
-			}
+		auto iter = std::find(_jobs.begin(), _jobs.end(), job);
+		if (iter == _jobs.begin() || iter == _jobs.end()) {
+			return;
 		}
+		swap(*iter, *std::prev(iter));
 	}
 
-	emit (boost::bind (boost::ref (JobsReordered)));
+	_schedule_condition.notify_all();
+	emit(boost::bind(boost::ref(JobsReordered)));
 }
 
+
 void
 JobManager::decrease_priority (shared_ptr<Job> job)
 {
-	bool changed = false;
-
 	{
 		boost::mutex::scoped_lock lm (_mutex);
-		for (list<shared_ptr<Job> >::iterator i = _jobs.begin(); i != _jobs.end(); ++i) {
-			list<shared_ptr<Job> >::iterator next = i;
-			++next;
-			if (*i == job && next != _jobs.end()) {
-				swap (*i, *next);
-				changed = true;
-				break;
-			}
+		auto iter = std::find(_jobs.begin(), _jobs.end(), job);
+		if (iter == _jobs.end() || std::next(iter) == _jobs.end()) {
+			return;
 		}
+		swap(*iter, *std::next(iter));
 	}
 
-	if (changed) {
-		priority_changed ();
-	}
+	_schedule_condition.notify_all();
+	emit(boost::bind(boost::ref(JobsReordered)));
 }
 
+
+/** Pause all job processing */
 void
 JobManager::pause ()
 {
 	boost::mutex::scoped_lock lm (_mutex);
-
-	if (_paused) {
-		return;
-	}
-
-	BOOST_FOREACH (shared_ptr<Job> i, _jobs) {
-		if (i->pause_by_user()) {
-			_paused_job = i;
-		}
-	}
-
 	_paused = true;
+	_schedule_condition.notify_all();
 }
 
+
+/** Resume processing jobs after a previous pause() */
 void
 JobManager::resume ()
 {
 	boost::mutex::scoped_lock lm (_mutex);
 
-	if (!_paused) {
-		return;
-	}
-
-	if (_paused_job) {
-		_paused_job->resume ();
-	}
-
-	_paused_job.reset ();
 	_paused = false;
+	_schedule_condition.notify_all();
 }
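
For context, here is a minimal usage sketch of the scheduler as it stands after this change. The JobManager calls (instance(), add(), increase_priority(), pause(), resume()) are the ones visible in the diff above; the function queue_jobs and its Job arguments are hypothetical placeholders for concrete Job subclasses and are not part of this commit.

/* Illustrative sketch only: exercises the JobManager interface changed above.
   "quick" and "slow" stand in for instances of any concrete Job subclass. */
#include "job.h"
#include "job_manager.h"

void
queue_jobs (std::shared_ptr<Job> quick, std::shared_ptr<Job> slow)
{
	auto jm = JobManager::instance ();

	/* Queued jobs are started one at a time by the scheduler thread */
	jm->add (slow);
	jm->add (quick);

	/* Move `quick' up the queue; the scheduler pauses the currently-running
	   job by priority and resumes it when it reaches the front again.
	*/
	jm->increase_priority (quick);

	/* Pause and later resume all job processing */
	jm->pause ();
	jm->resume ();
}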