X-Git-Url: https://git.carlh.net/gitweb/?p=dcpomatic.git;a=blobdiff_plain;f=src%2Flib%2Fjob_manager.cc;h=9fcd86f2d38b7c034a332740a74457f672bd1b9b;hp=63db662d0f185aed9502c342d5d7dd794a75f68b;hb=a6c4b4fa16d9c6597e362044b875f3d6df80753f;hpb=05c37bfdb86be26497d5baa448a0cbda20e33bed

diff --git a/src/lib/job_manager.cc b/src/lib/job_manager.cc
index 63db662d0..9fcd86f2d 100644
--- a/src/lib/job_manager.cc
+++ b/src/lib/job_manager.cc
@@ -1,86 +1,134 @@
 /*
-    Copyright (C) 2012 Carl Hetherington <cth@carlh.net>
+    Copyright (C) 2012-2021 Carl Hetherington <cth@carlh.net>
 
-    This program is free software; you can redistribute it and/or modify
+    This file is part of DCP-o-matic.
+
+    DCP-o-matic is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
     the Free Software Foundation; either version 2 of the License, or
     (at your option) any later version.
 
-    This program is distributed in the hope that it will be useful,
+    DCP-o-matic is distributed in the hope that it will be useful,
     but WITHOUT ANY WARRANTY; without even the implied warranty of
     MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     GNU General Public License for more details.
 
     You should have received a copy of the GNU General Public License
-    along with this program; if not, write to the Free Software
-    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+    along with DCP-o-matic.  If not, see <http://www.gnu.org/licenses/>.
 
 */
 
+
 /** @file  src/job_manager.cc
  *  @brief A simple scheduler for jobs.
  */
 
-#include <iostream>
-#include <boost/thread.hpp>
-#include "job_manager.h"
-#include "job.h"
+
+#include "analyse_audio_job.h"
+#include "analyse_subtitles_job.h"
 #include "cross.h"
-#include "ui_signaller.h"
+#include "film.h"
+#include "job.h"
+#include "job_manager.h"
+#include "util.h"
+#include <boost/thread.hpp>
 
-using std::string;
+
+using std::dynamic_pointer_cast;
+using std::function;
 using std::list;
-using std::cout;
-using boost::shared_ptr;
-using boost::weak_ptr;
+using std::make_shared;
+using std::shared_ptr;
+using std::string;
+using std::weak_ptr;
+using boost::bind;
+using boost::optional;
+
+
+JobManager* JobManager::_instance = nullptr;
 
-JobManager* JobManager::_instance = 0;
 
 JobManager::JobManager ()
-	: _terminate (false)
-	, _last_active_jobs (false)
-	, _scheduler (new boost::thread (boost::bind (&JobManager::scheduler, this)))
 {
-	
 }
 
+
+void
+JobManager::start ()
+{
+	_scheduler = boost::thread (boost::bind(&JobManager::scheduler, this));
+#ifdef DCPOMATIC_LINUX
+	pthread_setname_np (_scheduler.native_handle(), "job-scheduler");
+#endif
+}
+
+
 JobManager::~JobManager ()
 {
+	boost::this_thread::disable_interruption dis;
+
+	for (auto& i: _connections) {
+		i.disconnect ();
+	}
+
 	{
 		boost::mutex::scoped_lock lm (_mutex);
 		_terminate = true;
+		_schedule_condition.notify_all();
 	}
 
-	if (_scheduler->joinable ()) {
-		_scheduler->join ();
-	}
+	try {
+		_scheduler.join();
+	} catch (...) {}
 }
 
+
 shared_ptr<Job>
 JobManager::add (shared_ptr<Job> j)
 {
 	{
 		boost::mutex::scoped_lock lm (_mutex);
 		_jobs.push_back (j);
+		_schedule_condition.notify_all();
+	}
+
+	emit (boost::bind(boost::ref(JobAdded), weak_ptr<Job>(j)));
+
+	return j;
+}
+
+
+shared_ptr<Job>
+JobManager::add_after (shared_ptr<Job> after, shared_ptr<Job> j)
+{
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+		auto i = find (_jobs.begin(), _jobs.end(), after);
+		DCPOMATIC_ASSERT (i != _jobs.end());
+		_jobs.insert (i, j);
+		_schedule_condition.notify_all();
 	}
 
-	emit (boost::bind (boost::ref (JobAdded), weak_ptr<Job> (j)));
-	
+	emit (boost::bind(boost::ref(JobAdded), weak_ptr<Job>(j)));
+
 	return j;
 }
 
-list<shared_ptr<Job> >
+
+list<shared_ptr<Job>>
 JobManager::get () const
 {
 	boost::mutex::scoped_lock lm (_mutex);
 	return _jobs;
 }
 
+
 bool
 JobManager::work_to_do () const
 {
 	boost::mutex::scoped_lock lm (_mutex);
-	list<shared_ptr<Job> >::const_iterator i = _jobs.begin();
+	auto i = _jobs.begin();
 	while (i != _jobs.end() && (*i)->finished()) {
 		++i;
 	}
@@ -88,74 +136,213 @@ JobManager::work_to_do () const
 	return i != _jobs.end ();
 }
 
+
 bool
 JobManager::errors () const
 {
 	boost::mutex::scoped_lock lm (_mutex);
-	for (list<shared_ptr<Job> >::const_iterator i = _jobs.begin(); i != _jobs.end(); ++i) {
-		if ((*i)->finished_in_error ()) {
+	for (auto i: _jobs) {
+		if (i->finished_in_error()) {
 			return true;
 		}
 	}
 
 	return false;
-}	
+}
+
 
 void
 JobManager::scheduler ()
 {
+	start_of_thread ("JobManager");
+
 	while (true) {
-		bool active_jobs = false;
+		boost::mutex::scoped_lock lm (_mutex);
 
-		{
-			boost::mutex::scoped_lock lm (_mutex);
-			if (_terminate) {
-				return;
-			}
-	
-			for (list<shared_ptr<Job> >::iterator i = _jobs.begin(); i != _jobs.end(); ++i) {
+		if (_terminate) {
+			break;
+		}
 
-				if (!(*i)->finished ()) {
-					active_jobs = true;
-				}
-	
-				if ((*i)->running ()) {
-					/* Something is already happening */
-					break;
-				}
-	
-				if ((*i)->is_new()) {
-					(*i)->start ();
-	
-					/* Only start one job at once */
-					break;
+		bool have_running = false;
+		for (auto i: _jobs) {
+			if ((have_running || _paused) && i->running()) {
+				/* We already have a running job, or are totally paused, so this job should not be running */
+				i->pause_by_priority();
+			} else if (!have_running && !_paused && (i->is_new() || i->paused_by_priority())) {
+				/* We don't have a running job, and we should have one, so start/resume this */
+				if (i->is_new()) {
+					_connections.push_back (i->FinishedImmediate.connect(bind(&JobManager::job_finished, this)));
+					i->start ();
+				} else {
+					i->resume ();
 				}
+				emit (boost::bind (boost::ref (ActiveJobsChanged), _last_active_job, i->json_name()));
+				_last_active_job = i->json_name ();
+				have_running = true;
+			} else if (!have_running && i->running()) {
+				have_running = true;
 			}
 		}
 
-		if (active_jobs != _last_active_jobs) {
-			_last_active_jobs = active_jobs;
-			emit (boost::bind (boost::ref (ActiveJobsChanged), active_jobs));
-		}
+		_schedule_condition.wait(lm);
+	}
+}
+
 
-		dcpomatic_sleep (1);
+void
+JobManager::job_finished ()
+{
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+		emit (boost::bind(boost::ref (ActiveJobsChanged), _last_active_job, optional<string>()));
+		_last_active_job = optional<string>();
 	}
+
+	_schedule_condition.notify_all();
 }
 
+
 JobManager *
 JobManager::instance ()
 {
-	if (_instance == 0) {
+	if (!_instance) {
 		_instance = new JobManager ();
+		_instance->start ();
 	}
 
 	return _instance;
 }
 
+
 void
 JobManager::drop ()
 {
 	delete _instance;
-	_instance = 0;
+	_instance = nullptr;
+}
+
+
+void
+JobManager::analyse_audio (
+	shared_ptr<const Film> film,
+	shared_ptr<const Playlist> playlist,
+	bool from_zero,
+	boost::signals2::connection& connection,
+	function<void ()> ready
+	)
+{
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+
+		for (auto i: _jobs) {
+			auto a = dynamic_pointer_cast<AnalyseAudioJob> (i);
+			if (a && a->path() == film->audio_analysis_path(playlist) && !i->finished_cancelled()) {
+				i->when_finished (connection, ready);
+				return;
+			}
+		}
+	}
+
+	shared_ptr<AnalyseAudioJob> job;
+
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+
+		job = make_shared<AnalyseAudioJob> (film, playlist, from_zero);
+		connection = job->Finished.connect (ready);
+		_jobs.push_back (job);
+		_schedule_condition.notify_all ();
+	}
+
+	emit (boost::bind (boost::ref (JobAdded), weak_ptr<Job> (job)));
+}
+
+
+void
+JobManager::analyse_subtitles (
+	shared_ptr<const Film> film,
+	shared_ptr<Content> content,
+	boost::signals2::connection& connection,
+	function<void ()> ready
+	)
+{
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+
+		for (auto i: _jobs) {
+			auto a = dynamic_pointer_cast<AnalyseSubtitlesJob> (i);
+			if (a && a->path() == film->subtitle_analysis_path(content)) {
+				i->when_finished (connection, ready);
+				return;
+			}
+		}
+	}
+
+	shared_ptr<AnalyseSubtitlesJob> job;
+
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+
+		job = make_shared<AnalyseSubtitlesJob>(film, content);
+		connection = job->Finished.connect (ready);
+		_jobs.push_back (job);
+		_schedule_condition.notify_all ();
+	}
+
+	emit (boost::bind(boost::ref(JobAdded), weak_ptr<Job>(job)));
+}
+
+
+void
+JobManager::increase_priority (shared_ptr<Job> job)
+{
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+		auto iter = std::find(_jobs.begin(), _jobs.end(), job);
+		if (iter == _jobs.begin() || iter == _jobs.end()) {
+			return;
+		}
+		swap(*iter, *std::prev(iter));
+	}
+
+	_schedule_condition.notify_all();
+	emit(boost::bind(boost::ref(JobsReordered)));
+}
+
+
+void
+JobManager::decrease_priority (shared_ptr<Job> job)
+{
+	{
+		boost::mutex::scoped_lock lm (_mutex);
+		auto iter = std::find(_jobs.begin(), _jobs.end(), job);
+		if (iter == _jobs.end() || std::next(iter) == _jobs.end()) {
+			return;
+		}
+		swap(*iter, *std::next(iter));
+	}
+
+	_schedule_condition.notify_all();
+	emit(boost::bind(boost::ref(JobsReordered)));
+}
+
+
+/** Pause all job processing */
+void
+JobManager::pause ()
+{
+	boost::mutex::scoped_lock lm (_mutex);
+	_paused = true;
+	_schedule_condition.notify_all();
+}
+
+
+/** Resume processing jobs after a previous pause() */
+void
+JobManager::resume ()
+{
+	boost::mutex::scoped_lock lm (_mutex);
+	_paused = false;
+	_schedule_condition.notify_all();
+}
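
Note: the sketch below is not part of the patch. It is a minimal, standalone illustration of the scheduling pattern the patch adopts: the old scheduler() polled the job list once a second via dcpomatic_sleep(1), while the new one sleeps on _schedule_condition and is woken explicitly by add(), add_after(), job_finished(), pause(), resume() and the priority changes. The class and names here (TinyScheduler, std::thread and std::function instead of boost::thread and Job) are invented for illustration, and pausing is simplified to "do not start anything new" rather than pausing a running job by priority as JobManager does.

#include <condition_variable>
#include <functional>
#include <list>
#include <mutex>
#include <thread>

class TinyScheduler
{
public:
	TinyScheduler ()
		/* _thread is declared last, so every other member is initialised before scheduler() starts */
		: _thread ([this] { scheduler(); })
	{}

	~TinyScheduler ()
	{
		{
			std::unique_lock<std::mutex> lm (_mutex);
			_terminate = true;
		}
		_condition.notify_all ();
		_thread.join ();
	}

	/* Queue some work and wake the scheduler; analogous to JobManager::add() */
	void add (std::function<void ()> work)
	{
		{
			std::unique_lock<std::mutex> lm (_mutex);
			_jobs.push_back (work);
		}
		_condition.notify_all ();
	}

	/* Analogous to JobManager::pause() / resume(): flip a flag and wake the thread */
	void pause ()  { set_paused (true); }
	void resume () { set_paused (false); }

private:
	void set_paused (bool paused)
	{
		{
			std::unique_lock<std::mutex> lm (_mutex);
			_paused = paused;
		}
		_condition.notify_all ();
	}

	void scheduler ()
	{
		std::unique_lock<std::mutex> lm (_mutex);
		while (!_terminate) {
			if (!_paused && !_jobs.empty()) {
				/* Run at most one job at a time, with the lock released while it runs */
				auto job = _jobs.front ();
				_jobs.pop_front ();
				lm.unlock ();
				job ();
				lm.lock ();
				continue;
			}
			/* Nothing runnable: sleep until add(), pause(), resume() or ~TinyScheduler() wakes us */
			_condition.wait (lm);
		}
	}

	std::mutex _mutex;
	std::condition_variable _condition;
	std::list<std::function<void ()>> _jobs;
	bool _paused = false;
	bool _terminate = false;
	std::thread _thread;
};

int main ()
{
	TinyScheduler scheduler;
	scheduler.add ([] { /* pretend to do a long encode here */ });
	scheduler.pause ();
	scheduler.resume ();
	/* ~TinyScheduler() asks the thread to stop and joins it */
}

The points carried over from the patch are that the shared state (the job list, the pause flag and the terminate flag) is only modified while the mutex is held, and that every such modification is followed by a notify so the scheduler thread re-evaluates what should be running instead of discovering the change on its next poll.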