forked from pool/systemd
f565552c51
- Add fix-dir-noatime-tmpfiles.patch: do not modify directory atime, which was preventing removing empty directories (bnc#751253, rh#810257).
- Add improve-restart-behaviour.patch: prevent deadlock during try-restart (bnc#743218).
- Add journal-bugfixes.patch: don't crash when rotating journal (bnc#768953) and prevent memleak at rotation time too.
- Add ulimit-support.patch: add support for system wide ulimit (bnc#744818).
- Add change-terminal.patch: use vt102 instead of vt100 as terminal for non-vc tty.
- Package various .wants directories, which were no longer packaged due to plymouth units being removed from systemd package.
- Fix buildrequires for manpages build.

OBS-URL: https://build.opensuse.org/request/show/127017
OBS-URL: https://build.opensuse.org/package/show/Base:System/systemd?expand=0&rev=284
From 4cbb803c2197b6be6be709a11911bde00f6853f6 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Wed, 28 Mar 2012 00:42:27 +0200
Subject: [PATCH 01/30] job: fix loss of ordering with restart jobs

Suppose that foo.service/start is a job waiting on other job bar.service/start
to finish. And then foo.service/restart is enqueued (not using
--ignore-dependencies).

Currently this makes foo.service start immediately, forgetting about the
ordering to bar.service.

The runnability check for JOB_RESTART jobs looks only at dependencies for
stopping. That's actually correct, because restart jobs should be treated the
same as stop jobs at first. The bug is that job_run_and_invalidate() does not
treat them exactly the same as stop jobs. unit_start() gets called without
checking for the runnability of the converted JOB_START job.

The fix is to simplify the switch in job_run_and_invalidate(). Handle
JOB_RESTART identically to JOB_STOP.
Also simplify the handling of JOB_TRY_RESTART - just convert it to JOB_RESTART
if the unit is active and let it fall through to the JOB_RESTART case.
Similarly for JOB_RELOAD_OR_START - have a fall through to JOB_START.

In job_finish_and_invalidate() it's not necessary to check for JOB_TRY_RESTART
with JOB_DONE, because JOB_TRY_RESTART jobs will have been converted to
JOB_RESTART already.

Speeding up the restart of services in "auto-restart" state still works as
before.

Improves: https://bugzilla.redhat.com/show_bug.cgi?id=753586
but it's still not perfect. With this fix the try-restart action will wait for
the restart to complete in the right order, but the optimal behaviour would be
to finish quickly (without disturbing the start job).
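
Condensed, the reworked switch converts the job type first and then falls
through, so restart jobs share the stop path and its runnability handling
(a sketch of the shape of the change only; the full hunk follows):

        switch (j->type) {
        ...
        case JOB_TRY_RESTART:
                if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(j->unit))) {
                        r = -ENOEXEC;
                        break;
                }
                j->type = JOB_RESTART;
                /* fall through */

        case JOB_STOP:
        case JOB_RESTART:
                /* Stop first; job_finish_and_invalidate() later converts the
                 * finished JOB_RESTART into a waiting JOB_START. */
                r = unit_stop(j->unit);
                break;
        ...
        }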
---
|
|
src/job.c | 64 ++++++++++++++++++++-----------------------------------------
|
|
1 files changed, 21 insertions(+), 43 deletions(-)
|
|
|
|
diff --git a/src/job.c b/src/job.c
|
|
index e57286f..d43ce8e 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -387,14 +387,21 @@ int job_run_and_invalidate(Job *j) {
|
|
|
|
switch (j->type) {
|
|
|
|
+ case JOB_RELOAD_OR_START:
|
|
+ if (unit_active_state(j->unit) == UNIT_ACTIVE) {
|
|
+ j->type = JOB_RELOAD;
|
|
+ r = unit_reload(j->unit);
|
|
+ break;
|
|
+ }
|
|
+ j->type = JOB_START;
|
|
+ /* fall through */
|
|
+
|
|
case JOB_START:
|
|
r = unit_start(j->unit);
|
|
|
|
- /* If this unit cannot be started, then simply
|
|
- * wait */
|
|
+ /* If this unit cannot be started, then simply wait */
|
|
if (r == -EBADR)
|
|
r = 0;
|
|
-
|
|
break;
|
|
|
|
case JOB_VERIFY_ACTIVE: {
|
|
@@ -408,11 +415,19 @@ int job_run_and_invalidate(Job *j) {
|
|
break;
|
|
}
|
|
|
|
+ case JOB_TRY_RESTART:
|
|
+ if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(j->unit))) {
|
|
+ r = -ENOEXEC;
|
|
+ break;
|
|
+ }
|
|
+ j->type = JOB_RESTART;
|
|
+ /* fall through */
|
|
+
|
|
case JOB_STOP:
|
|
+ case JOB_RESTART:
|
|
r = unit_stop(j->unit);
|
|
|
|
- /* If this unit cannot stopped, then simply
|
|
- * wait. */
|
|
+ /* If this unit cannot stopped, then simply wait. */
|
|
if (r == -EBADR)
|
|
r = 0;
|
|
break;
|
|
@@ -421,43 +436,6 @@ int job_run_and_invalidate(Job *j) {
|
|
r = unit_reload(j->unit);
|
|
break;
|
|
|
|
- case JOB_RELOAD_OR_START:
|
|
- if (unit_active_state(j->unit) == UNIT_ACTIVE) {
|
|
- j->type = JOB_RELOAD;
|
|
- r = unit_reload(j->unit);
|
|
- } else {
|
|
- j->type = JOB_START;
|
|
- r = unit_start(j->unit);
|
|
-
|
|
- if (r == -EBADR)
|
|
- r = 0;
|
|
- }
|
|
- break;
|
|
-
|
|
- case JOB_RESTART: {
|
|
- UnitActiveState t = unit_active_state(j->unit);
|
|
- if (t == UNIT_INACTIVE || t == UNIT_FAILED || t == UNIT_ACTIVATING) {
|
|
- j->type = JOB_START;
|
|
- r = unit_start(j->unit);
|
|
- } else
|
|
- r = unit_stop(j->unit);
|
|
- break;
|
|
- }
|
|
-
|
|
- case JOB_TRY_RESTART: {
|
|
- UnitActiveState t = unit_active_state(j->unit);
|
|
- if (t == UNIT_INACTIVE || t == UNIT_FAILED || t == UNIT_DEACTIVATING)
|
|
- r = -ENOEXEC;
|
|
- else if (t == UNIT_ACTIVATING) {
|
|
- j->type = JOB_START;
|
|
- r = unit_start(j->unit);
|
|
- } else {
|
|
- j->type = JOB_RESTART;
|
|
- r = unit_stop(j->unit);
|
|
- }
|
|
- break;
|
|
- }
|
|
-
|
|
default:
|
|
assert_not_reached("Unknown job type");
|
|
}
|
|
@@ -536,7 +514,7 @@ int job_finish_and_invalidate(Job *j, JobResult result) {
|
|
job_add_to_dbus_queue(j);
|
|
|
|
/* Patch restart jobs so that they become normal start jobs */
|
|
- if (result == JOB_DONE && (j->type == JOB_RESTART || j->type == JOB_TRY_RESTART)) {
|
|
+ if (result == JOB_DONE && j->type == JOB_RESTART) {
|
|
|
|
log_debug("Converting job %s/%s -> %s/%s",
|
|
j->unit->id, job_type_to_string(j->type),
|
|
--
|
|
1.7.7
|
|
|
|
|
|
From 8166db595346260a3e36206249f7e82bc9ad7406 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Wed, 28 Mar 2012 01:26:04 +0200
Subject: [PATCH 02/30] job: add debug prints where job type gets changed

Conflicts:

	src/job.c
---
|
|
src/job.c | 14 +++++++++-----
|
|
1 files changed, 9 insertions(+), 5 deletions(-)
|
|
|
|
diff --git a/src/job.c b/src/job.c
|
|
index d43ce8e..6a4d8a7 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -355,6 +355,14 @@ bool job_is_runnable(Job *j) {
|
|
return true;
|
|
}
|
|
|
|
+static void job_change_type(Job *j, JobType newtype) {
|
|
+ log_debug("Converting job %s/%s -> %s/%s",
|
|
+ j->unit->id, job_type_to_string(j->type),
|
|
+ j->unit->id, job_type_to_string(newtype));
|
|
+
|
|
+ j->type = newtype;
|
|
+}
|
|
+
|
|
int job_run_and_invalidate(Job *j) {
|
|
int r;
|
|
uint32_t id;
|
|
@@ -516,12 +524,8 @@ int job_finish_and_invalidate(Job *j, JobResult result) {
|
|
/* Patch restart jobs so that they become normal start jobs */
|
|
if (result == JOB_DONE && j->type == JOB_RESTART) {
|
|
|
|
- log_debug("Converting job %s/%s -> %s/%s",
|
|
- j->unit->id, job_type_to_string(j->type),
|
|
- j->unit->id, job_type_to_string(JOB_START));
|
|
-
|
|
+ job_change_type(j, JOB_START);
|
|
j->state = JOB_WAITING;
|
|
- j->type = JOB_START;
|
|
|
|
job_add_to_run_queue(j);
|
|
|
|
--
|
|
1.7.7
|
|
|
|
|
|
From b0055c992aefe7512ad0f53d7c6e202d4db2bf53 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Thu, 5 Apr 2012 08:34:05 +0200
Subject: [PATCH 03/30] job: use a lookup table for merging of job types

It is easier to see what job_type_merge() is doing when the merging
rules are written in the form of a table.

job_type_is_superset() contained redundant information. It can be
simplified to a simple rule: Type A is a superset of B iff merging A
with B gives A.

Two job types are conflicting iff they are not mergeable.

Make job_type_lookup_merge() the core function to decide the type
merging. All other job_type_*() are just short wrappers around it.
They can be inline.

test-job-type gives the same results as before.
btw, the systemd binary is smaller by almost 1 KB.
(cherry picked from commit 348e27fedfd4cdd2238ff31a46785a70b9dc6fc0)
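
The table stores only the strict lower triangle of the symmetric merge matrix,
so after swapping to a > b the result sits at triangular index
(a - 1) * a / 2 + b. A worked example, assuming the enum order shown in the
table header below (JOB_START = 0, JOB_VERIFY_ACTIVE = 1, JOB_STOP = 2,
JOB_RELOAD = 3, ...):

        /* job_type_lookup_merge(JOB_START, JOB_RELOAD): after the swap
         * a = JOB_RELOAD (3), b = JOB_START (0), so the lookup reads
         *   job_merging_table[(3 - 1) * 3 / 2 + 0] == job_merging_table[3],
         * the first entry of the JOB_RELOAD row: */
        JobType t = job_type_lookup_merge(JOB_START, JOB_RELOAD);  /* == JOB_RELOAD_OR_START */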
---
|
|
src/job.c | 123 +++++++++++++++++-------------------------------------------
|
|
src/job.h | 29 ++++++++++++--
|
|
2 files changed, 60 insertions(+), 92 deletions(-)
|
|
|
|
diff --git a/src/job.c b/src/job.c
|
|
index 6a4d8a7..da939fb 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -164,100 +164,47 @@ bool job_is_anchor(Job *j) {
|
|
return false;
|
|
}
|
|
|
|
-static bool types_match(JobType a, JobType b, JobType c, JobType d) {
|
|
- return
|
|
- (a == c && b == d) ||
|
|
- (a == d && b == c);
|
|
-}
|
|
-
|
|
-int job_type_merge(JobType *a, JobType b) {
|
|
- if (*a == b)
|
|
- return 0;
|
|
-
|
|
- /* Merging is associative! a merged with b merged with c is
|
|
- * the same as a merged with c merged with b. */
|
|
-
|
|
- /* Mergeability is transitive! if a can be merged with b and b
|
|
- * with c then a also with c */
|
|
-
|
|
- /* Also, if a merged with b cannot be merged with c, then
|
|
- * either a or b cannot be merged with c either */
|
|
-
|
|
- if (types_match(*a, b, JOB_START, JOB_VERIFY_ACTIVE))
|
|
- *a = JOB_START;
|
|
- else if (types_match(*a, b, JOB_START, JOB_RELOAD) ||
|
|
- types_match(*a, b, JOB_START, JOB_RELOAD_OR_START) ||
|
|
- types_match(*a, b, JOB_VERIFY_ACTIVE, JOB_RELOAD_OR_START) ||
|
|
- types_match(*a, b, JOB_RELOAD, JOB_RELOAD_OR_START))
|
|
- *a = JOB_RELOAD_OR_START;
|
|
- else if (types_match(*a, b, JOB_START, JOB_RESTART) ||
|
|
- types_match(*a, b, JOB_START, JOB_TRY_RESTART) ||
|
|
- types_match(*a, b, JOB_VERIFY_ACTIVE, JOB_RESTART) ||
|
|
- types_match(*a, b, JOB_RELOAD, JOB_RESTART) ||
|
|
- types_match(*a, b, JOB_RELOAD_OR_START, JOB_RESTART) ||
|
|
- types_match(*a, b, JOB_RELOAD_OR_START, JOB_TRY_RESTART) ||
|
|
- types_match(*a, b, JOB_RESTART, JOB_TRY_RESTART))
|
|
- *a = JOB_RESTART;
|
|
- else if (types_match(*a, b, JOB_VERIFY_ACTIVE, JOB_RELOAD))
|
|
- *a = JOB_RELOAD;
|
|
- else if (types_match(*a, b, JOB_VERIFY_ACTIVE, JOB_TRY_RESTART) ||
|
|
- types_match(*a, b, JOB_RELOAD, JOB_TRY_RESTART))
|
|
- *a = JOB_TRY_RESTART;
|
|
- else
|
|
- return -EEXIST;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-bool job_type_is_mergeable(JobType a, JobType b) {
|
|
- return job_type_merge(&a, b) >= 0;
|
|
-}
|
|
-
|
|
-bool job_type_is_superset(JobType a, JobType b) {
|
|
+/*
|
|
+ * Merging is commutative, so imagine the matrix as symmetric. We store only
|
|
+ * its lower triangle to avoid duplication. We don't store the main diagonal,
|
|
+ * because A merged with A is simply A.
|
|
+ *
|
|
+ * Merging is associative! A merged with B merged with C is the same as
|
|
+ * A merged with C merged with B.
|
|
+ *
|
|
+ * Mergeability is transitive! If A can be merged with B and B with C then
|
|
+ * A also with C.
|
|
+ *
|
|
+ * Also, if A merged with B cannot be merged with C, then either A or B cannot
|
|
+ * be merged with C either.
|
|
+ */
|
|
+static const JobType job_merging_table[] = {
|
|
+/* What \ With * JOB_START JOB_VERIFY_ACTIVE JOB_STOP JOB_RELOAD JOB_RELOAD_OR_START JOB_RESTART JOB_TRY_RESTART */
|
|
+/************************************************************************************************************************************/
|
|
+/*JOB_START */
|
|
+/*JOB_VERIFY_ACTIVE */ JOB_START,
|
|
+/*JOB_STOP */ -1, -1,
|
|
+/*JOB_RELOAD */ JOB_RELOAD_OR_START, JOB_RELOAD, -1,
|
|
+/*JOB_RELOAD_OR_START*/ JOB_RELOAD_OR_START, JOB_RELOAD_OR_START, -1, JOB_RELOAD_OR_START,
|
|
+/*JOB_RESTART */ JOB_RESTART, JOB_RESTART, -1, JOB_RESTART, JOB_RESTART,
|
|
+/*JOB_TRY_RESTART */ JOB_RESTART, JOB_TRY_RESTART, -1, JOB_TRY_RESTART, JOB_RESTART, JOB_RESTART,
|
|
+};
|
|
|
|
- /* Checks whether operation a is a "superset" of b in its
|
|
- * actions */
|
|
+JobType job_type_lookup_merge(JobType a, JobType b) {
|
|
+ assert_cc(ELEMENTSOF(job_merging_table) == _JOB_TYPE_MAX * (_JOB_TYPE_MAX - 1) / 2);
|
|
+ assert(a >= 0 && a < _JOB_TYPE_MAX);
|
|
+ assert(b >= 0 && b < _JOB_TYPE_MAX);
|
|
|
|
if (a == b)
|
|
- return true;
|
|
-
|
|
- switch (a) {
|
|
- case JOB_START:
|
|
- return b == JOB_VERIFY_ACTIVE;
|
|
-
|
|
- case JOB_RELOAD:
|
|
- return
|
|
- b == JOB_VERIFY_ACTIVE;
|
|
-
|
|
- case JOB_RELOAD_OR_START:
|
|
- return
|
|
- b == JOB_RELOAD ||
|
|
- b == JOB_START ||
|
|
- b == JOB_VERIFY_ACTIVE;
|
|
-
|
|
- case JOB_RESTART:
|
|
- return
|
|
- b == JOB_START ||
|
|
- b == JOB_VERIFY_ACTIVE ||
|
|
- b == JOB_RELOAD ||
|
|
- b == JOB_RELOAD_OR_START ||
|
|
- b == JOB_TRY_RESTART;
|
|
-
|
|
- case JOB_TRY_RESTART:
|
|
- return
|
|
- b == JOB_VERIFY_ACTIVE ||
|
|
- b == JOB_RELOAD;
|
|
- default:
|
|
- return false;
|
|
+ return a;
|
|
|
|
+ if (a < b) {
|
|
+ JobType tmp = a;
|
|
+ a = b;
|
|
+ b = tmp;
|
|
}
|
|
-}
|
|
-
|
|
-bool job_type_is_conflicting(JobType a, JobType b) {
|
|
- assert(a >= 0 && a < _JOB_TYPE_MAX);
|
|
- assert(b >= 0 && b < _JOB_TYPE_MAX);
|
|
|
|
- return (a == JOB_STOP) != (b == JOB_STOP);
|
|
+ return job_merging_table[(a - 1) * a / 2 + b];
|
|
}
|
|
|
|
bool job_type_is_redundant(JobType a, UnitActiveState b) {
|
|
diff --git a/src/job.h b/src/job.h
|
|
index 2121426..60a43e0 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -24,6 +24,7 @@
|
|
|
|
#include <stdbool.h>
|
|
#include <inttypes.h>
|
|
+#include <errno.h>
|
|
|
|
typedef struct Job Job;
|
|
typedef struct JobDependency JobDependency;
|
|
@@ -37,6 +38,7 @@ typedef enum JobResult JobResult;
|
|
#include "hashmap.h"
|
|
#include "list.h"
|
|
|
|
+/* Be careful when changing the job types! Adjust job_merging_table[] accordingly! */
|
|
enum JobType {
|
|
JOB_START, /* if a unit does not support being started, we'll just wait until it becomes active */
|
|
JOB_VERIFY_ACTIVE,
|
|
@@ -145,10 +147,29 @@ bool job_is_anchor(Job *j);
|
|
|
|
int job_merge(Job *j, Job *other);
|
|
|
|
-int job_type_merge(JobType *a, JobType b);
|
|
-bool job_type_is_mergeable(JobType a, JobType b);
|
|
-bool job_type_is_superset(JobType a, JobType b);
|
|
-bool job_type_is_conflicting(JobType a, JobType b);
|
|
+JobType job_type_lookup_merge(JobType a, JobType b);
|
|
+
|
|
+static inline int job_type_merge(JobType *a, JobType b) {
|
|
+ JobType t = job_type_lookup_merge(*a, b);
|
|
+ if (t < 0)
|
|
+ return -EEXIST;
|
|
+ *a = t;
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static inline bool job_type_is_mergeable(JobType a, JobType b) {
|
|
+ return job_type_lookup_merge(a, b) >= 0;
|
|
+}
|
|
+
|
|
+static inline bool job_type_is_conflicting(JobType a, JobType b) {
|
|
+ return !job_type_is_mergeable(a, b);
|
|
+}
|
|
+
|
|
+static inline bool job_type_is_superset(JobType a, JobType b) {
|
|
+ /* Checks whether operation a is a "superset" of b in its actions */
|
|
+ return a == job_type_lookup_merge(a, b);
|
|
+}
|
|
+
|
|
bool job_type_is_redundant(JobType a, UnitActiveState b);
|
|
|
|
bool job_is_runnable(Job *j);
|
|
--
|
|
1.7.7
|
|
|
|
|
|
From 5503c3419fbcceebb6f8e94c291457adb260537b Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Thu, 19 Apr 2012 23:20:34 +0200
Subject: [PATCH 04/30] job: allow job_free() only on already unlinked jobs

job_free() is IMO too helpful when it unlinks the job from the transaction.
The callers should ensure the job is already unlinked before freeing.
The added assertions check if anyone gets it wrong.
(cherry picked from commit 02a3bcc6b4372ca50c0a62b193f9a75b988ffa69)
---
|
|
src/job.c | 6 ++++--
|
|
src/manager.c | 11 ++++++++---
|
|
src/manager.h | 2 --
|
|
3 files changed, 12 insertions(+), 7 deletions(-)
|
|
|
|
diff --git a/src/job.c b/src/job.c
|
|
index da939fb..8e4d93e 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -71,8 +71,10 @@ void job_free(Job *j) {
|
|
j->installed = false;
|
|
}
|
|
|
|
- /* Detach from next 'smaller' objects */
|
|
- manager_transaction_unlink_job(j->manager, j, true);
|
|
+ assert(!j->transaction_prev);
|
|
+ assert(!j->transaction_next);
|
|
+ assert(!j->subject_list);
|
|
+ assert(!j->object_list);
|
|
|
|
if (j->in_run_queue)
|
|
LIST_REMOVE(Job, run_queue, j->manager->run_queue, j);
|
|
diff --git a/src/manager.c b/src/manager.c
|
|
index 74bd740..c77b5d2 100644
|
|
--- a/src/manager.c
|
|
+++ b/src/manager.c
|
|
@@ -637,13 +637,15 @@ int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
|
|
return r;
|
|
}
|
|
|
|
+static void transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies);
|
|
+
|
|
static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
|
|
assert(m);
|
|
assert(j);
|
|
|
|
/* Deletes one job from the transaction */
|
|
|
|
- manager_transaction_unlink_job(m, j, delete_dependencies);
|
|
+ transaction_unlink_job(m, j, delete_dependencies);
|
|
|
|
if (!j->installed)
|
|
job_free(j);
|
|
@@ -685,8 +687,10 @@ static void transaction_abort(Manager *m) {
|
|
while ((j = hashmap_first(m->transaction_jobs)))
|
|
if (j->installed)
|
|
transaction_delete_job(m, j, true);
|
|
- else
|
|
+ else {
|
|
+ transaction_unlink_job(m, j, true);
|
|
job_free(j);
|
|
+ }
|
|
|
|
assert(hashmap_isempty(m->transaction_jobs));
|
|
|
|
@@ -1415,6 +1419,7 @@ static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool o
|
|
LIST_PREPEND(Job, transaction, f, j);
|
|
|
|
if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
|
|
+ LIST_REMOVE(Job, transaction, f, j);
|
|
job_free(j);
|
|
return NULL;
|
|
}
|
|
@@ -1427,7 +1432,7 @@ static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool o
|
|
return j;
|
|
}
|
|
|
|
-void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
|
|
+static void transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
|
|
assert(m);
|
|
assert(j);
|
|
|
|
diff --git a/src/manager.h b/src/manager.h
|
|
index a9d08f0..64d8d0e 100644
|
|
--- a/src/manager.h
|
|
+++ b/src/manager.h
|
|
@@ -254,8 +254,6 @@ int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode
|
|
void manager_dump_units(Manager *s, FILE *f, const char *prefix);
|
|
void manager_dump_jobs(Manager *s, FILE *f, const char *prefix);
|
|
|
|
-void manager_transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies);
|
|
-
|
|
void manager_clear_jobs(Manager *m);
|
|
|
|
unsigned manager_dispatch_load_queue(Manager *m);
|
|
--
|
|
1.7.7
|
|
|
|
|
|
From c2c46edd09f2d0eefc127d22fd38312bf2bed01a Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Thu, 19 Apr 2012 23:23:04 +0200
Subject: [PATCH 05/30] manager: simplify transaction_abort()

This is equivalent.
(cherry picked from commit 121b3b318042b7fd67ac96601971c1c2f9b77be5)
---
|
|
src/manager.c | 7 +------
|
|
1 files changed, 1 insertions(+), 6 deletions(-)
|
|
|
|
diff --git a/src/manager.c b/src/manager.c
|
|
index c77b5d2..cac1fe8 100644
|
|
--- a/src/manager.c
|
|
+++ b/src/manager.c
|
|
@@ -685,12 +685,7 @@ static void transaction_abort(Manager *m) {
|
|
assert(m);
|
|
|
|
while ((j = hashmap_first(m->transaction_jobs)))
|
|
- if (j->installed)
|
|
- transaction_delete_job(m, j, true);
|
|
- else {
|
|
- transaction_unlink_job(m, j, true);
|
|
- job_free(j);
|
|
- }
|
|
+ transaction_delete_job(m, j, true);
|
|
|
|
assert(hashmap_isempty(m->transaction_jobs));
|
|
|
|
--
|
|
1.7.7
|
|
|
|
|
|
From 4eb87c1148bd48b5d121297f72aa47eb39482ad3 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 10:21:37 +0200
Subject: [PATCH 06/30] job: job_uninstall()

Split the uninstallation of the job from job_free() into a separate function.
Adjust the callers.

job_free() now only works on unlinked and uninstalled jobs. This enforces clear
thinking about job lifetimes.
(cherry picked from commit 97e7d748d1bf26790fc3b2607885f4ac8c4ecf3a)
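
After this split, callers free an installed job in two steps, as in the
adjusted unit_free() further down:

        if (u->job) {
                Job *j = u->job;
                job_uninstall(j);   /* detach from unit and manager, emit the removal signal */
                job_free(j);        /* now only asserts: not installed, no transaction links */
        }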
---
|
|
src/job.c | 25 ++++++++++++++-----------
|
|
src/job.h | 1 +
|
|
src/manager.c | 8 ++++++--
|
|
src/unit.c | 7 +++++--
|
|
4 files changed, 26 insertions(+), 15 deletions(-)
|
|
|
|
diff --git a/src/job.c b/src/job.c
|
|
index 8e4d93e..35e358d 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -55,22 +55,24 @@ Job* job_new(Manager *m, JobType type, Unit *unit) {
|
|
return j;
|
|
}
|
|
|
|
-void job_free(Job *j) {
|
|
- assert(j);
|
|
-
|
|
+void job_uninstall(Job *j) {
|
|
+ assert(j->installed);
|
|
/* Detach from next 'bigger' objects */
|
|
- if (j->installed) {
|
|
- bus_job_send_removed_signal(j);
|
|
|
|
- if (j->unit->job == j) {
|
|
- j->unit->job = NULL;
|
|
- unit_add_to_gc_queue(j->unit);
|
|
- }
|
|
+ bus_job_send_removed_signal(j);
|
|
|
|
- hashmap_remove(j->manager->jobs, UINT32_TO_PTR(j->id));
|
|
- j->installed = false;
|
|
+ if (j->unit->job == j) {
|
|
+ j->unit->job = NULL;
|
|
+ unit_add_to_gc_queue(j->unit);
|
|
}
|
|
|
|
+ hashmap_remove(j->manager->jobs, UINT32_TO_PTR(j->id));
|
|
+ j->installed = false;
|
|
+}
|
|
+
|
|
+void job_free(Job *j) {
|
|
+ assert(j);
|
|
+ assert(!j->installed);
|
|
assert(!j->transaction_prev);
|
|
assert(!j->transaction_next);
|
|
assert(!j->subject_list);
|
|
@@ -491,6 +493,7 @@ int job_finish_and_invalidate(Job *j, JobResult result) {
|
|
|
|
u = j->unit;
|
|
t = j->type;
|
|
+ job_uninstall(j);
|
|
job_free(j);
|
|
|
|
job_print_status_message(u, t, result);
|
|
diff --git a/src/job.h b/src/job.h
|
|
index 60a43e0..ed375fa 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -137,6 +137,7 @@ struct Job {
|
|
};
|
|
|
|
Job* job_new(Manager *m, JobType type, Unit *unit);
|
|
+void job_uninstall(Job *j);
|
|
void job_free(Job *job);
|
|
void job_dump(Job *j, FILE*f, const char *prefix);
|
|
|
|
diff --git a/src/manager.c b/src/manager.c
|
|
index cac1fe8..f8e5d64 100644
|
|
--- a/src/manager.c
|
|
+++ b/src/manager.c
|
|
@@ -1249,13 +1249,17 @@ static int transaction_apply(Manager *m, JobMode mode) {
|
|
}
|
|
|
|
while ((j = hashmap_steal_first(m->transaction_jobs))) {
|
|
+ Job *uj;
|
|
if (j->installed) {
|
|
/* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
|
|
continue;
|
|
}
|
|
|
|
- if (j->unit->job)
|
|
- job_free(j->unit->job);
|
|
+ uj = j->unit->job;
|
|
+ if (uj) {
|
|
+ job_uninstall(uj);
|
|
+ job_free(uj);
|
|
+ }
|
|
|
|
j->unit->job = j;
|
|
j->installed = true;
|
|
diff --git a/src/unit.c b/src/unit.c
|
|
index 9e33701..1949995 100644
|
|
--- a/src/unit.c
|
|
+++ b/src/unit.c
|
|
@@ -352,8 +352,11 @@ void unit_free(Unit *u) {
|
|
SET_FOREACH(t, u->names, i)
|
|
hashmap_remove_value(u->manager->units, t, u);
|
|
|
|
- if (u->job)
|
|
- job_free(u->job);
|
|
+ if (u->job) {
|
|
+ Job *j = u->job;
|
|
+ job_uninstall(j);
|
|
+ job_free(j);
|
|
+ }
|
|
|
|
for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
|
|
bidi_set_free(u, u->dependencies[d]);
|
|
--
|
|
1.7.7
|
|
|
|
|
|
From b95d521e3816800bf43d5e11cef58b69b360233d Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 10:22:07 +0200
Subject: [PATCH 07/30] manager: Transaction as an object

This makes it obvious that transactions are short-lived. They are created in
manager_add_job() and destroyed after the application of jobs.
It also prepares for a split of the transaction code to a new source.
(cherry picked from commit 7527cb527598aaabf0ed9b38a352edb28536392a)
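
The resulting lifecycle in manager_add_job(), condensed from the hunk below
(the JOB_ISOLATE branch and logging are elided):

        tr = transaction_new();
        if (!tr)
                return -ENOMEM;

        r = transaction_add_job_and_dependencies(tr, type, unit, NULL, true, override, false,
                                                 mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
                                                 mode == JOB_IGNORE_DEPENDENCIES, e, &ret);
        if (r < 0)
                goto tr_abort;

        r = transaction_activate(tr, m, mode, e);
        if (r < 0)
                goto tr_abort;

        transaction_free(tr);
        return 0;

tr_abort:
        transaction_abort(tr);
        transaction_free(tr);
        return r;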
---
|
|
src/job.c | 8 +-
|
|
src/job.h | 4 +-
|
|
src/manager.c | 390 +++++++++++++++++++++++++++++++--------------------------
|
|
src/manager.h | 11 +-
|
|
4 files changed, 225 insertions(+), 188 deletions(-)
|
|
|
|
diff --git a/src/job.c b/src/job.c
|
|
index 35e358d..6d48748 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -97,7 +97,7 @@ void job_free(Job *j) {
|
|
free(j);
|
|
}
|
|
|
|
-JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts) {
|
|
+JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts, Transaction *tr) {
|
|
JobDependency *l;
|
|
|
|
assert(object);
|
|
@@ -118,20 +118,20 @@ JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool
|
|
if (subject)
|
|
LIST_PREPEND(JobDependency, subject, subject->subject_list, l);
|
|
else
|
|
- LIST_PREPEND(JobDependency, subject, object->manager->transaction_anchor, l);
|
|
+ LIST_PREPEND(JobDependency, subject, tr->anchor, l);
|
|
|
|
LIST_PREPEND(JobDependency, object, object->object_list, l);
|
|
|
|
return l;
|
|
}
|
|
|
|
-void job_dependency_free(JobDependency *l) {
|
|
+void job_dependency_free(JobDependency *l, Transaction *tr) {
|
|
assert(l);
|
|
|
|
if (l->subject)
|
|
LIST_REMOVE(JobDependency, subject, l->subject->subject_list, l);
|
|
else
|
|
- LIST_REMOVE(JobDependency, subject, l->object->manager->transaction_anchor, l);
|
|
+ LIST_REMOVE(JobDependency, subject, tr->anchor, l);
|
|
|
|
LIST_REMOVE(JobDependency, object, l->object->object_list, l);
|
|
|
|
diff --git a/src/job.h b/src/job.h
|
|
index ed375fa..18ba64f 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -141,8 +141,8 @@ void job_uninstall(Job *j);
|
|
void job_free(Job *job);
|
|
void job_dump(Job *j, FILE*f, const char *prefix);
|
|
|
|
-JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts);
|
|
-void job_dependency_free(JobDependency *l);
|
|
+JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts, Transaction *tr);
|
|
+void job_dependency_free(JobDependency *l, Transaction *tr);
|
|
|
|
bool job_is_anchor(Job *j);
|
|
|
|
diff --git a/src/manager.c b/src/manager.c
|
|
index f8e5d64..ff4249d 100644
|
|
--- a/src/manager.c
|
|
+++ b/src/manager.c
|
|
@@ -259,9 +259,6 @@ int manager_new(ManagerRunningAs running_as, Manager **_m) {
|
|
if (!(m->jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
|
|
goto fail;
|
|
|
|
- if (!(m->transaction_jobs = hashmap_new(trivial_hash_func, trivial_compare_func)))
|
|
- goto fail;
|
|
-
|
|
if (!(m->watch_pids = hashmap_new(trivial_hash_func, trivial_compare_func)))
|
|
goto fail;
|
|
|
|
@@ -429,14 +426,10 @@ static unsigned manager_dispatch_gc_queue(Manager *m) {
|
|
}
|
|
|
|
static void manager_clear_jobs_and_units(Manager *m) {
|
|
- Job *j;
|
|
Unit *u;
|
|
|
|
assert(m);
|
|
|
|
- while ((j = hashmap_first(m->transaction_jobs)))
|
|
- job_free(j);
|
|
-
|
|
while ((u = hashmap_first(m->units)))
|
|
unit_free(u);
|
|
|
|
@@ -449,7 +442,6 @@ static void manager_clear_jobs_and_units(Manager *m) {
|
|
assert(!m->cleanup_queue);
|
|
assert(!m->gc_queue);
|
|
|
|
- assert(hashmap_isempty(m->transaction_jobs));
|
|
assert(hashmap_isempty(m->jobs));
|
|
assert(hashmap_isempty(m->units));
|
|
}
|
|
@@ -475,7 +467,6 @@ void manager_free(Manager *m) {
|
|
|
|
hashmap_free(m->units);
|
|
hashmap_free(m->jobs);
|
|
- hashmap_free(m->transaction_jobs);
|
|
hashmap_free(m->watch_pids);
|
|
hashmap_free(m->watch_bus);
|
|
|
|
@@ -637,65 +628,47 @@ int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
|
|
return r;
|
|
}
|
|
|
|
-static void transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies);
|
|
+static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies);
|
|
|
|
-static void transaction_delete_job(Manager *m, Job *j, bool delete_dependencies) {
|
|
- assert(m);
|
|
+static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependencies) {
|
|
+ assert(tr);
|
|
assert(j);
|
|
|
|
/* Deletes one job from the transaction */
|
|
|
|
- transaction_unlink_job(m, j, delete_dependencies);
|
|
+ transaction_unlink_job(tr, j, delete_dependencies);
|
|
|
|
if (!j->installed)
|
|
job_free(j);
|
|
}
|
|
|
|
-static void transaction_delete_unit(Manager *m, Unit *u) {
|
|
+static void transaction_delete_unit(Transaction *tr, Unit *u) {
|
|
Job *j;
|
|
|
|
/* Deletes all jobs associated with a certain unit from the
|
|
* transaction */
|
|
|
|
- while ((j = hashmap_get(m->transaction_jobs, u)))
|
|
- transaction_delete_job(m, j, true);
|
|
-}
|
|
-
|
|
-static void transaction_clean_dependencies(Manager *m) {
|
|
- Iterator i;
|
|
- Job *j;
|
|
-
|
|
- assert(m);
|
|
-
|
|
- /* Drops all dependencies of all installed jobs */
|
|
-
|
|
- HASHMAP_FOREACH(j, m->jobs, i) {
|
|
- while (j->subject_list)
|
|
- job_dependency_free(j->subject_list);
|
|
- while (j->object_list)
|
|
- job_dependency_free(j->object_list);
|
|
- }
|
|
-
|
|
- assert(!m->transaction_anchor);
|
|
+ while ((j = hashmap_get(tr->jobs, u)))
|
|
+ transaction_delete_job(tr, j, true);
|
|
}
|
|
|
|
-static void transaction_abort(Manager *m) {
|
|
+static void transaction_abort(Transaction *tr) {
|
|
Job *j;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
|
|
- while ((j = hashmap_first(m->transaction_jobs)))
|
|
- transaction_delete_job(m, j, true);
|
|
+ while ((j = hashmap_first(tr->jobs)))
|
|
+ transaction_delete_job(tr, j, true);
|
|
|
|
- assert(hashmap_isempty(m->transaction_jobs));
|
|
+ assert(hashmap_isempty(tr->jobs));
|
|
|
|
- transaction_clean_dependencies(m);
|
|
+ assert(!tr->anchor);
|
|
}
|
|
|
|
-static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsigned generation) {
|
|
+static void transaction_find_jobs_that_matter_to_anchor(Transaction *tr, Job *j, unsigned generation) {
|
|
JobDependency *l;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
|
|
/* A recursive sweep through the graph that marks all units
|
|
* that matter to the anchor job, i.e. are directly or
|
|
@@ -705,7 +678,7 @@ static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsi
|
|
if (j)
|
|
l = j->subject_list;
|
|
else
|
|
- l = m->transaction_anchor;
|
|
+ l = tr->anchor;
|
|
|
|
LIST_FOREACH(subject, l, l) {
|
|
|
|
@@ -720,11 +693,11 @@ static void transaction_find_jobs_that_matter_to_anchor(Manager *m, Job *j, unsi
|
|
l->object->matters_to_anchor = true;
|
|
l->object->generation = generation;
|
|
|
|
- transaction_find_jobs_that_matter_to_anchor(m, l->object, generation);
|
|
+ transaction_find_jobs_that_matter_to_anchor(tr, l->object, generation);
|
|
}
|
|
}
|
|
|
|
-static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, JobType t) {
|
|
+static void transaction_merge_and_delete_job(Transaction *tr, Job *j, Job *other, JobType t) {
|
|
JobDependency *l, *last;
|
|
|
|
assert(j);
|
|
@@ -775,7 +748,7 @@ static void transaction_merge_and_delete_job(Manager *m, Job *j, Job *other, Job
|
|
/* Kill the other job */
|
|
other->subject_list = NULL;
|
|
other->object_list = NULL;
|
|
- transaction_delete_job(m, other, true);
|
|
+ transaction_delete_job(tr, other, true);
|
|
}
|
|
static bool job_is_conflicted_by(Job *j) {
|
|
JobDependency *l;
|
|
@@ -792,7 +765,7 @@ static bool job_is_conflicted_by(Job *j) {
|
|
return false;
|
|
}
|
|
|
|
-static int delete_one_unmergeable_job(Manager *m, Job *j) {
|
|
+static int delete_one_unmergeable_job(Transaction *tr, Job *j) {
|
|
Job *k;
|
|
|
|
assert(j);
|
|
@@ -853,23 +826,23 @@ static int delete_one_unmergeable_job(Manager *m, Job *j) {
|
|
|
|
/* Ok, we can drop one, so let's do so. */
|
|
log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->id, job_type_to_string(d->type));
|
|
- transaction_delete_job(m, d, true);
|
|
+ transaction_delete_job(tr, d, true);
|
|
return 0;
|
|
}
|
|
|
|
return -EINVAL;
|
|
}
|
|
|
|
-static int transaction_merge_jobs(Manager *m, DBusError *e) {
|
|
+static int transaction_merge_jobs(Transaction *tr, DBusError *e) {
|
|
Job *j;
|
|
Iterator i;
|
|
int r;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
|
|
/* First step, check whether any of the jobs for one specific
|
|
* task conflict. If so, try to drop one of them. */
|
|
- HASHMAP_FOREACH(j, m->transaction_jobs, i) {
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
JobType t;
|
|
Job *k;
|
|
|
|
@@ -882,7 +855,8 @@ static int transaction_merge_jobs(Manager *m, DBusError *e) {
|
|
* action. Let's see if we can get rid of one
|
|
* of them */
|
|
|
|
- if ((r = delete_one_unmergeable_job(m, j)) >= 0)
|
|
+ r = delete_one_unmergeable_job(tr, j);
|
|
+ if (r >= 0)
|
|
/* Ok, we managed to drop one, now
|
|
* let's ask our callers to call us
|
|
* again after garbage collecting */
|
|
@@ -896,7 +870,7 @@ static int transaction_merge_jobs(Manager *m, DBusError *e) {
|
|
}
|
|
|
|
/* Second step, merge the jobs. */
|
|
- HASHMAP_FOREACH(j, m->transaction_jobs, i) {
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
JobType t = j->type;
|
|
Job *k;
|
|
|
|
@@ -910,14 +884,14 @@ static int transaction_merge_jobs(Manager *m, DBusError *e) {
|
|
|
|
while ((k = j->transaction_next)) {
|
|
if (j->installed) {
|
|
- transaction_merge_and_delete_job(m, k, j, t);
|
|
+ transaction_merge_and_delete_job(tr, k, j, t);
|
|
j = k;
|
|
} else
|
|
- transaction_merge_and_delete_job(m, j, k, t);
|
|
+ transaction_merge_and_delete_job(tr, j, k, t);
|
|
}
|
|
|
|
if (j->unit->job && !j->installed)
|
|
- transaction_merge_and_delete_job(m, j, j->unit->job, t);
|
|
+ transaction_merge_and_delete_job(tr, j, j->unit->job, t);
|
|
|
|
assert(!j->transaction_next);
|
|
assert(!j->transaction_prev);
|
|
@@ -926,10 +900,10 @@ static int transaction_merge_jobs(Manager *m, DBusError *e) {
|
|
return 0;
|
|
}
|
|
|
|
-static void transaction_drop_redundant(Manager *m) {
|
|
+static void transaction_drop_redundant(Transaction *tr) {
|
|
bool again;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
|
|
/* Goes through the transaction and removes all jobs that are
|
|
* a noop */
|
|
@@ -940,7 +914,7 @@ static void transaction_drop_redundant(Manager *m) {
|
|
|
|
again = false;
|
|
|
|
- HASHMAP_FOREACH(j, m->transaction_jobs, i) {
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
bool changes_something = false;
|
|
Job *k;
|
|
|
|
@@ -959,7 +933,7 @@ static void transaction_drop_redundant(Manager *m) {
|
|
continue;
|
|
|
|
/* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */
|
|
- transaction_delete_job(m, j, false);
|
|
+ transaction_delete_job(tr, j, false);
|
|
again = true;
|
|
break;
|
|
}
|
|
@@ -981,12 +955,12 @@ static bool unit_matters_to_anchor(Unit *u, Job *j) {
|
|
return false;
|
|
}
|
|
|
|
-static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned generation, DBusError *e) {
|
|
+static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsigned generation, DBusError *e) {
|
|
Iterator i;
|
|
Unit *u;
|
|
int r;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
assert(j);
|
|
assert(!j->transaction_prev);
|
|
|
|
@@ -1033,7 +1007,7 @@ static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned
|
|
|
|
if (delete) {
|
|
log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->id, job_type_to_string(delete->type));
|
|
- transaction_delete_unit(m, delete->unit);
|
|
+ transaction_delete_unit(tr, delete->unit);
|
|
return -EAGAIN;
|
|
}
|
|
|
|
@@ -1056,15 +1030,18 @@ static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned
|
|
Job *o;
|
|
|
|
/* Is there a job for this unit? */
|
|
- if (!(o = hashmap_get(m->transaction_jobs, u)))
|
|
-
|
|
+ o = hashmap_get(tr->jobs, u);
|
|
+ if (!o) {
|
|
/* Ok, there is no job for this in the
|
|
* transaction, but maybe there is already one
|
|
* running? */
|
|
- if (!(o = u->job))
|
|
+ o = u->job;
|
|
+ if (!o)
|
|
continue;
|
|
+ }
|
|
|
|
- if ((r = transaction_verify_order_one(m, o, j, generation, e)) < 0)
|
|
+ r = transaction_verify_order_one(tr, o, j, generation, e);
|
|
+ if (r < 0)
|
|
return r;
|
|
}
|
|
|
|
@@ -1075,13 +1052,13 @@ static int transaction_verify_order_one(Manager *m, Job *j, Job *from, unsigned
|
|
return 0;
|
|
}
|
|
|
|
-static int transaction_verify_order(Manager *m, unsigned *generation, DBusError *e) {
|
|
+static int transaction_verify_order(Transaction *tr, unsigned *generation, DBusError *e) {
|
|
Job *j;
|
|
int r;
|
|
Iterator i;
|
|
unsigned g;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
assert(generation);
|
|
|
|
/* Check if the ordering graph is cyclic. If it is, try to fix
|
|
@@ -1089,17 +1066,17 @@ static int transaction_verify_order(Manager *m, unsigned *generation, DBusError
|
|
|
|
g = (*generation)++;
|
|
|
|
- HASHMAP_FOREACH(j, m->transaction_jobs, i)
|
|
- if ((r = transaction_verify_order_one(m, j, NULL, g, e)) < 0)
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i)
|
|
+ if ((r = transaction_verify_order_one(tr, j, NULL, g, e)) < 0)
|
|
return r;
|
|
|
|
return 0;
|
|
}
|
|
|
|
-static void transaction_collect_garbage(Manager *m) {
|
|
+static void transaction_collect_garbage(Transaction *tr) {
|
|
bool again;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
|
|
/* Drop jobs that are not required by any other job */
|
|
|
|
@@ -1109,7 +1086,7 @@ static void transaction_collect_garbage(Manager *m) {
|
|
|
|
again = false;
|
|
|
|
- HASHMAP_FOREACH(j, m->transaction_jobs, i) {
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
if (j->object_list) {
|
|
/* log_debug("Keeping job %s/%s because of %s/%s", */
|
|
/* j->unit->id, job_type_to_string(j->type), */
|
|
@@ -1119,7 +1096,7 @@ static void transaction_collect_garbage(Manager *m) {
|
|
}
|
|
|
|
/* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */
|
|
- transaction_delete_job(m, j, true);
|
|
+ transaction_delete_job(tr, j, true);
|
|
again = true;
|
|
break;
|
|
}
|
|
@@ -1127,16 +1104,16 @@ static void transaction_collect_garbage(Manager *m) {
|
|
} while (again);
|
|
}
|
|
|
|
-static int transaction_is_destructive(Manager *m, DBusError *e) {
|
|
+static int transaction_is_destructive(Transaction *tr, DBusError *e) {
|
|
Iterator i;
|
|
Job *j;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
|
|
/* Checks whether applying this transaction means that
|
|
* existing jobs would be replaced */
|
|
|
|
- HASHMAP_FOREACH(j, m->transaction_jobs, i) {
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
|
|
/* Assume merged */
|
|
assert(!j->transaction_prev);
|
|
@@ -1154,9 +1131,9 @@ static int transaction_is_destructive(Manager *m, DBusError *e) {
|
|
return 0;
|
|
}
|
|
|
|
-static void transaction_minimize_impact(Manager *m) {
|
|
+static void transaction_minimize_impact(Transaction *tr) {
|
|
bool again;
|
|
- assert(m);
|
|
+ assert(tr);
|
|
|
|
/* Drops all unnecessary jobs that reverse already active jobs
|
|
* or that stop a running service. */
|
|
@@ -1167,7 +1144,7 @@ static void transaction_minimize_impact(Manager *m) {
|
|
|
|
again = false;
|
|
|
|
- HASHMAP_FOREACH(j, m->transaction_jobs, i) {
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
LIST_FOREACH(transaction, j, j) {
|
|
bool stops_running_service, changes_existing_job;
|
|
|
|
@@ -1198,7 +1175,7 @@ static void transaction_minimize_impact(Manager *m) {
|
|
/* Ok, let's get rid of this */
|
|
log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type));
|
|
|
|
- transaction_delete_job(m, j, true);
|
|
+ transaction_delete_job(tr, j, true);
|
|
again = true;
|
|
break;
|
|
}
|
|
@@ -1210,7 +1187,7 @@ static void transaction_minimize_impact(Manager *m) {
|
|
} while (again);
|
|
}
|
|
|
|
-static int transaction_apply(Manager *m, JobMode mode) {
|
|
+static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
Iterator i;
|
|
Job *j;
|
|
int r;
|
|
@@ -1225,7 +1202,7 @@ static int transaction_apply(Manager *m, JobMode mode) {
|
|
HASHMAP_FOREACH(j, m->jobs, i) {
|
|
assert(j->installed);
|
|
|
|
- if (hashmap_get(m->transaction_jobs, j->unit))
|
|
+ if (hashmap_get(tr->jobs, j->unit))
|
|
continue;
|
|
|
|
/* 'j' itself is safe to remove, but if other jobs
|
|
@@ -1236,7 +1213,7 @@ static int transaction_apply(Manager *m, JobMode mode) {
|
|
}
|
|
}
|
|
|
|
- HASHMAP_FOREACH(j, m->transaction_jobs, i) {
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
/* Assume merged */
|
|
assert(!j->transaction_prev);
|
|
assert(!j->transaction_next);
|
|
@@ -1244,11 +1221,12 @@ static int transaction_apply(Manager *m, JobMode mode) {
|
|
if (j->installed)
|
|
continue;
|
|
|
|
- if ((r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j)) < 0)
|
|
+ r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j);
|
|
+ if (r < 0)
|
|
goto rollback;
|
|
}
|
|
|
|
- while ((j = hashmap_steal_first(m->transaction_jobs))) {
|
|
+ while ((j = hashmap_steal_first(tr->jobs))) {
|
|
Job *uj;
|
|
if (j->installed) {
|
|
/* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
|
|
@@ -1271,6 +1249,9 @@ static int transaction_apply(Manager *m, JobMode mode) {
|
|
assert(!j->transaction_next);
|
|
assert(!j->transaction_prev);
|
|
|
|
+ /* Clean the job dependencies */
|
|
+ transaction_unlink_job(tr, j, false);
|
|
+
|
|
job_add_to_run_queue(j);
|
|
job_add_to_dbus_queue(j);
|
|
job_start_timer(j);
|
|
@@ -1278,14 +1259,13 @@ static int transaction_apply(Manager *m, JobMode mode) {
|
|
log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
}
|
|
|
|
- /* As last step, kill all remaining job dependencies. */
|
|
- transaction_clean_dependencies(m);
|
|
+ assert(!tr->anchor);
|
|
|
|
return 0;
|
|
|
|
rollback:
|
|
|
|
- HASHMAP_FOREACH(j, m->transaction_jobs, i) {
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
if (j->installed)
|
|
continue;
|
|
|
|
@@ -1295,41 +1275,42 @@ rollback:
|
|
return r;
|
|
}
|
|
|
|
-static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
|
|
+static int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e) {
|
|
int r;
|
|
unsigned generation = 1;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
|
|
- /* This applies the changes recorded in transaction_jobs to
|
|
+ /* This applies the changes recorded in tr->jobs to
|
|
* the actual list of jobs, if possible. */
|
|
|
|
/* First step: figure out which jobs matter */
|
|
- transaction_find_jobs_that_matter_to_anchor(m, NULL, generation++);
|
|
+ transaction_find_jobs_that_matter_to_anchor(tr, NULL, generation++);
|
|
|
|
/* Second step: Try not to stop any running services if
|
|
* we don't have to. Don't try to reverse running
|
|
* jobs if we don't have to. */
|
|
if (mode == JOB_FAIL)
|
|
- transaction_minimize_impact(m);
|
|
+ transaction_minimize_impact(tr);
|
|
|
|
/* Third step: Drop redundant jobs */
|
|
- transaction_drop_redundant(m);
|
|
+ transaction_drop_redundant(tr);
|
|
|
|
for (;;) {
|
|
/* Fourth step: Let's remove unneeded jobs that might
|
|
* be lurking. */
|
|
if (mode != JOB_ISOLATE)
|
|
- transaction_collect_garbage(m);
|
|
+ transaction_collect_garbage(tr);
|
|
|
|
/* Fifth step: verify order makes sense and correct
|
|
* cycles if necessary and possible */
|
|
- if ((r = transaction_verify_order(m, &generation, e)) >= 0)
|
|
+ r = transaction_verify_order(tr, &generation, e);
|
|
+ if (r >= 0)
|
|
break;
|
|
|
|
if (r != -EAGAIN) {
|
|
log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
|
|
- goto rollback;
|
|
+ return r;
|
|
}
|
|
|
|
/* Let's see if the resulting transaction ordering
|
|
@@ -1340,60 +1321,60 @@ static int transaction_activate(Manager *m, JobMode mode, DBusError *e) {
|
|
/* Sixth step: let's drop unmergeable entries if
|
|
* necessary and possible, merge entries we can
|
|
* merge */
|
|
- if ((r = transaction_merge_jobs(m, e)) >= 0)
|
|
+ r = transaction_merge_jobs(tr, e);
|
|
+ if (r >= 0)
|
|
break;
|
|
|
|
if (r != -EAGAIN) {
|
|
log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
|
|
- goto rollback;
|
|
+ return r;
|
|
}
|
|
|
|
/* Seventh step: an entry got dropped, let's garbage
|
|
* collect its dependencies. */
|
|
if (mode != JOB_ISOLATE)
|
|
- transaction_collect_garbage(m);
|
|
+ transaction_collect_garbage(tr);
|
|
|
|
/* Let's see if the resulting transaction still has
|
|
* unmergeable entries ... */
|
|
}
|
|
|
|
/* Eights step: Drop redundant jobs again, if the merging now allows us to drop more. */
|
|
- transaction_drop_redundant(m);
|
|
+ transaction_drop_redundant(tr);
|
|
|
|
/* Ninth step: check whether we can actually apply this */
|
|
- if (mode == JOB_FAIL)
|
|
- if ((r = transaction_is_destructive(m, e)) < 0) {
|
|
+ if (mode == JOB_FAIL) {
|
|
+ r = transaction_is_destructive(tr, e);
|
|
+ if (r < 0) {
|
|
log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
|
|
- goto rollback;
|
|
+ return r;
|
|
}
|
|
+ }
|
|
|
|
/* Tenth step: apply changes */
|
|
- if ((r = transaction_apply(m, mode)) < 0) {
|
|
+ r = transaction_apply(tr, m, mode);
|
|
+ if (r < 0) {
|
|
log_warning("Failed to apply transaction: %s", strerror(-r));
|
|
- goto rollback;
|
|
+ return r;
|
|
}
|
|
|
|
- assert(hashmap_isempty(m->transaction_jobs));
|
|
- assert(!m->transaction_anchor);
|
|
+ assert(hashmap_isempty(tr->jobs));
|
|
+ assert(!tr->anchor);
|
|
|
|
return 0;
|
|
-
|
|
-rollback:
|
|
- transaction_abort(m);
|
|
- return r;
|
|
}
|
|
|
|
-static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool override, bool *is_new) {
|
|
+static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, bool override, bool *is_new) {
|
|
Job *j, *f;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
assert(unit);
|
|
|
|
/* Looks for an existing prospective job and returns that. If
|
|
* it doesn't exist it is created and added to the prospective
|
|
* jobs list. */
|
|
|
|
- f = hashmap_get(m->transaction_jobs, unit);
|
|
+ f = hashmap_get(tr->jobs, unit);
|
|
|
|
LIST_FOREACH(transaction, j, f) {
|
|
assert(j->unit == unit);
|
|
@@ -1407,8 +1388,11 @@ static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool o
|
|
|
|
if (unit->job && unit->job->type == type)
|
|
j = unit->job;
|
|
- else if (!(j = job_new(m, type, unit)))
|
|
- return NULL;
|
|
+ else {
|
|
+ j = job_new(unit->manager, type, unit);
|
|
+ if (!j)
|
|
+ return NULL;
|
|
+ }
|
|
|
|
j->generation = 0;
|
|
j->marker = NULL;
|
|
@@ -1417,7 +1401,7 @@ static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool o
|
|
|
|
LIST_PREPEND(Job, transaction, f, j);
|
|
|
|
- if (hashmap_replace(m->transaction_jobs, unit, f) < 0) {
|
|
+ if (hashmap_replace(tr->jobs, unit, f) < 0) {
|
|
LIST_REMOVE(Job, transaction, f, j);
|
|
job_free(j);
|
|
return NULL;
|
|
@@ -1431,16 +1415,16 @@ static Job* transaction_add_one_job(Manager *m, JobType type, Unit *unit, bool o
|
|
return j;
|
|
}
|
|
|
|
-static void transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies) {
|
|
- assert(m);
|
|
+static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
|
|
+ assert(tr);
|
|
assert(j);
|
|
|
|
if (j->transaction_prev)
|
|
j->transaction_prev->transaction_next = j->transaction_next;
|
|
else if (j->transaction_next)
|
|
- hashmap_replace(m->transaction_jobs, j->unit, j->transaction_next);
|
|
+ hashmap_replace(tr->jobs, j->unit, j->transaction_next);
|
|
else
|
|
- hashmap_remove_value(m->transaction_jobs, j->unit, j);
|
|
+ hashmap_remove_value(tr->jobs, j->unit, j);
|
|
|
|
if (j->transaction_next)
|
|
j->transaction_next->transaction_prev = j->transaction_prev;
|
|
@@ -1448,24 +1432,24 @@ static void transaction_unlink_job(Manager *m, Job *j, bool delete_dependencies)
|
|
j->transaction_prev = j->transaction_next = NULL;
|
|
|
|
while (j->subject_list)
|
|
- job_dependency_free(j->subject_list);
|
|
+ job_dependency_free(j->subject_list, tr);
|
|
|
|
while (j->object_list) {
|
|
Job *other = j->object_list->matters ? j->object_list->subject : NULL;
|
|
|
|
- job_dependency_free(j->object_list);
|
|
+ job_dependency_free(j->object_list, tr);
|
|
|
|
if (other && delete_dependencies) {
|
|
log_debug("Deleting job %s/%s as dependency of job %s/%s",
|
|
other->unit->id, job_type_to_string(other->type),
|
|
j->unit->id, job_type_to_string(j->type));
|
|
- transaction_delete_job(m, other, delete_dependencies);
|
|
+ transaction_delete_job(tr, other, delete_dependencies);
|
|
}
|
|
}
|
|
}
|
|
|
|
static int transaction_add_job_and_dependencies(
|
|
- Manager *m,
|
|
+ Transaction *tr,
|
|
JobType type,
|
|
Unit *unit,
|
|
Job *by,
|
|
@@ -1482,7 +1466,7 @@ static int transaction_add_job_and_dependencies(
|
|
int r;
|
|
bool is_new;
|
|
|
|
- assert(m);
|
|
+ assert(tr);
|
|
assert(type < _JOB_TYPE_MAX);
|
|
assert(unit);
|
|
|
|
@@ -1519,13 +1503,14 @@ static int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
/* First add the job. */
|
|
- if (!(ret = transaction_add_one_job(m, type, unit, override, &is_new)))
|
|
+ ret = transaction_add_one_job(tr, type, unit, override, &is_new);
|
|
+ if (!ret)
|
|
return -ENOMEM;
|
|
|
|
ret->ignore_order = ret->ignore_order || ignore_order;
|
|
|
|
/* Then, add a link to the job. */
|
|
- if (!job_dependency_new(by, ret, matters, conflicts))
|
|
+ if (!job_dependency_new(by, ret, matters, conflicts, tr))
|
|
return -ENOMEM;
|
|
|
|
if (is_new && !ignore_requirements) {
|
|
@@ -1534,120 +1519,136 @@ static int transaction_add_job_and_dependencies(
|
|
/* If we are following some other unit, make sure we
|
|
* add all dependencies of everybody following. */
|
|
if (unit_following_set(ret->unit, &following) > 0) {
|
|
- SET_FOREACH(dep, following, i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
|
|
+ SET_FOREACH(dep, following, i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, type, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
set_free(following);
|
|
}
|
|
|
|
/* Finally, recursively add in all dependencies. */
|
|
if (type == JOB_START || type == JOB_RELOAD_OR_START) {
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
|
|
-
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL)) < 0) {
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
|
|
-
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL)) < 0) {
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL)) < 0) {
|
|
-
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL)) < 0) {
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
}
|
|
|
|
if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
|
|
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
|
|
-
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i)
|
|
- if ((r = transaction_add_job_and_dependencies(m, type, dep, ret, true, override, false, false, ignore_order, e, NULL)) < 0) {
|
|
-
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
}
|
|
+ }
|
|
}
|
|
|
|
if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) {
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) {
|
|
- r = transaction_add_job_and_dependencies(m, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
-
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
if (r < 0) {
|
|
log_warning("Cannot add dependency reload job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
@@ -1669,12 +1670,13 @@ fail:
|
|
return r;
|
|
}
|
|
|
|
-static int transaction_add_isolate_jobs(Manager *m) {
|
|
+static int transaction_add_isolate_jobs(Transaction *tr, Manager *m) {
|
|
Iterator i;
|
|
Unit *u;
|
|
char *k;
|
|
int r;
|
|
|
|
+ assert(tr);
|
|
assert(m);
|
|
|
|
HASHMAP_FOREACH_KEY(u, k, m->units, i) {
|
|
@@ -1691,19 +1693,43 @@ static int transaction_add_isolate_jobs(Manager *m) {
|
|
continue;
|
|
|
|
/* Is there already something listed for this? */
|
|
- if (hashmap_get(m->transaction_jobs, u))
|
|
+ if (hashmap_get(tr->jobs, u))
|
|
continue;
|
|
|
|
- if ((r = transaction_add_job_and_dependencies(m, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL)) < 0)
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL);
|
|
+ if (r < 0)
|
|
log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r));
|
|
}
|
|
|
|
return 0;
|
|
}
|
|
|
|
+static Transaction *transaction_new(void) {
|
|
+ Transaction *tr;
|
|
+
|
|
+ tr = new0(Transaction, 1);
|
|
+ if (!tr)
|
|
+ return NULL;
|
|
+
|
|
+ tr->jobs = hashmap_new(trivial_hash_func, trivial_compare_func);
+ if (!tr->jobs) {
+ free(tr);
+ return NULL;
+ }
+
+ return tr;
+}
+
+static void transaction_free(Transaction *tr) {
+ assert(hashmap_isempty(tr->jobs));
+ hashmap_free(tr->jobs);
+ free(tr);
+}
+
int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
int r;
Job *ret;
+ Transaction *tr;
assert(m);
assert(type < _JOB_TYPE_MAX);
@@ -1722,28 +1748,38 @@ int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool ove
log_debug("Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));
- if ((r = transaction_add_job_and_dependencies(m, type, unit, NULL, true, override, false,
- mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
- mode == JOB_IGNORE_DEPENDENCIES, e, &ret)) < 0) {
- transaction_abort(m);
- return r;
- }
+ tr = transaction_new();
+ if (!tr)
+ return -ENOMEM;
- if (mode == JOB_ISOLATE)
- if ((r = transaction_add_isolate_jobs(m)) < 0) {
- transaction_abort(m);
- return r;
- }
+ r = transaction_add_job_and_dependencies(tr, type, unit, NULL, true, override, false,
+ mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
+ mode == JOB_IGNORE_DEPENDENCIES, e, &ret);
+ if (r < 0)
+ goto tr_abort;
- if ((r = transaction_activate(m, mode, e)) < 0)
- return r;
+ if (mode == JOB_ISOLATE) {
+ r = transaction_add_isolate_jobs(tr, m);
+ if (r < 0)
+ goto tr_abort;
+ }
+
+ r = transaction_activate(tr, m, mode, e);
+ if (r < 0)
+ goto tr_abort;
log_debug("Enqueued job %s/%s as %u", unit->id, job_type_to_string(type), (unsigned) ret->id);
if (_ret)
*_ret = ret;
+ transaction_free(tr);
return 0;
+
+tr_abort:
+ transaction_abort(tr);
+ transaction_free(tr);
+ return r;
}
int manager_add_job_by_name(Manager *m, JobType type, const char *name, JobMode mode, bool override, DBusError *e, Job **_ret) {
@@ -1907,8 +1943,6 @@ void manager_clear_jobs(Manager *m) {
assert(m);
- transaction_abort(m);
-
while ((j = hashmap_first(m->jobs)))
job_finish_and_invalidate(j, JOB_CANCELED);
}
diff --git a/src/manager.h b/src/manager.h
index 64d8d0e..b982d7e 100644
--- a/src/manager.h
+++ b/src/manager.h
@@ -33,6 +33,7 @@
#define MANAGER_MAX_NAMES 131072 /* 128K */
typedef struct Manager Manager;
+typedef struct Transaction Transaction;
typedef enum WatchType WatchType;
typedef struct Watch Watch;
@@ -91,6 +92,12 @@ struct Watch {
#include "dbus.h"
#include "path-lookup.h"
+struct Transaction {
+ /* Jobs to be added */
+ Hashmap *jobs; /* Unit object => Job object list 1:1 */
+ JobDependency *anchor;
+};
+
struct Manager {
/* Note that the set of units we know of is allowed to be
* inconsistent. However the subset of it that is loaded may
@@ -123,10 +130,6 @@ struct Manager {
/* Units to check when doing GC */
LIST_HEAD(Unit, gc_queue);
- /* Jobs to be added */
- Hashmap *transaction_jobs; /* Unit object => Job object list 1:1 */
- JobDependency *transaction_anchor;
-
Hashmap *watch_pids; /* pid => Unit object n:1 */
char *notify_socket;
--
1.7.7


From e617c9a330b60d070e3db57c1f86f3c579e03f0c Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Thu, 19 Apr 2012 23:54:11 +0200
Subject: [PATCH 08/30] manager: split transaction.[ch]
manager.c takes care of the main loop, unit management, signal handling, ...
transaction.c computes transactions.
After split:
manager.c: 65 KB
transaction.c: 40 KB
(cherry picked from commit 75778e21dfeee51036d24501e39ea7398fabe502)
Conflicts:
Makefile.am
src/manager.c
---
Makefile.am | 2 +
src/job.h | 1 +
src/manager.c | 1099 +----------------------------------------------------
src/manager.h | 7 -
src/transaction.c | 1101 +++++++++++++++++++++++++++++++++++++++++++++++++++++
src/transaction.h | 36 ++
6 files changed, 1141 insertions(+), 1105 deletions(-)
create mode 100644 src/transaction.c
create mode 100644 src/transaction.h
diff --git a/Makefile.am b/Makefile.am
|
|
index 079c118..45a0b10 100644
|
|
--- a/Makefile.am
|
|
+++ b/Makefile.am
|
|
@@ -483,6 +483,7 @@ libsystemd_core_la_SOURCES = \
|
|
src/unit.c \
|
|
src/job.c \
|
|
src/manager.c \
|
|
+ src/transaction.c \
|
|
src/path-lookup.c \
|
|
src/load-fragment.c \
|
|
src/service.c \
|
|
@@ -579,6 +580,7 @@ EXTRA_DIST += \
|
|
src/unit.h \
|
|
src/job.h \
|
|
src/manager.h \
|
|
+ src/transaction.h \
|
|
src/path-lookup.h \
|
|
src/load-fragment.h \
|
|
src/service.h \
|
|
diff --git a/src/job.h b/src/job.h
|
|
index 18ba64f..efb0af9 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -34,6 +34,7 @@ typedef enum JobMode JobMode;
|
|
typedef enum JobResult JobResult;
|
|
|
|
#include "manager.h"
|
|
+#include "transaction.h"
|
|
#include "unit.h"
|
|
#include "hashmap.h"
|
|
#include "list.h"
|
|
diff --git a/src/manager.c b/src/manager.c
|
|
index ff4249d..a129b88 100644
|
|
--- a/src/manager.c
|
|
+++ b/src/manager.c
|
|
@@ -44,6 +44,7 @@
|
|
#include <systemd/sd-daemon.h>
|
|
|
|
#include "manager.h"
|
|
+#include "transaction.h"
|
|
#include "hashmap.h"
|
|
#include "macro.h"
|
|
#include "strv.h"
|
|
@@ -628,1104 +629,6 @@ int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
|
|
return r;
|
|
}
|
|
|
|
-static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies);
|
|
-
|
|
-static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependencies) {
|
|
- assert(tr);
|
|
- assert(j);
|
|
-
|
|
- /* Deletes one job from the transaction */
|
|
-
|
|
- transaction_unlink_job(tr, j, delete_dependencies);
|
|
-
|
|
- if (!j->installed)
|
|
- job_free(j);
|
|
-}
|
|
-
|
|
-static void transaction_delete_unit(Transaction *tr, Unit *u) {
|
|
- Job *j;
|
|
-
|
|
- /* Deletes all jobs associated with a certain unit from the
|
|
- * transaction */
|
|
-
|
|
- while ((j = hashmap_get(tr->jobs, u)))
|
|
- transaction_delete_job(tr, j, true);
|
|
-}
|
|
-
|
|
-static void transaction_abort(Transaction *tr) {
|
|
- Job *j;
|
|
-
|
|
- assert(tr);
|
|
-
|
|
- while ((j = hashmap_first(tr->jobs)))
|
|
- transaction_delete_job(tr, j, true);
|
|
-
|
|
- assert(hashmap_isempty(tr->jobs));
|
|
-
|
|
- assert(!tr->anchor);
|
|
-}
|
|
-
|
|
-static void transaction_find_jobs_that_matter_to_anchor(Transaction *tr, Job *j, unsigned generation) {
|
|
- JobDependency *l;
|
|
-
|
|
- assert(tr);
|
|
-
|
|
- /* A recursive sweep through the graph that marks all units
|
|
- * that matter to the anchor job, i.e. are directly or
|
|
- * indirectly a dependency of the anchor job via paths that
|
|
- * are fully marked as mattering. */
|
|
-
|
|
- if (j)
|
|
- l = j->subject_list;
|
|
- else
|
|
- l = tr->anchor;
|
|
-
|
|
- LIST_FOREACH(subject, l, l) {
|
|
-
|
|
- /* This link does not matter */
|
|
- if (!l->matters)
|
|
- continue;
|
|
-
|
|
- /* This unit has already been marked */
|
|
- if (l->object->generation == generation)
|
|
- continue;
|
|
-
|
|
- l->object->matters_to_anchor = true;
|
|
- l->object->generation = generation;
|
|
-
|
|
- transaction_find_jobs_that_matter_to_anchor(tr, l->object, generation);
|
|
- }
|
|
-}
|
|
-
|
|
-static void transaction_merge_and_delete_job(Transaction *tr, Job *j, Job *other, JobType t) {
|
|
- JobDependency *l, *last;
|
|
-
|
|
- assert(j);
|
|
- assert(other);
|
|
- assert(j->unit == other->unit);
|
|
- assert(!j->installed);
|
|
-
|
|
- /* Merges 'other' into 'j' and then deletes j. */
|
|
-
|
|
- j->type = t;
|
|
- j->state = JOB_WAITING;
|
|
- j->override = j->override || other->override;
|
|
-
|
|
- j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
|
|
-
|
|
- /* Patch us in as new owner of the JobDependency objects */
|
|
- last = NULL;
|
|
- LIST_FOREACH(subject, l, other->subject_list) {
|
|
- assert(l->subject == other);
|
|
- l->subject = j;
|
|
- last = l;
|
|
- }
|
|
-
|
|
- /* Merge both lists */
|
|
- if (last) {
|
|
- last->subject_next = j->subject_list;
|
|
- if (j->subject_list)
|
|
- j->subject_list->subject_prev = last;
|
|
- j->subject_list = other->subject_list;
|
|
- }
|
|
-
|
|
- /* Patch us in as new owner of the JobDependency objects */
|
|
- last = NULL;
|
|
- LIST_FOREACH(object, l, other->object_list) {
|
|
- assert(l->object == other);
|
|
- l->object = j;
|
|
- last = l;
|
|
- }
|
|
-
|
|
- /* Merge both lists */
|
|
- if (last) {
|
|
- last->object_next = j->object_list;
|
|
- if (j->object_list)
|
|
- j->object_list->object_prev = last;
|
|
- j->object_list = other->object_list;
|
|
- }
|
|
-
|
|
- /* Kill the other job */
|
|
- other->subject_list = NULL;
|
|
- other->object_list = NULL;
|
|
- transaction_delete_job(tr, other, true);
|
|
-}
|
|
-static bool job_is_conflicted_by(Job *j) {
|
|
- JobDependency *l;
|
|
-
|
|
- assert(j);
|
|
-
|
|
- /* Returns true if this job is pulled in by a least one
|
|
- * ConflictedBy dependency. */
|
|
-
|
|
- LIST_FOREACH(object, l, j->object_list)
|
|
- if (l->conflicts)
|
|
- return true;
|
|
-
|
|
- return false;
|
|
-}
|
|
-
|
|
-static int delete_one_unmergeable_job(Transaction *tr, Job *j) {
|
|
- Job *k;
|
|
-
|
|
- assert(j);
|
|
-
|
|
- /* Tries to delete one item in the linked list
|
|
- * j->transaction_next->transaction_next->... that conflicts
|
|
- * with another one, in an attempt to make an inconsistent
|
|
- * transaction work. */
|
|
-
|
|
- /* We rely here on the fact that if a merged with b does not
|
|
- * merge with c, either a or b merge with c neither */
|
|
- LIST_FOREACH(transaction, j, j)
|
|
- LIST_FOREACH(transaction, k, j->transaction_next) {
|
|
- Job *d;
|
|
-
|
|
- /* Is this one mergeable? Then skip it */
|
|
- if (job_type_is_mergeable(j->type, k->type))
|
|
- continue;
|
|
-
|
|
- /* Ok, we found two that conflict, let's see if we can
|
|
- * drop one of them */
|
|
- if (!j->matters_to_anchor && !k->matters_to_anchor) {
|
|
-
|
|
- /* Both jobs don't matter, so let's
|
|
- * find the one that is smarter to
|
|
- * remove. Let's think positive and
|
|
- * rather remove stops then starts --
|
|
- * except if something is being
|
|
- * stopped because it is conflicted by
|
|
- * another unit in which case we
|
|
- * rather remove the start. */
|
|
-
|
|
- log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
|
|
- log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
|
|
-
|
|
- if (j->type == JOB_STOP) {
|
|
-
|
|
- if (job_is_conflicted_by(j))
|
|
- d = k;
|
|
- else
|
|
- d = j;
|
|
-
|
|
- } else if (k->type == JOB_STOP) {
|
|
-
|
|
- if (job_is_conflicted_by(k))
|
|
- d = j;
|
|
- else
|
|
- d = k;
|
|
- } else
|
|
- d = j;
|
|
-
|
|
- } else if (!j->matters_to_anchor)
|
|
- d = j;
|
|
- else if (!k->matters_to_anchor)
|
|
- d = k;
|
|
- else
|
|
- return -ENOEXEC;
|
|
-
|
|
- /* Ok, we can drop one, so let's do so. */
|
|
- log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->id, job_type_to_string(d->type));
|
|
- transaction_delete_job(tr, d, true);
|
|
- return 0;
|
|
- }
|
|
-
|
|
- return -EINVAL;
|
|
-}
|
|
-
|
|
-static int transaction_merge_jobs(Transaction *tr, DBusError *e) {
|
|
- Job *j;
|
|
- Iterator i;
|
|
- int r;
|
|
-
|
|
- assert(tr);
|
|
-
|
|
- /* First step, check whether any of the jobs for one specific
|
|
- * task conflict. If so, try to drop one of them. */
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- JobType t;
|
|
- Job *k;
|
|
-
|
|
- t = j->type;
|
|
- LIST_FOREACH(transaction, k, j->transaction_next) {
|
|
- if (job_type_merge(&t, k->type) >= 0)
|
|
- continue;
|
|
-
|
|
- /* OK, we could not merge all jobs for this
|
|
- * action. Let's see if we can get rid of one
|
|
- * of them */
|
|
-
|
|
- r = delete_one_unmergeable_job(tr, j);
|
|
- if (r >= 0)
|
|
- /* Ok, we managed to drop one, now
|
|
- * let's ask our callers to call us
|
|
- * again after garbage collecting */
|
|
- return -EAGAIN;
|
|
-
|
|
- /* We couldn't merge anything. Failure */
|
|
- dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
|
|
- job_type_to_string(t), job_type_to_string(k->type), k->unit->id);
|
|
- return r;
|
|
- }
|
|
- }
|
|
-
|
|
- /* Second step, merge the jobs. */
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- JobType t = j->type;
|
|
- Job *k;
|
|
-
|
|
- /* Merge all transactions */
|
|
- LIST_FOREACH(transaction, k, j->transaction_next)
|
|
- assert_se(job_type_merge(&t, k->type) == 0);
|
|
-
|
|
- /* If an active job is mergeable, merge it too */
|
|
- if (j->unit->job)
|
|
- job_type_merge(&t, j->unit->job->type); /* Might fail. Which is OK */
|
|
-
|
|
- while ((k = j->transaction_next)) {
|
|
- if (j->installed) {
|
|
- transaction_merge_and_delete_job(tr, k, j, t);
|
|
- j = k;
|
|
- } else
|
|
- transaction_merge_and_delete_job(tr, j, k, t);
|
|
- }
|
|
-
|
|
- if (j->unit->job && !j->installed)
|
|
- transaction_merge_and_delete_job(tr, j, j->unit->job, t);
|
|
-
|
|
- assert(!j->transaction_next);
|
|
- assert(!j->transaction_prev);
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static void transaction_drop_redundant(Transaction *tr) {
|
|
- bool again;
|
|
-
|
|
- assert(tr);
|
|
-
|
|
- /* Goes through the transaction and removes all jobs that are
|
|
- * a noop */
|
|
-
|
|
- do {
|
|
- Job *j;
|
|
- Iterator i;
|
|
-
|
|
- again = false;
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- bool changes_something = false;
|
|
- Job *k;
|
|
-
|
|
- LIST_FOREACH(transaction, k, j) {
|
|
-
|
|
- if (!job_is_anchor(k) &&
|
|
- (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
|
|
- (!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type)))
|
|
- continue;
|
|
-
|
|
- changes_something = true;
|
|
- break;
|
|
- }
|
|
-
|
|
- if (changes_something)
|
|
- continue;
|
|
-
|
|
- /* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */
|
|
- transaction_delete_job(tr, j, false);
|
|
- again = true;
|
|
- break;
|
|
- }
|
|
-
|
|
- } while (again);
|
|
-}
|
|
-
|
|
-static bool unit_matters_to_anchor(Unit *u, Job *j) {
|
|
- assert(u);
|
|
- assert(!j->transaction_prev);
|
|
-
|
|
- /* Checks whether at least one of the jobs for this unit
|
|
- * matters to the anchor. */
|
|
-
|
|
- LIST_FOREACH(transaction, j, j)
|
|
- if (j->matters_to_anchor)
|
|
- return true;
|
|
-
|
|
- return false;
|
|
-}
|
|
-
|
|
-static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsigned generation, DBusError *e) {
|
|
- Iterator i;
|
|
- Unit *u;
|
|
- int r;
|
|
-
|
|
- assert(tr);
|
|
- assert(j);
|
|
- assert(!j->transaction_prev);
|
|
-
|
|
- /* Does a recursive sweep through the ordering graph, looking
|
|
- * for a cycle. If we find cycle we try to break it. */
|
|
-
|
|
- /* Have we seen this before? */
|
|
- if (j->generation == generation) {
|
|
- Job *k, *delete;
|
|
-
|
|
- /* If the marker is NULL we have been here already and
|
|
- * decided the job was loop-free from here. Hence
|
|
- * shortcut things and return right-away. */
|
|
- if (!j->marker)
|
|
- return 0;
|
|
-
|
|
- /* So, the marker is not NULL and we already have been
|
|
- * here. We have a cycle. Let's try to break it. We go
|
|
- * backwards in our path and try to find a suitable
|
|
- * job to remove. We use the marker to find our way
|
|
- * back, since smart how we are we stored our way back
|
|
- * in there. */
|
|
- log_warning("Found ordering cycle on %s/%s", j->unit->id, job_type_to_string(j->type));
|
|
-
|
|
- delete = NULL;
|
|
- for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
|
|
-
|
|
- log_info("Walked on cycle path to %s/%s", k->unit->id, job_type_to_string(k->type));
|
|
-
|
|
- if (!delete &&
|
|
- !k->installed &&
|
|
- !unit_matters_to_anchor(k->unit, k)) {
|
|
- /* Ok, we can drop this one, so let's
|
|
- * do so. */
|
|
- delete = k;
|
|
- }
|
|
-
|
|
- /* Check if this in fact was the beginning of
|
|
- * the cycle */
|
|
- if (k == j)
|
|
- break;
|
|
- }
|
|
-
|
|
-
|
|
- if (delete) {
|
|
- log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->id, job_type_to_string(delete->type));
|
|
- transaction_delete_unit(tr, delete->unit);
|
|
- return -EAGAIN;
|
|
- }
|
|
-
|
|
- log_error("Unable to break cycle");
|
|
-
|
|
- dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
|
|
- return -ENOEXEC;
|
|
- }
|
|
-
|
|
- /* Make the marker point to where we come from, so that we can
|
|
- * find our way backwards if we want to break a cycle. We use
|
|
- * a special marker for the beginning: we point to
|
|
- * ourselves. */
|
|
- j->marker = from ? from : j;
|
|
- j->generation = generation;
|
|
-
|
|
- /* We assume that the the dependencies are bidirectional, and
|
|
- * hence can ignore UNIT_AFTER */
|
|
- SET_FOREACH(u, j->unit->dependencies[UNIT_BEFORE], i) {
|
|
- Job *o;
|
|
-
|
|
- /* Is there a job for this unit? */
|
|
- o = hashmap_get(tr->jobs, u);
|
|
- if (!o) {
|
|
- /* Ok, there is no job for this in the
|
|
- * transaction, but maybe there is already one
|
|
- * running? */
|
|
- o = u->job;
|
|
- if (!o)
|
|
- continue;
|
|
- }
|
|
-
|
|
- r = transaction_verify_order_one(tr, o, j, generation, e);
|
|
- if (r < 0)
|
|
- return r;
|
|
- }
|
|
-
|
|
- /* Ok, let's backtrack, and remember that this entry is not on
|
|
- * our path anymore. */
|
|
- j->marker = NULL;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static int transaction_verify_order(Transaction *tr, unsigned *generation, DBusError *e) {
|
|
- Job *j;
|
|
- int r;
|
|
- Iterator i;
|
|
- unsigned g;
|
|
-
|
|
- assert(tr);
|
|
- assert(generation);
|
|
-
|
|
- /* Check if the ordering graph is cyclic. If it is, try to fix
|
|
- * that up by dropping one of the jobs. */
|
|
-
|
|
- g = (*generation)++;
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i)
|
|
- if ((r = transaction_verify_order_one(tr, j, NULL, g, e)) < 0)
|
|
- return r;
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static void transaction_collect_garbage(Transaction *tr) {
|
|
- bool again;
|
|
-
|
|
- assert(tr);
|
|
-
|
|
- /* Drop jobs that are not required by any other job */
|
|
-
|
|
- do {
|
|
- Iterator i;
|
|
- Job *j;
|
|
-
|
|
- again = false;
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- if (j->object_list) {
|
|
- /* log_debug("Keeping job %s/%s because of %s/%s", */
|
|
- /* j->unit->id, job_type_to_string(j->type), */
|
|
- /* j->object_list->subject ? j->object_list->subject->unit->id : "root", */
|
|
- /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
|
|
- continue;
|
|
- }
|
|
-
|
|
- /* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */
|
|
- transaction_delete_job(tr, j, true);
|
|
- again = true;
|
|
- break;
|
|
- }
|
|
-
|
|
- } while (again);
|
|
-}
|
|
-
|
|
-static int transaction_is_destructive(Transaction *tr, DBusError *e) {
|
|
- Iterator i;
|
|
- Job *j;
|
|
-
|
|
- assert(tr);
|
|
-
|
|
- /* Checks whether applying this transaction means that
|
|
- * existing jobs would be replaced */
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
-
|
|
- /* Assume merged */
|
|
- assert(!j->transaction_prev);
|
|
- assert(!j->transaction_next);
|
|
-
|
|
- if (j->unit->job &&
|
|
- j->unit->job != j &&
|
|
- !job_type_is_superset(j->type, j->unit->job->type)) {
|
|
-
|
|
- dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
|
|
- return -EEXIST;
|
|
- }
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static void transaction_minimize_impact(Transaction *tr) {
|
|
- bool again;
|
|
- assert(tr);
|
|
-
|
|
- /* Drops all unnecessary jobs that reverse already active jobs
|
|
- * or that stop a running service. */
|
|
-
|
|
- do {
|
|
- Job *j;
|
|
- Iterator i;
|
|
-
|
|
- again = false;
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- LIST_FOREACH(transaction, j, j) {
|
|
- bool stops_running_service, changes_existing_job;
|
|
-
|
|
- /* If it matters, we shouldn't drop it */
|
|
- if (j->matters_to_anchor)
|
|
- continue;
|
|
-
|
|
- /* Would this stop a running service?
|
|
- * Would this change an existing job?
|
|
- * If so, let's drop this entry */
|
|
-
|
|
- stops_running_service =
|
|
- j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
|
|
-
|
|
- changes_existing_job =
|
|
- j->unit->job &&
|
|
- job_type_is_conflicting(j->type, j->unit->job->type);
|
|
-
|
|
- if (!stops_running_service && !changes_existing_job)
|
|
- continue;
|
|
-
|
|
- if (stops_running_service)
|
|
- log_debug("%s/%s would stop a running service.", j->unit->id, job_type_to_string(j->type));
|
|
-
|
|
- if (changes_existing_job)
|
|
- log_debug("%s/%s would change existing job.", j->unit->id, job_type_to_string(j->type));
|
|
-
|
|
- /* Ok, let's get rid of this */
|
|
- log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type));
|
|
-
|
|
- transaction_delete_job(tr, j, true);
|
|
- again = true;
|
|
- break;
|
|
- }
|
|
-
|
|
- if (again)
|
|
- break;
|
|
- }
|
|
-
|
|
- } while (again);
|
|
-}
|
|
-
|
|
-static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
- Iterator i;
|
|
- Job *j;
|
|
- int r;
|
|
-
|
|
- /* Moves the transaction jobs to the set of active jobs */
|
|
-
|
|
- if (mode == JOB_ISOLATE) {
|
|
-
|
|
- /* When isolating first kill all installed jobs which
|
|
- * aren't part of the new transaction */
|
|
- rescan:
|
|
- HASHMAP_FOREACH(j, m->jobs, i) {
|
|
- assert(j->installed);
|
|
-
|
|
- if (hashmap_get(tr->jobs, j->unit))
|
|
- continue;
|
|
-
|
|
- /* 'j' itself is safe to remove, but if other jobs
|
|
- are invalidated recursively, our iterator may become
|
|
- invalid and we need to start over. */
|
|
- if (job_finish_and_invalidate(j, JOB_CANCELED) > 0)
|
|
- goto rescan;
|
|
- }
|
|
- }
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- /* Assume merged */
|
|
- assert(!j->transaction_prev);
|
|
- assert(!j->transaction_next);
|
|
-
|
|
- if (j->installed)
|
|
- continue;
|
|
-
|
|
- r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j);
|
|
- if (r < 0)
|
|
- goto rollback;
|
|
- }
|
|
-
|
|
- while ((j = hashmap_steal_first(tr->jobs))) {
|
|
- Job *uj;
|
|
- if (j->installed) {
|
|
- /* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
|
|
- continue;
|
|
- }
|
|
-
|
|
- uj = j->unit->job;
|
|
- if (uj) {
|
|
- job_uninstall(uj);
|
|
- job_free(uj);
|
|
- }
|
|
-
|
|
- j->unit->job = j;
|
|
- j->installed = true;
|
|
- m->n_installed_jobs ++;
|
|
-
|
|
- /* We're fully installed. Now let's free data we don't
|
|
- * need anymore. */
|
|
-
|
|
- assert(!j->transaction_next);
|
|
- assert(!j->transaction_prev);
|
|
-
|
|
- /* Clean the job dependencies */
|
|
- transaction_unlink_job(tr, j, false);
|
|
-
|
|
- job_add_to_run_queue(j);
|
|
- job_add_to_dbus_queue(j);
|
|
- job_start_timer(j);
|
|
-
|
|
- log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
- }
|
|
-
|
|
- assert(!tr->anchor);
|
|
-
|
|
- return 0;
|
|
-
|
|
-rollback:
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- if (j->installed)
|
|
- continue;
|
|
-
|
|
- hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
|
|
- }
|
|
-
|
|
- return r;
|
|
-}
|
|
-
|
|
-static int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e) {
|
|
- int r;
|
|
- unsigned generation = 1;
|
|
-
|
|
- assert(tr);
|
|
-
|
|
- /* This applies the changes recorded in tr->jobs to
|
|
- * the actual list of jobs, if possible. */
|
|
-
|
|
- /* First step: figure out which jobs matter */
|
|
- transaction_find_jobs_that_matter_to_anchor(tr, NULL, generation++);
|
|
-
|
|
- /* Second step: Try not to stop any running services if
|
|
- * we don't have to. Don't try to reverse running
|
|
- * jobs if we don't have to. */
|
|
- if (mode == JOB_FAIL)
|
|
- transaction_minimize_impact(tr);
|
|
-
|
|
- /* Third step: Drop redundant jobs */
|
|
- transaction_drop_redundant(tr);
|
|
-
|
|
- for (;;) {
|
|
- /* Fourth step: Let's remove unneeded jobs that might
|
|
- * be lurking. */
|
|
- if (mode != JOB_ISOLATE)
|
|
- transaction_collect_garbage(tr);
|
|
-
|
|
- /* Fifth step: verify order makes sense and correct
|
|
- * cycles if necessary and possible */
|
|
- r = transaction_verify_order(tr, &generation, e);
|
|
- if (r >= 0)
|
|
- break;
|
|
-
|
|
- if (r != -EAGAIN) {
|
|
- log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
|
|
- return r;
|
|
- }
|
|
-
|
|
- /* Let's see if the resulting transaction ordering
|
|
- * graph is still cyclic... */
|
|
- }
|
|
-
|
|
- for (;;) {
|
|
- /* Sixth step: let's drop unmergeable entries if
|
|
- * necessary and possible, merge entries we can
|
|
- * merge */
|
|
- r = transaction_merge_jobs(tr, e);
|
|
- if (r >= 0)
|
|
- break;
|
|
-
|
|
- if (r != -EAGAIN) {
|
|
- log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
|
|
- return r;
|
|
- }
|
|
-
|
|
- /* Seventh step: an entry got dropped, let's garbage
|
|
- * collect its dependencies. */
|
|
- if (mode != JOB_ISOLATE)
|
|
- transaction_collect_garbage(tr);
|
|
-
|
|
- /* Let's see if the resulting transaction still has
|
|
- * unmergeable entries ... */
|
|
- }
|
|
-
|
|
- /* Eights step: Drop redundant jobs again, if the merging now allows us to drop more. */
|
|
- transaction_drop_redundant(tr);
|
|
-
|
|
- /* Ninth step: check whether we can actually apply this */
|
|
- if (mode == JOB_FAIL) {
|
|
- r = transaction_is_destructive(tr, e);
|
|
- if (r < 0) {
|
|
- log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
|
|
- return r;
|
|
- }
|
|
- }
|
|
-
|
|
- /* Tenth step: apply changes */
|
|
- r = transaction_apply(tr, m, mode);
|
|
- if (r < 0) {
|
|
- log_warning("Failed to apply transaction: %s", strerror(-r));
|
|
- return r;
|
|
- }
|
|
-
|
|
- assert(hashmap_isempty(tr->jobs));
|
|
- assert(!tr->anchor);
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, bool override, bool *is_new) {
|
|
- Job *j, *f;
|
|
-
|
|
- assert(tr);
|
|
- assert(unit);
|
|
-
|
|
- /* Looks for an existing prospective job and returns that. If
|
|
- * it doesn't exist it is created and added to the prospective
|
|
- * jobs list. */
|
|
-
|
|
- f = hashmap_get(tr->jobs, unit);
|
|
-
|
|
- LIST_FOREACH(transaction, j, f) {
|
|
- assert(j->unit == unit);
|
|
-
|
|
- if (j->type == type) {
|
|
- if (is_new)
|
|
- *is_new = false;
|
|
- return j;
|
|
- }
|
|
- }
|
|
-
|
|
- if (unit->job && unit->job->type == type)
|
|
- j = unit->job;
|
|
- else {
|
|
- j = job_new(unit->manager, type, unit);
|
|
- if (!j)
|
|
- return NULL;
|
|
- }
|
|
-
|
|
- j->generation = 0;
|
|
- j->marker = NULL;
|
|
- j->matters_to_anchor = false;
|
|
- j->override = override;
|
|
-
|
|
- LIST_PREPEND(Job, transaction, f, j);
|
|
-
|
|
- if (hashmap_replace(tr->jobs, unit, f) < 0) {
|
|
- LIST_REMOVE(Job, transaction, f, j);
|
|
- job_free(j);
|
|
- return NULL;
|
|
- }
|
|
-
|
|
- if (is_new)
|
|
- *is_new = true;
|
|
-
|
|
- /* log_debug("Added job %s/%s to transaction.", unit->id, job_type_to_string(type)); */
|
|
-
|
|
- return j;
|
|
-}
|
|
-
|
|
-static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
|
|
- assert(tr);
|
|
- assert(j);
|
|
-
|
|
- if (j->transaction_prev)
|
|
- j->transaction_prev->transaction_next = j->transaction_next;
|
|
- else if (j->transaction_next)
|
|
- hashmap_replace(tr->jobs, j->unit, j->transaction_next);
|
|
- else
|
|
- hashmap_remove_value(tr->jobs, j->unit, j);
|
|
-
|
|
- if (j->transaction_next)
|
|
- j->transaction_next->transaction_prev = j->transaction_prev;
|
|
-
|
|
- j->transaction_prev = j->transaction_next = NULL;
|
|
-
|
|
- while (j->subject_list)
|
|
- job_dependency_free(j->subject_list, tr);
|
|
-
|
|
- while (j->object_list) {
|
|
- Job *other = j->object_list->matters ? j->object_list->subject : NULL;
|
|
-
|
|
- job_dependency_free(j->object_list, tr);
|
|
-
|
|
- if (other && delete_dependencies) {
|
|
- log_debug("Deleting job %s/%s as dependency of job %s/%s",
|
|
- other->unit->id, job_type_to_string(other->type),
|
|
- j->unit->id, job_type_to_string(j->type));
|
|
- transaction_delete_job(tr, other, delete_dependencies);
|
|
- }
|
|
- }
|
|
-}
|
|
-
|
|
-static int transaction_add_job_and_dependencies(
|
|
- Transaction *tr,
|
|
- JobType type,
|
|
- Unit *unit,
|
|
- Job *by,
|
|
- bool matters,
|
|
- bool override,
|
|
- bool conflicts,
|
|
- bool ignore_requirements,
|
|
- bool ignore_order,
|
|
- DBusError *e,
|
|
- Job **_ret) {
|
|
- Job *ret;
|
|
- Iterator i;
|
|
- Unit *dep;
|
|
- int r;
|
|
- bool is_new;
|
|
-
|
|
- assert(tr);
|
|
- assert(type < _JOB_TYPE_MAX);
|
|
- assert(unit);
|
|
-
|
|
- /* log_debug("Pulling in %s/%s from %s/%s", */
|
|
- /* unit->id, job_type_to_string(type), */
|
|
- /* by ? by->unit->id : "NA", */
|
|
- /* by ? job_type_to_string(by->type) : "NA"); */
|
|
-
|
|
- if (unit->load_state != UNIT_LOADED &&
|
|
- unit->load_state != UNIT_ERROR &&
|
|
- unit->load_state != UNIT_MASKED) {
|
|
- dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (type != JOB_STOP && unit->load_state == UNIT_ERROR) {
|
|
- dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
|
|
- "Unit %s failed to load: %s. "
|
|
- "See system logs and 'systemctl status %s' for details.",
|
|
- unit->id,
|
|
- strerror(-unit->load_error),
|
|
- unit->id);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (type != JOB_STOP && unit->load_state == UNIT_MASKED) {
|
|
- dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->id);
|
|
- return -EINVAL;
|
|
- }
|
|
-
|
|
- if (!unit_job_is_applicable(unit, type)) {
|
|
- dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->id);
|
|
- return -EBADR;
|
|
- }
|
|
-
|
|
- /* First add the job. */
|
|
- ret = transaction_add_one_job(tr, type, unit, override, &is_new);
|
|
- if (!ret)
|
|
- return -ENOMEM;
|
|
-
|
|
- ret->ignore_order = ret->ignore_order || ignore_order;
|
|
-
|
|
- /* Then, add a link to the job. */
|
|
- if (!job_dependency_new(by, ret, matters, conflicts, tr))
|
|
- return -ENOMEM;
|
|
-
|
|
- if (is_new && !ignore_requirements) {
|
|
- Set *following;
|
|
-
|
|
- /* If we are following some other unit, make sure we
|
|
- * add all dependencies of everybody following. */
|
|
- if (unit_following_set(ret->unit, &following) > 0) {
|
|
- SET_FOREACH(dep, following, i) {
|
|
- r = transaction_add_job_and_dependencies(tr, type, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- set_free(following);
|
|
- }
|
|
-
|
|
- /* Finally, recursively add in all dependencies. */
|
|
- if (type == JOB_START || type == JOB_RELOAD_OR_START) {
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- if (r != -EBADR)
|
|
- goto fail;
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- if (r != -EBADR)
|
|
- goto fail;
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- if (r != -EBADR)
|
|
- goto fail;
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- if (r != -EBADR)
|
|
- goto fail;
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- }
|
|
-
|
|
- if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- if (r != -EBADR)
|
|
- goto fail;
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- if (r != -EBADR)
|
|
- goto fail;
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
- }
|
|
-
|
|
- if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) {
|
|
-
|
|
- SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
- if (r < 0) {
|
|
- log_warning("Cannot add dependency reload job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
-
|
|
- if (e)
|
|
- dbus_error_free(e);
|
|
- }
|
|
- }
|
|
- }
|
|
-
|
|
- /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
|
|
- }
|
|
-
|
|
- if (_ret)
|
|
- *_ret = ret;
|
|
-
|
|
- return 0;
|
|
-
|
|
-fail:
|
|
- return r;
|
|
-}
|
|
-
|
|
-static int transaction_add_isolate_jobs(Transaction *tr, Manager *m) {
|
|
- Iterator i;
|
|
- Unit *u;
|
|
- char *k;
|
|
- int r;
|
|
-
|
|
- assert(tr);
|
|
- assert(m);
|
|
-
|
|
- HASHMAP_FOREACH_KEY(u, k, m->units, i) {
|
|
-
|
|
- /* ignore aliases */
|
|
- if (u->id != k)
|
|
- continue;
|
|
-
|
|
- if (u->ignore_on_isolate)
|
|
- continue;
|
|
-
|
|
- /* No need to stop inactive jobs */
|
|
- if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job)
|
|
- continue;
|
|
-
|
|
- /* Is there already something listed for this? */
|
|
- if (hashmap_get(tr->jobs, u))
|
|
- continue;
|
|
-
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL);
|
|
- if (r < 0)
|
|
- log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r));
|
|
- }
|
|
-
|
|
- return 0;
|
|
-}
|
|
-
|
|
-static Transaction *transaction_new(void) {
|
|
- Transaction *tr;
|
|
-
|
|
- tr = new0(Transaction, 1);
|
|
- if (!tr)
|
|
- return NULL;
|
|
-
|
|
- tr->jobs = hashmap_new(trivial_hash_func, trivial_compare_func);
|
|
- if (!tr->jobs) {
|
|
- free(tr);
|
|
- return NULL;
|
|
- }
|
|
-
|
|
- return tr;
|
|
-}
|
|
-
|
|
-static void transaction_free(Transaction *tr) {
|
|
- assert(hashmap_isempty(tr->jobs));
|
|
- hashmap_free(tr->jobs);
|
|
- free(tr);
|
|
-}
|
|
-
|
|
int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
|
|
int r;
|
|
Job *ret;
|
|
diff --git a/src/manager.h b/src/manager.h
|
|
index b982d7e..915f6fc 100644
|
|
--- a/src/manager.h
|
|
+++ b/src/manager.h
|
|
@@ -33,7 +33,6 @@
|
|
#define MANAGER_MAX_NAMES 131072 /* 128K */
|
|
|
|
typedef struct Manager Manager;
|
|
-typedef struct Transaction Transaction;
|
|
typedef enum WatchType WatchType;
|
|
typedef struct Watch Watch;
|
|
|
|
@@ -92,12 +91,6 @@ struct Watch {
|
|
#include "dbus.h"
|
|
#include "path-lookup.h"
|
|
|
|
-struct Transaction {
|
|
- /* Jobs to be added */
|
|
- Hashmap *jobs; /* Unit object => Job object list 1:1 */
|
|
- JobDependency *anchor;
|
|
-};
|
|
-
|
|
struct Manager {
|
|
/* Note that the set of units we know of is allowed to be
|
|
* inconsistent. However the subset of it that is loaded may
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
new file mode 100644
|
|
index 0000000..8fa89a7
|
|
--- /dev/null
|
|
+++ b/src/transaction.c
|
|
@@ -0,0 +1,1101 @@
|
|
+#include "transaction.h"
|
|
+#include "bus-errors.h"
|
|
+
|
|
+static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies);
|
|
+
|
|
+static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependencies) {
|
|
+ assert(tr);
|
|
+ assert(j);
|
|
+
|
|
+ /* Deletes one job from the transaction */
|
|
+
|
|
+ transaction_unlink_job(tr, j, delete_dependencies);
|
|
+
|
|
+ if (!j->installed)
|
|
+ job_free(j);
|
|
+}
|
|
+
|
|
+static void transaction_delete_unit(Transaction *tr, Unit *u) {
|
|
+ Job *j;
|
|
+
|
|
+ /* Deletes all jobs associated with a certain unit from the
|
|
+ * transaction */
|
|
+
|
|
+ while ((j = hashmap_get(tr->jobs, u)))
|
|
+ transaction_delete_job(tr, j, true);
|
|
+}
|
|
+
|
|
+void transaction_abort(Transaction *tr) {
|
|
+ Job *j;
|
|
+
|
|
+ assert(tr);
|
|
+
|
|
+ while ((j = hashmap_first(tr->jobs)))
|
|
+ transaction_delete_job(tr, j, true);
|
|
+
|
|
+ assert(hashmap_isempty(tr->jobs));
|
|
+
|
|
+ assert(!tr->anchor);
|
|
+}
|
|
+
|
|
+static void transaction_find_jobs_that_matter_to_anchor(Transaction *tr, Job *j, unsigned generation) {
|
|
+ JobDependency *l;
|
|
+
|
|
+ assert(tr);
|
|
+
|
|
+ /* A recursive sweep through the graph that marks all units
|
|
+ * that matter to the anchor job, i.e. are directly or
|
|
+ * indirectly a dependency of the anchor job via paths that
|
|
+ * are fully marked as mattering. */
|
|
+
|
|
+ if (j)
|
|
+ l = j->subject_list;
|
|
+ else
|
|
+ l = tr->anchor;
|
|
+
|
|
+ LIST_FOREACH(subject, l, l) {
|
|
+
|
|
+ /* This link does not matter */
|
|
+ if (!l->matters)
|
|
+ continue;
|
|
+
|
|
+ /* This unit has already been marked */
|
|
+ if (l->object->generation == generation)
|
|
+ continue;
|
|
+
|
|
+ l->object->matters_to_anchor = true;
|
|
+ l->object->generation = generation;
|
|
+
|
|
+ transaction_find_jobs_that_matter_to_anchor(tr, l->object, generation);
|
|
+ }
|
|
+}
|
|
+
|
|
+static void transaction_merge_and_delete_job(Transaction *tr, Job *j, Job *other, JobType t) {
|
|
+ JobDependency *l, *last;
|
|
+
|
|
+ assert(j);
|
|
+ assert(other);
|
|
+ assert(j->unit == other->unit);
|
|
+ assert(!j->installed);
|
|
+
|
|
+ /* Merges 'other' into 'j' and then deletes 'other'. */
|
|
+
|
|
+ j->type = t;
|
|
+ j->state = JOB_WAITING;
|
|
+ j->override = j->override || other->override;
|
|
+
|
|
+ j->matters_to_anchor = j->matters_to_anchor || other->matters_to_anchor;
|
|
+
|
|
+ /* Patch us in as new owner of the JobDependency objects */
|
|
+ last = NULL;
|
|
+ LIST_FOREACH(subject, l, other->subject_list) {
|
|
+ assert(l->subject == other);
|
|
+ l->subject = j;
|
|
+ last = l;
|
|
+ }
|
|
+
|
|
+ /* Merge both lists */
|
|
+ if (last) {
|
|
+ last->subject_next = j->subject_list;
|
|
+ if (j->subject_list)
|
|
+ j->subject_list->subject_prev = last;
|
|
+ j->subject_list = other->subject_list;
|
|
+ }
|
|
+
|
|
+ /* Patch us in as new owner of the JobDependency objects */
|
|
+ last = NULL;
|
|
+ LIST_FOREACH(object, l, other->object_list) {
|
|
+ assert(l->object == other);
|
|
+ l->object = j;
|
|
+ last = l;
|
|
+ }
|
|
+
|
|
+ /* Merge both lists */
|
|
+ if (last) {
|
|
+ last->object_next = j->object_list;
|
|
+ if (j->object_list)
|
|
+ j->object_list->object_prev = last;
|
|
+ j->object_list = other->object_list;
|
|
+ }
|
|
+
|
|
+ /* Kill the other job */
|
|
+ other->subject_list = NULL;
|
|
+ other->object_list = NULL;
|
|
+ transaction_delete_job(tr, other, true);
|
|
+}
|
|
+
|
|
+static bool job_is_conflicted_by(Job *j) {
|
|
+ JobDependency *l;
|
|
+
|
|
+ assert(j);
|
|
+
|
|
+ /* Returns true if this job is pulled in by a least one
|
|
+ * ConflictedBy dependency. */
|
|
+
|
|
+ LIST_FOREACH(object, l, j->object_list)
|
|
+ if (l->conflicts)
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static int delete_one_unmergeable_job(Transaction *tr, Job *j) {
|
|
+ Job *k;
|
|
+
|
|
+ assert(j);
|
|
+
|
|
+ /* Tries to delete one item in the linked list
|
|
+ * j->transaction_next->transaction_next->... that conflicts
|
|
+ * with another one, in an attempt to make an inconsistent
|
|
+ * transaction work. */
|
|
+
|
|
+ /* We rely here on the fact that if a merged with b does not
|
|
+ * merge with c, either a or b merge with c neither */
|
|
+ LIST_FOREACH(transaction, j, j)
|
|
+ LIST_FOREACH(transaction, k, j->transaction_next) {
|
|
+ Job *d;
|
|
+
|
|
+ /* Is this one mergeable? Then skip it */
|
|
+ if (job_type_is_mergeable(j->type, k->type))
|
|
+ continue;
|
|
+
|
|
+ /* Ok, we found two that conflict, let's see if we can
|
|
+ * drop one of them */
|
|
+ if (!j->matters_to_anchor && !k->matters_to_anchor) {
|
|
+
|
|
+ /* Both jobs don't matter, so let's
|
|
+ * find the one that is smarter to
|
|
+ * remove. Let's think positive and
|
|
+ * rather remove stops then starts --
|
|
+ * except if something is being
|
|
+ * stopped because it is conflicted by
|
|
+ * another unit in which case we
|
|
+ * rather remove the start. */
|
|
+
|
|
+ log_debug("Looking at job %s/%s conflicted_by=%s", j->unit->id, job_type_to_string(j->type), yes_no(j->type == JOB_STOP && job_is_conflicted_by(j)));
|
|
+ log_debug("Looking at job %s/%s conflicted_by=%s", k->unit->id, job_type_to_string(k->type), yes_no(k->type == JOB_STOP && job_is_conflicted_by(k)));
|
|
+
|
|
+ if (j->type == JOB_STOP) {
|
|
+
|
|
+ if (job_is_conflicted_by(j))
|
|
+ d = k;
|
|
+ else
|
|
+ d = j;
|
|
+
|
|
+ } else if (k->type == JOB_STOP) {
|
|
+
|
|
+ if (job_is_conflicted_by(k))
|
|
+ d = j;
|
|
+ else
|
|
+ d = k;
|
|
+ } else
|
|
+ d = j;
|
|
+
|
|
+ } else if (!j->matters_to_anchor)
|
|
+ d = j;
|
|
+ else if (!k->matters_to_anchor)
|
|
+ d = k;
|
|
+ else
|
|
+ return -ENOEXEC;
|
|
+
|
|
+ /* Ok, we can drop one, so let's do so. */
|
|
+ log_debug("Fixing conflicting jobs by deleting job %s/%s", d->unit->id, job_type_to_string(d->type));
|
|
+ transaction_delete_job(tr, d, true);
|
|
+ return 0;
|
|
+ }
|
|
+
|
|
+ return -EINVAL;
|
|
+}
|
|
+
|
|
+static int transaction_merge_jobs(Transaction *tr, DBusError *e) {
|
|
+ Job *j;
|
|
+ Iterator i;
|
|
+ int r;
|
|
+
|
|
+ assert(tr);
|
|
+
|
|
+ /* First step, check whether any of the jobs for one specific
|
|
+ * task conflict. If so, try to drop one of them. */
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ JobType t;
|
|
+ Job *k;
|
|
+
|
|
+ t = j->type;
|
|
+ LIST_FOREACH(transaction, k, j->transaction_next) {
|
|
+ if (job_type_merge(&t, k->type) >= 0)
|
|
+ continue;
|
|
+
|
|
+ /* OK, we could not merge all jobs for this
|
|
+ * action. Let's see if we can get rid of one
|
|
+ * of them */
|
|
+
|
|
+ r = delete_one_unmergeable_job(tr, j);
|
|
+ if (r >= 0)
|
|
+ /* Ok, we managed to drop one, now
|
|
+ * let's ask our callers to call us
|
|
+ * again after garbage collecting */
|
|
+ return -EAGAIN;
|
|
+
|
|
+ /* We couldn't merge anything. Failure */
|
|
+ dbus_set_error(e, BUS_ERROR_TRANSACTION_JOBS_CONFLICTING, "Transaction contains conflicting jobs '%s' and '%s' for %s. Probably contradicting requirement dependencies configured.",
|
|
+ job_type_to_string(t), job_type_to_string(k->type), k->unit->id);
|
|
+ return r;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Second step, merge the jobs. */
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ JobType t = j->type;
|
|
+ Job *k;
|
|
+
|
|
+ /* Merge all transactions */
|
|
+ LIST_FOREACH(transaction, k, j->transaction_next)
|
|
+ assert_se(job_type_merge(&t, k->type) == 0);
|
|
+
|
|
+ /* If an active job is mergeable, merge it too */
|
|
+ if (j->unit->job)
|
|
+ job_type_merge(&t, j->unit->job->type); /* Might fail. Which is OK */
|
|
+
|
|
+ while ((k = j->transaction_next)) {
|
|
+ if (j->installed) {
|
|
+ transaction_merge_and_delete_job(tr, k, j, t);
|
|
+ j = k;
|
|
+ } else
|
|
+ transaction_merge_and_delete_job(tr, j, k, t);
|
|
+ }
|
|
+
|
|
+ if (j->unit->job && !j->installed)
|
|
+ transaction_merge_and_delete_job(tr, j, j->unit->job, t);
|
|
+
|
|
+ assert(!j->transaction_next);
|
|
+ assert(!j->transaction_prev);
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void transaction_drop_redundant(Transaction *tr) {
|
|
+ bool again;
|
|
+
|
|
+ assert(tr);
|
|
+
|
|
+ /* Goes through the transaction and removes all jobs that are
|
|
+ * a noop */
|
|
+
|
|
+ do {
|
|
+ Job *j;
|
|
+ Iterator i;
|
|
+
|
|
+ again = false;
|
|
+
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ bool changes_something = false;
|
|
+ Job *k;
|
|
+
|
|
+ LIST_FOREACH(transaction, k, j) {
|
|
+
|
|
+ if (!job_is_anchor(k) &&
|
|
+ (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
|
|
+ (!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type)))
|
|
+ continue;
|
|
+
|
|
+ changes_something = true;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (changes_something)
|
|
+ continue;
|
|
+
|
|
+ /* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */
|
|
+ transaction_delete_job(tr, j, false);
|
|
+ again = true;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ } while (again);
|
|
+}
|
|
+
|
|
+static bool unit_matters_to_anchor(Unit *u, Job *j) {
|
|
+ assert(u);
|
|
+ assert(!j->transaction_prev);
|
|
+
|
|
+ /* Checks whether at least one of the jobs for this unit
|
|
+ * matters to the anchor. */
|
|
+
|
|
+ LIST_FOREACH(transaction, j, j)
|
|
+ if (j->matters_to_anchor)
|
|
+ return true;
|
|
+
|
|
+ return false;
|
|
+}
|
|
+
|
|
+static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsigned generation, DBusError *e) {
|
|
+ Iterator i;
|
|
+ Unit *u;
|
|
+ int r;
|
|
+
|
|
+ assert(tr);
|
|
+ assert(j);
|
|
+ assert(!j->transaction_prev);
|
|
+
|
|
+ /* Does a recursive sweep through the ordering graph, looking
|
|
+ * for a cycle. If we find cycle we try to break it. */
|
|
+
|
|
+ /* Have we seen this before? */
|
|
+ if (j->generation == generation) {
|
|
+ Job *k, *delete;
|
|
+
|
|
+ /* If the marker is NULL we have been here already and
|
|
+ * decided the job was loop-free from here. Hence
|
|
+ * shortcut things and return right-away. */
|
|
+ if (!j->marker)
|
|
+ return 0;
|
|
+
|
|
+ /* So, the marker is not NULL and we already have been
|
|
+ * here. We have a cycle. Let's try to break it. We go
|
|
+ * backwards in our path and try to find a suitable
|
|
+ * job to remove. We use the marker to find our way
|
|
+ * back, since smart how we are we stored our way back
|
|
+ * in there. */
|
|
+ log_warning("Found ordering cycle on %s/%s", j->unit->id, job_type_to_string(j->type));
|
|
+
|
|
+ delete = NULL;
|
|
+ for (k = from; k; k = ((k->generation == generation && k->marker != k) ? k->marker : NULL)) {
|
|
+
|
|
+ log_info("Walked on cycle path to %s/%s", k->unit->id, job_type_to_string(k->type));
|
|
+
|
|
+ if (!delete &&
|
|
+ !k->installed &&
|
|
+ !unit_matters_to_anchor(k->unit, k)) {
|
|
+ /* Ok, we can drop this one, so let's
|
|
+ * do so. */
|
|
+ delete = k;
|
|
+ }
|
|
+
|
|
+ /* Check if this in fact was the beginning of
|
|
+ * the cycle */
|
|
+ if (k == j)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+
|
|
+ if (delete) {
|
|
+ log_warning("Breaking ordering cycle by deleting job %s/%s", delete->unit->id, job_type_to_string(delete->type));
|
|
+ transaction_delete_unit(tr, delete->unit);
|
|
+ return -EAGAIN;
|
|
+ }
|
|
+
|
|
+ log_error("Unable to break cycle");
|
|
+
|
|
+ dbus_set_error(e, BUS_ERROR_TRANSACTION_ORDER_IS_CYCLIC, "Transaction order is cyclic. See system logs for details.");
|
|
+ return -ENOEXEC;
|
|
+ }
|
|
+
|
|
+ /* Make the marker point to where we come from, so that we can
|
|
+ * find our way backwards if we want to break a cycle. We use
|
|
+ * a special marker for the beginning: we point to
|
|
+ * ourselves. */
|
|
+ j->marker = from ? from : j;
|
|
+ j->generation = generation;
|
|
+
|
|
+ /* We assume that the the dependencies are bidirectional, and
|
|
+ * hence can ignore UNIT_AFTER */
|
|
+ SET_FOREACH(u, j->unit->dependencies[UNIT_BEFORE], i) {
|
|
+ Job *o;
|
|
+
|
|
+ /* Is there a job for this unit? */
|
|
+ o = hashmap_get(tr->jobs, u);
|
|
+ if (!o) {
|
|
+ /* Ok, there is no job for this in the
|
|
+ * transaction, but maybe there is already one
|
|
+ * running? */
|
|
+ o = u->job;
|
|
+ if (!o)
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ r = transaction_verify_order_one(tr, o, j, generation, e);
|
|
+ if (r < 0)
|
|
+ return r;
|
|
+ }
|
|
+
|
|
+ /* Ok, let's backtrack, and remember that this entry is not on
|
|
+ * our path anymore. */
|
|
+ j->marker = NULL;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static int transaction_verify_order(Transaction *tr, unsigned *generation, DBusError *e) {
|
|
+ Job *j;
|
|
+ int r;
|
|
+ Iterator i;
|
|
+ unsigned g;
|
|
+
|
|
+ assert(tr);
|
|
+ assert(generation);
|
|
+
|
|
+ /* Check if the ordering graph is cyclic. If it is, try to fix
|
|
+ * that up by dropping one of the jobs. */
|
|
+
|
|
+ g = (*generation)++;
|
|
+
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i)
|
|
+ if ((r = transaction_verify_order_one(tr, j, NULL, g, e)) < 0)
|
|
+ return r;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void transaction_collect_garbage(Transaction *tr) {
|
|
+ bool again;
|
|
+
|
|
+ assert(tr);
|
|
+
|
|
+ /* Drop jobs that are not required by any other job */
|
|
+
|
|
+ do {
|
|
+ Iterator i;
|
|
+ Job *j;
|
|
+
|
|
+ again = false;
|
|
+
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ if (j->object_list) {
|
|
+ /* log_debug("Keeping job %s/%s because of %s/%s", */
|
|
+ /* j->unit->id, job_type_to_string(j->type), */
|
|
+ /* j->object_list->subject ? j->object_list->subject->unit->id : "root", */
|
|
+ /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ /* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */
|
|
+ transaction_delete_job(tr, j, true);
|
|
+ again = true;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ } while (again);
|
|
+}
|
|
+
|
|
+static int transaction_is_destructive(Transaction *tr, DBusError *e) {
|
|
+ Iterator i;
|
|
+ Job *j;
|
|
+
|
|
+ assert(tr);
|
|
+
|
|
+ /* Checks whether applying this transaction means that
|
|
+ * existing jobs would be replaced */
|
|
+
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+
|
|
+ /* Assume merged */
|
|
+ assert(!j->transaction_prev);
|
|
+ assert(!j->transaction_next);
|
|
+
|
|
+ if (j->unit->job &&
|
|
+ j->unit->job != j &&
|
|
+ !job_type_is_superset(j->type, j->unit->job->type)) {
|
|
+
|
|
+ dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
|
|
+ return -EEXIST;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static void transaction_minimize_impact(Transaction *tr) {
|
|
+ bool again;
|
|
+ assert(tr);
|
|
+
|
|
+ /* Drops all unnecessary jobs that reverse already active jobs
|
|
+ * or that stop a running service. */
|
|
+
|
|
+ do {
|
|
+ Job *j;
|
|
+ Iterator i;
|
|
+
|
|
+ again = false;
|
|
+
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ LIST_FOREACH(transaction, j, j) {
|
|
+ bool stops_running_service, changes_existing_job;
|
|
+
|
|
+ /* If it matters, we shouldn't drop it */
|
|
+ if (j->matters_to_anchor)
|
|
+ continue;
|
|
+
|
|
+ /* Would this stop a running service?
|
|
+ * Would this change an existing job?
|
|
+ * If so, let's drop this entry */
|
|
+
|
|
+ stops_running_service =
|
|
+ j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
|
|
+
|
|
+ changes_existing_job =
|
|
+ j->unit->job &&
|
|
+ job_type_is_conflicting(j->type, j->unit->job->type);
|
|
+
|
|
+ if (!stops_running_service && !changes_existing_job)
|
|
+ continue;
|
|
+
|
|
+ if (stops_running_service)
|
|
+ log_debug("%s/%s would stop a running service.", j->unit->id, job_type_to_string(j->type));
|
|
+
|
|
+ if (changes_existing_job)
|
|
+ log_debug("%s/%s would change existing job.", j->unit->id, job_type_to_string(j->type));
|
|
+
|
|
+ /* Ok, let's get rid of this */
|
|
+ log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type));
|
|
+
|
|
+ transaction_delete_job(tr, j, true);
|
|
+ again = true;
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ if (again)
|
|
+ break;
|
|
+ }
|
|
+
|
|
+ } while (again);
|
|
+}
|
|
+
|
|
+static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
+ Iterator i;
|
|
+ Job *j;
|
|
+ int r;
|
|
+
|
|
+ /* Moves the transaction jobs to the set of active jobs */
|
|
+
|
|
+ if (mode == JOB_ISOLATE) {
|
|
+
|
|
+ /* When isolating first kill all installed jobs which
|
|
+ * aren't part of the new transaction */
|
|
+ rescan:
|
|
+ HASHMAP_FOREACH(j, m->jobs, i) {
|
|
+ assert(j->installed);
|
|
+
|
|
+ if (hashmap_get(tr->jobs, j->unit))
|
|
+ continue;
|
|
+
|
|
+ /* 'j' itself is safe to remove, but if other jobs
|
|
+ are invalidated recursively, our iterator may become
|
|
+ invalid and we need to start over. */
|
|
+ if (job_finish_and_invalidate(j, JOB_CANCELED) > 0)
|
|
+ goto rescan;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ /* Assume merged */
|
|
+ assert(!j->transaction_prev);
|
|
+ assert(!j->transaction_next);
|
|
+
|
|
+ if (j->installed)
|
|
+ continue;
|
|
+
|
|
+ r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j);
|
|
+ if (r < 0)
|
|
+ goto rollback;
|
|
+ }
|
|
+
|
|
+ while ((j = hashmap_steal_first(tr->jobs))) {
|
|
+ Job *uj;
|
|
+ if (j->installed) {
|
|
+ /* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
|
|
+ continue;
|
|
+ }
|
|
+
|
|
+ uj = j->unit->job;
|
|
+ if (uj) {
|
|
+ job_uninstall(uj);
|
|
+ job_free(uj);
|
|
+ }
|
|
+
|
|
+ j->unit->job = j;
|
|
+ j->installed = true;
|
|
+ m->n_installed_jobs ++;
|
|
+
|
|
+ /* We're fully installed. Now let's free data we don't
|
|
+ * need anymore. */
|
|
+
|
|
+ assert(!j->transaction_next);
|
|
+ assert(!j->transaction_prev);
|
|
+
|
|
+ /* Clean the job dependencies */
|
|
+ transaction_unlink_job(tr, j, false);
|
|
+
|
|
+ job_add_to_run_queue(j);
|
|
+ job_add_to_dbus_queue(j);
|
|
+ job_start_timer(j);
|
|
+
|
|
+ log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
+ }
|
|
+
|
|
+ assert(!tr->anchor);
|
|
+
|
|
+ return 0;
|
|
+
|
|
+rollback:
|
|
+
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ if (j->installed)
|
|
+ continue;
|
|
+
|
|
+ hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
|
|
+ }
|
|
+
|
|
+ return r;
|
|
+}
|
|
+
|
|
+int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e) {
|
|
+ int r;
|
|
+ unsigned generation = 1;
|
|
+
|
|
+ assert(tr);
|
|
+
|
|
+ /* This applies the changes recorded in tr->jobs to
|
|
+ * the actual list of jobs, if possible. */
|
|
+
|
|
+ /* First step: figure out which jobs matter */
|
|
+ transaction_find_jobs_that_matter_to_anchor(tr, NULL, generation++);
|
|
+
|
|
+ /* Second step: Try not to stop any running services if
|
|
+ * we don't have to. Don't try to reverse running
|
|
+ * jobs if we don't have to. */
|
|
+ if (mode == JOB_FAIL)
|
|
+ transaction_minimize_impact(tr);
|
|
+
|
|
+ /* Third step: Drop redundant jobs */
|
|
+ transaction_drop_redundant(tr);
|
|
+
|
|
+ for (;;) {
|
|
+ /* Fourth step: Let's remove unneeded jobs that might
|
|
+ * be lurking. */
|
|
+ if (mode != JOB_ISOLATE)
|
|
+ transaction_collect_garbage(tr);
|
|
+
|
|
+ /* Fifth step: verify order makes sense and correct
|
|
+ * cycles if necessary and possible */
|
|
+ r = transaction_verify_order(tr, &generation, e);
|
|
+ if (r >= 0)
|
|
+ break;
|
|
+
|
|
+ if (r != -EAGAIN) {
|
|
+ log_warning("Requested transaction contains an unfixable cyclic ordering dependency: %s", bus_error(e, r));
|
|
+ return r;
|
|
+ }
|
|
+
|
|
+ /* Let's see if the resulting transaction ordering
|
|
+ * graph is still cyclic... */
|
|
+ }
|
|
+
|
|
+ for (;;) {
|
|
+ /* Sixth step: let's drop unmergeable entries if
|
|
+ * necessary and possible, merge entries we can
|
|
+ * merge */
|
|
+ r = transaction_merge_jobs(tr, e);
|
|
+ if (r >= 0)
|
|
+ break;
|
|
+
|
|
+ if (r != -EAGAIN) {
|
|
+ log_warning("Requested transaction contains unmergeable jobs: %s", bus_error(e, r));
|
|
+ return r;
|
|
+ }
|
|
+
|
|
+ /* Seventh step: an entry got dropped, let's garbage
|
|
+ * collect its dependencies. */
|
|
+ if (mode != JOB_ISOLATE)
|
|
+ transaction_collect_garbage(tr);
|
|
+
|
|
+ /* Let's see if the resulting transaction still has
|
|
+ * unmergeable entries ... */
|
|
+ }
|
|
+
|
|
+ /* Eighth step: Drop redundant jobs again, if the merging now allows us to drop more. */
|
|
+ transaction_drop_redundant(tr);
|
|
+
|
|
+ /* Ninth step: check whether we can actually apply this */
|
|
+ if (mode == JOB_FAIL) {
|
|
+ r = transaction_is_destructive(tr, e);
|
|
+ if (r < 0) {
|
|
+ log_notice("Requested transaction contradicts existing jobs: %s", bus_error(e, r));
|
|
+ return r;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* Tenth step: apply changes */
|
|
+ r = transaction_apply(tr, m, mode);
|
|
+ if (r < 0) {
|
|
+ log_warning("Failed to apply transaction: %s", strerror(-r));
|
|
+ return r;
|
|
+ }
|
|
+
|
|
+ assert(hashmap_isempty(tr->jobs));
|
|
+ assert(!tr->anchor);
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, bool override, bool *is_new) {
|
|
+ Job *j, *f;
|
|
+
|
|
+ assert(tr);
|
|
+ assert(unit);
|
|
+
|
|
+ /* Looks for an existing prospective job and returns that. If
|
|
+ * it doesn't exist it is created and added to the prospective
|
|
+ * jobs list. */
|
|
+
|
|
+ f = hashmap_get(tr->jobs, unit);
|
|
+
|
|
+ LIST_FOREACH(transaction, j, f) {
|
|
+ assert(j->unit == unit);
|
|
+
|
|
+ if (j->type == type) {
|
|
+ if (is_new)
|
|
+ *is_new = false;
|
|
+ return j;
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (unit->job && unit->job->type == type)
|
|
+ j = unit->job;
|
|
+ else {
|
|
+ j = job_new(unit->manager, type, unit);
|
|
+ if (!j)
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ j->generation = 0;
|
|
+ j->marker = NULL;
|
|
+ j->matters_to_anchor = false;
|
|
+ j->override = override;
|
|
+
|
|
+ LIST_PREPEND(Job, transaction, f, j);
|
|
+
|
|
+ if (hashmap_replace(tr->jobs, unit, f) < 0) {
|
|
+ LIST_REMOVE(Job, transaction, f, j);
|
|
+ job_free(j);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ if (is_new)
|
|
+ *is_new = true;
|
|
+
|
|
+ /* log_debug("Added job %s/%s to transaction.", unit->id, job_type_to_string(type)); */
|
|
+
|
|
+ return j;
|
|
+}
|
|
+
|
|
+static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
|
|
+ assert(tr);
|
|
+ assert(j);
|
|
+
|
|
+ if (j->transaction_prev)
|
|
+ j->transaction_prev->transaction_next = j->transaction_next;
|
|
+ else if (j->transaction_next)
|
|
+ hashmap_replace(tr->jobs, j->unit, j->transaction_next);
|
|
+ else
|
|
+ hashmap_remove_value(tr->jobs, j->unit, j);
|
|
+
|
|
+ if (j->transaction_next)
|
|
+ j->transaction_next->transaction_prev = j->transaction_prev;
|
|
+
|
|
+ j->transaction_prev = j->transaction_next = NULL;
|
|
+
|
|
+ while (j->subject_list)
|
|
+ job_dependency_free(j->subject_list, tr);
|
|
+
|
|
+ while (j->object_list) {
|
|
+ Job *other = j->object_list->matters ? j->object_list->subject : NULL;
|
|
+
|
|
+ job_dependency_free(j->object_list, tr);
|
|
+
|
|
+ if (other && delete_dependencies) {
|
|
+ log_debug("Deleting job %s/%s as dependency of job %s/%s",
|
|
+ other->unit->id, job_type_to_string(other->type),
|
|
+ j->unit->id, job_type_to_string(j->type));
|
|
+ transaction_delete_job(tr, other, delete_dependencies);
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+int transaction_add_job_and_dependencies(
|
|
+ Transaction *tr,
|
|
+ JobType type,
|
|
+ Unit *unit,
|
|
+ Job *by,
|
|
+ bool matters,
|
|
+ bool override,
|
|
+ bool conflicts,
|
|
+ bool ignore_requirements,
|
|
+ bool ignore_order,
|
|
+ DBusError *e,
|
|
+ Job **_ret) {
|
|
+ Job *ret;
|
|
+ Iterator i;
|
|
+ Unit *dep;
|
|
+ int r;
|
|
+ bool is_new;
|
|
+
|
|
+ assert(tr);
|
|
+ assert(type < _JOB_TYPE_MAX);
|
|
+ assert(unit);
|
|
+
|
|
+ /* log_debug("Pulling in %s/%s from %s/%s", */
|
|
+ /* unit->id, job_type_to_string(type), */
|
|
+ /* by ? by->unit->id : "NA", */
|
|
+ /* by ? job_type_to_string(by->type) : "NA"); */
|
|
+
|
|
+ if (unit->load_state != UNIT_LOADED &&
|
|
+ unit->load_state != UNIT_ERROR &&
|
|
+ unit->load_state != UNIT_MASKED) {
|
|
+ dbus_set_error(e, BUS_ERROR_LOAD_FAILED, "Unit %s is not loaded properly.", unit->id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (type != JOB_STOP && unit->load_state == UNIT_ERROR) {
|
|
+ dbus_set_error(e, BUS_ERROR_LOAD_FAILED,
|
|
+ "Unit %s failed to load: %s. "
|
|
+ "See system logs and 'systemctl status %s' for details.",
|
|
+ unit->id,
|
|
+ strerror(-unit->load_error),
|
|
+ unit->id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (type != JOB_STOP && unit->load_state == UNIT_MASKED) {
|
|
+ dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->id);
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ if (!unit_job_is_applicable(unit, type)) {
|
|
+ dbus_set_error(e, BUS_ERROR_JOB_TYPE_NOT_APPLICABLE, "Job type %s is not applicable for unit %s.", job_type_to_string(type), unit->id);
|
|
+ return -EBADR;
|
|
+ }
|
|
+
|
|
+ /* First add the job. */
|
|
+ ret = transaction_add_one_job(tr, type, unit, override, &is_new);
|
|
+ if (!ret)
|
|
+ return -ENOMEM;
|
|
+
|
|
+ ret->ignore_order = ret->ignore_order || ignore_order;
|
|
+
|
|
+ /* Then, add a link to the job. */
|
|
+ if (!job_dependency_new(by, ret, matters, conflicts, tr))
|
|
+ return -ENOMEM;
|
|
+
|
|
+ if (is_new && !ignore_requirements) {
|
|
+ Set *following;
|
|
+
|
|
+ /* If we are following some other unit, make sure we
|
|
+ * add all dependencies of everybody following. */
|
|
+ if (unit_following_set(ret->unit, &following) > 0) {
|
|
+ SET_FOREACH(dep, following, i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, type, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ set_free(following);
|
|
+ }
|
|
+
|
|
+ /* Finally, recursively add in all dependencies. */
|
|
+ if (type == JOB_START || type == JOB_RELOAD_OR_START) {
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ if (r != -EBADR)
|
|
+ goto fail;
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ if (r != -EBADR)
|
|
+ goto fail;
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ if (r != -EBADR)
|
|
+ goto fail;
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ if (r != -EBADR)
|
|
+ goto fail;
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ }
|
|
+
|
|
+ if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ if (r != -EBADR)
|
|
+ goto fail;
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ if (r != -EBADR)
|
|
+ goto fail;
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) {
|
|
+
|
|
+ SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) {
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
+ if (r < 0) {
|
|
+ log_warning("Cannot add dependency reload job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
+
|
|
+ if (e)
|
|
+ dbus_error_free(e);
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+
|
|
+ /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
|
|
+ }
|
|
+
|
|
+ if (_ret)
|
|
+ *_ret = ret;
|
|
+
|
|
+ return 0;
|
|
+
|
|
+fail:
|
|
+ return r;
|
|
+}
|
|
+
|
|
+int transaction_add_isolate_jobs(Transaction *tr, Manager *m) {
|
|
+ Iterator i;
|
|
+ Unit *u;
|
|
+ char *k;
|
|
+ int r;
|
|
+
|
|
+ assert(tr);
|
|
+ assert(m);
|
|
+
|
|
+ HASHMAP_FOREACH_KEY(u, k, m->units, i) {
|
|
+
|
|
+ /* ignore aliases */
|
|
+ if (u->id != k)
|
|
+ continue;
|
|
+
|
|
+ if (u->ignore_on_isolate)
|
|
+ continue;
|
|
+
|
|
+ /* No need to stop inactive jobs */
|
|
+ if (UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(u)) && !u->job)
|
|
+ continue;
|
|
+
|
|
+ /* Is there already something listed for this? */
|
|
+ if (hashmap_get(tr->jobs, u))
|
|
+ continue;
|
|
+
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL);
|
|
+ if (r < 0)
|
|
+ log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r));
|
|
+ }
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+Transaction *transaction_new(void) {
|
|
+ Transaction *tr;
|
|
+
|
|
+ tr = new0(Transaction, 1);
|
|
+ if (!tr)
|
|
+ return NULL;
|
|
+
|
|
+ tr->jobs = hashmap_new(trivial_hash_func, trivial_compare_func);
|
|
+ if (!tr->jobs) {
|
|
+ free(tr);
|
|
+ return NULL;
|
|
+ }
|
|
+
|
|
+ return tr;
|
|
+}
|
|
+
|
|
+void transaction_free(Transaction *tr) {
|
|
+ assert(hashmap_isempty(tr->jobs));
|
|
+ hashmap_free(tr->jobs);
|
|
+ free(tr);
|
|
+}
|
|
diff --git a/src/transaction.h b/src/transaction.h
|
|
new file mode 100644
|
|
index 0000000..d519ffb
|
|
--- /dev/null
|
|
+++ b/src/transaction.h
|
|
@@ -0,0 +1,36 @@
|
|
+#ifndef footransactionhfoo
|
|
+#define footransactionhfoo
|
|
+
|
|
+typedef struct Transaction Transaction;
|
|
+
|
|
+#include "unit.h"
|
|
+#include "manager.h"
|
|
+#include "job.h"
|
|
+#include "hashmap.h"
|
|
+
|
|
+struct Transaction {
|
|
+ /* Jobs to be added */
|
|
+ Hashmap *jobs; /* Unit object => Job object list 1:1 */
|
|
+ JobDependency *anchor;
|
|
+};
|
|
+
|
|
+Transaction *transaction_new(void);
|
|
+void transaction_free(Transaction *tr);
|
|
+
|
|
+int transaction_add_job_and_dependencies(
|
|
+ Transaction *tr,
|
|
+ JobType type,
|
|
+ Unit *unit,
|
|
+ Job *by,
|
|
+ bool matters,
|
|
+ bool override,
|
|
+ bool conflicts,
|
|
+ bool ignore_requirements,
|
|
+ bool ignore_order,
|
|
+ DBusError *e,
|
|
+ Job **_ret);
|
|
+int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e);
|
|
+int transaction_add_isolate_jobs(Transaction *tr, Manager *m);
|
|
+void transaction_abort(Transaction *tr);
|
|
+
|
|
+#endif
|
|
--
1.7.7


From bd61057f3d44d7bb4e279310a16646eb8ab9bb8c Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Wed, 18 Apr 2012 01:39:20 +0200
Subject: [PATCH 09/30] job: job_new() can find the manager from the unit
(cherry picked from commit
668ad332a404736474749cbcc8404af3e4447170)

---
src/job.c | 7 +++----
src/job.h | 2 +-
src/transaction.c | 2 +-
3 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/src/job.c b/src/job.c
|
|
index 6d48748..c30bfda 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -33,18 +33,17 @@
|
|
#include "log.h"
|
|
#include "dbus-job.h"
|
|
|
|
-Job* job_new(Manager *m, JobType type, Unit *unit) {
|
|
+Job* job_new(Unit *unit, JobType type) {
|
|
Job *j;
|
|
|
|
- assert(m);
|
|
assert(type < _JOB_TYPE_MAX);
|
|
assert(unit);
|
|
|
|
if (!(j = new0(Job, 1)))
|
|
return NULL;
|
|
|
|
- j->manager = m;
|
|
- j->id = m->current_job_id++;
|
|
+ j->manager = unit->manager;
|
|
+ j->id = j->manager->current_job_id++;
|
|
j->type = type;
|
|
j->unit = unit;
|
|
|
|
diff --git a/src/job.h b/src/job.h
|
|
index efb0af9..faa10e3 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -137,7 +137,7 @@ struct Job {
|
|
bool ignore_order:1;
|
|
};
|
|
|
|
-Job* job_new(Manager *m, JobType type, Unit *unit);
|
|
+Job* job_new(Unit *unit, JobType type);
|
|
void job_uninstall(Job *j);
|
|
void job_free(Job *job);
|
|
void job_dump(Job *j, FILE*f, const char *prefix);
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index 8fa89a7..1344e2f 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -763,7 +763,7 @@ static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, b
|
|
if (unit->job && unit->job->type == type)
|
|
j = unit->job;
|
|
else {
|
|
- j = job_new(unit->manager, type, unit);
|
|
+ j = job_new(unit, type);
|
|
if (!j)
|
|
return NULL;
|
|
}
|
|
--
1.7.7


From 0d6df78cb48dfbd1c37444ee3702ba59d87ca352 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Wed, 18 Apr 2012 15:21:24 +0200
Subject: [PATCH 10/30] job: jobs shouldn't need to know about transaction
anchors

Let the transactions maintain their own anchor links.
(cherry picked from commit 1da4264fbd0fa5e6b1bd5e0ac4674a3503774369)
---
src/job.c | 8 ++------
src/job.h | 5 ++---
src/transaction.c | 18 +++++++++++++++---
3 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/src/job.c b/src/job.c
|
|
index c30bfda..a5ce2d4 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -96,7 +96,7 @@ void job_free(Job *j) {
|
|
free(j);
|
|
}
|
|
|
|
-JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts, Transaction *tr) {
|
|
+JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts) {
|
|
JobDependency *l;
|
|
|
|
assert(object);
|
|
@@ -116,21 +116,17 @@ JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool
|
|
|
|
if (subject)
|
|
LIST_PREPEND(JobDependency, subject, subject->subject_list, l);
|
|
- else
|
|
- LIST_PREPEND(JobDependency, subject, tr->anchor, l);
|
|
|
|
LIST_PREPEND(JobDependency, object, object->object_list, l);
|
|
|
|
return l;
|
|
}
|
|
|
|
-void job_dependency_free(JobDependency *l, Transaction *tr) {
|
|
+void job_dependency_free(JobDependency *l) {
|
|
assert(l);
|
|
|
|
if (l->subject)
|
|
LIST_REMOVE(JobDependency, subject, l->subject->subject_list, l);
|
|
- else
|
|
- LIST_REMOVE(JobDependency, subject, tr->anchor, l);
|
|
|
|
LIST_REMOVE(JobDependency, object, l->object->object_list, l);
|
|
|
|
diff --git a/src/job.h b/src/job.h
|
|
index faa10e3..cbd10c5 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -34,7 +34,6 @@ typedef enum JobMode JobMode;
|
|
typedef enum JobResult JobResult;
|
|
|
|
#include "manager.h"
|
|
-#include "transaction.h"
|
|
#include "unit.h"
|
|
#include "hashmap.h"
|
|
#include "list.h"
|
|
@@ -142,8 +141,8 @@ void job_uninstall(Job *j);
|
|
void job_free(Job *job);
|
|
void job_dump(Job *j, FILE*f, const char *prefix);
|
|
|
|
-JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts, Transaction *tr);
|
|
-void job_dependency_free(JobDependency *l, Transaction *tr);
|
|
+JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts);
|
|
+void job_dependency_free(JobDependency *l);
|
|
|
|
bool job_is_anchor(Job *j);
|
|
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index 1344e2f..2a6f1de 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -789,6 +789,12 @@ static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, b
|
|
return j;
|
|
}
|
|
|
|
+static void transaction_job_dependency_free(Transaction *tr, JobDependency *l) {
|
|
+ if (!l->subject)
|
|
+ LIST_REMOVE(JobDependency, subject, tr->anchor, l);
|
|
+ job_dependency_free(l);
|
|
+}
|
|
+
|
|
static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
|
|
assert(tr);
|
|
assert(j);
|
|
@@ -806,12 +812,12 @@ static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependen
|
|
j->transaction_prev = j->transaction_next = NULL;
|
|
|
|
while (j->subject_list)
|
|
- job_dependency_free(j->subject_list, tr);
|
|
+ transaction_job_dependency_free(tr, j->subject_list);
|
|
|
|
while (j->object_list) {
|
|
Job *other = j->object_list->matters ? j->object_list->subject : NULL;
|
|
|
|
- job_dependency_free(j->object_list, tr);
|
|
+ transaction_job_dependency_free(tr, j->object_list);
|
|
|
|
if (other && delete_dependencies) {
|
|
log_debug("Deleting job %s/%s as dependency of job %s/%s",
|
|
@@ -835,6 +841,7 @@ int transaction_add_job_and_dependencies(
|
|
DBusError *e,
|
|
Job **_ret) {
|
|
Job *ret;
|
|
+ JobDependency *l;
|
|
Iterator i;
|
|
Unit *dep;
|
|
int r;
|
|
@@ -884,9 +891,14 @@ int transaction_add_job_and_dependencies(
|
|
ret->ignore_order = ret->ignore_order || ignore_order;
|
|
|
|
/* Then, add a link to the job. */
|
|
- if (!job_dependency_new(by, ret, matters, conflicts, tr))
|
|
+ l = job_dependency_new(by, ret, matters, conflicts);
|
|
+ if (!l)
|
|
return -ENOMEM;
|
|
|
|
+ /* If the link has no subject job, it's the anchor link. */
|
|
+ if (!by)
|
|
+ LIST_PREPEND(JobDependency, subject, tr->anchor, l);
|
|
+
|
|
if (is_new && !ignore_requirements) {
|
|
Set *following;
|
|
|
|
--
1.7.7


From 0799fe0aa4a9e74a0a11cedeb76a630c6db853d5 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Wed, 18 Apr 2012 18:15:49 +0200
Subject: [PATCH 11/30] transaction: do not add installed jobs to the
transaction

Do not attempt to optimize away the job creation by referring to installed jobs.
We do not want to disturb installed jobs until committing the transaction.

(A later patch to job merging will make the separation of transaction jobs and
installed jobs complete.)
(cherry picked from commit 3c956cfee29fe2642fbe257f55adb3583a4af878)
---
src/transaction.c | 10 +++-------
1 files changed, 3 insertions(+), 7 deletions(-)

diff --git a/src/transaction.c b/src/transaction.c
|
|
index 2a6f1de..c00dd45 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -760,13 +760,9 @@ static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, b
|
|
}
|
|
}
|
|
|
|
- if (unit->job && unit->job->type == type)
|
|
- j = unit->job;
|
|
- else {
|
|
- j = job_new(unit, type);
|
|
- if (!j)
|
|
- return NULL;
|
|
- }
|
|
+ j = job_new(unit, type);
|
|
+ if (!j)
|
|
+ return NULL;
|
|
|
|
j->generation = 0;
|
|
j->marker = NULL;
|
|
--
1.7.7


From 7f60fce10d4c111bcd274d0fcd83a0139b53a9bc Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 00:33:26 +0200
Subject: [PATCH 12/30] transaction: maintain anchor_job

Track which job is the anchor in the transaction.
(cherry picked from commit b94fbd30781d7533492cec65bf7d86fa19a1a074)
---
src/manager.c | 7 +++----
src/transaction.c | 37 ++++++++++++++++++-------------------
src/transaction.h | 4 ++--
3 files changed, 23 insertions(+), 25 deletions(-)

diff --git a/src/manager.c b/src/manager.c
|
|
index a129b88..86ec858 100644
|
|
--- a/src/manager.c
|
|
+++ b/src/manager.c
|
|
@@ -631,7 +631,6 @@ int manager_startup(Manager *m, FILE *serialization, FDSet *fds) {
|
|
|
|
int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool override, DBusError *e, Job **_ret) {
|
|
int r;
|
|
- Job *ret;
|
|
Transaction *tr;
|
|
|
|
assert(m);
|
|
@@ -657,7 +656,7 @@ int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool ove
|
|
|
|
r = transaction_add_job_and_dependencies(tr, type, unit, NULL, true, override, false,
|
|
mode == JOB_IGNORE_DEPENDENCIES || mode == JOB_IGNORE_REQUIREMENTS,
|
|
- mode == JOB_IGNORE_DEPENDENCIES, e, &ret);
|
|
+ mode == JOB_IGNORE_DEPENDENCIES, e);
|
|
if (r < 0)
|
|
goto tr_abort;
|
|
|
|
@@ -671,10 +670,10 @@ int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool ove
|
|
if (r < 0)
|
|
goto tr_abort;
|
|
|
|
- log_debug("Enqueued job %s/%s as %u", unit->id, job_type_to_string(type), (unsigned) ret->id);
|
|
+ log_debug("Enqueued job %s/%s as %u", unit->id, job_type_to_string(type), (unsigned) tr->anchor_job->id);
|
|
|
|
if (_ret)
|
|
- *_ret = ret;
|
|
+ *_ret = tr->anchor_job;
|
|
|
|
transaction_free(tr);
|
|
return 0;
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index c00dd45..d7ecfdb 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -834,8 +834,7 @@ int transaction_add_job_and_dependencies(
|
|
bool conflicts,
|
|
bool ignore_requirements,
|
|
bool ignore_order,
|
|
- DBusError *e,
|
|
- Job **_ret) {
|
|
+ DBusError *e) {
|
|
Job *ret;
|
|
JobDependency *l;
|
|
Iterator i;
|
|
@@ -892,8 +891,11 @@ int transaction_add_job_and_dependencies(
|
|
return -ENOMEM;
|
|
|
|
/* If the link has no subject job, it's the anchor link. */
|
|
- if (!by)
|
|
+ if (!by) {
|
|
LIST_PREPEND(JobDependency, subject, tr->anchor, l);
|
|
+ if (!tr->anchor_job)
|
|
+ tr->anchor_job = ret;
|
|
+ }
|
|
|
|
if (is_new && !ignore_requirements) {
|
|
Set *following;
|
|
@@ -902,7 +904,7 @@ int transaction_add_job_and_dependencies(
|
|
* add all dependencies of everybody following. */
|
|
if (unit_following_set(ret->unit, &following) > 0) {
|
|
SET_FOREACH(dep, following, i) {
|
|
- r = transaction_add_job_and_dependencies(tr, type, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, type, dep, ret, false, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
@@ -917,7 +919,7 @@ int transaction_add_job_and_dependencies(
|
|
/* Finally, recursively add in all dependencies. */
|
|
if (type == JOB_START || type == JOB_RELOAD_OR_START) {
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
@@ -928,7 +930,7 @@ int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_BIND_TO], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
@@ -939,7 +941,7 @@ int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, !override, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, !override, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
@@ -949,7 +951,7 @@ int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, false, false, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, false, false, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
@@ -959,7 +961,7 @@ int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, true, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
@@ -970,7 +972,7 @@ int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
@@ -980,7 +982,7 @@ int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTS], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, true, override, true, false, ignore_order, e);
|
|
if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
@@ -991,7 +993,7 @@ int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_CONFLICTED_BY], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, dep, ret, false, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
@@ -1005,7 +1007,7 @@ int transaction_add_job_and_dependencies(
|
|
if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
@@ -1016,7 +1018,7 @@ int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_BOUND_BY], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
if (r != -EBADR)
|
|
goto fail;
|
|
@@ -1030,7 +1032,7 @@ int transaction_add_job_and_dependencies(
|
|
if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) {
|
|
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) {
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
log_warning("Cannot add dependency reload job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
@@ -1043,9 +1045,6 @@ int transaction_add_job_and_dependencies(
|
|
/* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
|
|
}
|
|
|
|
- if (_ret)
|
|
- *_ret = ret;
|
|
-
|
|
return 0;
|
|
|
|
fail:
|
|
@@ -1078,7 +1077,7 @@ int transaction_add_isolate_jobs(Transaction *tr, Manager *m) {
|
|
if (hashmap_get(tr->jobs, u))
|
|
continue;
|
|
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, NULL, true, false, false, false, false, NULL, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, NULL, true, false, false, false, false, NULL);
|
|
if (r < 0)
|
|
log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r));
|
|
}
|
|
diff --git a/src/transaction.h b/src/transaction.h
|
|
index d519ffb..4818cea 100644
|
|
--- a/src/transaction.h
|
|
+++ b/src/transaction.h
|
|
@@ -12,6 +12,7 @@ struct Transaction {
|
|
/* Jobs to be added */
|
|
Hashmap *jobs; /* Unit object => Job object list 1:1 */
|
|
JobDependency *anchor;
|
|
+ Job *anchor_job; /* the job the user asked for */
|
|
};
|
|
|
|
Transaction *transaction_new(void);
|
|
@@ -27,8 +28,7 @@ int transaction_add_job_and_dependencies(
|
|
bool conflicts,
|
|
bool ignore_requirements,
|
|
bool ignore_order,
|
|
- DBusError *e,
|
|
- Job **_ret);
|
|
+ DBusError *e);
|
|
int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e);
|
|
int transaction_add_isolate_jobs(Transaction *tr, Manager *m);
|
|
void transaction_abort(Transaction *tr);
|
|
--
1.7.7


From 3020dc5e2b840b8732cfdb81c322a47120a0a42f Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 10:33:37 +0200
Subject: [PATCH 13/30] transaction: change the linking of isolate jobs to the
anchor

When isolating, the JOB_STOP jobs have no parent job, so they are all peers
of the real anchor job. This is a bit odd.

Link them from the anchor job.
(cherry picked from commit 4483f694983382b4092e3e295ffb59926cff96d9)
---
src/transaction.c | 6 +++---
1 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/transaction.c b/src/transaction.c
|
|
index d7ecfdb..09abe00 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -893,8 +893,8 @@ int transaction_add_job_and_dependencies(
|
|
/* If the link has no subject job, it's the anchor link. */
|
|
if (!by) {
|
|
LIST_PREPEND(JobDependency, subject, tr->anchor, l);
|
|
- if (!tr->anchor_job)
|
|
- tr->anchor_job = ret;
|
|
+ assert(!tr->anchor_job);
|
|
+ tr->anchor_job = ret;
|
|
}
|
|
|
|
if (is_new && !ignore_requirements) {
|
|
@@ -1077,7 +1077,7 @@ int transaction_add_isolate_jobs(Transaction *tr, Manager *m) {
|
|
if (hashmap_get(tr->jobs, u))
|
|
continue;
|
|
|
|
- r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, NULL, true, false, false, false, false, NULL);
|
|
+ r = transaction_add_job_and_dependencies(tr, JOB_STOP, u, tr->anchor_job, true, false, false, false, false, NULL);
|
|
if (r < 0)
|
|
log_warning("Cannot add isolate job for unit %s, ignoring: %s", u->id, strerror(-r));
|
|
}
|
|
--
1.7.7


From 1ceeee2cf56d60ac37199ba9ebbe02144200857c Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 01:20:14 +0200
Subject: [PATCH 14/30] transaction: simplify
transaction_find_jobs_that_matter_to_anchor() (cherry
picked from commit
0d9989aa68963037a18fb7ed4f309f6155927d70)

---
src/transaction.c | 19 ++++++-------------
1 files changed, 6 insertions(+), 13 deletions(-)

diff --git a/src/transaction.c b/src/transaction.c
|
|
index 09abe00..cac58e6 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -38,22 +38,18 @@ void transaction_abort(Transaction *tr) {
|
|
assert(!tr->anchor);
|
|
}
|
|
|
|
-static void transaction_find_jobs_that_matter_to_anchor(Transaction *tr, Job *j, unsigned generation) {
|
|
+static void transaction_find_jobs_that_matter_to_anchor(Job *j, unsigned generation) {
|
|
JobDependency *l;
|
|
|
|
- assert(tr);
|
|
-
|
|
/* A recursive sweep through the graph that marks all units
|
|
* that matter to the anchor job, i.e. are directly or
|
|
* indirectly a dependency of the anchor job via paths that
|
|
* are fully marked as mattering. */
|
|
|
|
- if (j)
|
|
- l = j->subject_list;
|
|
- else
|
|
- l = tr->anchor;
|
|
+ j->matters_to_anchor = true;
|
|
+ j->generation = generation;
|
|
|
|
- LIST_FOREACH(subject, l, l) {
|
|
+ LIST_FOREACH(subject, l, j->subject_list) {
|
|
|
|
/* This link does not matter */
|
|
if (!l->matters)
|
|
@@ -63,10 +59,7 @@ static void transaction_find_jobs_that_matter_to_anchor(Transaction *tr, Job *j,
|
|
if (l->object->generation == generation)
|
|
continue;
|
|
|
|
- l->object->matters_to_anchor = true;
|
|
- l->object->generation = generation;
|
|
-
|
|
- transaction_find_jobs_that_matter_to_anchor(tr, l->object, generation);
|
|
+ transaction_find_jobs_that_matter_to_anchor(l->object, generation);
|
|
}
|
|
}
|
|
|
|
@@ -659,7 +652,7 @@ int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e
|
|
* the actual list of jobs, if possible. */
|
|
|
|
/* First step: figure out which jobs matter */
|
|
- transaction_find_jobs_that_matter_to_anchor(tr, NULL, generation++);
|
|
+ transaction_find_jobs_that_matter_to_anchor(tr->anchor_job, generation++);
|
|
|
|
/* Second step: Try not to stop any running services if
|
|
* we don't have to. Don't try to reverse running
|
|
--
1.7.7


From ba088fafbe639c1a8e859406e9f82d0d8bffc804 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 02:48:24 +0200
Subject: [PATCH 15/30] transaction: avoid garbage collecting the anchor job

Make sure the anchor job is never considered garbage, even if it has no links
leading to it (this will be allowed in the next patch).
(cherry picked from commit 38809d9dfed4c75d9e97c4e5da2ff957723c4cad)
---
src/transaction.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)

diff --git a/src/transaction.c b/src/transaction.c
|
|
index cac58e6..ddb02c0 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -454,7 +454,7 @@ static void transaction_collect_garbage(Transaction *tr) {
|
|
again = false;
|
|
|
|
HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- if (j->object_list) {
|
|
+ if (tr->anchor_job == j || j->object_list) {
|
|
/* log_debug("Keeping job %s/%s because of %s/%s", */
|
|
/* j->unit->id, job_type_to_string(j->type), */
|
|
/* j->object_list->subject ? j->object_list->subject->unit->id : "root", */
|
|
--
1.7.7


From 436271253850ec21fccfaa435fa2c6ce445721ac Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 02:11:14 +0200
Subject: [PATCH 16/30] transaction: remove the anchor link

tr->anchor_job is sufficient.
(cherry picked from commit e6eda1f23efab618bb26e7015230d8552b401dc6)
---
src/job.c | 12 ------------
src/job.h | 2 --
src/transaction.c | 31 ++++++++-----------------------
src/transaction.h | 1 -
4 files changed, 8 insertions(+), 38 deletions(-)

diff --git a/src/job.c b/src/job.c
|
|
index a5ce2d4..6b93b63 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -151,18 +151,6 @@ void job_dump(Job *j, FILE*f, const char *prefix) {
|
|
prefix, yes_no(j->override));
|
|
}
|
|
|
|
-bool job_is_anchor(Job *j) {
|
|
- JobDependency *l;
|
|
-
|
|
- assert(j);
|
|
-
|
|
- LIST_FOREACH(object, l, j->object_list)
|
|
- if (!l->subject)
|
|
- return true;
|
|
-
|
|
- return false;
|
|
-}
|
|
-
|
|
/*
|
|
* Merging is commutative, so imagine the matrix as symmetric. We store only
|
|
* its lower triangle to avoid duplication. We don't store the main diagonal,
|
|
diff --git a/src/job.h b/src/job.h
|
|
index cbd10c5..6b06c2a 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -144,8 +144,6 @@ void job_dump(Job *j, FILE*f, const char *prefix);
|
|
JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts);
|
|
void job_dependency_free(JobDependency *l);
|
|
|
|
-bool job_is_anchor(Job *j);
|
|
-
|
|
int job_merge(Job *j, Job *other);
|
|
|
|
JobType job_type_lookup_merge(JobType a, JobType b);
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index ddb02c0..39cfe54 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -34,8 +34,6 @@ void transaction_abort(Transaction *tr) {
|
|
transaction_delete_job(tr, j, true);
|
|
|
|
assert(hashmap_isempty(tr->jobs));
|
|
-
|
|
- assert(!tr->anchor);
|
|
}
|
|
|
|
static void transaction_find_jobs_that_matter_to_anchor(Job *j, unsigned generation) {
|
|
@@ -287,7 +285,7 @@ static void transaction_drop_redundant(Transaction *tr) {
|
|
|
|
LIST_FOREACH(transaction, k, j) {
|
|
|
|
- if (!job_is_anchor(k) &&
|
|
+ if (tr->anchor_job != k &&
|
|
(k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
|
|
(!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type)))
|
|
continue;
|
|
@@ -626,8 +624,6 @@ static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
}
|
|
|
|
- assert(!tr->anchor);
|
|
-
|
|
return 0;
|
|
|
|
rollback:
|
|
@@ -726,7 +722,6 @@ int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e
|
|
}
|
|
|
|
assert(hashmap_isempty(tr->jobs));
|
|
- assert(!tr->anchor);
|
|
|
|
return 0;
|
|
}
|
|
@@ -778,12 +773,6 @@ static Job* transaction_add_one_job(Transaction *tr, JobType type, Unit *unit, b
|
|
return j;
|
|
}
|
|
|
|
-static void transaction_job_dependency_free(Transaction *tr, JobDependency *l) {
|
|
- if (!l->subject)
|
|
- LIST_REMOVE(JobDependency, subject, tr->anchor, l);
|
|
- job_dependency_free(l);
|
|
-}
|
|
-
|
|
static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependencies) {
|
|
assert(tr);
|
|
assert(j);
|
|
@@ -801,12 +790,12 @@ static void transaction_unlink_job(Transaction *tr, Job *j, bool delete_dependen
|
|
j->transaction_prev = j->transaction_next = NULL;
|
|
|
|
while (j->subject_list)
|
|
- transaction_job_dependency_free(tr, j->subject_list);
|
|
+ job_dependency_free(j->subject_list);
|
|
|
|
while (j->object_list) {
|
|
Job *other = j->object_list->matters ? j->object_list->subject : NULL;
|
|
|
|
- transaction_job_dependency_free(tr, j->object_list);
|
|
+ job_dependency_free(j->object_list);
|
|
|
|
if (other && delete_dependencies) {
|
|
log_debug("Deleting job %s/%s as dependency of job %s/%s",
|
|
@@ -829,7 +818,6 @@ int transaction_add_job_and_dependencies(
|
|
bool ignore_order,
|
|
DBusError *e) {
|
|
Job *ret;
|
|
- JobDependency *l;
|
|
Iterator i;
|
|
Unit *dep;
|
|
int r;
|
|
@@ -879,17 +867,14 @@ int transaction_add_job_and_dependencies(
|
|
ret->ignore_order = ret->ignore_order || ignore_order;
|
|
|
|
/* Then, add a link to the job. */
|
|
- l = job_dependency_new(by, ret, matters, conflicts);
|
|
- if (!l)
|
|
- return -ENOMEM;
|
|
-
|
|
- /* If the link has no subject job, it's the anchor link. */
|
|
- if (!by) {
|
|
- LIST_PREPEND(JobDependency, subject, tr->anchor, l);
|
|
+ if (by) {
|
|
+ if (!job_dependency_new(by, ret, matters, conflicts))
|
|
+ return -ENOMEM;
|
|
+ } else {
|
|
+ /* If the job has no parent job, it is the anchor job. */
|
|
assert(!tr->anchor_job);
|
|
tr->anchor_job = ret;
|
|
}
|
|
-
|
|
if (is_new && !ignore_requirements) {
|
|
Set *following;
|
|
|
|
diff --git a/src/transaction.h b/src/transaction.h
|
|
index 4818cea..74d7461 100644
|
|
--- a/src/transaction.h
|
|
+++ b/src/transaction.h
|
|
@@ -11,7 +11,6 @@ typedef struct Transaction Transaction;
|
|
struct Transaction {
|
|
/* Jobs to be added */
|
|
Hashmap *jobs; /* Unit object => Job object list 1:1 */
|
|
- JobDependency *anchor;
|
|
Job *anchor_job; /* the job the user asked for */
|
|
};
|
|
|
|
--
1.7.7


From cdfe041c84fb813d883de58003e4a986ebde35e6 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Thu, 19 Apr 2012 23:56:26 +0200
Subject: [PATCH 17/30] transaction: remove a couple of asserts

We already asserted these facts in the previous loop.
(cherry picked from commit f1c2bdca422dba1bc5615f72662dee5ce69c147b)
---
src/transaction.c | 3 ---
1 files changed, 0 insertions(+), 3 deletions(-)

diff --git a/src/transaction.c b/src/transaction.c
|
|
index 39cfe54..41f7b82 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -611,9 +611,6 @@ static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
/* We're fully installed. Now let's free data we don't
|
|
* need anymore. */
|
|
|
|
- assert(!j->transaction_next);
|
|
- assert(!j->transaction_prev);
|
|
-
|
|
/* Clean the job dependencies */
|
|
transaction_unlink_job(tr, j, false);
|
|
|
|
--
1.7.7


From ff3ec7f35ab49a1e306f90b33866f4dc7fba282f Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 02:04:01 +0200
Subject: [PATCH 18/30] job: separate job_install()

Let the jobs install themselves.
(cherry picked from commit 05d576f1f77699ea5c2e5ed0e04451b14dfc45a0)
---
src/job.c | 44 +++++++++++++++++++++++++++++---------------
src/job.h | 3 ++-
src/transaction.c | 18 ++----------------
3 files changed, 33 insertions(+), 32 deletions(-)

diff --git a/src/job.c b/src/job.c
|
|
index 6b93b63..c5bf9cd 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -54,21 +54,6 @@ Job* job_new(Unit *unit, JobType type) {
|
|
return j;
|
|
}
|
|
|
|
-void job_uninstall(Job *j) {
|
|
- assert(j->installed);
|
|
- /* Detach from next 'bigger' objects */
|
|
-
|
|
- bus_job_send_removed_signal(j);
|
|
-
|
|
- if (j->unit->job == j) {
|
|
- j->unit->job = NULL;
|
|
- unit_add_to_gc_queue(j->unit);
|
|
- }
|
|
-
|
|
- hashmap_remove(j->manager->jobs, UINT32_TO_PTR(j->id));
|
|
- j->installed = false;
|
|
-}
|
|
-
|
|
void job_free(Job *j) {
|
|
assert(j);
|
|
assert(!j->installed);
|
|
@@ -96,6 +81,35 @@ void job_free(Job *j) {
|
|
free(j);
|
|
}
|
|
|
|
+void job_uninstall(Job *j) {
|
|
+ assert(j->installed);
|
|
+ /* Detach from next 'bigger' objects */
|
|
+
|
|
+ bus_job_send_removed_signal(j);
|
|
+
|
|
+ if (j->unit->job == j) {
|
|
+ j->unit->job = NULL;
|
|
+ unit_add_to_gc_queue(j->unit);
|
|
+ }
|
|
+
|
|
+ hashmap_remove(j->manager->jobs, UINT32_TO_PTR(j->id));
|
|
+ j->installed = false;
|
|
+}
|
|
+
|
|
+void job_install(Job *j) {
|
|
+ Job *uj = j->unit->job;
|
|
+
|
|
+ if (uj) {
|
|
+ job_uninstall(uj);
|
|
+ job_free(uj);
|
|
+ }
|
|
+
|
|
+ j->unit->job = j;
|
|
+ j->installed = true;
|
|
+ j->manager->n_installed_jobs ++;
|
|
+ log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
+}
|
|
+
|
|
JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts) {
|
|
JobDependency *l;
|
|
|
|
diff --git a/src/job.h b/src/job.h
|
|
index 6b06c2a..8fa9046 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -137,8 +137,9 @@ struct Job {
|
|
};
|
|
|
|
Job* job_new(Unit *unit, JobType type);
|
|
-void job_uninstall(Job *j);
|
|
void job_free(Job *job);
|
|
+void job_install(Job *j);
|
|
+void job_uninstall(Job *j);
|
|
void job_dump(Job *j, FILE*f, const char *prefix);
|
|
|
|
JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts);
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index 41f7b82..d495cbd 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -592,33 +592,19 @@ static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
}
|
|
|
|
while ((j = hashmap_steal_first(tr->jobs))) {
|
|
- Job *uj;
|
|
if (j->installed) {
|
|
/* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
|
|
continue;
|
|
}
|
|
|
|
- uj = j->unit->job;
|
|
- if (uj) {
|
|
- job_uninstall(uj);
|
|
- job_free(uj);
|
|
- }
|
|
-
|
|
- j->unit->job = j;
|
|
- j->installed = true;
|
|
- m->n_installed_jobs ++;
|
|
-
|
|
- /* We're fully installed. Now let's free data we don't
|
|
- * need anymore. */
|
|
-
|
|
/* Clean the job dependencies */
|
|
transaction_unlink_job(tr, j, false);
|
|
|
|
+ job_install(j);
|
|
+
|
|
job_add_to_run_queue(j);
|
|
job_add_to_dbus_queue(j);
|
|
job_start_timer(j);
|
|
-
|
|
- log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
}
|
|
|
|
return 0;
|
|
--
1.7.7


From 78c1339da1446bac0f4a5fbb0f1add66a6246bec Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 09:38:43 +0200
Subject: [PATCH 19/30] transaction: rework merging with installed jobs

Previously transactions could reference installed jobs. It made some issues
difficult to fix.

This sets new rules for jobs:
A job cannot be both a member of a transaction and installed. When jobs are
created, they are linked to a transaction. The whole transaction is constructed
(with merging of jobs within, etc.). When it's complete, all the jobs are
unlinked from it one by one and let to install themselves. It is during the
installation when merging with previously installed jobs (from older
transactions) is contemplated.

Merging with installed jobs has different rules than merging within a
transaction:
- An installed conflicting job gets cancelled. It cannot be simply deleted,
because someone might be waiting for its completion on DBus.
- An installed, but still waiting, job can be safely merged into.
- An installed and running job can be tricky. For some job types it is safe to
just merge. For the other types we merge anyway, but put the job back into
JOB_WAITING to allow it to run again. This may be suboptimal, but it is not
currently possible to have more than one installed job for a unit.

Note this also fixes a bug where the anchor job could be deleted during merging
within the transaction.
(cherry picked from commit 656bbffc6c45bdd8d5c28a96ca948ba16c546547)
---
src/job.c | 58 ++++++++++++++++++++++++++++++++++++++++++++++++++--
src/job.h | 2 +-
src/transaction.c | 21 +++++++++++--------
3 files changed, 68 insertions(+), 13 deletions(-)

diff --git a/src/job.c b/src/job.c
|
|
index c5bf9cd..e6737df 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -96,18 +96,70 @@ void job_uninstall(Job *j) {
|
|
j->installed = false;
|
|
}
|
|
|
|
-void job_install(Job *j) {
|
|
+static bool job_type_allows_late_merge(JobType t) {
|
|
+ /* Tells whether it is OK to merge a job of type 't' with an already
|
|
+ * running job.
|
|
+ * Reloads cannot be merged this way. Think of the sequence:
|
|
+ * 1. Reload of a daemon is in progress; the daemon has already loaded
|
|
+ * its config file, but hasn't completed the reload operation yet.
|
|
+ * 2. Edit foo's config file.
|
|
+ * 3. Trigger another reload to have the daemon use the new config.
|
|
+ * Should the second reload job be merged into the first one, the daemon
|
|
+ * would not know about the new config.
|
|
+ * JOB_RESTART jobs on the other hand can be merged, because they get
|
|
+ * patched into JOB_START after stopping the unit. So if we see a
|
|
+ * JOB_RESTART running, it means the unit hasn't stopped yet and at
|
|
+ * this time the merge is still allowed. */
|
|
+ return !(t == JOB_RELOAD || t == JOB_RELOAD_OR_START);
|
|
+}
|
|
+
|
|
+static void job_merge_into_installed(Job *j, Job *other) {
|
|
+ assert(j->installed);
|
|
+ assert(j->unit == other->unit);
|
|
+
|
|
+ j->type = job_type_lookup_merge(j->type, other->type);
|
|
+ assert(j->type >= 0);
|
|
+
|
|
+ j->override = j->override || other->override;
|
|
+}
|
|
+
|
|
+Job* job_install(Job *j) {
|
|
Job *uj = j->unit->job;
|
|
|
|
+ assert(!j->installed);
|
|
+
|
|
if (uj) {
|
|
- job_uninstall(uj);
|
|
- job_free(uj);
|
|
+ if (job_type_is_conflicting(uj->type, j->type))
|
|
+ job_finish_and_invalidate(uj, JOB_CANCELED);
|
|
+ else {
|
|
+ /* not conflicting, i.e. mergeable */
|
|
+
|
|
+ if (uj->state == JOB_WAITING ||
|
|
+ (job_type_allows_late_merge(j->type) && job_type_is_superset(uj->type, j->type))) {
|
|
+ job_merge_into_installed(uj, j);
|
|
+ log_debug("Merged into installed job %s/%s as %u",
|
|
+ uj->unit->id, job_type_to_string(uj->type), (unsigned) uj->id);
|
|
+ return uj;
|
|
+ } else {
|
|
+ /* already running and not safe to merge into */
|
|
+ /* Patch uj to become a merged job and re-run it. */
|
|
+ /* XXX It should be safer to queue j to run after uj finishes, but it is
|
|
+ * not currently possible to have more than one installed job per unit. */
|
|
+ job_merge_into_installed(uj, j);
|
|
+ log_debug("Merged into running job, re-running: %s/%s as %u",
|
|
+ uj->unit->id, job_type_to_string(uj->type), (unsigned) uj->id);
|
|
+ uj->state = JOB_WAITING;
|
|
+ return uj;
|
|
+ }
|
|
+ }
|
|
}
|
|
|
|
+ /* Install the job */
|
|
j->unit->job = j;
|
|
j->installed = true;
|
|
j->manager->n_installed_jobs ++;
|
|
log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
+ return j;
|
|
}
|
|
|
|
JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts) {
|
|
diff --git a/src/job.h b/src/job.h
|
|
index 8fa9046..eab0e07 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -138,7 +138,7 @@ struct Job {
|
|
|
|
Job* job_new(Unit *unit, JobType type);
|
|
void job_free(Job *job);
|
|
-void job_install(Job *j);
|
|
+Job* job_install(Job *j);
|
|
void job_uninstall(Job *j);
|
|
void job_dump(Job *j, FILE*f, const char *prefix);
|
|
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index d495cbd..aa0cedf 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -243,21 +243,14 @@ static int transaction_merge_jobs(Transaction *tr, DBusError *e) {
|
|
LIST_FOREACH(transaction, k, j->transaction_next)
|
|
assert_se(job_type_merge(&t, k->type) == 0);
|
|
|
|
- /* If an active job is mergeable, merge it too */
|
|
- if (j->unit->job)
|
|
- job_type_merge(&t, j->unit->job->type); /* Might fail. Which is OK */
|
|
-
|
|
while ((k = j->transaction_next)) {
|
|
- if (j->installed) {
|
|
+ if (tr->anchor_job == k) {
|
|
transaction_merge_and_delete_job(tr, k, j, t);
|
|
j = k;
|
|
} else
|
|
transaction_merge_and_delete_job(tr, j, k, t);
|
|
}
|
|
|
|
- if (j->unit->job && !j->installed)
|
|
- transaction_merge_and_delete_job(tr, j, j->unit->job, t);
|
|
-
|
|
assert(!j->transaction_next);
|
|
assert(!j->transaction_prev);
|
|
}
|
|
@@ -592,6 +585,8 @@ static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
}
|
|
|
|
while ((j = hashmap_steal_first(tr->jobs))) {
|
|
+ Job *installed_job;
|
|
+
|
|
if (j->installed) {
|
|
/* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
|
|
continue;
|
|
@@ -600,7 +595,15 @@ static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
/* Clean the job dependencies */
|
|
transaction_unlink_job(tr, j, false);
|
|
|
|
- job_install(j);
|
|
+ installed_job = job_install(j);
|
|
+ if (installed_job != j) {
|
|
+ /* j has been merged into a previously installed job */
|
|
+ if (tr->anchor_job == j)
|
|
+ tr->anchor_job = installed_job;
|
|
+ hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
|
|
+ job_free(j);
|
|
+ j = installed_job;
|
|
+ }
|
|
|
|
job_add_to_run_queue(j);
|
|
job_add_to_dbus_queue(j);
|
|
--
1.7.7


From 3d47dab6ed99778f9812ec4039135feb950f02da Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 10:02:05 +0200
Subject: [PATCH 20/30] transaction: remove checks for installed

Transactions cannot contain installed jobs anymore. Remove the now pointless
checks.
(cherry picked from commit d6a093d098054b6fe866441251ad9485c9e31584)
---
src/job.c | 7 +++----
src/transaction.c | 21 +++------------------
2 files changed, 6 insertions(+), 22 deletions(-)

diff --git a/src/job.c b/src/job.c
|
|
index e6737df..fb759a5 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -83,14 +83,13 @@ void job_free(Job *j) {
|
|
|
|
void job_uninstall(Job *j) {
|
|
assert(j->installed);
|
|
+ assert(j->unit->job == j);
|
|
/* Detach from next 'bigger' objects */
|
|
|
|
bus_job_send_removed_signal(j);
|
|
|
|
- if (j->unit->job == j) {
|
|
- j->unit->job = NULL;
|
|
- unit_add_to_gc_queue(j->unit);
|
|
- }
|
|
+ j->unit->job = NULL;
|
|
+ unit_add_to_gc_queue(j->unit);
|
|
|
|
hashmap_remove(j->manager->jobs, UINT32_TO_PTR(j->id));
|
|
j->installed = false;
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index aa0cedf..c3e1e13 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -11,8 +11,7 @@ static void transaction_delete_job(Transaction *tr, Job *j, bool delete_dependen
|
|
|
|
transaction_unlink_job(tr, j, delete_dependencies);
|
|
|
|
- if (!j->installed)
|
|
- job_free(j);
|
|
+ job_free(j);
|
|
}
|
|
|
|
static void transaction_delete_unit(Transaction *tr, Unit *u) {
|
|
@@ -279,7 +278,7 @@ static void transaction_drop_redundant(Transaction *tr) {
|
|
LIST_FOREACH(transaction, k, j) {
|
|
|
|
if (tr->anchor_job != k &&
|
|
- (k->installed || job_type_is_redundant(k->type, unit_active_state(k->unit))) &&
|
|
+ job_type_is_redundant(k->type, unit_active_state(k->unit)) &&
|
|
(!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type)))
|
|
continue;
|
|
|
|
@@ -349,7 +348,6 @@ static int transaction_verify_order_one(Transaction *tr, Job *j, Job *from, unsi
log_info("Walked on cycle path to %s/%s", k->unit->id, job_type_to_string(k->type));
if (!delete &&
- !k->installed &&
!unit_matters_to_anchor(k->unit, k)) {
/* Ok, we can drop this one, so let's
* do so. */
@@ -478,7 +476,6 @@ static int transaction_is_destructive(Transaction *tr, DBusError *e) {
assert(!j->transaction_next);
if (j->unit->job &&
- j->unit->job != j &&
!job_type_is_superset(j->type, j->unit->job->type)) {
dbus_set_error(e, BUS_ERROR_TRANSACTION_IS_DESTRUCTIVE, "Transaction is destructive.");
@@ -576,9 +573,6 @@ static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
assert(!j->transaction_prev);
assert(!j->transaction_next);
- if (j->installed)
- continue;
-
r = hashmap_put(m->jobs, UINT32_TO_PTR(j->id), j);
if (r < 0)
goto rollback;
@@ -587,11 +581,6 @@ static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
while ((j = hashmap_steal_first(tr->jobs))) {
Job *installed_job;
- if (j->installed) {
- /* log_debug("Skipping already installed job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id); */
- continue;
- }
-
/* Clean the job dependencies */
transaction_unlink_job(tr, j, false);
@@ -614,12 +603,8 @@ static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
rollback:
- HASHMAP_FOREACH(j, tr->jobs, i) {
- if (j->installed)
- continue;
-
+ HASHMAP_FOREACH(j, tr->jobs, i)
hashmap_remove(m->jobs, UINT32_TO_PTR(j->id));
- }
return r;
}
--
1.7.7
From 55284fc0ba28bf10d4fa535a86921bad23505c96 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Fri, 20 Apr 2012 12:28:31 +0200
Subject: [PATCH 21/30] dbus-job: allow multiple bus clients
Merging of jobs can result in more than one client being interested in a job.
(cherry picked from commit 97e6a11996855800f68dc41c13537784198c8b61)
---
src/dbus-job.c | 146 +++++++++++++++++++++++++++++++---------------------
src/dbus-manager.c | 6 ++-
src/dbus.c | 14 +++--
src/job.c | 21 +++++++-
src/job.h | 15 ++++-
5 files changed, 131 insertions(+), 71 deletions(-)
diff --git a/src/dbus-job.c b/src/dbus-job.c
index ab6d610..aa1b4eb 100644
--- a/src/dbus-job.c
+++ b/src/dbus-job.c
@@ -225,61 +225,71 @@ const DBusObjectPathVTable bus_job_vtable = {
.message_function = bus_job_message_handler
};
-static int job_send_message(Job *j, DBusMessage *m) {
+static int job_send_message(Job *j, DBusMessage* (*new_message)(Job *j)) {
+ DBusMessage *m = NULL;
int r;
assert(j);
- assert(m);
+ assert(new_message);
if (bus_has_subscriber(j->manager)) {
- if ((r = bus_broadcast(j->manager, m)) < 0)
+ m = new_message(j);
+ if (!m)
+ goto oom;
+ r = bus_broadcast(j->manager, m);
+ dbus_message_unref(m);
+ if (r < 0)
return r;
- } else if (j->bus_client) {
+ } else {
/* If nobody is subscribed, we just send the message
- * to the client which created the job */
+ * to the client(s) which created the job */
+ JobBusClient *cl;
+ assert(j->bus_client_list);
+ LIST_FOREACH(client, cl, j->bus_client_list) {
+ assert(cl->bus);
- assert(j->bus);
+ m = new_message(j);
+ if (!m)
+ goto oom;
- if (!dbus_message_set_destination(m, j->bus_client))
- return -ENOMEM;
+ if (!dbus_message_set_destination(m, cl->name))
+ goto oom;
+
+ if (!dbus_connection_send(cl->bus, m, NULL))
+ goto oom;
- if (!dbus_connection_send(j->bus, m, NULL))
- return -ENOMEM;
+ dbus_message_unref(m);
+ m = NULL;
+ }
}
return 0;
+oom:
+ if (m)
+ dbus_message_unref(m);
+ return -ENOMEM;
}
-void bus_job_send_change_signal(Job *j) {
- char *p = NULL;
+static DBusMessage* new_change_signal_message(Job *j) {
DBusMessage *m = NULL;
+ char *p = NULL;
- assert(j);
-
- if (j->in_dbus_queue) {
- LIST_REMOVE(Job, dbus_queue, j->manager->dbus_job_queue, j);
- j->in_dbus_queue = false;
- }
-
- if (!bus_has_subscriber(j->manager) && !j->bus_client) {
- j->sent_dbus_new_signal = true;
- return;
- }
-
- if (!(p = job_dbus_path(j)))
+ p = job_dbus_path(j);
+ if (!p)
goto oom;
if (j->sent_dbus_new_signal) {
/* Send a properties changed signal */
-
- if (!(m = bus_properties_changed_new(p, "org.freedesktop.systemd1.Job", INVALIDATING_PROPERTIES)))
+ m = bus_properties_changed_new(p, "org.freedesktop.systemd1.Job", INVALIDATING_PROPERTIES);
+ if (!m)
goto oom;
} else {
/* Send a new signal */
- if (!(m = dbus_message_new_signal("/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager", "JobNew")))
+ m = dbus_message_new_signal("/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager", "JobNew");
+ if (!m)
goto oom;
if (!dbus_message_append_args(m,
@@ -289,42 +299,26 @@ void bus_job_send_change_signal(Job *j) {
goto oom;
}
- if (job_send_message(j, m) < 0)
- goto oom;
-
- free(p);
- dbus_message_unref(m);
-
- j->sent_dbus_new_signal = true;
-
- return;
+ return m;
oom:
- free(p);
-
if (m)
dbus_message_unref(m);
-
- log_error("Failed to allocate job change signal.");
+ free(p);
+ return NULL;
}
-void bus_job_send_removed_signal(Job *j) {
- char *p = NULL;
+static DBusMessage* new_removed_signal_message(Job *j) {
DBusMessage *m = NULL;
+ char *p = NULL;
const char *r;
- assert(j);
-
- if (!bus_has_subscriber(j->manager) && !j->bus_client)
- return;
-
- if (!j->sent_dbus_new_signal)
- bus_job_send_change_signal(j);
-
- if (!(p = job_dbus_path(j)))
+ p = job_dbus_path(j);
+ if (!p)
goto oom;
- if (!(m = dbus_message_new_signal("/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager", "JobRemoved")))
+ m = dbus_message_new_signal("/org/freedesktop/systemd1", "org.freedesktop.systemd1.Manager", "JobRemoved");
+ if (!m)
goto oom;
r = job_result_to_string(j->result);
@@ -336,19 +330,53 @@ void bus_job_send_removed_signal(Job *j) {
DBUS_TYPE_INVALID))
goto oom;
- if (job_send_message(j, m) < 0)
- goto oom;
+ return m;
+oom:
+ if (m)
+ dbus_message_unref(m);
free(p);
- dbus_message_unref(m);
+ return NULL;
+}
+
+void bus_job_send_change_signal(Job *j) {
+ assert(j);
+
+ if (j->in_dbus_queue) {
+ LIST_REMOVE(Job, dbus_queue, j->manager->dbus_job_queue, j);
+ j->in_dbus_queue = false;
+ }
+
+ if (!bus_has_subscriber(j->manager) && !j->bus_client_list) {
+ j->sent_dbus_new_signal = true;
+ return;
+ }
+
+ if (job_send_message(j, new_change_signal_message) < 0)
+ goto oom;
+
+ j->sent_dbus_new_signal = true;
return;
oom:
- free(p);
+ log_error("Failed to allocate job change signal.");
+}
- if (m)
- dbus_message_unref(m);
+void bus_job_send_removed_signal(Job *j) {
+ assert(j);
+
+ if (!bus_has_subscriber(j->manager) && !j->bus_client_list)
+ return;
+
+ if (!j->sent_dbus_new_signal)
+ bus_job_send_change_signal(j);
+ if (job_send_message(j, new_removed_signal_message) < 0)
+ goto oom;
+
+ return;
+
+oom:
log_error("Failed to allocate job remove signal.");
}
diff --git a/src/dbus-manager.c b/src/dbus-manager.c
index 6d272cb..0b78375 100644
--- a/src/dbus-manager.c
+++ b/src/dbus-manager.c
@@ -1435,6 +1435,7 @@ static DBusHandlerResult bus_manager_message_handler(DBusConnection *connection,
const char *name, *smode, *old_name = NULL;
JobMode mode;
Job *j;
+ JobBusClient *cl;
Unit *u;
bool b;
@@ -1492,10 +1493,11 @@ static DBusHandlerResult bus_manager_message_handler(DBusConnection *connection,
if ((r = manager_add_job(m, job_type, u, mode, true, &error, &j)) < 0)
return bus_send_error_reply(connection, message, &error, r);
- if (!(j->bus_client = strdup(message_get_sender_with_fallback(message))))
+ cl = job_bus_client_new(connection, message_get_sender_with_fallback(message));
+ if (!cl)
goto oom;
- j->bus = connection;
+ LIST_PREPEND(JobBusClient, client, j->bus_client_list, cl);
if (!(reply = dbus_message_new_method_return(message)))
goto oom;
diff --git a/src/dbus.c b/src/dbus.c
index 8e6e9fd..50e8c9c 100644
--- a/src/dbus.c
+++ b/src/dbus.c
@@ -1166,13 +1166,15 @@ static void shutdown_connection(Manager *m, DBusConnection *c) {
Job *j;
Iterator i;
- HASHMAP_FOREACH(j, m->jobs, i)
- if (j->bus == c) {
- free(j->bus_client);
- j->bus_client = NULL;
-
- j->bus = NULL;
+ HASHMAP_FOREACH(j, m->jobs, i) {
+ JobBusClient *cl, *nextcl;
+ LIST_FOREACH_SAFE(client, cl, nextcl, j->bus_client_list) {
+ if (cl->bus == c) {
+ LIST_REMOVE(JobBusClient, client, j->bus_client_list, cl);
+ free(cl);
+ }
}
+ }
set_remove(m->bus_connections, c);
set_remove(m->bus_connections_for_dispatch, c);
diff --git a/src/job.c b/src/job.c
index fb759a5..de74751 100644
--- a/src/job.c
+++ b/src/job.c
@@ -33,6 +33,20 @@
#include "log.h"
#include "dbus-job.h"
+JobBusClient* job_bus_client_new(DBusConnection *connection, const char *name) {
+ JobBusClient *cl;
+ size_t name_len;
+
+ name_len = strlen(name);
+ cl = malloc0(sizeof(JobBusClient) + name_len + 1);
+ if (!cl)
+ return NULL;
+
+ cl->bus = connection;
+ memcpy(cl->name, name, name_len + 1);
+ return cl;
+}
+
Job* job_new(Unit *unit, JobType type) {
Job *j;
@@ -55,6 +69,8 @@ Job* job_new(Unit *unit, JobType type) {
}
void job_free(Job *j) {
+ JobBusClient *cl;
+
assert(j);
assert(!j->installed);
assert(!j->transaction_prev);
@@ -77,7 +93,10 @@ void job_free(Job *j) {
close_nointr_nofail(j->timer_watch.fd);
}
- free(j->bus_client);
+ while ((cl = j->bus_client_list)) {
+ LIST_REMOVE(JobBusClient, client, j->bus_client_list, cl);
+ free(cl);
+ }
free(j);
}
diff --git a/src/job.h b/src/job.h
index eab0e07..2025b5b 100644
--- a/src/job.h
+++ b/src/job.h
@@ -28,6 +28,7 @@
typedef struct Job Job;
typedef struct JobDependency JobDependency;
+typedef struct JobBusClient JobBusClient;
typedef enum JobType JobType;
typedef enum JobState JobState;
typedef enum JobMode JobMode;
@@ -99,6 +100,13 @@ struct JobDependency {
bool conflicts;
};
+struct JobBusClient {
+ LIST_FIELDS(JobBusClient, client);
+ /* Note that this bus object is not ref counted here. */
+ DBusConnection *bus;
+ char name[0];
+};
+
struct Job {
Manager *manager;
Unit *unit;
@@ -121,9 +129,8 @@ struct Job {
Watch timer_watch;
- /* Note that this bus object is not ref counted here. */
- DBusConnection *bus;
- char *bus_client;
+ /* There can be more than one client, because of job merging. */
+ LIST_HEAD(JobBusClient, bus_client_list);
JobResult result;
@@ -136,6 +143,8 @@ struct Job {
bool ignore_order:1;
};
+JobBusClient* job_bus_client_new(DBusConnection *connection, const char *name);
+
Job* job_new(Unit *unit, JobType type);
void job_free(Job *job);
Job* job_install(Job *j);
--
1.7.7
From 45aca411e44be019c9ba1bc1500e0b7df73bbf75 Mon Sep 17 00:00:00 2001
|
|
From: Michal Schmidt <mschmidt@redhat.com>
|
|
Date: Fri, 20 Apr 2012 14:44:00 +0200
|
|
Subject: [PATCH 22/30] transaction: add starting requirements for JOB_RESTART
|
|
|
|
While having a Requires= dependency between units, the dependency is started
|
|
automatically on "systemctl start", but it's not started on "systemctl
|
|
restart".
|
|
|
|
JOB_RESTART jobs did not pull the dependencies for starting into the
|
|
transaction.
|
|
|
|
https://bugzilla.redhat.com/show_bug.cgi?id=802770
|
|
|
|
Note that the other bug noted in comment #2 has been fixed already by avoiding
|
|
the deletion of anchor jobs.
|
|
(cherry picked from commit 65304075249449a713b4e4842b8538ef4aa1c725)
|
|
---
|
|
src/transaction.c | 2 +-
|
|
1 files changed, 1 insertions(+), 1 deletions(-)
|
|
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index c3e1e13..a2efcbc 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -866,7 +866,7 @@ int transaction_add_job_and_dependencies(
|
|
}
|
|
|
|
/* Finally, recursively add in all dependencies. */
|
|
- if (type == JOB_START || type == JOB_RELOAD_OR_START) {
|
|
+ if (type == JOB_START || type == JOB_RELOAD_OR_START || type == JOB_RESTART) {
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i) {
|
|
r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
--
|
|
1.7.7
From 029699f2de954461054efb21e91994ca6e9df46a Mon Sep 17 00:00:00 2001
|
|
From: Lennart Poettering <lennart@poettering.net>
|
|
Date: Sun, 22 Apr 2012 15:22:52 +0200
|
|
Subject: [PATCH 23/30] transaction: downgrade warnings about masked units
|
|
(cherry picked from commit
|
|
59e132a7f416d7c4a33a46d791f250e03d2c2cd0)
|
|
|
|
---
|
|
src/transaction.c | 11 +++++++----
|
|
1 files changed, 7 insertions(+), 4 deletions(-)
|
|
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index a2efcbc..a36f1df 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -822,7 +822,7 @@ int transaction_add_job_and_dependencies(
|
|
|
|
if (type != JOB_STOP && unit->load_state == UNIT_MASKED) {
|
|
dbus_set_error(e, BUS_ERROR_MASKED, "Unit %s is masked.", unit->id);
|
|
- return -EINVAL;
|
|
+ return -EADDRNOTAVAIL;
|
|
}
|
|
|
|
if (!unit_job_is_applicable(unit, type)) {
|
|
@@ -892,7 +892,8 @@ int transaction_add_job_and_dependencies(
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES_OVERRIDABLE], i) {
|
|
r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, !override, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
- log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
+ log_full(r == -EADDRNOTAVAIL ? LOG_DEBUG : LOG_WARNING,
|
|
+ "Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
@@ -902,7 +903,8 @@ int transaction_add_job_and_dependencies(
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_WANTS], i) {
|
|
r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, false, false, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
- log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
+ log_full(r == -EADDRNOTAVAIL ? LOG_DEBUG : LOG_WARNING,
|
|
+ "Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
@@ -923,7 +925,8 @@ int transaction_add_job_and_dependencies(
|
|
SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUISITE_OVERRIDABLE], i) {
|
|
r = transaction_add_job_and_dependencies(tr, JOB_VERIFY_ACTIVE, dep, ret, !override, override, false, false, ignore_order, e);
|
|
if (r < 0) {
|
|
- log_warning("Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
+ log_full(r == -EADDRNOTAVAIL ? LOG_DEBUG : LOG_WARNING,
|
|
+ "Cannot add dependency job for unit %s, ignoring: %s", dep->id, bus_error(e, r));
|
|
|
|
if (e)
|
|
dbus_error_free(e);
|
|
--
|
|
1.7.7
From 0ee2f52343ed0da6dc9006be54c9de57b762e563 Mon Sep 17 00:00:00 2001
|
|
From: Michal Schmidt <mschmidt@redhat.com>
|
|
Date: Sat, 21 Apr 2012 21:40:40 +0200
|
|
Subject: [PATCH 24/30] transaction: improve readability
|
|
|
|
The functions looked complicated with the nested loops with breaks,
|
|
continues, and "while (again)".
|
|
Here using goto actually makes them easier to understand.
|
|
|
|
Also correcting the comment about redundant jobs.
|
|
(cherry picked from commit 055163ad15a5ca1eb5626c63fa7163e152698e2b)
|
|
---
|
|
src/transaction.c | 152 ++++++++++++++++++++++-------------------------------
|
|
1 files changed, 62 insertions(+), 90 deletions(-)
|
|
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index a36f1df..2bd6541 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -258,44 +258,32 @@ static int transaction_merge_jobs(Transaction *tr, DBusError *e) {
|
|
}
|
|
|
|
static void transaction_drop_redundant(Transaction *tr) {
|
|
- bool again;
|
|
-
|
|
- assert(tr);
|
|
-
|
|
- /* Goes through the transaction and removes all jobs that are
|
|
- * a noop */
|
|
-
|
|
- do {
|
|
- Job *j;
|
|
- Iterator i;
|
|
-
|
|
- again = false;
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- bool changes_something = false;
|
|
- Job *k;
|
|
+ Job *j;
|
|
+ Iterator i;
|
|
|
|
- LIST_FOREACH(transaction, k, j) {
|
|
+ /* Goes through the transaction and removes all jobs of the units
|
|
+ * whose jobs are all noops. If not all of a unit's jobs are
|
|
+ * redundant, they are kept. */
|
|
|
|
- if (tr->anchor_job != k &&
|
|
- job_type_is_redundant(k->type, unit_active_state(k->unit)) &&
|
|
- (!k->unit->job || !job_type_is_conflicting(k->type, k->unit->job->type)))
|
|
- continue;
|
|
+ assert(tr);
|
|
|
|
- changes_something = true;
|
|
- break;
|
|
- }
|
|
+rescan:
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ Job *k;
|
|
|
|
- if (changes_something)
|
|
- continue;
|
|
+ LIST_FOREACH(transaction, k, j) {
|
|
|
|
- /* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */
|
|
- transaction_delete_job(tr, j, false);
|
|
- again = true;
|
|
- break;
|
|
+ if (tr->anchor_job == k ||
|
|
+ !job_type_is_redundant(k->type, unit_active_state(k->unit)) ||
|
|
+ (k->unit->job && job_type_is_conflicting(k->type, k->unit->job->type)))
|
|
+ goto next_unit;
|
|
}
|
|
|
|
- } while (again);
|
|
+ /* log_debug("Found redundant job %s/%s, dropping.", j->unit->id, job_type_to_string(j->type)); */
|
|
+ transaction_delete_job(tr, j, false);
|
|
+ goto rescan;
|
|
+ next_unit:;
|
|
+ }
|
|
}
|
|
|
|
static bool unit_matters_to_anchor(Unit *u, Job *j) {
|
|
@@ -430,34 +418,27 @@ static int transaction_verify_order(Transaction *tr, unsigned *generation, DBusE
|
|
}
|
|
|
|
static void transaction_collect_garbage(Transaction *tr) {
|
|
- bool again;
|
|
+ Iterator i;
|
|
+ Job *j;
|
|
|
|
assert(tr);
|
|
|
|
/* Drop jobs that are not required by any other job */
|
|
|
|
- do {
|
|
- Iterator i;
|
|
- Job *j;
|
|
-
|
|
- again = false;
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- if (tr->anchor_job == j || j->object_list) {
|
|
- /* log_debug("Keeping job %s/%s because of %s/%s", */
|
|
- /* j->unit->id, job_type_to_string(j->type), */
|
|
- /* j->object_list->subject ? j->object_list->subject->unit->id : "root", */
|
|
- /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
|
|
- continue;
|
|
- }
|
|
-
|
|
- /* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */
|
|
- transaction_delete_job(tr, j, true);
|
|
- again = true;
|
|
- break;
|
|
+rescan:
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ if (tr->anchor_job == j || j->object_list) {
|
|
+ /* log_debug("Keeping job %s/%s because of %s/%s", */
|
|
+ /* j->unit->id, job_type_to_string(j->type), */
|
|
+ /* j->object_list->subject ? j->object_list->subject->unit->id : "root", */
|
|
+ /* j->object_list->subject ? job_type_to_string(j->object_list->subject->type) : "root"); */
|
|
+ continue;
|
|
}
|
|
|
|
- } while (again);
|
|
+ /* log_debug("Garbage collecting job %s/%s", j->unit->id, job_type_to_string(j->type)); */
|
|
+ transaction_delete_job(tr, j, true);
|
|
+ goto rescan;
|
|
+ }
|
|
}
|
|
|
|
static int transaction_is_destructive(Transaction *tr, DBusError *e) {
|
|
@@ -487,59 +468,50 @@ static int transaction_is_destructive(Transaction *tr, DBusError *e) {
|
|
}
|
|
|
|
static void transaction_minimize_impact(Transaction *tr) {
|
|
- bool again;
|
|
+ Job *j;
|
|
+ Iterator i;
|
|
+
|
|
assert(tr);
|
|
|
|
/* Drops all unnecessary jobs that reverse already active jobs
|
|
* or that stop a running service. */
|
|
|
|
- do {
|
|
- Job *j;
|
|
- Iterator i;
|
|
-
|
|
- again = false;
|
|
-
|
|
- HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
- LIST_FOREACH(transaction, j, j) {
|
|
- bool stops_running_service, changes_existing_job;
|
|
+rescan:
|
|
+ HASHMAP_FOREACH(j, tr->jobs, i) {
|
|
+ LIST_FOREACH(transaction, j, j) {
|
|
+ bool stops_running_service, changes_existing_job;
|
|
|
|
- /* If it matters, we shouldn't drop it */
|
|
- if (j->matters_to_anchor)
|
|
- continue;
|
|
+ /* If it matters, we shouldn't drop it */
|
|
+ if (j->matters_to_anchor)
|
|
+ continue;
|
|
|
|
- /* Would this stop a running service?
|
|
- * Would this change an existing job?
|
|
- * If so, let's drop this entry */
|
|
+ /* Would this stop a running service?
|
|
+ * Would this change an existing job?
|
|
+ * If so, let's drop this entry */
|
|
|
|
- stops_running_service =
|
|
- j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
|
|
+ stops_running_service =
|
|
+ j->type == JOB_STOP && UNIT_IS_ACTIVE_OR_ACTIVATING(unit_active_state(j->unit));
|
|
|
|
- changes_existing_job =
|
|
- j->unit->job &&
|
|
- job_type_is_conflicting(j->type, j->unit->job->type);
|
|
+ changes_existing_job =
|
|
+ j->unit->job &&
|
|
+ job_type_is_conflicting(j->type, j->unit->job->type);
|
|
|
|
- if (!stops_running_service && !changes_existing_job)
|
|
- continue;
|
|
+ if (!stops_running_service && !changes_existing_job)
|
|
+ continue;
|
|
|
|
- if (stops_running_service)
|
|
- log_debug("%s/%s would stop a running service.", j->unit->id, job_type_to_string(j->type));
|
|
+ if (stops_running_service)
|
|
+ log_debug("%s/%s would stop a running service.", j->unit->id, job_type_to_string(j->type));
|
|
|
|
- if (changes_existing_job)
|
|
- log_debug("%s/%s would change existing job.", j->unit->id, job_type_to_string(j->type));
|
|
+ if (changes_existing_job)
|
|
+ log_debug("%s/%s would change existing job.", j->unit->id, job_type_to_string(j->type));
|
|
|
|
- /* Ok, let's get rid of this */
|
|
- log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type));
|
|
+ /* Ok, let's get rid of this */
|
|
+ log_debug("Deleting %s/%s to minimize impact.", j->unit->id, job_type_to_string(j->type));
|
|
|
|
- transaction_delete_job(tr, j, true);
|
|
- again = true;
|
|
- break;
|
|
- }
|
|
-
|
|
- if (again)
|
|
- break;
|
|
+ transaction_delete_job(tr, j, true);
|
|
+ goto rescan;
|
|
}
|
|
-
|
|
- } while (again);
|
|
+ }
|
|
}
|
|
|
|
static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
--
|
|
1.7.7
From f613fa87549229f257444c41f5155fb1f5f30f44 Mon Sep 17 00:00:00 2001
|
|
From: Michal Schmidt <mschmidt@redhat.com>
|
|
Date: Sun, 22 Apr 2012 02:09:04 +0200
|
|
Subject: [PATCH 25/30] transaction: fix detection of cycles involving
|
|
installed jobs
|
|
|
|
A transaction can be acyclic, but when it's added to installed jobs,
|
|
a cycle may result.
|
|
|
|
transaction_verify_order_one() attempts to detect these cases, but it
|
|
fails because the installed jobs often have the exact generation number
|
|
that makes them look as if they were walked already.
|
|
|
|
Fix it by resetting the generation numbers of all installed jobs before
|
|
detecting cycles.
|
|
|
|
An alternative fix could be to add the generation counter to the
|
|
Manager and use it instead of starting always from 1 in
|
|
transaction_activate(). But I prefer not having to worry about it
|
|
wrapping around.
|
|
(cherry picked from commit 4e7bd268ae1f39675988b9ac61b9378a67e3ae3e)
|
|
---
|
|
src/transaction.c | 8 ++++++++
|
|
1 files changed, 8 insertions(+), 0 deletions(-)
|
|
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index 2bd6541..9676b57 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -582,6 +582,8 @@ rollback:
|
|
}
|
|
|
|
int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e) {
|
|
+ Iterator i;
|
|
+ Job *j;
|
|
int r;
|
|
unsigned generation = 1;
|
|
|
|
@@ -590,6 +592,12 @@ int transaction_activate(Transaction *tr, Manager *m, JobMode mode, DBusError *e
|
|
/* This applies the changes recorded in tr->jobs to
|
|
* the actual list of jobs, if possible. */
|
|
|
|
+ /* Reset the generation counter of all installed jobs. The detection of cycles
|
|
+ * looks at installed jobs. If they had a non-zero generation from some previous
|
|
+ * walk of the graph, the algorithm would break. */
|
|
+ HASHMAP_FOREACH(j, m->jobs, i)
|
|
+ j->generation = 0;
|
|
+
|
|
/* First step: figure out which jobs matter */
|
|
transaction_find_jobs_that_matter_to_anchor(tr->anchor_job, generation++);
|
|
|
|
--
|
|
1.7.7
From 374998cf2930534951bc0dace5025468cdf3c247 Mon Sep 17 00:00:00 2001
|
|
From: Michal Schmidt <mschmidt@redhat.com>
|
|
Date: Sun, 22 Apr 2012 10:54:58 +0200
|
|
Subject: [PATCH 26/30] transaction: abort does not need to use recursive
|
|
deletion
|
|
|
|
Recursion is unnecessary, because we're deleting all transaction jobs
|
|
anyway. And the recursive deletion produces debug messages that are
|
|
pointless in transaction abort.
|
|
(cherry picked from commit 1b9cea0caa85dce6d9f117638a296b141c49a8fd)
|
|
---
|
|
src/transaction.c | 2 +-
|
|
1 files changed, 1 insertions(+), 1 deletions(-)
|
|
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index 9676b57..b0a26f2 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -30,7 +30,7 @@ void transaction_abort(Transaction *tr) {
|
|
assert(tr);
|
|
|
|
while ((j = hashmap_first(tr->jobs)))
|
|
- transaction_delete_job(tr, j, true);
|
|
+ transaction_delete_job(tr, j, false);
|
|
|
|
assert(hashmap_isempty(tr->jobs));
|
|
}
|
|
--
|
|
1.7.7
From 0d806f0c0bebf755867cfe0b3cc41d55aebea66a Mon Sep 17 00:00:00 2001
|
|
From: Michal Schmidt <mschmidt@redhat.com>
|
|
Date: Mon, 23 Apr 2012 01:24:04 +0200
|
|
Subject: [PATCH 27/30] job: serialize jobs properly
|
|
|
|
Jobs were not preserved correctly over a daemon-reload operation.
|
|
A systemctl process waiting for a job completion received a job removal
|
|
signal. The job itself changed its id. The job timeout started ticking all
|
|
over again.
|
|
|
|
This fixes the deficiencies.
|
|
(cherry picked from commit 39a18c60d07319ebfcfd476556729c2cadd616d6)
|
|
---
|
|
src/dbus-job.c | 6 +-
|
|
src/job.c | 162 +++++++++++++++++++++++++++++++++++++++++++++++++++++---
|
|
src/job.h | 6 ++
|
|
src/unit.c | 45 ++++++++++++---
|
|
src/unit.h | 4 +-
|
|
5 files changed, 203 insertions(+), 20 deletions(-)
|
|
|
|
diff --git a/src/dbus-job.c b/src/dbus-job.c
|
|
index aa1b4eb..63e5846 100644
|
|
--- a/src/dbus-job.c
|
|
+++ b/src/dbus-job.c
|
|
@@ -232,7 +232,7 @@ static int job_send_message(Job *j, DBusMessage* (*new_message)(Job *j)) {
|
|
assert(j);
|
|
assert(new_message);
|
|
|
|
- if (bus_has_subscriber(j->manager)) {
|
|
+ if (bus_has_subscriber(j->manager) || j->forgot_bus_clients) {
|
|
m = new_message(j);
|
|
if (!m)
|
|
goto oom;
|
|
@@ -347,7 +347,7 @@ void bus_job_send_change_signal(Job *j) {
|
|
j->in_dbus_queue = false;
|
|
}
|
|
|
|
- if (!bus_has_subscriber(j->manager) && !j->bus_client_list) {
|
|
+ if (!bus_has_subscriber(j->manager) && !j->bus_client_list && !j->forgot_bus_clients) {
|
|
j->sent_dbus_new_signal = true;
|
|
return;
|
|
}
|
|
@@ -366,7 +366,7 @@ oom:
|
|
void bus_job_send_removed_signal(Job *j) {
|
|
assert(j);
|
|
|
|
- if (!bus_has_subscriber(j->manager) && !j->bus_client_list)
|
|
+ if (!bus_has_subscriber(j->manager) && !j->bus_client_list && !j->forgot_bus_clients)
|
|
return;
|
|
|
|
if (!j->sent_dbus_new_signal)
|
|
diff --git a/src/job.c b/src/job.c
|
|
index de74751..69df024 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -47,22 +47,36 @@ JobBusClient* job_bus_client_new(DBusConnection *connection, const char *name) {
|
|
return cl;
|
|
}
|
|
|
|
-Job* job_new(Unit *unit, JobType type) {
|
|
+Job* job_new_raw(Unit *unit) {
|
|
Job *j;
|
|
|
|
- assert(type < _JOB_TYPE_MAX);
|
|
+ /* used for deserialization */
|
|
+
|
|
assert(unit);
|
|
|
|
- if (!(j = new0(Job, 1)))
|
|
+ j = new0(Job, 1);
|
|
+ if (!j)
|
|
return NULL;
|
|
|
|
j->manager = unit->manager;
|
|
- j->id = j->manager->current_job_id++;
|
|
- j->type = type;
|
|
j->unit = unit;
|
|
-
|
|
j->timer_watch.type = WATCH_INVALID;
|
|
|
|
+ return j;
|
|
+}
|
|
+
|
|
+Job* job_new(Unit *unit, JobType type) {
|
|
+ Job *j;
|
|
+
|
|
+ assert(type < _JOB_TYPE_MAX);
|
|
+
|
|
+ j = job_new_raw(unit);
|
|
+ if (!j)
|
|
+ return NULL;
|
|
+
|
|
+ j->id = j->manager->current_job_id++;
|
|
+ j->type = type;
|
|
+
|
|
/* We don't link it here, that's what job_dependency() is for */
|
|
|
|
return j;
|
|
@@ -105,7 +119,9 @@ void job_uninstall(Job *j) {
|
|
assert(j->unit->job == j);
|
|
/* Detach from next 'bigger' objects */
|
|
|
|
- bus_job_send_removed_signal(j);
|
|
+ /* daemon-reload should be transparent to job observers */
|
|
+ if (j->manager->n_reloading <= 0)
|
|
+ bus_job_send_removed_signal(j);
|
|
|
|
j->unit->job = NULL;
|
|
unit_add_to_gc_queue(j->unit);
|
|
@@ -180,6 +196,18 @@ Job* job_install(Job *j) {
|
|
return j;
|
|
}
|
|
|
|
+void job_install_deserialized(Job *j) {
|
|
+ assert(!j->installed);
|
|
+
|
|
+ if (j->unit->job) {
|
|
+ log_debug("Unit %s already has a job installed. Not installing deserialized job.", j->unit->id);
|
|
+ return;
|
|
+ }
|
|
+ j->unit->job = j;
|
|
+ j->installed = true;
|
|
+ log_debug("Reinstalled deserialized job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
+}
|
|
+
|
|
JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts) {
|
|
JobDependency *l;
|
|
|
|
@@ -732,6 +760,126 @@ void job_timer_event(Job *j, uint64_t n_elapsed, Watch *w) {
|
|
job_finish_and_invalidate(j, JOB_TIMEOUT);
|
|
}
|
|
|
|
+int job_serialize(Job *j, FILE *f, FDSet *fds) {
|
|
+ fprintf(f, "job-id=%u\n", j->id);
|
|
+ fprintf(f, "job-type=%s\n", job_type_to_string(j->type));
|
|
+ fprintf(f, "job-state=%s\n", job_state_to_string(j->state));
|
|
+ fprintf(f, "job-override=%s\n", yes_no(j->override));
|
|
+ fprintf(f, "job-sent-dbus-new-signal=%s\n", yes_no(j->sent_dbus_new_signal));
|
|
+ fprintf(f, "job-ignore-order=%s\n", yes_no(j->ignore_order));
|
|
+ /* Cannot save bus clients. Just note the fact that we're losing
|
|
+ * them. job_send_message() will fallback to broadcasting. */
|
|
+ fprintf(f, "job-forgot-bus-clients=%s\n",
|
|
+ yes_no(j->forgot_bus_clients || j->bus_client_list));
|
|
+ if (j->timer_watch.type == WATCH_JOB_TIMER) {
|
|
+ int copy = fdset_put_dup(fds, j->timer_watch.fd);
|
|
+ if (copy < 0)
|
|
+ return copy;
|
|
+ fprintf(f, "job-timer-watch-fd=%d\n", copy);
|
|
+ }
|
|
+
|
|
+ /* End marker */
|
|
+ fputc('\n', f);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
+int job_deserialize(Job *j, FILE *f, FDSet *fds) {
|
|
+ for (;;) {
|
|
+ char line[LINE_MAX], *l, *v;
|
|
+ size_t k;
|
|
+
|
|
+ if (!fgets(line, sizeof(line), f)) {
|
|
+ if (feof(f))
|
|
+ return 0;
|
|
+ return -errno;
|
|
+ }
|
|
+
|
|
+ char_array_0(line);
|
|
+ l = strstrip(line);
|
|
+
|
|
+ /* End marker */
|
|
+ if (l[0] == 0)
|
|
+ return 0;
|
|
+
|
|
+ k = strcspn(l, "=");
|
|
+
|
|
+ if (l[k] == '=') {
|
|
+ l[k] = 0;
|
|
+ v = l+k+1;
|
|
+ } else
|
|
+ v = l+k;
|
|
+
|
|
+ if (streq(l, "job-id")) {
|
|
+ if (safe_atou32(v, &j->id) < 0)
|
|
+ log_debug("Failed to parse job id value %s", v);
|
|
+ } else if (streq(l, "job-type")) {
|
|
+ JobType t = job_type_from_string(v);
|
|
+ if (t < 0)
|
|
+ log_debug("Failed to parse job type %s", v);
|
|
+ else
|
|
+ j->type = t;
|
|
+ } else if (streq(l, "job-state")) {
|
|
+ JobState s = job_state_from_string(v);
|
|
+ if (s < 0)
|
|
+ log_debug("Failed to parse job state %s", v);
|
|
+ else
|
|
+ j->state = s;
|
|
+ } else if (streq(l, "job-override")) {
|
|
+ int b = parse_boolean(v);
|
|
+ if (b < 0)
|
|
+ log_debug("Failed to parse job override flag %s", v);
|
|
+ else
|
|
+ j->override = j->override || b;
|
|
+ } else if (streq(l, "job-sent-dbus-new-signal")) {
|
|
+ int b = parse_boolean(v);
|
|
+ if (b < 0)
|
|
+ log_debug("Failed to parse job sent_dbus_new_signal flag %s", v);
|
|
+ else
|
|
+ j->sent_dbus_new_signal = j->sent_dbus_new_signal || b;
|
|
+ } else if (streq(l, "job-ignore-order")) {
|
|
+ int b = parse_boolean(v);
|
|
+ if (b < 0)
|
|
+ log_debug("Failed to parse job ignore_order flag %s", v);
|
|
+ else
|
|
+ j->ignore_order = j->ignore_order || b;
|
|
+ } else if (streq(l, "job-forgot-bus-clients")) {
|
|
+ int b = parse_boolean(v);
|
|
+ if (b < 0)
|
|
+ log_debug("Failed to parse job forgot_bus_clients flag %s", v);
|
|
+ else
|
|
+ j->forgot_bus_clients = j->forgot_bus_clients || b;
|
|
+ } else if (streq(l, "job-timer-watch-fd")) {
|
|
+ int fd;
|
|
+ if (safe_atoi(v, &fd) < 0 || fd < 0 || !fdset_contains(fds, fd))
|
|
+ log_debug("Failed to parse job-timer-watch-fd value %s", v);
|
|
+ else {
|
|
+ if (j->timer_watch.type == WATCH_JOB_TIMER)
|
|
+ close_nointr_nofail(j->timer_watch.fd);
|
|
+
|
|
+ j->timer_watch.type = WATCH_JOB_TIMER;
|
|
+ j->timer_watch.fd = fdset_remove(fds, fd);
|
|
+ j->timer_watch.data.job = j;
|
|
+ }
|
|
+ }
|
|
+ }
|
|
+}
|
|
+
|
|
+int job_coldplug(Job *j) {
|
|
+ struct epoll_event ev;
|
|
+
|
|
+ if (j->timer_watch.type != WATCH_JOB_TIMER)
|
|
+ return 0;
|
|
+
|
|
+ zero(ev);
|
|
+ ev.data.ptr = &j->timer_watch;
|
|
+ ev.events = EPOLLIN;
|
|
+
|
|
+ if (epoll_ctl(j->manager->epoll_fd, EPOLL_CTL_ADD, j->timer_watch.fd, &ev) < 0)
|
|
+ return -errno;
|
|
+
|
|
+ return 0;
|
|
+}
|
|
+
|
|
static const char* const job_state_table[_JOB_STATE_MAX] = {
|
|
[JOB_WAITING] = "waiting",
|
|
[JOB_RUNNING] = "running"
|
|
diff --git a/src/job.h b/src/job.h
|
|
index 2025b5b..eea8242 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -141,15 +141,21 @@ struct Job {
|
|
bool in_dbus_queue:1;
|
|
bool sent_dbus_new_signal:1;
|
|
bool ignore_order:1;
|
|
+ bool forgot_bus_clients:1;
|
|
};
|
|
|
|
JobBusClient* job_bus_client_new(DBusConnection *connection, const char *name);
|
|
|
|
Job* job_new(Unit *unit, JobType type);
|
|
+Job* job_new_raw(Unit *unit);
|
|
void job_free(Job *job);
|
|
Job* job_install(Job *j);
|
|
+void job_install_deserialized(Job *j);
|
|
void job_uninstall(Job *j);
|
|
void job_dump(Job *j, FILE*f, const char *prefix);
|
|
+int job_serialize(Job *j, FILE *f, FDSet *fds);
|
|
+int job_deserialize(Job *j, FILE *f, FDSet *fds);
|
|
+int job_coldplug(Job *j);
|
|
|
|
JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts);
|
|
void job_dependency_free(JobDependency *l);
|
|
diff --git a/src/unit.c b/src/unit.c
|
|
index 1949995..c203a31 100644
|
|
--- a/src/unit.c
|
|
+++ b/src/unit.c
|
|
@@ -2288,8 +2288,10 @@ int unit_serialize(Unit *u, FILE *f, FDSet *fds) {
|
|
if ((r = UNIT_VTABLE(u)->serialize(u, f, fds)) < 0)
|
|
return r;
|
|
|
|
- if (u->job)
|
|
- unit_serialize_item(u, f, "job", job_type_to_string(u->job->type));
|
|
+ if (u->job) {
|
|
+ fprintf(f, "job\n");
|
|
+ job_serialize(u->job, f, fds);
|
|
+ }
|
|
|
|
dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
|
|
dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
|
|
@@ -2368,13 +2370,32 @@ int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
|
|
v = l+k;
|
|
|
|
if (streq(l, "job")) {
|
|
- JobType type;
|
|
+ if (v[0] == '\0') {
|
|
+ /* new-style serialized job */
|
|
+ Job *j = job_new_raw(u);
|
|
+ if (!j)
|
|
+ return -ENOMEM;
|
|
|
|
- if ((type = job_type_from_string(v)) < 0)
|
|
- log_debug("Failed to parse job type value %s", v);
|
|
- else
|
|
- u->deserialized_job = type;
|
|
+ r = job_deserialize(j, f, fds);
|
|
+ if (r < 0) {
|
|
+ job_free(j);
|
|
+ return r;
|
|
+ }
|
|
|
|
+ job_install_deserialized(j);
|
|
+ r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
|
|
+ if (r < 0) {
|
|
+ job_free(j);
|
|
+ return r;
|
|
+ }
|
|
+ } else {
|
|
+ /* legacy */
|
|
+ JobType type = job_type_from_string(v);
|
|
+ if (type < 0)
|
|
+ log_debug("Failed to parse job type value %s", v);
|
|
+ else
|
|
+ u->deserialized_job = type;
|
|
+ }
|
|
continue;
|
|
} else if (streq(l, "inactive-exit-timestamp")) {
|
|
dual_timestamp_deserialize(v, &u->inactive_exit_timestamp);
|
|
@@ -2450,8 +2471,14 @@ int unit_coldplug(Unit *u) {
|
|
if ((r = UNIT_VTABLE(u)->coldplug(u)) < 0)
|
|
return r;
|
|
|
|
- if (u->deserialized_job >= 0) {
|
|
- if ((r = manager_add_job(u->manager, u->deserialized_job, u, JOB_IGNORE_REQUIREMENTS, false, NULL, NULL)) < 0)
|
|
+ if (u->job) {
|
|
+ r = job_coldplug(u->job);
|
|
+ if (r < 0)
|
|
+ return r;
|
|
+ } else if (u->deserialized_job >= 0) {
|
|
+ /* legacy */
|
|
+ r = manager_add_job(u->manager, u->deserialized_job, u, JOB_IGNORE_REQUIREMENTS, false, NULL, NULL);
|
|
+ if (r < 0)
|
|
return r;
|
|
|
|
u->deserialized_job = _JOB_TYPE_INVALID;
|
|
diff --git a/src/unit.h b/src/unit.h
|
|
index 756f465..5fceabb 100644
|
|
--- a/src/unit.h
|
|
+++ b/src/unit.h
|
|
@@ -200,7 +200,9 @@ struct Unit {
|
|
unsigned gc_marker;
|
|
|
|
/* When deserializing, temporarily store the job type for this
|
|
- * unit here, if there was a job scheduled */
|
|
+ * unit here, if there was a job scheduled.
|
|
+ * Only for deserializing from a legacy version. New style uses full
|
|
+ * serialized jobs. */
|
|
int deserialized_job; /* This is actually of type JobType */
|
|
|
|
/* Error code when we didn't manage to load the unit (negative) */
|
|
--
|
|
1.7.7
From f2e0e5d5c80c8a489faa584539d7010b056dee7b Mon Sep 17 00:00:00 2001
|
|
From: Michal Schmidt <mschmidt@redhat.com>
|
|
Date: Tue, 24 Apr 2012 11:21:03 +0200
|
|
Subject: [PATCH 28/30] transaction: cancel jobs non-recursively on isolate
|
|
|
|
Recursive cancellation of jobs would trigger OnFailure actions of
|
|
dependent jobs. This is not desirable when isolating.
|
|
|
|
Fixes https://bugzilla.redhat.com/show_bug.cgi?id=798328
|
|
(cherry picked from commit 5273510e9f228a300ec6207d4502f1c6253aed5e)
|
|
---
|
|
src/dbus-job.c | 2 +-
|
|
src/job.c | 41 ++++++++++++++++-------------------------
|
|
src/job.h | 2 +-
|
|
src/manager.c | 3 ++-
|
|
src/transaction.c | 10 ++++------
|
|
src/unit.c | 12 ++++++------
|
|
6 files changed, 30 insertions(+), 40 deletions(-)
|
|
|
|
diff --git a/src/dbus-job.c b/src/dbus-job.c
|
|
index 63e5846..8474138 100644
|
|
--- a/src/dbus-job.c
|
|
+++ b/src/dbus-job.c
|
|
@@ -100,7 +100,7 @@ static DBusHandlerResult bus_job_message_dispatch(Job *j, DBusConnection *connec
|
|
if (!(reply = dbus_message_new_method_return(message)))
|
|
goto oom;
|
|
|
|
- job_finish_and_invalidate(j, JOB_CANCELED);
|
|
+ job_finish_and_invalidate(j, JOB_CANCELED, true);
|
|
|
|
} else {
|
|
const BusBoundProperties bps[] = {
|
|
diff --git a/src/job.c b/src/job.c
|
|
index 69df024..e8e0264 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -164,7 +164,7 @@ Job* job_install(Job *j) {
|
|
|
|
if (uj) {
|
|
if (job_type_is_conflicting(uj->type, j->type))
|
|
- job_finish_and_invalidate(uj, JOB_CANCELED);
|
|
+ job_finish_and_invalidate(uj, JOB_CANCELED, true);
|
|
else {
|
|
/* not conflicting, i.e. mergeable */
|
|
|
|
@@ -496,13 +496,13 @@ int job_run_and_invalidate(Job *j) {
|
|
|
|
if ((j = manager_get_job(m, id))) {
|
|
if (r == -EALREADY)
|
|
- r = job_finish_and_invalidate(j, JOB_DONE);
|
|
+ r = job_finish_and_invalidate(j, JOB_DONE, true);
|
|
else if (r == -ENOEXEC)
|
|
- r = job_finish_and_invalidate(j, JOB_SKIPPED);
|
|
+ r = job_finish_and_invalidate(j, JOB_SKIPPED, true);
|
|
else if (r == -EAGAIN)
|
|
j->state = JOB_WAITING;
|
|
else if (r < 0)
|
|
- r = job_finish_and_invalidate(j, JOB_FAILED);
|
|
+ r = job_finish_and_invalidate(j, JOB_FAILED, true);
|
|
}
|
|
|
|
return r;
|
|
@@ -555,12 +555,11 @@ static void job_print_status_message(Unit *u, JobType t, JobResult result) {
|
|
}
|
|
}
|
|
|
|
-int job_finish_and_invalidate(Job *j, JobResult result) {
|
|
+int job_finish_and_invalidate(Job *j, JobResult result, bool recursive) {
|
|
Unit *u;
|
|
Unit *other;
|
|
JobType t;
|
|
Iterator i;
|
|
- bool recursed = false;
|
|
|
|
assert(j);
|
|
assert(j->installed);
|
|
@@ -594,7 +593,7 @@ int job_finish_and_invalidate(Job *j, JobResult result) {
|
|
job_print_status_message(u, t, result);
|
|
|
|
/* Fail depending jobs on failure */
|
|
- if (result != JOB_DONE) {
|
|
+ if (result != JOB_DONE && recursive) {
|
|
|
|
if (t == JOB_START ||
|
|
t == JOB_VERIFY_ACTIVE ||
|
|
@@ -604,29 +603,23 @@ int job_finish_and_invalidate(Job *j, JobResult result) {
|
|
if (other->job &&
|
|
(other->job->type == JOB_START ||
|
|
other->job->type == JOB_VERIFY_ACTIVE ||
|
|
- other->job->type == JOB_RELOAD_OR_START)) {
|
|
- job_finish_and_invalidate(other->job, JOB_DEPENDENCY);
|
|
- recursed = true;
|
|
- }
|
|
+ other->job->type == JOB_RELOAD_OR_START))
|
|
+ job_finish_and_invalidate(other->job, JOB_DEPENDENCY, true);
|
|
|
|
SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
|
|
if (other->job &&
|
|
(other->job->type == JOB_START ||
|
|
other->job->type == JOB_VERIFY_ACTIVE ||
|
|
- other->job->type == JOB_RELOAD_OR_START)) {
|
|
- job_finish_and_invalidate(other->job, JOB_DEPENDENCY);
|
|
- recursed = true;
|
|
- }
|
|
+ other->job->type == JOB_RELOAD_OR_START))
|
|
+ job_finish_and_invalidate(other->job, JOB_DEPENDENCY, true);
|
|
|
|
SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY_OVERRIDABLE], i)
|
|
if (other->job &&
|
|
!other->job->override &&
|
|
(other->job->type == JOB_START ||
|
|
other->job->type == JOB_VERIFY_ACTIVE ||
|
|
- other->job->type == JOB_RELOAD_OR_START)) {
|
|
- job_finish_and_invalidate(other->job, JOB_DEPENDENCY);
|
|
- recursed = true;
|
|
- }
|
|
+ other->job->type == JOB_RELOAD_OR_START))
|
|
+ job_finish_and_invalidate(other->job, JOB_DEPENDENCY, true);
|
|
|
|
} else if (t == JOB_STOP) {
|
|
|
|
@@ -634,10 +627,8 @@ int job_finish_and_invalidate(Job *j, JobResult result) {
|
|
if (other->job &&
|
|
(other->job->type == JOB_START ||
|
|
other->job->type == JOB_VERIFY_ACTIVE ||
|
|
- other->job->type == JOB_RELOAD_OR_START)) {
|
|
- job_finish_and_invalidate(other->job, JOB_DEPENDENCY);
|
|
- recursed = true;
|
|
- }
|
|
+ other->job->type == JOB_RELOAD_OR_START))
|
|
+ job_finish_and_invalidate(other->job, JOB_DEPENDENCY, true);
|
|
}
|
|
}
|
|
|
|
@@ -665,7 +656,7 @@ finish:
|
|
|
|
manager_check_finished(u->manager);
|
|
|
|
- return recursed;
|
|
+ return 0;
|
|
}
|
|
|
|
int job_start_timer(Job *j) {
|
|
@@ -757,7 +748,7 @@ void job_timer_event(Job *j, uint64_t n_elapsed, Watch *w) {
|
|
assert(w == &j->timer_watch);
|
|
|
|
log_warning("Job %s/%s timed out.", j->unit->id, job_type_to_string(j->type));
|
|
- job_finish_and_invalidate(j, JOB_TIMEOUT);
|
|
+ job_finish_and_invalidate(j, JOB_TIMEOUT, true);
|
|
}
|
|
|
|
int job_serialize(Job *j, FILE *f, FDSet *fds) {
|
|
diff --git a/src/job.h b/src/job.h
|
|
index eea8242..5b326ef 100644
|
|
--- a/src/job.h
|
|
+++ b/src/job.h
|
|
@@ -196,7 +196,7 @@ int job_start_timer(Job *j);
|
|
void job_timer_event(Job *j, uint64_t n_elapsed, Watch *w);
|
|
|
|
int job_run_and_invalidate(Job *j);
|
|
-int job_finish_and_invalidate(Job *j, JobResult result);
|
|
+int job_finish_and_invalidate(Job *j, JobResult result, bool recursive);
|
|
|
|
char *job_dbus_path(Job *j);
|
|
|
|
diff --git a/src/manager.c b/src/manager.c
|
|
index 86ec858..7bff456 100644
|
|
--- a/src/manager.c
|
|
+++ b/src/manager.c
|
|
@@ -846,7 +846,8 @@ void manager_clear_jobs(Manager *m) {
|
|
assert(m);
|
|
|
|
while ((j = hashmap_first(m->jobs)))
|
|
- job_finish_and_invalidate(j, JOB_CANCELED);
|
|
+ /* No need to recurse. We're cancelling all jobs. */
|
|
+ job_finish_and_invalidate(j, JOB_CANCELED, false);
|
|
}
|
|
|
|
unsigned manager_dispatch_run_queue(Manager *m) {
|
|
diff --git a/src/transaction.c b/src/transaction.c
|
|
index b0a26f2..f687539 100644
|
|
--- a/src/transaction.c
|
|
+++ b/src/transaction.c
|
|
@@ -525,18 +525,16 @@ static int transaction_apply(Transaction *tr, Manager *m, JobMode mode) {
|
|
|
|
/* When isolating first kill all installed jobs which
|
|
* aren't part of the new transaction */
|
|
- rescan:
|
|
HASHMAP_FOREACH(j, m->jobs, i) {
|
|
assert(j->installed);
|
|
|
|
if (hashmap_get(tr->jobs, j->unit))
|
|
continue;
|
|
|
|
- /* 'j' itself is safe to remove, but if other jobs
|
|
- are invalidated recursively, our iterator may become
|
|
- invalid and we need to start over. */
|
|
- if (job_finish_and_invalidate(j, JOB_CANCELED) > 0)
|
|
- goto rescan;
|
|
+ /* Not invalidating recursively. Avoids triggering
|
|
+ * OnFailure= actions of dependent jobs. Also avoids
|
|
+ * invalidating our iterator. */
|
|
+ job_finish_and_invalidate(j, JOB_CANCELED, false);
|
|
}
|
|
}
|
|
|
|
diff --git a/src/unit.c b/src/unit.c
|
|
index c203a31..ff8331c 100644
|
|
--- a/src/unit.c
|
|
+++ b/src/unit.c
|
|
@@ -1226,12 +1226,12 @@ void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_su
|
|
case JOB_VERIFY_ACTIVE:
|
|
|
|
if (UNIT_IS_ACTIVE_OR_RELOADING(ns))
|
|
- job_finish_and_invalidate(u->job, JOB_DONE);
|
|
+ job_finish_and_invalidate(u->job, JOB_DONE, true);
|
|
else if (u->job->state == JOB_RUNNING && ns != UNIT_ACTIVATING) {
|
|
unexpected = true;
|
|
|
|
if (UNIT_IS_INACTIVE_OR_FAILED(ns))
|
|
- job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE);
|
|
+ job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
|
|
}
|
|
|
|
break;
|
|
@@ -1241,12 +1241,12 @@ void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_su
|
|
|
|
if (u->job->state == JOB_RUNNING) {
|
|
if (ns == UNIT_ACTIVE)
|
|
- job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED);
|
|
+ job_finish_and_invalidate(u->job, reload_success ? JOB_DONE : JOB_FAILED, true);
|
|
else if (ns != UNIT_ACTIVATING && ns != UNIT_RELOADING) {
|
|
unexpected = true;
|
|
|
|
if (UNIT_IS_INACTIVE_OR_FAILED(ns))
|
|
- job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE);
|
|
+ job_finish_and_invalidate(u->job, ns == UNIT_FAILED ? JOB_FAILED : JOB_DONE, true);
|
|
}
|
|
}
|
|
|
|
@@ -1257,10 +1257,10 @@ void unit_notify(Unit *u, UnitActiveState os, UnitActiveState ns, bool reload_su
|
|
case JOB_TRY_RESTART:
|
|
|
|
if (UNIT_IS_INACTIVE_OR_FAILED(ns))
|
|
- job_finish_and_invalidate(u->job, JOB_DONE);
|
|
+ job_finish_and_invalidate(u->job, JOB_DONE, true);
|
|
else if (u->job->state == JOB_RUNNING && ns != UNIT_DEACTIVATING) {
|
|
unexpected = true;
|
|
- job_finish_and_invalidate(u->job, JOB_FAILED);
|
|
+ job_finish_and_invalidate(u->job, JOB_FAILED, true);
|
|
}
|
|
|
|
break;
|
|
--
|
|
1.7.7
From 38a2f4227415ebf3519369d27a23750948f6dddf Mon Sep 17 00:00:00 2001
|
|
From: Michal Schmidt <mschmidt@redhat.com>
|
|
Date: Wed, 25 Apr 2012 11:58:27 +0200
|
|
Subject: [PATCH 29/30] core: add NOP jobs, job type collapsing
|
|
|
|
Two of our current job types are special:
|
|
JOB_TRY_RESTART, JOB_RELOAD_OR_START.
|
|
|
|
They differ from other job types by being sensitive to the unit active state.
|
|
They perform some action when the unit is active and some other action
|
|
otherwise. This raises a question: when exactly should the unit state be
|
|
checked to make the decision?
|
|
|
|
Currently the unit state is checked when the job becomes runnable. It's more
|
|
sensible to check the state immediately when the job is added by the user.
|
|
When the user types "systemctl try-restart foo.service", he really intends
|
|
to restart the service if it's running right now. If it isn't running right
|
|
now, the restart is pointless.
|
|
|
|
Consider the example (from Bugzilla[1]):
|
|
|
|
sleep.service takes some time to start.
|
|
hello.service has After=sleep.service.
|
|
Both services get started. Two jobs will appear:
|
|
hello.service/start waiting
|
|
sleep.service/start running
|
|
Then someone runs "systemctl try-restart hello.service".
|
|
|
|
Currently the try-restart operation will block and wait for
|
|
sleep.service/start to complete.
|
|
|
|
The correct result is to complete the try-restart operation immediately
|
|
with success, because hello.service is not running. The two original
|
|
jobs must not be disturbed by this.
|
|
|
|
To fix this we introduce two new concepts:
|
|
- a new job type: JOB_NOP
|
|
A JOB_NOP job does not do anything to the unit. It does not pull in any
|
|
dependencies. It is always immediately runnable. When installed to a unit,
|
|
it sits in a special slot (u->nop_job) where it never conflicts with
|
|
the installed job (u->job) of a different type. It never merges with jobs
|
|
of other types, but it can merge into an already installed JOB_NOP job.
|
|
|
|
- "collapsing" of job types
|
|
When a job of one of the two special types is added, the state of the unit
|
|
is checked immediately and the job type changes:
|
|
JOB_TRY_RESTART -> JOB_RESTART or JOB_NOP
|
|
JOB_RELOAD_OR_START -> JOB_RELOAD or JOB_START
|
|
Should a job type JOB_RELOAD_OR_START appear later during job merging, it
|
|
collapses immediately afterwards.
|
|
Collapsing actually makes some things simpler, because there are now fewer
|
|
job types that are allowed in the transaction.
|
|
|
|
[1] Fixes: https://bugzilla.redhat.com/show_bug.cgi?id=753586
|
|
(cherry picked from commit e0209d83e7b30153f43b1a633c955f66eb2c2e4a)
|
|
|
|
Conflicts:
|
|
|
|
Makefile.am
|
|
src/job.c
|
|
---
|
|
src/job.c | 163 ++++++++++++++++++++++++++++++++-------------------
|
|
src/job.h | 44 ++++++++++----
|
|
src/manager.c | 2 +
|
|
src/test-job-type.c | 85 +++++++++++++++++----------
|
|
src/transaction.c | 18 +++---
|
|
src/unit.c | 29 +++++++++-
|
|
src/unit.h | 6 +-
|
|
7 files changed, 230 insertions(+), 117 deletions(-)
|
|
|
|
diff --git a/src/job.c b/src/job.c
|
|
index e8e0264..2438f39 100644
|
|
--- a/src/job.c
|
|
+++ b/src/job.c
|
|
@@ -60,6 +60,7 @@ Job* job_new_raw(Unit *unit) {
|
|
|
|
j->manager = unit->manager;
|
|
j->unit = unit;
|
|
+ j->type = _JOB_TYPE_INVALID;
|
|
j->timer_watch.type = WATCH_INVALID;
|
|
|
|
return j;
|
|
@@ -115,15 +116,21 @@ void job_free(Job *j) {
|
|
}
|
|
|
|
void job_uninstall(Job *j) {
|
|
+ Job **pj;
|
|
+
|
|
assert(j->installed);
|
|
- assert(j->unit->job == j);
|
|
+
|
|
+ pj = (j->type == JOB_NOP) ? &j->unit->nop_job : &j->unit->job;
|
|
+ assert(*pj == j);
|
|
+
|
|
/* Detach from next 'bigger' objects */
|
|
|
|
/* daemon-reload should be transparent to job observers */
|
|
if (j->manager->n_reloading <= 0)
|
|
bus_job_send_removed_signal(j);
|
|
|
|
- j->unit->job = NULL;
|
|
+ *pj = NULL;
|
|
+
|
|
unit_add_to_gc_queue(j->unit);
|
|
|
|
hashmap_remove(j->manager->jobs, UINT32_TO_PTR(j->id));
|
|
@@ -144,31 +151,38 @@ static bool job_type_allows_late_merge(JobType t) {
|
|
* patched into JOB_START after stopping the unit. So if we see a
|
|
* JOB_RESTART running, it means the unit hasn't stopped yet and at
|
|
* this time the merge is still allowed. */
|
|
- return !(t == JOB_RELOAD || t == JOB_RELOAD_OR_START);
|
|
+ return t != JOB_RELOAD;
|
|
}
|
|
|
|
static void job_merge_into_installed(Job *j, Job *other) {
|
|
assert(j->installed);
|
|
assert(j->unit == other->unit);
|
|
|
|
- j->type = job_type_lookup_merge(j->type, other->type);
|
|
- assert(j->type >= 0);
|
|
+ if (j->type != JOB_NOP)
|
|
+ job_type_merge_and_collapse(&j->type, other->type, j->unit);
|
|
+ else
|
|
+ assert(other->type == JOB_NOP);
|
|
|
|
j->override = j->override || other->override;
|
|
}
|
|
|
|
Job* job_install(Job *j) {
|
|
- Job *uj = j->unit->job;
|
|
+ Job **pj;
|
|
+ Job *uj;
|
|
|
|
assert(!j->installed);
|
|
+ assert(j->type < _JOB_TYPE_MAX_IN_TRANSACTION);
|
|
+
|
|
+ pj = (j->type == JOB_NOP) ? &j->unit->nop_job : &j->unit->job;
|
|
+ uj = *pj;
|
|
|
|
if (uj) {
|
|
- if (job_type_is_conflicting(uj->type, j->type))
|
|
+ if (j->type != JOB_NOP && job_type_is_conflicting(uj->type, j->type))
|
|
job_finish_and_invalidate(uj, JOB_CANCELED, true);
|
|
else {
|
|
/* not conflicting, i.e. mergeable */
|
|
|
|
- if (uj->state == JOB_WAITING ||
|
|
+ if (j->type == JOB_NOP || uj->state == JOB_WAITING ||
|
|
(job_type_allows_late_merge(j->type) && job_type_is_superset(uj->type, j->type))) {
|
|
job_merge_into_installed(uj, j);
|
|
log_debug("Merged into installed job %s/%s as %u",
|
|
@@ -189,23 +203,33 @@ Job* job_install(Job *j) {
|
|
}
|
|
|
|
/* Install the job */
|
|
- j->unit->job = j;
|
|
+ *pj = j;
|
|
j->installed = true;
|
|
j->manager->n_installed_jobs ++;
|
|
log_debug("Installed new job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
return j;
|
|
}
|
|
|
|
-void job_install_deserialized(Job *j) {
|
|
+int job_install_deserialized(Job *j) {
|
|
+ Job **pj;
|
|
+
|
|
assert(!j->installed);
|
|
|
|
- if (j->unit->job) {
|
|
+ if (j->type < 0 || j->type >= _JOB_TYPE_MAX_IN_TRANSACTION) {
|
|
+ log_debug("Invalid job type %s in deserialization.", strna(job_type_to_string(j->type)));
|
|
+ return -EINVAL;
|
|
+ }
|
|
+
|
|
+ pj = (j->type == JOB_NOP) ? &j->unit->nop_job : &j->unit->job;
|
|
+
|
|
+ if (*pj) {
|
|
log_debug("Unit %s already has a job installed. Not installing deserialized job.", j->unit->id);
|
|
- return;
|
|
+ return -EEXIST;
|
|
}
|
|
- j->unit->job = j;
|
|
+ *pj = j;
|
|
j->installed = true;
|
|
log_debug("Reinstalled deserialized job %s/%s as %u", j->unit->id, job_type_to_string(j->type), (unsigned) j->id);
|
|
+ return 0;
|
|
}
|
|
|
|
JobDependency* job_dependency_new(Job *subject, Job *object, bool matters, bool conflicts) {
|
|
@@ -268,6 +292,10 @@ void job_dump(Job *j, FILE*f, const char *prefix) {
|
|
* its lower triangle to avoid duplication. We don't store the main diagonal,
|
|
* because A merged with A is simply A.
|
|
*
|
|
+ * If the resulting type is collapsed immediately afterwards (to get rid of
|
|
+ * the JOB_RELOAD_OR_START, which lies outside the lookup function's domain),
|
|
+ * the following properties hold:
|
|
+ *
|
|
* Merging is associative! A merged with B merged with C is the same as
|
|
* A merged with C merged with B.
|
|
*
|
|
@@ -278,21 +306,19 @@ void job_dump(Job *j, FILE*f, const char *prefix) {
|
|
* be merged with C either.
|
|
*/
|
|
static const JobType job_merging_table[] = {
|
|
-/* What \ With * JOB_START JOB_VERIFY_ACTIVE JOB_STOP JOB_RELOAD JOB_RELOAD_OR_START JOB_RESTART JOB_TRY_RESTART */
|
|
-/************************************************************************************************************************************/
|
|
+/* What \ With * JOB_START JOB_VERIFY_ACTIVE JOB_STOP JOB_RELOAD */
|
|
+/*********************************************************************************/
|
|
/*JOB_START */
|
|
/*JOB_VERIFY_ACTIVE */ JOB_START,
|
|
/*JOB_STOP */ -1, -1,
|
|
/*JOB_RELOAD */ JOB_RELOAD_OR_START, JOB_RELOAD, -1,
|
|
-/*JOB_RELOAD_OR_START*/ JOB_RELOAD_OR_START, JOB_RELOAD_OR_START, -1, JOB_RELOAD_OR_START,
|
|
-/*JOB_RESTART */ JOB_RESTART, JOB_RESTART, -1, JOB_RESTART, JOB_RESTART,
|
|
-/*JOB_TRY_RESTART */ JOB_RESTART, JOB_TRY_RESTART, -1, JOB_TRY_RESTART, JOB_RESTART, JOB_RESTART,
|
|
+/*JOB_RESTART */ JOB_RESTART, JOB_RESTART, -1, JOB_RESTART,
|
|
};
|
|
|
|
JobType job_type_lookup_merge(JobType a, JobType b) {
|
|
- assert_cc(ELEMENTSOF(job_merging_table) == _JOB_TYPE_MAX * (_JOB_TYPE_MAX - 1) / 2);
|
|
- assert(a >= 0 && a < _JOB_TYPE_MAX);
|
|
- assert(b >= 0 && b < _JOB_TYPE_MAX);
|
|
+ assert_cc(ELEMENTSOF(job_merging_table) == _JOB_TYPE_MAX_MERGING * (_JOB_TYPE_MAX_MERGING - 1) / 2);
|
|
+ assert(a >= 0 && a < _JOB_TYPE_MAX_MERGING);
|
|
+ assert(b >= 0 && b < _JOB_TYPE_MAX_MERGING);
|
|
|
|
if (a == b)
|
|
return a;
|
|
@@ -328,24 +354,50 @@ bool job_type_is_redundant(JobType a, UnitActiveState b) {
|
|
return
|
|
b == UNIT_RELOADING;
|
|
|
|
- case JOB_RELOAD_OR_START:
|
|
- return
|
|
- b == UNIT_ACTIVATING ||
|
|
- b == UNIT_RELOADING;
|
|
-
|
|
case JOB_RESTART:
|
|
return
|
|
b == UNIT_ACTIVATING;
|
|
|
|
+ default:
|
|
+ assert_not_reached("Invalid job type");
|
|
+ }
|
|
+}
|
|
+
|
|
+void job_type_collapse(JobType *t, Unit *u) {
|
|
+ UnitActiveState s;
|
|
+
|
|
+ switch (*t) {
|
|
+
|
|
case JOB_TRY_RESTART:
|
|
- return
|
|
- b == UNIT_ACTIVATING;
|
|
+ s = unit_active_state(u);
|
|
+ if (UNIT_IS_INACTIVE_OR_DEACTIVATING(s))
|
|
+ *t = JOB_NOP;
|
|
+ else
|
|
+ *t = JOB_RESTART;
|
|
+ break;
|
|
+
|
|
+ case JOB_RELOAD_OR_START:
|
|
+ s = unit_active_state(u);
|
|
+ if (UNIT_IS_INACTIVE_OR_DEACTIVATING(s))
|
|
+ *t = JOB_START;
|
|
+ else
|
|
+ *t = JOB_RELOAD;
|
|
+ break;
|
|
|
|
default:
|
|
- assert_not_reached("Invalid job type");
|
|
+ ;
|
|
}
|
|
}
|
|
|
|
+int job_type_merge_and_collapse(JobType *a, JobType b, Unit *u) {
|
|
+ JobType t = job_type_lookup_merge(*a, b);
|
|
+ if (t < 0)
|
|
+ return -EEXIST;
|
|
+ *a = t;
|
|
+ job_type_collapse(a, u);
|
|
+ return 0;
|
|
+}
|
|
+
|
|
bool job_is_runnable(Job *j) {
|
|
Iterator i;
|
|
Unit *other;
|
|
@@ -362,10 +414,12 @@ bool job_is_runnable(Job *j) {
|
|
if (j->ignore_order)
|
|
return true;
|
|
|
|
+ if (j->type == JOB_NOP)
|
|
+ return true;
|
|
+
|
|
if (j->type == JOB_START ||
|
|
j->type == JOB_VERIFY_ACTIVE ||
|
|
- j->type == JOB_RELOAD ||
|
|
- j->type == JOB_RELOAD_OR_START) {
|
|
+ j->type == JOB_RELOAD) {
|
|
|
|
/* Immediate result is that the job is or might be
|
|
* started. In this case lets wait for the
|
|
@@ -383,8 +437,7 @@ bool job_is_runnable(Job *j) {
|
|
SET_FOREACH(other, j->unit->dependencies[UNIT_BEFORE], i)
|
|
if (other->job &&
|
|
(other->job->type == JOB_STOP ||
|
|
- other->job->type == JOB_RESTART ||
|
|
- other->job->type == JOB_TRY_RESTART))
|
|
+ other->job->type == JOB_RESTART))
|
|
return false;
|
|
|
|
/* This means that for a service a and a service b where b
|
|
@@ -416,6 +469,7 @@ int job_run_and_invalidate(Job *j) {
|
|
|
|
assert(j);
|
|
assert(j->installed);
|
|
+ assert(j->type < _JOB_TYPE_MAX_IN_TRANSACTION);
|
|
|
|
if (j->in_run_queue) {
|
|
LIST_REMOVE(Job, run_queue, j->manager->run_queue, j);
|
|
@@ -441,15 +495,6 @@ int job_run_and_invalidate(Job *j) {
|
|
|
|
switch (j->type) {
|
|
|
|
- case JOB_RELOAD_OR_START:
|
|
- if (unit_active_state(j->unit) == UNIT_ACTIVE) {
|
|
- j->type = JOB_RELOAD;
|
|
- r = unit_reload(j->unit);
|
|
- break;
|
|
- }
|
|
- j->type = JOB_START;
|
|
- /* fall through */
|
|
-
|
|
case JOB_START:
|
|
r = unit_start(j->unit);
|
|
|
|
@@ -469,14 +514,6 @@ int job_run_and_invalidate(Job *j) {
|
|
break;
|
|
}
|
|
|
|
- case JOB_TRY_RESTART:
|
|
- if (UNIT_IS_INACTIVE_OR_DEACTIVATING(unit_active_state(j->unit))) {
|
|
- r = -ENOEXEC;
|
|
- break;
|
|
- }
|
|
- j->type = JOB_RESTART;
|
|
- /* fall through */
|
|
-
|
|
case JOB_STOP:
|
|
case JOB_RESTART:
|
|
r = unit_stop(j->unit);
|
|
@@ -490,11 +527,16 @@ int job_run_and_invalidate(Job *j) {
|
|
r = unit_reload(j->unit);
|
|
break;
|
|
|
|
+ case JOB_NOP:
|
|
+ r = -EALREADY;
|
|
+ break;
|
|
+
|
|
default:
|
|
assert_not_reached("Unknown job type");
|
|
}
|
|
|
|
- if ((j = manager_get_job(m, id))) {
|
|
+ j = manager_get_job(m, id);
|
|
+ if (j) {
|
|
if (r == -EALREADY)
|
|
r = job_finish_and_invalidate(j, JOB_DONE, true);
|
|
else if (r == -ENOEXEC)
|
|
@@ -563,6 +605,7 @@ int job_finish_and_invalidate(Job *j, JobResult result, bool recursive) {
|
|
|
|
assert(j);
|
|
assert(j->installed);
|
|
+ assert(j->type < _JOB_TYPE_MAX_IN_TRANSACTION);
|
|
|
|
job_add_to_dbus_queue(j);
|
|
|
|
@@ -596,29 +639,25 @@ int job_finish_and_invalidate(Job *j, JobResult result, bool recursive) {
|
|
if (result != JOB_DONE && recursive) {
|
|
|
|
if (t == JOB_START ||
|
|
- t == JOB_VERIFY_ACTIVE ||
|
|
- t == JOB_RELOAD_OR_START) {
|
|
+ t == JOB_VERIFY_ACTIVE) {
|
|
|
|
SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY], i)
|
|
if (other->job &&
|
|
(other->job->type == JOB_START ||
|
|
- other->job->type == JOB_VERIFY_ACTIVE ||
|
|
- other->job->type == JOB_RELOAD_OR_START))
|
|
+ other->job->type == JOB_VERIFY_ACTIVE))
|
|
job_finish_and_invalidate(other->job, JOB_DEPENDENCY, true);
|
|
|
|
SET_FOREACH(other, u->dependencies[UNIT_BOUND_BY], i)
|
|
if (other->job &&
|
|
(other->job->type == JOB_START ||
|
|
- other->job->type == JOB_VERIFY_ACTIVE ||
|
|
- other->job->type == JOB_RELOAD_OR_START))
|
|
+ other->job->type == JOB_VERIFY_ACTIVE))
|
|
job_finish_and_invalidate(other->job, JOB_DEPENDENCY, true);
|
|
|
|
SET_FOREACH(other, u->dependencies[UNIT_REQUIRED_BY_OVERRIDABLE], i)
|
|
if (other->job &&
|
|
!other->job->override &&
|
|
(other->job->type == JOB_START ||
|
|
- other->job->type == JOB_VERIFY_ACTIVE ||
|
|
- other->job->type == JOB_RELOAD_OR_START))
|
|
+ other->job->type == JOB_VERIFY_ACTIVE))
|
|
job_finish_and_invalidate(other->job, JOB_DEPENDENCY, true);
|
|
|
|
} else if (t == JOB_STOP) {
|
|
@@ -626,8 +665,7 @@ int job_finish_and_invalidate(Job *j, JobResult result, bool recursive) {
|
|
SET_FOREACH(other, u->dependencies[UNIT_CONFLICTED_BY], i)
|
|
if (other->job &&
|
|
(other->job->type == JOB_START ||
|
|
- other->job->type == JOB_VERIFY_ACTIVE ||
|
|
- other->job->type == JOB_RELOAD_OR_START))
|
|
+ other->job->type == JOB_VERIFY_ACTIVE))
|
|
job_finish_and_invalidate(other->job, JOB_DEPENDENCY, true);
|
|
}
|
|
}
|
|
@@ -807,6 +845,8 @@ int job_deserialize(Job *j, FILE *f, FDSet *fds) {
|
|
JobType t = job_type_from_string(v);
|
|
if (t < 0)
|
|
log_debug("Failed to parse job type %s", v);
|
|
+ else if (t >= _JOB_TYPE_MAX_IN_TRANSACTION)
|
|
+ log_debug("Cannot deserialize job of type %s", v);
|
|
else
|
|
j->type = t;
|
|
} else if (streq(l, "job-state")) {
|
|
@@ -886,6 +926,7 @@ static const char* const job_type_table[_JOB_TYPE_MAX] = {
|
|
[JOB_RELOAD_OR_START] = "reload-or-start",
|
|
[JOB_RESTART] = "restart",
|
|
[JOB_TRY_RESTART] = "try-restart",
|
|
+ [JOB_NOP] = "nop",
|
|
};
|
|
|
|
DEFINE_STRING_TABLE_LOOKUP(job_type, JobType);
|
|
diff --git a/src/job.h b/src/job.h
index 5b326ef..4edf3b9 100644
--- a/src/job.h
+++ b/src/job.h
@@ -46,14 +46,34 @@ enum JobType {

JOB_STOP,

- JOB_RELOAD, /* if running reload */
- JOB_RELOAD_OR_START, /* if running reload, if not running start */
+ JOB_RELOAD, /* if running, reload */

/* Note that restarts are first treated like JOB_STOP, but
* then instead of finishing are patched to become
* JOB_START. */
- JOB_RESTART, /* if running stop, then start unconditionally */
- JOB_TRY_RESTART, /* if running stop and then start */
+ JOB_RESTART, /* If running, stop. Then start unconditionally. */
+
+ _JOB_TYPE_MAX_MERGING,
+
+ /* JOB_NOP can enter into a transaction, but as it won't pull in
+ * any dependencies, it won't have to merge with anything.
+ * job_install() avoids the problem of merging JOB_NOP too (it's
+ * special-cased, only merges with other JOB_NOPs). */
+ JOB_NOP = _JOB_TYPE_MAX_MERGING, /* do nothing */
+
+ _JOB_TYPE_MAX_IN_TRANSACTION,
+
+ /* JOB_TRY_RESTART can never appear in a transaction, because
+ * it always collapses into JOB_RESTART or JOB_NOP before entering.
+ * Thus we never need to merge it with anything. */
+ JOB_TRY_RESTART = _JOB_TYPE_MAX_IN_TRANSACTION, /* if running, stop and then start */
+
+ /* JOB_RELOAD_OR_START won't enter into a transaction and cannot result
+ * from transaction merging (there's no way for JOB_RELOAD and
+ * JOB_START to meet in one transaction). It can result from a merge
+ * during job installation, but then it will immediately collapse into
+ * one of the two simpler types. */
+ JOB_RELOAD_OR_START, /* if running, reload, otherwise start */

_JOB_TYPE_MAX,
_JOB_TYPE_INVALID = -1
@@ -150,7 +170,7 @@ Job* job_new(Unit *unit, JobType type);
Job* job_new_raw(Unit *unit);
void job_free(Job *job);
Job* job_install(Job *j);
-void job_install_deserialized(Job *j);
+int job_install_deserialized(Job *j);
void job_uninstall(Job *j);
void job_dump(Job *j, FILE*f, const char *prefix);
int job_serialize(Job *j, FILE *f, FDSet *fds);
@@ -164,14 +184,6 @@ int job_merge(Job *j, Job *other);

JobType job_type_lookup_merge(JobType a, JobType b);

-static inline int job_type_merge(JobType *a, JobType b) {
- JobType t = job_type_lookup_merge(*a, b);
- if (t < 0)
- return -EEXIST;
- *a = t;
- return 0;
-}
-
static inline bool job_type_is_mergeable(JobType a, JobType b) {
return job_type_lookup_merge(a, b) >= 0;
}
@@ -187,6 +199,12 @@ static inline bool job_type_is_superset(JobType a, JobType b) {

bool job_type_is_redundant(JobType a, UnitActiveState b);

+/* Collapses a state-dependent job type into a simpler type by observing
+ * the state of the unit which it is going to be applied to. */
+void job_type_collapse(JobType *t, Unit *u);
+
+int job_type_merge_and_collapse(JobType *a, JobType b, Unit *u);
+
bool job_is_runnable(Job *j);

void job_add_to_run_queue(Job *j);
diff --git a/src/manager.c b/src/manager.c
index 7bff456..0f87e9f 100644
--- a/src/manager.c
+++ b/src/manager.c
@@ -650,6 +650,8 @@ int manager_add_job(Manager *m, JobType type, Unit *unit, JobMode mode, bool ove

log_debug("Trying to enqueue job %s/%s/%s", unit->id, job_type_to_string(type), job_mode_to_string(mode));

+ job_type_collapse(&type, unit);
+
tr = transaction_new();
if (!tr)
return -ENOMEM;
diff --git a/src/test-job-type.c b/src/test-job-type.c
index 9de21e1..c7a9b5a 100644
--- a/src/test-job-type.c
+++ b/src/test-job-type.c
@@ -25,59 +25,80 @@
#include <unistd.h>

#include "job.h"
+#include "unit.h"
+#include "service.h"

int main(int argc, char*argv[]) {
- JobType a, b, c, d, e, f, g;
+ JobType a, b, c, ab, bc, ab_c, bc_a, a_bc;
+ const ServiceState test_states[] = { SERVICE_DEAD, SERVICE_RUNNING };
+ unsigned i;
+ bool merged_ab;

- for (a = 0; a < _JOB_TYPE_MAX; a++)
- for (b = 0; b < _JOB_TYPE_MAX; b++) {
+ /* fake a unit */
+ static Service s = {
+ .meta.load_state = UNIT_LOADED,
+ .type = UNIT_SERVICE
+ };
+ Unit *u = UNIT(&s);

- if (!job_type_is_mergeable(a, b))
- printf("Not mergeable: %s + %s\n", job_type_to_string(a), job_type_to_string(b));
+ for (i = 0; i < ELEMENTSOF(test_states); i++) {
+ s.state = test_states[i];
+ printf("\nWith collapsing for service state %s\n"
+ "=========================================\n", service_state_to_string(s.state));
+ for (a = 0; a < _JOB_TYPE_MAX_MERGING; a++) {
+ for (b = 0; b < _JOB_TYPE_MAX_MERGING; b++) {

- for (c = 0; c < _JOB_TYPE_MAX; c++) {
+ ab = a;
+ merged_ab = (job_type_merge_and_collapse(&ab, b, u) >= 0);

- /* Verify transitivity of mergeability
- * of job types */
- assert(!job_type_is_mergeable(a, b) ||
- !job_type_is_mergeable(b, c) ||
- job_type_is_mergeable(a, c));
+ if (!job_type_is_mergeable(a, b)) {
+ assert(!merged_ab);
+ printf("Not mergeable: %s + %s\n", job_type_to_string(a), job_type_to_string(b));
+ continue;
+ }
+
+ assert(merged_ab);
+ printf("%s + %s = %s\n", job_type_to_string(a), job_type_to_string(b), job_type_to_string(ab));

- d = a;
- if (job_type_merge(&d, b) >= 0) {
+ for (c = 0; c < _JOB_TYPE_MAX_MERGING; c++) {

- printf("%s + %s = %s\n", job_type_to_string(a), job_type_to_string(b), job_type_to_string(d));
+ /* Verify transitivity of mergeability of job types */
+ assert(!job_type_is_mergeable(a, b) ||
+ !job_type_is_mergeable(b, c) ||
+ job_type_is_mergeable(a, c));

- /* Verify that merged entries can be
- * merged with the same entries they
- * can be merged with separately */
- assert(!job_type_is_mergeable(a, c) || job_type_is_mergeable(d, c));
- assert(!job_type_is_mergeable(b, c) || job_type_is_mergeable(d, c));
+ /* Verify that merged entries can be merged with the same entries
+ * they can be merged with separately */
+ assert(!job_type_is_mergeable(a, c) || job_type_is_mergeable(ab, c));
+ assert(!job_type_is_mergeable(b, c) || job_type_is_mergeable(ab, c));

- /* Verify that if a merged
- * with b is not mergeable with
- * c then either a or b is not
- * mergeable with c either. */
- assert(job_type_is_mergeable(d, c) || !job_type_is_mergeable(a, c) || !job_type_is_mergeable(b, c));
+ /* Verify that if a merged with b is not mergeable with c, then
+ * either a or b is not mergeable with c either. */
+ assert(job_type_is_mergeable(ab, c) || !job_type_is_mergeable(a, c) || !job_type_is_mergeable(b, c));

- e = b;
- if (job_type_merge(&e, c) >= 0) {
+ bc = b;
+ if (job_type_merge_and_collapse(&bc, c, u) >= 0) {

/* Verify associativity */

- f = d;
- assert(job_type_merge(&f, c) == 0);
+ ab_c = ab;
+ assert(job_type_merge_and_collapse(&ab_c, c, u) == 0);
+
+ bc_a = bc;
+ assert(job_type_merge_and_collapse(&bc_a, a, u) == 0);

- g = e;
- assert(job_type_merge(&g, a) == 0);
+ a_bc = a;
+ assert(job_type_merge_and_collapse(&a_bc, bc, u) == 0);

- assert(f == g);
+ assert(ab_c == bc_a);
+ assert(ab_c == a_bc);

- printf("%s + %s + %s = %s\n", job_type_to_string(a), job_type_to_string(b), job_type_to_string(c), job_type_to_string(d));
+ printf("%s + %s + %s = %s\n", job_type_to_string(a), job_type_to_string(b), job_type_to_string(c), job_type_to_string(ab_c));
}
}
}
}
+ }

return 0;
diff --git a/src/transaction.c b/src/transaction.c
index f687539..ceac5a6 100644
--- a/src/transaction.c
+++ b/src/transaction.c
@@ -212,7 +212,7 @@ static int transaction_merge_jobs(Transaction *tr, DBusError *e) {

t = j->type;
LIST_FOREACH(transaction, k, j->transaction_next) {
- if (job_type_merge(&t, k->type) >= 0)
+ if (job_type_merge_and_collapse(&t, k->type, j->unit) >= 0)
continue;

/* OK, we could not merge all jobs for this
@@ -238,9 +238,9 @@ static int transaction_merge_jobs(Transaction *tr, DBusError *e) {
JobType t = j->type;
Job *k;

- /* Merge all transactions */
+ /* Merge all transaction jobs for j->unit */
LIST_FOREACH(transaction, k, j->transaction_next)
- assert_se(job_type_merge(&t, k->type) == 0);
+ assert_se(job_type_merge_and_collapse(&t, k->type, j->unit) == 0);

while ((k = j->transaction_next)) {
if (tr->anchor_job == k) {
@@ -774,6 +774,7 @@ int transaction_add_job_and_dependencies(

assert(tr);
assert(type < _JOB_TYPE_MAX);
+ assert(type < _JOB_TYPE_MAX_IN_TRANSACTION);
assert(unit);

/* log_debug("Pulling in %s/%s from %s/%s", */
@@ -824,7 +825,8 @@ int transaction_add_job_and_dependencies(
assert(!tr->anchor_job);
tr->anchor_job = ret;
}
- if (is_new && !ignore_requirements) {
+
+ if (is_new && !ignore_requirements && type != JOB_NOP) {
Set *following;

/* If we are following some other unit, make sure we
@@ -844,7 +846,7 @@ int transaction_add_job_and_dependencies(
}

/* Finally, recursively add in all dependencies. */
- if (type == JOB_START || type == JOB_RELOAD_OR_START || type == JOB_RESTART) {
+ if (type == JOB_START || type == JOB_RESTART) {
SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRES], i) {
r = transaction_add_job_and_dependencies(tr, JOB_START, dep, ret, true, override, false, false, ignore_order, e);
if (r < 0) {
@@ -934,7 +936,7 @@ int transaction_add_job_and_dependencies(

}

- if (type == JOB_STOP || type == JOB_RESTART || type == JOB_TRY_RESTART) {
+ if (type == JOB_STOP || type == JOB_RESTART) {

SET_FOREACH(dep, ret->unit->dependencies[UNIT_REQUIRED_BY], i) {
r = transaction_add_job_and_dependencies(tr, type, dep, ret, true, override, false, false, ignore_order, e);
@@ -959,7 +961,7 @@ int transaction_add_job_and_dependencies(
}
}

- if (type == JOB_RELOAD || type == JOB_RELOAD_OR_START) {
+ if (type == JOB_RELOAD) {

SET_FOREACH(dep, ret->unit->dependencies[UNIT_PROPAGATE_RELOAD_TO], i) {
r = transaction_add_job_and_dependencies(tr, JOB_RELOAD, dep, ret, false, override, false, false, ignore_order, e);
@@ -972,7 +974,7 @@ int transaction_add_job_and_dependencies(
}
}

- /* JOB_VERIFY_STARTED, JOB_RELOAD require no dependency handling */
+ /* JOB_VERIFY_STARTED require no dependency handling */
}

return 0;
diff --git a/src/unit.c b/src/unit.c
index ff8331c..3625ed2 100644
--- a/src/unit.c
+++ b/src/unit.c
@@ -249,6 +249,9 @@ bool unit_check_gc(Unit *u) {
if (u->job)
return true;

+ if (u->nop_job)
+ return true;
+
if (unit_active_state(u) != UNIT_INACTIVE)
return true;

@@ -358,6 +361,12 @@ void unit_free(Unit *u) {
job_free(j);
}

+ if (u->nop_job) {
+ Job *j = u->nop_job;
+ job_uninstall(j);
+ job_free(j);
+ }
+
for (d = 0; d < _UNIT_DEPENDENCY_MAX; d++)
bidi_set_free(u, u->dependencies[d]);

@@ -501,6 +510,9 @@ int unit_merge(Unit *u, Unit *other) {
if (other->job)
return -EEXIST;

+ if (other->nop_job)
+ return -EEXIST;
+
if (!UNIT_IS_INACTIVE_OR_FAILED(unit_active_state(other)))
return -EEXIST;

@@ -729,6 +741,9 @@ void unit_dump(Unit *u, FILE *f, const char *prefix) {
if (u->job)
job_dump(u->job, f, prefix2);

+ if (u->nop_job)
+ job_dump(u->nop_job, f, prefix2);
+
free(p2);
}

@@ -1507,6 +1522,7 @@ bool unit_job_is_applicable(Unit *u, JobType j) {
case JOB_VERIFY_ACTIVE:
case JOB_START:
case JOB_STOP:
+ case JOB_NOP:
return true;

case JOB_RESTART:
@@ -2293,6 +2309,11 @@ int unit_serialize(Unit *u, FILE *f, FDSet *fds) {
job_serialize(u->job, f, fds);
}

+ if (u->nop_job) {
+ fprintf(f, "job\n");
+ job_serialize(u->nop_job, f, fds);
+ }
+
dual_timestamp_serialize(f, "inactive-exit-timestamp", &u->inactive_exit_timestamp);
dual_timestamp_serialize(f, "active-enter-timestamp", &u->active_enter_timestamp);
dual_timestamp_serialize(f, "active-exit-timestamp", &u->active_exit_timestamp);
@@ -2382,12 +2403,18 @@ int unit_deserialize(Unit *u, FILE *f, FDSet *fds) {
return r;
}

- job_install_deserialized(j);
r = hashmap_put(u->manager->jobs, UINT32_TO_PTR(j->id), j);
if (r < 0) {
job_free(j);
return r;
}
+
+ r = job_install_deserialized(j);
+ if (r < 0) {
+ hashmap_remove(u->manager->jobs, UINT32_TO_PTR(j->id));
+ job_free(j);
+ return r;
+ }
} else {
/* legacy */
JobType type = job_type_from_string(v);
diff --git a/src/unit.h b/src/unit.h
index 5fceabb..62b82bc 100644
--- a/src/unit.h
+++ b/src/unit.h
@@ -158,10 +158,12 @@ struct Unit {
char *fragment_path; /* if loaded from a config file this is the primary path to it */
usec_t fragment_mtime;

- /* If there is something to do with this unit, then this is
- * the job for it */
+ /* If there is something to do with this unit, then this is the installed job for it */
Job *job;

+ /* JOB_NOP jobs are special and can be installed without disturbing the real job. */
+ Job *nop_job;
+
usec_t job_timeout;

/* References to this */
--
1.7.7

From ad1ece751b204c21424b6bf1de265fec406ba2e4 Mon Sep 17 00:00:00 2001
From: Michal Schmidt <mschmidt@redhat.com>
Date: Sat, 12 May 2012 21:06:27 +0200
Subject: [PATCH 30/30] job: only jobs on the runqueue can be run

---
src/job.c | 7 +++----
1 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/src/job.c b/src/job.c
index 2438f39..d6e7cb1 100644
--- a/src/job.c
+++ b/src/job.c
@@ -470,11 +470,10 @@ int job_run_and_invalidate(Job *j) {
assert(j);
assert(j->installed);
assert(j->type < _JOB_TYPE_MAX_IN_TRANSACTION);
+ assert(j->in_run_queue);

- if (j->in_run_queue) {
- LIST_REMOVE(Job, run_queue, j->manager->run_queue, j);
- j->in_run_queue = false;
- }
+ LIST_REMOVE(Job, run_queue, j->manager->run_queue, j);
+ j->in_run_queue = false;

if (j->state != JOB_WAITING)
return 0;
--
1.7.7