* [COMMITTED] Introduce maxcpus, make bb{1,2,3} accept 2 concurrent builds
@ 2022-06-29 20:39 Mark Wielaard
0 siblings, 0 replies; only message in thread
From: Mark Wielaard @ 2022-06-29 20:39 UTC (permalink / raw)
To: buildbot; +Cc: Mark Wielaard
This introduces the maxcpus property for workers that accept multiple
concurrent builds. ncpus will be used by default for builds, but for
larger (less frequent) builds maxcpus can be used. The bb{1,2,3}
workers have ncpus = 6 and maxcpus = 8 and now accept 2 concurrent
builds. All other workers have ncpus equal to maxcpus.
Only the full glibc build and the gccrust bootstrap build use maxcpus.
---
builder/master.cfg | 59 +++++++++++++++++++++++++++++++---------------
1 file changed, 40 insertions(+), 19 deletions(-)
diff --git a/builder/master.cfg b/builder/master.cfg
index c1bad9f..7e97ae5 100644
--- a/builder/master.cfg
+++ b/builder/master.cfg
@@ -60,19 +60,19 @@ c['workers'].append(sourceware_worker)
centos_x86_64_worker = worker.Worker("centos-x86_64",
getpw("centos-x86_64"),
max_builds=1,
- properties={'ncpus': 8},
+ properties={'ncpus': 8, 'maxcpus': 8},
notify_on_missing='mark@klomp.org');
c['workers'].append(centos_x86_64_worker)
debian_i386_worker = worker.Worker("debian-i386",
getpw("debian-i386"),
max_builds=1,
- properties={'ncpus': 4},
+ properties={'ncpus': 4, 'maxcpus': 4},
notify_on_missing='mark@klomp.org');
c['workers'].append(debian_i386_worker)
debian_i386_2_worker = worker.Worker("debian-i386-2",
getpw("debian-i386-2"),
max_builds=1,
- properties={'ncpus': 4},
+ properties={'ncpus': 4, 'maxcpus': 4},
notify_on_missing='mark@klomp.org');
c['workers'].append(debian_i386_2_worker)
@@ -82,7 +82,7 @@ debian_i386_workers = ['debian-i386', 'debian-i386-2']
debian_armhf_worker = worker.Worker("debian-armhf",
getpw("debian-armhf"),
max_builds=1,
- properties={'ncpus': 4},
+ properties={'ncpus': 4, 'maxcpus': 4},
keepalive_interval=900,
notify_on_missing='mark@klomp.org');
c['workers'].append(debian_armhf_worker)
@@ -91,7 +91,7 @@ c['workers'].append(debian_armhf_worker)
debian_arm64_worker = worker.Worker("debian-arm64",
getpw("debian-arm64"),
max_builds=1,
- properties={'ncpus': 3},
+ properties={'ncpus': 3, 'maxcpus': 3},
keepalive_interval=900,
notify_on_missing='mark@klomp.org');
c['workers'].append(debian_arm64_worker)
@@ -100,7 +100,7 @@ c['workers'].append(debian_arm64_worker)
fedora_s390x_worker = worker.Worker("fedora-s390x",
getpw("fedora-s390x"),
max_builds=1,
- properties={'ncpus': 3},
+ properties={'ncpus': 3, 'maxcpus': 3},
notify_on_missing=['mark@klomp.org',
'dhorak@redhat.com']);
c['workers'].append(fedora_s390x_worker)
@@ -109,7 +109,7 @@ c['workers'].append(fedora_s390x_worker)
fedora_ppc64le_worker = worker.Worker("fedora-ppc64le",
getpw("fedora-ppc64le"),
max_builds=1,
- properties={'ncpus': 4},
+ properties={'ncpus': 4, 'maxcpus': 4},
notify_on_missing=['mark@klomp.org',
'dhorak@redhat.com']);
c['workers'].append(fedora_ppc64le_worker)
@@ -118,7 +118,7 @@ c['workers'].append(fedora_ppc64le_worker)
debian_ppc64_worker = worker.Worker("debian-ppc64",
getpw("debian-ppc64"),
max_builds=1,
- properties={'ncpus': 4},
+ properties={'ncpus': 4, 'maxcpus': 4},
notify_on_missing=['mark@klomp.org',
'fitzsim@fitzsim.org']);
c['workers'].append(debian_ppc64_worker)
@@ -126,7 +126,7 @@ c['workers'].append(debian_ppc64_worker)
fedrawhide_x86_64_worker = worker.Worker("fedrawhide-x86_64",
getpw("fedrawhide-x86_64"),
max_builds=1,
- properties={'ncpus': 2},
+ properties={'ncpus': 2, 'maxcpus': 2},
notify_on_missing=['fche@elastic.org']);
c['workers'].append(fedrawhide_x86_64_worker)
@@ -139,8 +139,8 @@ bb1_worker = worker.DockerLatentWorker("bb1",
dockerfile=util.Interpolate('%(prop:container-file)s'),
volumes=["/home/builder/shared:/home/builder/shared"],
build_wait_timeout=0,
- max_builds=1,
- properties={'ncpus': 8});
+ max_builds=2,
+ properties={'ncpus': 6, 'maxcpus': 8});
c['workers'].append(bb1_worker)
bb2_worker = worker.DockerLatentWorker("bb2",
@@ -150,8 +150,8 @@ bb2_worker = worker.DockerLatentWorker("bb2",
dockerfile=util.Interpolate('%(prop:container-file)s'),
volumes=["/home/builder/shared:/home/builder/shared"],
build_wait_timeout=0,
- max_builds=1,
- properties={'ncpus': 8});
+ max_builds=2,
+ properties={'ncpus': 6, 'maxcpus': 8});
c['workers'].append(bb2_worker)
bb3_worker = worker.DockerLatentWorker("bb3",
@@ -161,8 +161,8 @@ bb3_worker = worker.DockerLatentWorker("bb3",
dockerfile=util.Interpolate('%(prop:container-file)s'),
volumes=["/home/builder/shared:/home/builder/shared"],
build_wait_timeout=0,
- max_builds=1,
- properties={'ncpus': 8});
+ max_builds=2,
+ properties={'ncpus': 6, 'maxcpus': 8});
c['workers'].append(bb3_worker)
vm_workers = ['bb1', 'bb2', 'bb3']
@@ -1367,6 +1367,12 @@ gccrust_make_step = steps.Compile(
name='make',
haltOnFailure=True)
+gccrust_make_bootstrap_step = steps.Compile(
+ workdir='gccrs-build',
+ command=['make', util.Interpolate('-j%(prop:maxcpus)s')],
+ name='make',
+ haltOnFailure=True)
+
gccrust_check_step = steps.Compile(
workdir='gccrs-build',
command=['make', 'check-rust', util.Interpolate('-j%(prop:ncpus)s')],
@@ -1375,6 +1381,14 @@ gccrust_check_step = steps.Compile(
"rust.log": "gcc/testsuite/rust/rust.log" },
haltOnFailure=False, flunkOnFailure=True)
+gccrust_check_bootstrap_step = steps.Compile(
+ workdir='gccrs-build',
+ command=['make', 'check-rust', util.Interpolate('-j%(prop:maxcpus)s')],
+ name='make check',
+ logfiles={ "rust.sum": "gcc/testsuite/rust/rust.sum",
+ "rust.log": "gcc/testsuite/rust/rust.log" },
+ haltOnFailure=False, flunkOnFailure=True)
+
# needed because make check-rust doesn't fail on unexpected failures
gccrust_check_check_step = steps.ShellCommand(
workdir='gccrs-build',
@@ -1399,8 +1413,8 @@ gccrust_bootstrap_factory = util.BuildFactory()
gccrust_bootstrap_factory.addStep(gccrust_git_step)
gccrust_bootstrap_factory.addStep(gccrust_rm_build_step)
gccrust_bootstrap_factory.addStep(gccrust_configure_bootstrap_step)
-gccrust_bootstrap_factory.addStep(gccrust_make_step)
-gccrust_bootstrap_factory.addStep(gccrust_check_step)
+gccrust_bootstrap_factory.addStep(gccrust_make_bootstrap_step)
+gccrust_bootstrap_factory.addStep(gccrust_check_bootstrap_step)
gccrust_bootstrap_factory.addStep(gccrust_check_check_step)
gccrust_bootstrap_factory.addSteps(gccrust_bunsen_steps)
@@ -2093,6 +2107,13 @@ glibc_configure_step = steps.Configure(
haltOnFailure=True)
glibc_make_step = steps.Compile(
+ workdir='glibc-build',
+ command=['make',
+ util.Interpolate('-j%(prop:maxcpus)s')],
+ name='make',
+ haltOnFailure=False, flunkOnFailure=True)
+
+glibc_make_build_step = steps.Compile(
workdir='glibc-build',
command=['make',
util.Interpolate('-j%(prop:ncpus)s')],
@@ -2102,7 +2123,7 @@ glibc_make_step = steps.Compile(
glibc_check_step = steps.Test(
workdir='glibc-build',
command=['make',
- util.Interpolate('-j%(prop:ncpus)s'),
+ util.Interpolate('-j%(prop:maxcpus)s'),
'check'],
name='make check',
haltOnFailure=False, flunkOnFailure=True)
@@ -2125,7 +2146,7 @@ glibc_build_factory = util.BuildFactory()
glibc_build_factory.addStep(glibc_git_step)
glibc_build_factory.addStep(glibc_rm_step)
glibc_build_factory.addStep(glibc_configure_step)
-glibc_build_factory.addStep(glibc_make_step)
+glibc_build_factory.addStep(glibc_make_build_step)
glibc_fedrawhide_x86_64_builder = util.BuilderConfig(
name="glibc-fedrawhide-x86_64",
--
2.30.2
^ permalink raw reply [flat|nested] only message in thread
only message in thread, other threads:[~2022-06-29 20:39 UTC | newest]
Thread overview: (only message) (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2022-06-29 20:39 [COMMITTED] Introduce maxcpus, make bb{1, 2, 3} accept 2 concurrent builds Mark Wielaard
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for read-only IMAP folder(s) and NNTP newsgroup(s).