extras-buildsys/builder Config.py, NONE, 1.1 Makefile, 1.3, 1.4 builder.py, 1.31, 1.32 CONFIG.py, 1.9, NONE
Daniel Williams (dcbw)
fedora-extras-commits at redhat.com
Thu Aug 25 18:15:15 UTC 2005
Author: dcbw
Update of /cvs/fedora/extras-buildsys/builder
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv3976/builder
Modified Files:
Makefile builder.py
Added Files:
Config.py
Removed Files:
CONFIG.py
Log Message:
2005-08-25 Dan Williams <dcbw at redhat.com>
* Initial commit of reworked stuff:
- Each target gets separate config files on builder
and server
- Builders now run multiple jobs per builder instance
- Config files now ConfigParser based
- Target specifications are richer and require distro,
target, and repo names
- Builder's supported arches are autodetermined
- Various database fields renamed and/or removed
IT DOESN'T WORK YET
--- NEW FILE Config.py ---
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2005 Dan Williams <dcbw at redhat.com> and Red Hat, Inc.
import os
from ConfigParser import ConfigParser
from plague import BaseConfig
from plague import ArchUtils
class InvalidTargetException(Exception):
    """Raised when no configured target matches a requested target dict."""
    pass
class BuilderConfig(BaseConfig.BaseConfig):
    """Top-level builder configuration plus its list of per-target configs.

    Opens the builder's main config file (writing one with default values
    when it does not yet exist) and, via load_target_configs(), creates one
    TargetConfig for every *.cfg file in the configured target_configs_dir
    whose base arch this builder can build.
    """

    def __init__(self, filename):
        # filename -- path to the builder's main config file
        BaseConfig.BaseConfig.__init__(self, filename)
        try:
            self.open()
        except BaseConfig.BaseConfig.ConfigError:
            print("Config file did not exist. Writing %s with default values." % filename)
            self.save_default_config()
        # Populated lazily by load_target_configs()
        self._targets = []

    def targets(self):
        """Return the list of loaded TargetConfig objects."""
        return self._targets

    def get_target(self, td, fuzzy=False):
        """
        Returns a target for the ID specified, optionally just grabbing
        the first matching target that can build a particular arch.

        td -- target dict with 'distro', 'target', 'arch' and 'repo' keys
        fuzzy -- when True, match any arch the target supports rather
                 than only its base arch

        Raises InvalidTargetException when nothing matches.
        """
        for target_cfg in self._targets:
            if not fuzzy and target_cfg.match_exactly(td):
                return target_cfg
            elif fuzzy and target_cfg.match_fuzzy_on_arch(td):
                return target_cfg
        raise InvalidTargetException

    def load_target_configs(self, allowed_arches):
        """Load target configs for every arch this builder may build.

        allowed_arches -- list of arches this machine supports; target
        config files whose basearch is not in the list are skipped.
        Safe to call repeatedly; targets are only loaded once.
        """
        cfg_dir = self.get_str("Directories", "target_configs_dir")
        if not os.path.exists(cfg_dir) or not os.access(cfg_dir, os.R_OK):
            return
        # Don't ever load targets twice
        if len(self._targets) > 0:
            return
        for f in os.listdir(cfg_dir):
            if not f.endswith(".cfg"):
                continue
            cfg_file = os.path.join(cfg_dir, f)
            target_cfg = TargetConfig(self, cfg_file)
            if target_cfg.basearch() not in allowed_arches:
                # Unsupported basearch; drop the config on the floor
                continue
            # Add additional supported "sub" arches to each target's
            # arch list, like i486, sparcv9, etc
            for sub_arch in allowed_arches:
                if sub_arch in ArchUtils.sub_arches \
                        and ArchUtils.sub_arches[sub_arch] == target_cfg.basearch():
                    if sub_arch not in target_cfg.arches():
                        target_cfg.add_arch(sub_arch)
            # Every target can also build noarch packages
            target_cfg.add_arch('noarch')
            self._targets.append(target_cfg)

    def save_default_config(self, filename=None):
        """Write a default builder configuration to disk.

        filename is unused; kept for API compatibility with BaseConfig.
        """
        self.add_section("General")
        self.set_option("General", "debug", "yes")
        self.set_option("General", "builder_cmd", "/usr/bin/mock")
        self.set_option("General", "builder_user", "plague-builder")
        self.add_section("Directories")
        self.set_option("Directories", "builder_work_dir", "/tmp/builder_work")
        self.set_option("Directories", "target_configs_dir", "/etc/plague/builder/targets")
        self.add_section("Network")
        self.set_option("Network", "fileserver_port", "8889")
        self.set_option("Network", "xmlrpc_port", "8888")
        self.set_option("Network", "hostname", "")
        self.add_section("SSL")
        self.set_option("SSL", "use_ssl", "yes")
        self.set_option("SSL", "builder_key_and_cert_dir", "/etc/plague/builder/certs")
        self.set_option("SSL", "ca_cert", "/etc/plague/builder/certs/ca_cert.pem")
        self.save()
class TargetConfig(BaseConfig.BaseConfig):
    """Configuration for one build target (distro/target/basearch/repo).

    Backed by a single .cfg file; writes a default config when the file
    does not yet exist.
    """

    def __init__(self, cfg, filename):
        # cfg -- the parent BuilderConfig; filename -- this target's .cfg path
        BaseConfig.BaseConfig.__init__(self, filename)
        try:
            self.open()
        except BaseConfig.BaseConfig.ConfigError:
            print("Config file did not exist. Writing %s with default values." % filename)
            self.save_default_config()
        self._parent_cfg = cfg
        self._distro = self.get_str("General", "distro")
        self._target = self.get_str("General", "target")
        self._basearch = self.get_str("General", "basearch")
        self._repo = self.get_str("General", "repo")
        self._mock_config = self.get_str("General", "mock_config")
        # The base arch is always buildable; sub-arches are added later
        # via add_arch() by the parent config's load_target_configs().
        self._arches = [self._basearch]

    def target_dict(self):
        """Return this target's identity as a dict."""
        return {
            'distro': self._distro,
            'target': self._target,
            'arch': self._basearch,
            'repo': self._repo,
        }

    def match_exactly(self, td):
        """True when td matches this target, arch compared against basearch only."""
        return (td['distro'] == self._distro
                and td['target'] == self._target
                and td['arch'] == self._basearch
                and td['repo'] == self._repo)

    def match_fuzzy_on_arch(self, td):
        """True when td matches this target, accepting any supported arch."""
        return (td['distro'] == self._distro
                and td['target'] == self._target
                and td['arch'] in self._arches
                and td['repo'] == self._repo)

    def distro(self):
        return self._distro

    def target(self):
        return self._target

    def basearch(self):
        return self._basearch

    def repo(self):
        return self._repo

    def mock_config(self):
        return self._mock_config

    def arches(self):
        """Return all arches this target can build (basearch plus sub-arches)."""
        return self._arches

    def add_arch(self, arch):
        """Append an additional buildable arch to this target."""
        self._arches.append(arch)

    def parent_cfg(self):
        """Return the owning BuilderConfig."""
        return self._parent_cfg

    def save_default_config(self, filename=None):
        """Write a default target config (fedora/development/i386/core).

        filename is unused; kept for API compatibility with BaseConfig.
        """
        self.add_section("General")
        for option, value in (("distro", "fedora"),
                              ("target", "development"),
                              ("basearch", "i386"),
                              ("repo", "core"),
                              ("mock_config", "fedora-development-i386-core")):
            self.set_option("General", option, value)
        self.save()
Index: Makefile
===================================================================
RCS file: /cvs/fedora/extras-buildsys/builder/Makefile,v
retrieving revision 1.3
retrieving revision 1.4
diff -u -r1.3 -r1.4
--- Makefile 28 Jun 2005 16:46:12 -0000 1.3
+++ Makefile 25 Aug 2005 18:15:13 -0000 1.4
@@ -1,4 +1,5 @@
BINDIR=/usr/bin
+DATADIR=/usr/share
ETCDIR=/etc
DESTDIR='/'
INSTALL=/usr/bin/install
@@ -9,11 +10,17 @@
clean:
rm -f *.pyc *.pyo *~ *.bak
+OTHERINSTDIR=$(DESTDIR)/$(DATADIR)/$(PKGNAME)/builder
CONFIGDIR=$(DESTDIR)$(ETCDIR)/$(PKGNAME)/builder
+FILES = \
+ Config.py
+
install:
$(MKDIR) -p $(DESTDIR)$(BINDIR)
$(INSTALL) -m 755 builder.py $(DESTDIR)/$(BINDIR)/$(PKGNAME)-builder
+ $(MKDIR) -p $(OTHERINSTDIR)
+ for file in $(FILES); do $(INSTALL) -m 644 $$file $(OTHERINSTDIR)/$$file; done
$(MKDIR) -p $(CONFIGDIR)
- $(INSTALL) -m 755 CONFIG.py $(CONFIGDIR)/CONFIG.py
+ $(MKDIR) -p $(CONFIGDIR)/targets
$(MKDIR) -p $(CONFIGDIR)/certs
Index: builder.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/builder/builder.py,v
retrieving revision 1.31
retrieving revision 1.32
diff -u -r1.31 -r1.32
--- builder.py 11 Aug 2005 19:32:32 -0000 1.31
+++ builder.py 25 Aug 2005 18:15:13 -0000 1.32
@@ -31,6 +31,7 @@
import urllib
import errno
import exceptions
+import threading
from plague import ArchUtils
from plague import FileDownloader
from plague import AuthedXMLRPCServer
@@ -38,59 +39,82 @@
from plague import daemonize
from optparse import OptionParser
+sys.path.append('/usr/share/plague/builder')
+
+import Config
certs = {}
+build_arches = []
+
+
+def get_hostname(cfg, bind_all):
+ cfg_hostname = cfg.get_str("Network", "hostname")
+ if cfg_hostname and len(cfg_hostname):
+ return cfg_hostname
+ elif bind_all:
+ return ''
+ return socket.gethostname()
-def get_url_for_file(file_path):
+def get_url_for_file(cfg, file_path):
""" Return a URL pointing to a particular file in our work dir """
# Ensure the file we're turning into a URL lives in our builder work dir
- if not file_path.startswith(config_opts["builder_work_dir"]):
+ work_dir = cfg.get_str("Directories", "builder_work_dir")
+ if not file_path.startswith(work_dir):
return None
- file_part = file_path[len(config_opts["builder_work_dir"]) + 1:]
- port = "%s" % config_opts['fileserver_port']
- if config_opts['ssl_buildserver']:
+ file_part = file_path[len(work_dir) + 1:]
+ port = "%s" % cfg.get_int("Network", "fileserver_port")
+ if cfg.get_bool("SSL", "use_ssl"):
method = "https://"
else:
method = "http://"
- full_url = method + config_opts['hostname'] + ":" + port + "/" + file_part
+ hostname = get_hostname(cfg, False)
+ full_url = "%s%s:%s/%s" % (method, hostname, port, file_part)
return urllib.quote(full_url)
-class BuilderMock:
+class BuilderMock(threading.Thread):
"""puts things together for an arch - baseclass for handling builds for
other arches"""
- def __init__(self, uniqid, target, srpm_url):
+ def __init__(self, uniqid, target_cfg, buildarch, srpm_url):
+ self.buildarch = buildarch
+ self._started = time.time()
self._uniqid = uniqid
self._status = 'init'
+ self._die = False
self._repo_locked = True
self._repo_locked_msg = False
self._files = []
self._pobj = None
- self._target = target
+ self._target_cfg = target_cfg
+ self._builder_cfg = target_cfg.parent_cfg()
self._srpm_url = srpm_url
self._log_fd = None
self._mock_config = None
self._done_status = ''
+ self.buildroot = self._target_cfg.mock_config()
- self._result_dir = os.path.join(config_opts['builder_work_dir'], self._uniqid, "result")
+ work_dir = self._builder_cfg.get_str("Directories", "builder_work_dir")
+ self._result_dir = os.path.join(work_dir, self._uniqid, "result")
if not os.path.exists(self._result_dir):
os.makedirs(self._result_dir)
- self._state_dir = os.path.join(config_opts['builder_work_dir'], self._uniqid, "mock-state")
+ self._state_dir = os.path.join(work_dir, self._uniqid, "mock-state")
if not os.path.exists(self._state_dir):
os.makedirs(self._state_dir)
logfile = os.path.join(self._result_dir, "builder.log")
self._log_fd = open(logfile, "w+")
+ target_dict = self._target_cfg.target_dict()
+ target_str = "%s-%s-%s-%s" % (target_dict['distro'], target_dict['target'], target_dict['arch'], target_dict['repo'])
self.log("""Starting job:
Time: %s
Target: %s
UID: %s
Architecture: %s
- SRPM: %s\n\n""" % (time.asctime(time.gmtime()), self._target, self._uniqid, self.buildarch, srpm_url))
+ SRPM: %s\n\n""" % (time.asctime(time.gmtime()), target_str, self._uniqid, self.buildarch, srpm_url))
try:
srpm_filename = FileDownloader.get_base_filename_from_url(srpm_url, ['.src.rpm'])
@@ -98,14 +122,17 @@
self._status = 'failed'
self._srpm_path = None
self.log("Failed to extract SRPM filename. Error: '%s' URL: %s\n" % (e, srpm_url))
- else:
- self._srpm_path = os.path.join(config_opts['builder_work_dir'], self._uniqid, "source", srpm_filename)
+ return
+
+ self._srpm_path = os.path.join(work_dir, self._uniqid, "source", srpm_filename)
+ threading.Thread.__init__(self)
def die(self, sig=15):
- # Do nothing if we've already been killed
- if self._done_status == 'killed':
- return True
+ if self.is_done_status() or self._done_status == 'killed':
+ return
+ self._die = True
+ def _handle_death(self):
self.log("Killing build process...\n")
# Don't try to kill a running cleanup process
if self._status != 'cleanup' and self._pobj and self._pobj.pid:
@@ -119,25 +146,19 @@
# Don't start cleanup over top of an existing cleanup process
if self._status != 'cleanup':
- self._cleanup()
-
- return True
+ self._start_cleanup()
def log(self, string):
if string and self._log_fd:
self._log_fd.write(string)
self._log_fd.flush()
os.fsync(self._log_fd.fileno())
- if config_opts['debug']:
+ if self._builder_cfg.get_bool("General", "debug"):
s = "%s: " % self._uniqid
sys.stdout.write(s + string)
sys.stdout.flush()
- def start(self):
- # check for existence of srpm before going on
- self._download_srpm()
-
- def _download_srpm(self):
+ def _start_srpm_download(self):
self._status = 'downloading'
self.log("Starting download of %s.\n" % self._srpm_url)
target_dir = os.path.dirname(self._srpm_path)
@@ -145,19 +166,19 @@
target_dir, ['.src.rpm'], certs)
dl_thread.start()
- def dl_callback(self, status, cb_data):
+ def dl_callback(self, dl_status, cb_data):
url = cb_data
- if status == 'done':
+ if dl_status == 'done':
self._status = 'downloaded'
self.log("Retrieved %s.\n" % url)
- elif status == 'failed':
+ elif dl_status == 'failed':
# Don't overwrite our status with 'failed' if we were cancelled
# and a download error ocurred
if not self.is_done_status():
self._status = 'failed'
self.log("Failed to retrieve %s.\n" % url)
- def _build(self):
+ def _start_build(self):
self.log("Starting step 'building' with command:\n")
if not os.path.exists(self._result_dir):
os.makedirs(self._result_dir)
@@ -200,7 +221,7 @@
self._status = 'failed'
break
- def _cleanup(self):
+ def _start_cleanup(self):
self.log("Cleaning up the buildroot...\n")
cmd = '%s %s clean --uniqueext=%s -r %s' % (self.arch_command,
config_opts['builder_cmd'], self._uniqid,
@@ -319,7 +340,7 @@
self._log_fd = None
def _status_init(self):
- pass
+ self._start_srpm_download()
def _status_downloading(self):
pass
@@ -328,7 +349,7 @@
# We can't start doing anything with yum until the build
# server tells us the repo is unlocked.
if not self._repo_locked:
- self._build()
+ self._start_build()
else:
# Only show this message once
if not self._repo_locked_msg:
@@ -348,11 +369,11 @@
if self._status != 'building':
self.log("Bad job end status %s encountered!" % self._status)
self._done_status = 'done'
- self._cleanup()
+ self._start_cleanup()
elif exit_status > 0:
# mock exited with an error
self._done_status = 'failed'
- self._cleanup()
+ self._start_cleanup()
def _status_cleanup(self):
exit_status = self._pobj.poll()
@@ -373,21 +394,25 @@
else:
shutil.rmtree(self._mock_config['rootdir'], ignore_errors=True)
- def process(self):
- if self.is_done_status():
- return
+ def run(self):
+ while True:
+ if self._die:
+ self._handle_death()
- # Execute operations for our current status
- try:
- func = getattr(self, "_status_%s" % self._status)
- func()
- except AttributeError:
- self.log("ERROR: internal builder inconsistency, didn't recognize status '%s'." % self._status)
- self._status = 'failed'
+ # Execute operations for our current status
+ try:
+ func = getattr(self, "_status_%s" % self._status)
+ func()
+ except AttributeError:
+ self.log("ERROR: internal builder inconsistency, didn't recognize status '%s'." % self._status)
+ self._status = 'failed'
+
+ self._grab_mock_output()
+ if self.is_done_status():
+ self._mock_done()
+ break
- self._grab_mock_output()
- if self.is_done_status():
- self._mock_done()
+ time.sleep(3)
def _find_files(self):
# Grab the list of files in our job's result dir and URL encode them
@@ -406,12 +431,14 @@
def status(self):
return self._status
+ def uniqid(self):
+ return self._uniqid
+
def files(self):
return self._files
def repo_unlocked(self):
self._repo_locked = False
- return 0
def is_done_status(self):
if (self._status is 'done') or (self._status is 'killed') or (self._status is 'failed'):
@@ -419,57 +446,37 @@
return False
-def resolve_buildroot_name(target, arch):
- buildroot_map = config_opts['mock_buildroot_map']
- try:
- return buildroot_map[(config_opts['distro_name'], target, arch, config_opts['repo_name'])]
- except KeyError:
- raise InvalidTargetError()
-
-
class InvalidTargetError(exceptions.Exception): pass
class i386Arch(BuilderMock):
- def __init__(self, uniqid, target, buildarch, srpm_url):
- self.buildroot = resolve_buildroot_name(target, 'i386')
- self.buildarch = buildarch
+ def __init__(self, uniqid, target_cfg, buildarch, srpm_url):
self.arch_command = '/usr/bin/setarch i686'
- BuilderMock.__init__(self, uniqid, target, srpm_url)
+ BuilderMock.__init__(self, uniqid, target_cfg, buildarch, srpm_url)
class x86_64Arch(BuilderMock):
- def __init__(self, uniqid, target, buildarch, srpm_url):
- self.buildroot = resolve_buildroot_name(target, 'x86_64')
- self.buildarch = buildarch
+ def __init__(self, uniqid, target_cfg, buildarch, srpm_url):
self.arch_command = ''
- BuilderMock.__init__(self, uniqid, target, srpm_url)
+ BuilderMock.__init__(self, uniqid, target_cfg, buildarch, srpm_url)
class PPCArch(BuilderMock):
- def __init__(self, uniqid, target, buildarch, srpm_url):
- self.buildroot = resolve_buildroot_name(target, 'ppc')
- self.buildarch = buildarch
+ def __init__(self, uniqid, target_cfg, buildarch, srpm_url):
self.arch_command = '/usr/bin/setarch ppc32'
- BuilderMock.__init__(self, uniqid, target, srpm_url)
+ BuilderMock.__init__(self, uniqid, target_cfg, buildarch, srpm_url)
class PPC64Arch(BuilderMock):
- def __init__(self, uniqid, target, buildarch, srpm_url):
- self.buildroot = resolve_buildroot_name(target, 'ppc64')
- self.buildarch = buildarch
+ def __init__(self, uniqid, target_cfg, buildarch, srpm_url):
self.arch_command = ''
- BuilderMock.__init__(self, uniqid, target, srpm_url)
+ BuilderMock.__init__(self, uniqid, target_cfg, buildarch, srpm_url)
class SparcArch(BuilderMock):
- def __init__(self, uniqid, target, buildarch, srpm_url):
- self.buildroot = resolve_buildroot_name(target, 'sparc')
- self.buildarch = buildarch
+ def __init__(self, uniqid, target_cfg, buildarch, srpm_url):
self.arch_command = '/usr/bin/sparc32'
- BuilderMock.__init__(self, uniqid, target, srpm_url)
+ BuilderMock.__init__(self, uniqid, target_cfg, buildarch, srpm_url)
class Sparc64Arch(BuilderMock):
- def __init__(self, uniqid, target, buildarch, srpm_url):
- self.buildroot = resolve_buildroot_name(target, 'sparc64')
- self.buildarch = buildarch
+ def __init__(self, uniqid, target_cfg, buildarch, srpm_url):
self.arch_command = '/usr/bin/sparc64'
- BuilderMock.__init__(self, uniqid, target, srpm_url)
+ BuilderMock.__init__(self, uniqid, target_cfg, buildarch, srpm_url)
# Keep this global scope, used in __main__
builder_dict = {'i386': i386Arch,
@@ -489,106 +496,155 @@
'sparc64': Sparc64Arch
}
-def getArchBuilder(uniqid, target, buildarch, srpm_url, localarches):
+def get_arch_builder_instance(uniqid, target_cfg, buildarch, srpm_url):
"""hand it an arch it hands you back the builder instance you need"""
-
+
if buildarch != 'noarch' and not builder_dict.has_key(buildarch):
return None
builder = None
- if buildarch == 'noarch' and len(localarches) > 0:
- builder = builder_dict[localarches[0]]
+ if buildarch == 'noarch':
+ builder = builder_dict[target_cfg.arches()[0]]
else:
- if buildarch in localarches:
+ if buildarch in target_cfg.arches():
builder = builder_dict[buildarch]
if builder:
- return builder(uniqid, target, buildarch, srpm_url)
+ return builder(uniqid, target_cfg, buildarch, srpm_url)
return None
class XMLRPCBuilderServer:
- def __init__(self, target_arch_dict):
- self.ids = {} # unique id => awclass instance
- self.target_arch_dict = target_arch_dict
- self.cur_job = 0
+ def __init__(self, cfg, max_jobs):
+ self._all_jobs = {} # unique id => awclass instance
+ self._building_jobs_lock = threading.Lock()
+ self._building_jobs = []
+ self._cfg = cfg
+ self._max_jobs = max_jobs
def log(self, string):
- if config_opts['debug']:
+ if self._cfg.get_bool("General", "debug"):
print string
- def process(self):
- # Give jobs some time to update their status and do their thing
- jobid = 0
- for (uniqid, job) in self.ids.iteritems():
- if not job.is_done_status():
- job.process()
- jobid = uniqid
- self.cur_job = jobid # Update current job
+ def notify_job_done(self, job):
+ self._building_jobs_lock.acquire()
+ if job in self._building_jobs:
+ self._building_jobs.remove(job)
+ self._building_jobs_lock.release()
- def _get_uniqid(self, target, arch, srpm_url):
- check = '%d %s %s %s' % (time.time(), target, arch, srpm_url)
+ def _generate_uniqid(self, target_str, srpm_url):
sum = sha.new()
- sum.update(check)
+ sum.update('%d %s %s' % (time.time(), target_str, srpm_url))
return sum.hexdigest()
- def start(self, target, arch, srpm_url):
- # Sanity check the request
- if self.cur_job != 0:
- self.log("Tried to build '%s' when already buiding something" % srpm_url)
- return 0
- if not self.target_arch_dict.has_key(target) or len(self.target_arch_dict[target]) == 0:
- self.log("Tried to build '%s' on target %s which isn't supported" % (srpm_url, target))
+ def _get_target_cfg(self, target_dict):
+ target_cfg = None
+
+ # First try to find a target for buildarch specifically
+ try:
+ target_cfg = self._cfg.get_target(target_dict)
+ except Config.InvalidTargetException:
+ pass
+
+ if not target_cfg:
+ # If that doesn't work, just get a target that can build the arch
+ try:
+ target_cfg = self._cfg.get_target(target_dict, True)
+ except Config.InvalidTargetException:
+ pass
+
+ return target_cfg
+
+ def start_new_job(self, target_dict, srpm_url):
+ target_str = "%s-%s-%s-%s" % (target_dict['distro'], target_dict['target'], target_dict['arch'], target_dict['repo'])
+
+ self._building_jobs_lock.acquire()
+ num_building = len(self._building_jobs)
+ if num_building >= self._max_jobs:
+ self.log("Tried to build '%s' on target %s when already building" \
+ " %d/%d jobs" % (srpm_url, target_str, num_building, self._max_jobs))
+ self._building_jobs_lock.release()
return 0
- if arch != 'noarch' and not arch in self.target_arch_dict[target]:
- self.log("Tried to build '%s' on target %s which doesn't support arch %s" % (srpm_url, target, arch))
+ self._building_jobs_lock.release()
+
+ target_cfg = self._get_target_cfg(target_dict)
+ if not target_cfg:
+ self.log("Tried to build '%s' on target %s which isn't supported" % (srpm_url, target_str))
return 0
- uniqid = self._get_uniqid(target, arch, srpm_url)
- job = getArchBuilder(uniqid, target, arch, srpm_url, self.target_arch_dict[target])
+ uniqid = self._generate_uniqid(target_str, srpm_url)
+ job = get_arch_builder_instance(uniqid, target_cfg, target_dict['arch'], srpm_url)
if job != None:
- self.ids[uniqid] = job
+ self._all_jobs[uniqid] = job
+ self._building_jobs_lock.acquire()
+ self._building_jobs.append(job)
+ self._building_jobs_lock.release()
job.start()
filename = os.path.basename(srpm_url)
self.log("%s: started %s on %s arch %s at time %d" % (uniqid, filename,
target, arch, cur_time))
else:
self.log("%s: Failed request for %s on %s UNSUPPORTED arch %s at time %d" %
- (uniqid, srpm_url, target, arch, cur_time))
+ (uniqid, srpm_url, target_str, target_dict['arch'], cur_time))
uniqid = 0
- self.cur_job = uniqid
+
return uniqid
def die(self, uniqid):
- job = self.ids[uniqid]
- return job.die()
-
+ try:
+ job = self._all_jobs[uniqid]
+ job.die()
+ except KeyError:
+ pass
+
def files(self, uniqid):
- job = self.ids[uniqid]
- return job.files()
+ try:
+ job = self._all_jobs[uniqid]
+ return job.files()
+ except KeyError:
+ pass
+ return []
def repo_unlocked(self, uniqid):
- job = self.ids[uniqid]
- return job.repo_unlocked()
+ try:
+ job = self._all_jobs[uniqid]
+ job.repo_unlocked()
+ except KeyError:
+ pass
- def listjobs(self):
- return self.ids.keys()
+ def building_jobs(self):
+ jobs = {}
+ self._building_jobs_lock.acquire()
+ for job in self._building_jobs:
+ jobs[job.uniqid()] = job.status()
+ self._building_jobs_lock.release()
+ return jobs
- def get_cur_job(self):
- """ Are we currently building something? """
- status = 'idle'
+ def job_status(self, uniqid):
try:
- job = self.ids[self.cur_job]
- status = job.status()
+ job = self._all_jobs[uniqid]
+ return job.status()
except KeyError:
- status = 'idle'
- return (self.cur_job, status)
+ pass
+ return ''
- def supported_target_arches(self):
- return self.target_arch_dict
+ def free_slots(self):
+ free = 0
+ self._building_jobs_lock.acquire()
+ free = self._max_jobs - len(self._building_jobs)
+ self._building_jobs_lock.release()
+ return free
+
+ def supported_targets(self):
+ targets = []
+ for t in self._cfg.targets():
+ td = t.target_dict()
+ td['supported_arches'] = t.arches()
+ targets.append(td)
+ return targets
-def drop_privs():
+def drop_privs(user):
"""
We can't and shouldn't run mock as root, so we drop privs.
We have to run the HTTP server as root though so it can chroot
@@ -601,7 +657,7 @@
import pwd
import grp
- eu = config_opts['builder_user']
+ eu = user
try:
uid = int(eu)
except ValueError:
@@ -642,40 +698,50 @@
return 0
-def get_target_arches(allowed_arches):
- target_arches = {}
- for t in config_opts['mock_buildroot_map'].keys():
- (distro, target, arch, repo) = t
- if distro == config_opts['distro_name'] and repo == config_opts['repo_name']:
- # 'arch' is by definition the "master" arch, ie i386 or x86_64,
- # not i486/i586/ia32e/etc. We want to add on the "sub" arches
- # that this builder supports to its list of arches it can build
- # for this target
- if arch in allowed_arches:
- if not target_arches.has_key(target):
- target_arches[target] = []
- target_arches[target].append(arch)
-
- # Add applicable "sub" arches, like i486, sparcv9, etc
- for sub_arch in allowed_arches:
- if ArchUtils.sub_arches.has_key(sub_arch) and ArchUtils.sub_arches[sub_arch] == arch:
- if not sub_arch in target_arches[target]:
- target_arches[target].append(sub_arch)
+def determine_build_arches(cfg):
+ """
+ Attempt to autodetect what architectures this machine can build for,
+ based on the kernel's uname. If that fails, fall back to options in
+ the config file.
+ """
- return target_arches
+ machine_arch = os.uname()[4]
+ arches = []
+ try:
+ arches = ArchUtils.supported_arches[machine_arch]
+ except KeyError:
+ print "Unknown machine type. Please update plague's ArchUtils.py file."
+ # Ok, grab from config file if we can't autodetermine
+ if not len(arches):
+ arches = cfg.get_list("General", "build_arches")
+
+ for arch in arches:
+ if not arch in builder_dict.keys():
+ print "Unknown arch '%s' is not supported." % arch
+ sys.exit(1)
+
+ return arches
-if __name__ == '__main__':
- state={'opts': True, 'host': None, 'archs': [],
- 'daemon': False, 'pidfile': None, 'logfile': None}
- archlist = ""
- avail_arches = builder_dict.keys()
- avail_arches.sort()
- for a in avail_arches:
- archlist = archlist + a
- if a != avail_arches[len(avail_arches)-1]:
- archlist = archlist + ", "
+def determine_max_jobs(cfg):
+ """ Simple max job calculator based on number of CPUs """
+
+ import commands
+ max_jobs = 1
+ cmd = "/usr/bin/getconf _NPROCESSORS_ONLN"
+ (s, o) = commands.getstatusoutput(cmd)
+ if s == 0:
+ try:
+ max_jobs = int(o)
+ except ValueError:
+ pass
+ return max_jobs
+
+
+if __name__ == '__main__':
+# state={'opts': True, 'host': None, 'archs': [],
+# 'daemon': False, 'pidfile': None, 'logfile': None}
usage = "Usage: %s [-p <pidfile>] [-l <logfile>] [-d] -c <configfile>" % sys.argv[0]
parser = OptionParser(usage=usage)
@@ -697,25 +763,23 @@
sys.exit(1)
# Load in the config
- execfile(opts.configfile)
- if config_opts['ssl_buildserver']:
- certs['key_and_cert'] = config_opts['builder_key_and_cert']
- certs['ca_cert'] = config_opts['ca_cert']
- certs['peer_ca_cert'] = config_opts['ca_cert']
-
- for arch in config_opts['arches']:
- if arch not in archlist:
- print "Arch '%s' must be one of [ %s ]" % (arch, archlist)
- sys.exit(1)
+ cfg = Config.BuilderConfig(opts.configfile)
+ hostname = get_hostname(cfg, False)
- for arch in config_opts['arches']:
- if not arch in builder_dict.keys():
- print "Arch '%s' specified in the config file is not supported." % arch
- sys.exit(1)
+ if cfg.get_bool("SSL", "use_ssl"):
+ key_file = os.path.join(cfg.get_str("SSL", "builder_key_and_cert_dir"), "%s.pem" % hostname)
+ certs['key_and_cert'] = key_file
+ certs['ca_cert'] = cfg.get_str("SSL", "ca_cert")
+ certs['peer_ca_cert'] = certs['ca_cert']
+
+ build_arches = determine_build_arches(cfg)
+ if not len(build_arches):
+ print "Cannot determine buildable arches for this builder. Exiting..."
+ sys.exit(1)
- target_arch_dict = get_target_arches(config_opts['arches'])
- if len(target_arch_dict) == 0:
- print "No useable mock buildroot names configured. Exiting."
+ cfg.load_target_configs(build_arches)
+ if len(cfg.targets()) == 0:
+ print "No useable mock buildroots configured. Exiting..."
sys.exit(1)
if opts.daemon:
@@ -735,19 +799,20 @@
sys.stdout=logf
sys.stderr=logf
- work_dir = config_opts['builder_work_dir']
+ work_dir = cfg.get_str("Directories", "builder_work_dir")
if not os.path.exists(work_dir) or not os.access(work_dir, os.R_OK):
print "%s does not exist or is not readable." % work_dir
os._exit(1)
# Start up the HTTP server thread which the build server
# pulls completed RPMs from
- port = config_opts['fileserver_port']
- http_server = HTTPServer.PlgHTTPServerManager((config_opts['hostname'], port), work_dir, certs)
+ hostname = get_hostname(cfg, True)
+ port = cfg.get_int("Network", "fileserver_port")
+ http_server = HTTPServer.PlgHTTPServerManager((hostname, port), work_dir, certs)
http_server.start()
# Stop running as root
- if drop_privs() == -1:
+ if drop_privs(cfg.get_str("General", "builder_user")) == -1:
http_server.stop()
try:
time.sleep(1)
@@ -755,48 +820,38 @@
pass
os._exit(1)
- print "Binding to address '%s' with arches: [%s]" % (config_opts['hostname'], string.join(config_opts['arches']))
- xmlrpc_port = config_opts['xmlrpc_port']
+ print "Binding to address '%s' with arches: [%s]" % (hostname, string.join(build_arches, ","))
+ xmlrpc_port = cfg.get_int("Network", "xmlrpc_port")
try:
- if config_opts['ssl_buildserver']:
- xmlserver = AuthedXMLRPCServer.AuthedSSLXMLRPCServer((config_opts['hostname'], xmlrpc_port), None, certs)
+ if cfg.get_bool("SSL", "use_ssl") == True:
+ xmlserver = AuthedXMLRPCServer.AuthedSSLXMLRPCServer((hostname, xmlrpc_port), None, certs)
else:
- xmlserver = AuthedXMLRPCServer.AuthedXMLRPCServer((config_opts['hostname'], xmlrpc_port), None)
+ xmlserver = AuthedXMLRPCServer.AuthedXMLRPCServer((hostname, xmlrpc_port), None)
except socket.error, e:
if e[0] == 98:
- print "Error: couldn't bind to address '%s:%s'. Is the builder already running?" % (config_opts['hostname'], xmlrpc_port)
+ print "Error: couldn't bind to address '%s:%s'. " \
+ "Is the builder already running?" % (hostname, xmlrpc_port)
os._exit(1)
- bcs = XMLRPCBuilderServer(target_arch_dict)
+ max_jobs = determine_max_jobs(cfg)
+ bcs = XMLRPCBuilderServer(cfg, max_jobs)
xmlserver.register_instance(bcs)
last_time = time.time()
- while True:
- try:
+ try:
+ while True:
xmlserver.handle_request()
- except KeyboardInterrupt, e:
- print "Shutting down..."
- (curjob, status) = bcs.get_cur_job()
- if curjob:
- bcs.die(curjob)
- # wait for the job to clean up before quitting
- while True:
- (curjob, status) = bcs.get_cur_job()
- if status == 'idle' or curjob == 0:
- break
- bcs.process()
- time.sleep(0.5)
- break
-
- cur_time = time.time()
- if cur_time >= last_time + 3:
- # do some work every 3s or so
- bcs.process()
- last_time = time.time()
+ except KeyboardInterrupt, e:
+ print "Shutting down..."
+ building_jobs = bcs.building_jobs()
+ for jobid in building_jobs.keys():
+ bcs.die(jobid)
+ # wait for the jobs to clean up before quitting
+ while True:
+ building_jobs = bcs.building_jobs()
+ if len(building_jobs) == 0:
+ break
+ time.sleep(1)
http_server.stop()
- try:
- time.sleep(2)
- except KeyboardInterrupt, e:
- pass
os._exit(0)
--- CONFIG.py DELETED ---
More information about the fedora-extras-commits
mailing list