extras-buildsys/builder builder.py,1.31,1.31.2.1
Daniel Williams (dcbw)
fedora-extras-commits at redhat.com
Wed Aug 31 01:23:42 UTC 2005
Author: dcbw
Update of /cvs/fedora/extras-buildsys/builder
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv22497/builder
Modified Files:
Tag: STABLE_0_3
builder.py
Log Message:
2005-08-26 Dan Williams <dcbw at redhat.com>
* builder/builder.py
common/ExecUtils.py
- Thread jobs on the builder, even though we can only
do one at a time right now. It lets the job proceed
independently of the XMLRPCBuilderServer.
- Make the BuilderMock object's log function private
- Make the XMLRPCBuilderServer's log function private
- To execute mock, we now fork() and execv() the mock process
so that we can more reliably gather its output
- Correctly clean up the mock root directory
- Rename builder.log -> job.log so it can't get confused
with build.log from mock
Index: builder.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/builder/builder.py,v
retrieving revision 1.31
retrieving revision 1.31.2.1
diff -u -r1.31 -r1.31.2.1
--- builder.py 11 Aug 2005 19:32:32 -0000 1.31
+++ builder.py 31 Aug 2005 01:23:40 -0000 1.31.2.1
@@ -31,11 +31,13 @@
import urllib
import errno
import exceptions
+import threading
from plague import ArchUtils
from plague import FileDownloader
from plague import AuthedXMLRPCServer
from plague import HTTPServer
from plague import daemonize
+from plague import ExecUtils
from optparse import OptionParser
@@ -57,22 +59,24 @@
return urllib.quote(full_url)
-class BuilderMock:
+class BuilderMock(threading.Thread):
"""puts things together for an arch - baseclass for handling builds for
other arches"""
def __init__(self, uniqid, target, srpm_url):
self._uniqid = uniqid
self._status = 'init'
+ self._die = False
self._repo_locked = True
self._repo_locked_msg = False
self._files = []
- self._pobj = None
+ self._childpid = 0
self._target = target
self._srpm_url = srpm_url
self._log_fd = None
self._mock_config = None
self._done_status = ''
+ self._mock_log = None
self._result_dir = os.path.join(config_opts['builder_work_dir'], self._uniqid, "result")
if not os.path.exists(self._result_dir):
@@ -82,10 +86,10 @@
if not os.path.exists(self._state_dir):
os.makedirs(self._state_dir)
- logfile = os.path.join(self._result_dir, "builder.log")
+ logfile = os.path.join(self._result_dir, "job.log")
self._log_fd = open(logfile, "w+")
- self.log("""Starting job:
+ self._log("""Starting job:
Time: %s
Target: %s
UID: %s
@@ -97,33 +101,35 @@
except FileDownloader.FileNameError, e:
self._status = 'failed'
self._srpm_path = None
- self.log("Failed to extract SRPM filename. Error: '%s' URL: %s\n" % (e, srpm_url))
+ self._log("Failed to extract SRPM filename. Error: '%s' URL: %s\n" % (e, srpm_url))
else:
self._srpm_path = os.path.join(config_opts['builder_work_dir'], self._uniqid, "source", srpm_filename)
- def die(self, sig=15):
- # Do nothing if we've already been killed
- if self._done_status == 'killed':
+ threading.Thread.__init__(self)
+
+ def die(self):
+ if self.is_done_status() or self._done_status == 'killed':
return True
+ self._die = True
+ return True
- self.log("Killing build process...\n")
+ def _handle_death(self):
+ self._log("Killing build process...\n")
# Don't try to kill a running cleanup process
- if self._status != 'cleanup' and self._pobj and self._pobj.pid:
+ if self._status != 'cleanup' and self._childpid:
try:
- os.kill(self._pobj.pid, sig)
+ os.kill(self._childpid, 15)
except OSError, e:
- self.log("Couldn't kill process %d: %s\n" % (self._pobj.pid, e))
+ self._log("Couldn't kill process %d: %s\n" % (self._childpid, e))
- self.log("Killed.\n");
+ self._log("Killed.\n");
self._done_status = 'killed'
# Don't start cleanup over top of an existing cleanup process
if self._status != 'cleanup':
- self._cleanup()
-
- return True
+ self._start_cleanup()
- def log(self, string):
+ def _log(self, string):
if string and self._log_fd:
self._log_fd.write(string)
self._log_fd.flush()
@@ -133,13 +139,9 @@
sys.stdout.write(s + string)
sys.stdout.flush()
- def start(self):
- # check for existence of srpm before going on
- self._download_srpm()
-
- def _download_srpm(self):
+ def _start_srpm_download(self):
self._status = 'downloading'
- self.log("Starting download of %s.\n" % self._srpm_url)
+ self._log("Starting download of %s.\n" % self._srpm_url)
target_dir = os.path.dirname(self._srpm_path)
dl_thread = FileDownloader.FileDownloader(self.dl_callback, self._srpm_url, self._srpm_url,
target_dir, ['.src.rpm'], certs)
@@ -149,26 +151,54 @@
url = cb_data
if status == 'done':
self._status = 'downloaded'
- self.log("Retrieved %s.\n" % url)
+ self._log("Retrieved %s.\n" % url)
elif status == 'failed':
# Don't overwrite our status with 'failed' if we were cancelled
# and a download error occurred
if not self.is_done_status():
self._status = 'failed'
- self.log("Failed to retrieve %s.\n" % url)
+ self._log("Failed to retrieve %s.\n" % url)
+
+ def _copy_mock_output_to_log(self):
+ if self._mock_log and os.path.exists(self._mock_log):
+ ml = open(self._mock_log, "r")
+ line = "foo"
+ while len(line):
+ line = ml.readline()
+ if len(line):
+ self._log_fd.write(line)
+ ml.close()
+ os.remove(self._mock_log)
+ self._mock_log = None
- def _build(self):
- self.log("Starting step 'building' with command:\n")
+ def _start_build(self):
+ self._log("Starting step 'building' with command:\n")
if not os.path.exists(self._result_dir):
os.makedirs(self._result_dir)
if not os.path.exists(self._result_dir):
os.makedirs(self._result_dir)
- mock_args = "-r %s --arch %s --resultdir=%s --statedir=%s --uniqueext=%s %s" % (self.buildroot,
- self.buildarch, self._result_dir, self._state_dir, self._uniqid, self._srpm_path)
- cmd = '%s %s %s' % (self.arch_command, config_opts['builder_cmd'], mock_args)
- self.log(" %s\n" % cmd)
- self._pobj = popen2.Popen4(cmd=cmd, bufsize=1024)
- fcntl.fcntl(self._pobj.fromchild.fileno(), fcntl.F_SETFL, os.O_NONBLOCK)
+
+ # Set up build process arguments
+ args = []
+ cmd = os.path.abspath(config_opts['builder_cmd'])
+ if self.arch_command and len(self.arch_command):
+ arg_list = self.arch_command.split()
+ for arg in arg_list:
+ args.append(arg)
+ cmd = os.path.abspath(arg_list[0])
+ args.append(config_opts['builder_cmd'])
+ args.append("-r")
+ args.append(self.buildroot)
+ args.append("--arch")
+ args.append(self.buildarch)
+ args.append("--resultdir=%s" % self._result_dir)
+ args.append("--statedir=%s" % self._state_dir)
+ args.append("--uniqueext=%s" % self._uniqid)
+ args.append(self._srpm_path)
+ self._log(" %s\n" % string.join(args))
+
+ self._mock_log = os.path.join(self._result_dir, "mock-output.log")
+ self._childpid = ExecUtils.exec_with_redirect(cmd, args, None, self._mock_log, self._mock_log)
self._status = 'prepping'
# Poll a bit to wait for mock to write out the status file if
@@ -180,33 +210,51 @@
time.sleep(0.5)
except KeyboardInterrupt:
pass
+
# if mock exited with an error report that error and not
# the missing status file.
- exit_status = self._pobj.poll()
- if exit_status > 0:
+ (aux_pid, status) = os.waitpid(self._childpid, os.WNOHANG)
+ status = os.WEXITSTATUS(status)
+ if aux_pid:
+ # If mock exits anywhere here, something is wrong no matter
+ # what its exit status
+ self._childpid = 0
+ self._copy_mock_output_to_log()
self._status = 'failed'
break
- # Kill mock after 7s if it didn't dump the status file
- if time.time() - start_time > 7:
- self.log("Timed out waiting for the mock status file! %s\n" % mockstatusfile)
+ # Kill mock after 15s if it didn't dump the status file
+ if time.time() - start_time > 15:
+ self._copy_mock_output_to_log()
+ self._log("Timed out waiting for the mock status file! %s\n" % mockstatusfile)
try:
- self.log("Killing mock...\n")
- os.kill(self._pobj.pid, 15)
+ self._log("Killing mock...\n")
+ os.kill(self._childpid, 15)
+ self._log("Killed.\n")
except OSError, e:
- self.log("Couldn't kill mock process %d: %s\n" % (self._pobj.pid, e))
- else:
- self.log("Killed.\n")
+ self._log("Couldn't kill mock process %d: %s\n" % (self._childpid, e))
+
self._status = 'failed'
break
- def _cleanup(self):
- self.log("Cleaning up the buildroot...\n")
- cmd = '%s %s clean --uniqueext=%s -r %s' % (self.arch_command,
- config_opts['builder_cmd'], self._uniqid,
- self.buildroot)
- self.log(" %s\n" % cmd)
- self._pobj = popen2.Popen4(cmd=cmd)
+ def _start_cleanup(self):
+ self._log("Cleaning up the buildroot...\n")
+
+ args = []
+ cmd = os.path.abspath(config_opts['builder_cmd'])
+ if self.arch_command and len(self.arch_command):
+ arg_list = self.arch_command.split()
+ for arg in arg_list:
+ args.append(arg)
+ cmd = os.path.abspath(arg_list[0])
+ args.append(config_opts['builder_cmd'])
+ args.append("clean")
+ args.append("--uniqueext=%s" % self._uniqid)
+ args.append("-r")
+ args.append(self.buildroot)
+
+ self._log(" %s\n" % string.join(args))
+ self._childpid = ExecUtils.exec_with_redirect(cmd, args, None, None, None)
self._status = 'cleanup'
def _mock_is_prepping(self):
@@ -279,47 +327,33 @@
f.close()
return contents
- def _grab_mock_output(self):
- """ Grab mock output and write it to a log """
- if self._pobj:
- string = ' '
- while len(string) > 0:
- try:
- string = os.read(self._pobj.fromchild.fileno(), 1024)
- except OSError, e:
- if e.errno == errno.EAGAIN: # Resource temporarily unavailable
- break
- else:
- self.log("Error reading mock output: %s\n" % e)
- else:
- # We don't care about output from the 'cleanup' stage
- if self._status != 'cleanup':
- self._log_fd.write(string)
- self._log_fd.flush()
- os.fsync(self._log_fd.fileno())
-
def _mock_done(self):
- # Ensure child mock is reaped
- if self._pobj:
- self._pobj.poll()
+ # Ensure child process is reaped
+ if self._childpid:
+ try:
+ (pid, status) = os.waitpid(self._childpid, 0)
+ except OSError, e:
+ self._childpid = 0
+ pass
+
+ self._copy_mock_output_to_log()
self._files = self._find_files()
- self.log("\n\n-----------------------\n\n")
+ self._log("\n\n-----------------------\n\n")
if self._status == 'done':
- self.log("Job completed successfully.\n")
+ self._log("Job completed successfully.\n")
elif self._status == 'failed':
- if self._pobj:
- exit_status = self._pobj.poll()
- self.log("Job failed due to mock errors! Please see output in root.log and build.log\n")
+ if self._childpid:
+ self._log("Job failed due to mock errors! Please see output in root.log and build.log\n")
elif self._status == 'killed':
- self.log("Job failed because it was killed.\n")
+ self._log("Job failed because it was killed.\n")
if self._log_fd:
self._log_fd.close()
self._log_fd = None
def _status_init(self):
- pass
+ self._start_srpm_download()
def _status_downloading(self):
pass
@@ -328,11 +362,11 @@
# We can't start doing anything with yum until the build
# server tells us the repo is unlocked.
if not self._repo_locked:
- self._build()
+ self._start_build()
else:
# Only show this message once
if not self._repo_locked_msg:
- self.log("Waiting for repository to unlock before starting the build...\n")
+ self._log("Waiting for repository to unlock before starting the build...\n")
self._repo_locked_msg = True
def _status_prepping(self):
@@ -342,65 +376,63 @@
self._status = 'building'
def _status_building(self):
- exit_status = self._pobj.poll()
- if exit_status == 0:
- # mock completed successfully
- if self._status != 'building':
- self.log("Bad job end status %s encountered!" % self._status)
- self._done_status = 'done'
- self._cleanup()
- elif exit_status > 0:
- # mock exited with an error
- self._done_status = 'failed'
- self._cleanup()
+ (aux_pid, status) = os.waitpid(self._childpid, os.WNOHANG)
+ status = os.WEXITSTATUS(status)
+ if aux_pid:
+ self._childpid = 0
+ if status == 0:
+ self._done_status = 'done'
+ elif status > 0:
+ self._done_status = 'failed'
+
+ self._start_cleanup()
def _status_cleanup(self):
- exit_status = self._pobj.poll()
- if exit_status >= 0:
+ (aux_pid, status) = os.waitpid(self._childpid, os.WNOHANG)
+ if aux_pid:
# Mock exited
self._status = self._done_status
- if self._mock_config and self._mock_config.has_key('rootdir') and self._mock_config.has_key('statedir'):
- # Kill the entire job dir, not just the rootdir
- job_dir = os.path.normpath(self._mock_config['rootdir'] + "/../")
- job_dir2 = os.path.normpath(self._mock_config['statedir'] + "/../")
- print job_dir, job_dir2
-
- # Be a little paranoid about randomly removing an entire directory.
- # Compare the rootdir's parent to the statedir's parent and remove the
- # parent only if they match.
- if job_dir == job_dir2:
- shutil.rmtree(job_dir, ignore_errors=True)
- else:
- shutil.rmtree(self._mock_config['rootdir'], ignore_errors=True)
+ if self._mock_config:
+ if self._mock_config.has_key('rootdir'):
+ mock_root_dir = os.path.abspath(os.path.join(self._mock_config['rootdir'], "../"))
+ # Ensure we're actually deleting the job's rootdir
+ if mock_root_dir.endswith(self._uniqid):
+ shutil.rmtree(mock_root_dir, ignore_errors=True)
- def process(self):
- if self.is_done_status():
- return
+ if self._mock_config.has_key('statedir'):
+ shutil.rmtree(self._mock_config['statedir'], ignore_errors=True)
- # Execute operations for our current status
- try:
- func = getattr(self, "_status_%s" % self._status)
- func()
- except AttributeError:
- self.log("ERROR: internal builder inconsistency, didn't recognize status '%s'." % self._status)
- self._status = 'failed'
+ def run(self):
+ while True:
+ if self._die:
+ self._handle_death()
+
+ # Execute operations for our current status
+ try:
+ func = getattr(self, "_status_%s" % self._status)
+ func()
+ except AttributeError:
+ self._log("ERROR: internal builder inconsistency, didn't recognize status '%s'." % self._status)
+ self._status = 'failed'
+
+ if self.is_done_status():
+ self._mock_done()
+ break
- self._grab_mock_output()
- if self.is_done_status():
- self._mock_done()
+ time.sleep(3)
def _find_files(self):
# Grab the list of files in our job's result dir and URL encode them
files_in_dir = os.listdir(self._result_dir)
file_list = []
- self.log("\n\nOutput File List:\n-----------------\n")
+ self._log("\n\nOutput File List:\n-----------------\n")
for f in files_in_dir:
file_url = get_url_for_file(os.path.join(self._result_dir, f))
if file_url:
file_list.append(file_url)
- self.log(" Output File: %s\n" % urllib.unquote(file_url))
+ self._log(" Output File: %s\n" % urllib.unquote(file_url))
else:
- self.log(" Error: Couldn't get file URL for file %s" % f)
+ self._log(" Error: Couldn't get file URL for file %s" % f)
return file_list
def status(self):
@@ -513,7 +545,7 @@
self.target_arch_dict = target_arch_dict
self.cur_job = 0
- def log(self, string):
+ def _log(self, string):
if config_opts['debug']:
print string
@@ -522,7 +554,6 @@
jobid = 0
for (uniqid, job) in self.ids.iteritems():
if not job.is_done_status():
- job.process()
jobid = uniqid
self.cur_job = jobid # Update current job
@@ -535,13 +566,13 @@
def start(self, target, arch, srpm_url):
# Sanity check the request
if self.cur_job != 0:
- self.log("Tried to build '%s' when already buiding something" % srpm_url)
+ self._log("Tried to build '%s' when already buiding something" % srpm_url)
return 0
if not self.target_arch_dict.has_key(target) or len(self.target_arch_dict[target]) == 0:
- self.log("Tried to build '%s' on target %s which isn't supported" % (srpm_url, target))
+ self._log("Tried to build '%s' on target %s which isn't supported" % (srpm_url, target))
return 0
if arch != 'noarch' and not arch in self.target_arch_dict[target]:
- self.log("Tried to build '%s' on target %s which doesn't support arch %s" % (srpm_url, target, arch))
+ self._log("Tried to build '%s' on target %s which doesn't support arch %s" % (srpm_url, target, arch))
return 0
uniqid = self._get_uniqid(target, arch, srpm_url)
@@ -550,10 +581,10 @@
self.ids[uniqid] = job
job.start()
filename = os.path.basename(srpm_url)
- self.log("%s: started %s on %s arch %s at time %d" % (uniqid, filename,
+ self._log("%s: started %s on %s arch %s at time %d" % (uniqid, filename,
target, arch, cur_time))
else:
- self.log("%s: Failed request for %s on %s UNSUPPORTED arch %s at time %d" %
+ self._log("%s: Failed request for %s on %s UNSUPPORTED arch %s at time %d" %
(uniqid, srpm_url, target, arch, cur_time))
uniqid = 0
self.cur_job = uniqid
More information about the fedora-extras-commits
mailing list