extras-buildsys/server ArchJob.py, 1.33, 1.34 Builder.py, 1.43, 1.44 BuilderManager.py, 1.26, 1.27 PackageJob.py, 1.50, 1.51

Daniel Williams (dcbw) fedora-extras-commits at redhat.com
Mon May 29 20:26:45 UTC 2006


Author: dcbw

Update of /cvs/fedora/extras-buildsys/server
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv877/server

Modified Files:
	ArchJob.py Builder.py BuilderManager.py PackageJob.py 
Log Message:
2006-05-29  Dan Williams  <dcbw at redhat.com>

    * Make most everything work now; both Passive and Active builders will
        download/upload the SRPM from the server.  The SRPM's URL is no longer
        passed to the builder in the initial job request, but comes along later
        once the SRPM is actually available to the builder (which might be after
        an upload to the builder in the case of Passive builders).
    * Pylint cleanups
    * Clean up logging on builders by consolidating newline handling in one place
    * Use the Targets command to pass back to the server any upload URL the builder
        might have (for passive builders)




Index: ArchJob.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/ArchJob.py,v
retrieving revision 1.33
retrieving revision 1.34
diff -u -r1.33 -r1.34
--- ArchJob.py	20 May 2006 05:10:08 -0000	1.33
+++ ArchJob.py	29 May 2006 20:26:38 -0000	1.34
@@ -22,13 +22,13 @@
 import sha
 
 
-def _generate_uniqid(parent_jobid, start_time, target_dict, srpm_url):
+def _generate_uniqid(parent_jobid, start_time, target_dict, srpm):
     distro = target_dict['distro']
     repo =  target_dict['repo']
     target = target_dict['target']
     arch = target_dict['arch']
     hash_string = "%d%d%s%s%s%s%s" % (parent_jobid, start_time, distro,
-            target, arch, repo, srpm_url)
+            target, arch, repo, srpm)
     sha_hash = sha.new()
     sha_hash.update(hash_string)
     return sha_hash.hexdigest()
@@ -36,30 +36,36 @@
 
 AJ_STATUS_QUEUED = 'queued'
 AJ_STATUS_WAITING = 'waiting'
+AJ_STATUS_UPLOADING = 'uploading'
 AJ_STATUS_REPO_WAIT = 'repo_wait'
 AJ_STATUS_REPO_UNLOCK = 'repo_unlock'
 AJ_STATUS_RUNNING = 'running'
 AJ_STATUS_DOWNLOADING = 'downloading'
 AJ_STATUS_DONE = 'done'
 
+AJ_FAILURE_NONE = ''
+AJ_FAILURE_DOWNLOAD = 'download'
+AJ_FAILURE_BUILDER = 'builder'
+AJ_FAILURE_INTERNAL = 'internal'
+
 class ArchJob:
     """ Tracks a single build instance for a single arch on a builder """
 
-    def __init__(self, parent, target_dict, srpm_url):
+    def __init__(self, parent, target_dict, srpm_path):
         self._parent = parent
         self._builder = None
         self._repo = parent.repo()
         self._starttime = time.time()
         self._endtime = 0
         self._id = _generate_uniqid(parent.uid, self._starttime, target_dict,
-                srpm_url)
+                srpm_path)
         self._status = AJ_STATUS_QUEUED
+        self._uploader = None
         self._builder_status = ''
         self._failure_noticed = False
-        self._download_failed = False
-        self._internal_failure = False
+        self._failure_type = AJ_FAILURE_NONE
         self._target_dict = target_dict
-        self._srpm_url = srpm_url
+        self._srpm_path = srpm_path
         self._result_files = {}
         self._die = False
         self._die_user_requested = False
@@ -89,6 +95,9 @@
             attrdict = self._to_dict()
             self._parent.bm.queue_archjob_status_update(self._id, attrdict)
             del attrdict
+            
+            if builder_status in ['killed', 'failed']:
+                self._failure_type = AJ_FAILURE_BUILDER
 
     def failure_noticed(self):
         return self._failure_noticed
@@ -101,16 +110,8 @@
             return True
         return False
 
-    def builder_failed(self):
-        if self._builder_status in ['killed', 'failed']:
-            return True
-        return False
-
-    def download_failed(self):
-        return self._download_failed
-
-    def internal_failure(self):
-        return self._internal_failure
+    def failure_type(self):
+        return self._failure_type
 
     def prepping(self):
         return self._prepping
@@ -121,8 +122,8 @@
     def target_dict(self):
         return self._target_dict
 
-    def srpm_url(self):
-        return self._srpm_url
+    def srpm_path(self):
+        return self._srpm_path
 
     def archjob_id(self):
         return self._id
@@ -130,6 +131,9 @@
     def builder(self):
         return self._builder
 
+    def parent(self):
+        return self._parent
+
     def orphaned(self):
         return self._orphaned
 
@@ -144,6 +148,12 @@
 
     def unclaim(self, builder):
         builder.remove_suspend_listener(self)
+        if builder != self._builder:
+            return
+
+        if self._uploader:
+            self._uploader.cancel()
+            self._uploader = None
         self._builder = None
         if not self._is_done_status():
             self._orphaned = True
@@ -192,13 +202,38 @@
         self._set_status(AJ_STATUS_DONE)
 
     def _handle_builder_finished(self):
+        self._uploader = None
         self._set_status(AJ_STATUS_DOWNLOADING)
         self._builder.request_job_files(self._id)
 
     def _status_queued(self):
         pass
 
+    def _upload_srpm_cb(self, builder, result, msg, user_data=None):
+        """Callback from our controlling Builer object when it has uploaded the SRPM
+        to the remote builder."""
+        if builder != self._builder:
+            return
+        self._uploader = None
+        if result == FileTransfer.FT_RESULT_FAILED:
+            srpm_path = user_data
+            print "%s (%s/%s): %s - SRPM upload failed for %s." % (self._parent.uid,
+                    self._parent.package, self._target_dict['arch'], self._id, srpm_path)
+            # Unclaim the job and try for another builder
+            self.unclaim(self._builder)
+
     def _status_waiting(self):
+        if self._builder_status == 'init':
+            # Ask our Builder object to send the SRPM to the remote builder
+            # (if Passive) or to notify the builder of the SRPM URL (if Active).
+            self._set_status(AJ_STATUS_UPLOADING)
+            self._uploader = self._builder.request_srpm_upload(self,
+                    self._upload_srpm_cb, self._srpm_path, self._srpm_path)
+
+        if self._builder_finished():
+            self._handle_builder_finished()
+
+    def _status_uploading(self):
         # Builders pause before they enter the 'prep' state (which accesses
         # the repo for this target), and wait for the server to allow them
         # to proceed when the repo is unlocked.
@@ -262,11 +297,11 @@
             return
 
         if len(files.keys()) == 0:
-            self._download_failed = True
+            self._failure_type = AJ_FAILURE_DOWNLOAD
         else:
             for fname in files.keys():
                 if files[fname] == FileTransfer.FT_RESULT_FAILED:
-                    self._download_failed = True
+                    self._failure_type = AJ_FAILURE_DOWNLOAD
         self._result_files = files
         self._print_downloaded_files(self._result_files)
 
@@ -278,7 +313,11 @@
         pass
 
     def _handle_death(self, user_requested):
-        self._builder.request_kill_for_job(self._id)
+        if self._builder:
+            self._builder.request_kill_for_job(self._id)
+        if self._uploader:
+            self._uploader.cancel()
+            self._uploader = None
         if self._status == AJ_STATUS_REPO_WAIT:
             self._repo.cancel_unlock_request(self)
         self._set_done()
@@ -306,7 +345,7 @@
             print "%s (%s/%s): %s - internal archjob inconsistency.  Unknown status '%s'." % (self._parent.uid,
                     self._parent.package, self._target_dict['arch'], self._id, self._status)
             self._set_done()
-            self._internal_failure = True
+            self._failure_type = AJ_FAILURE_INTERNAL
             return
             
         # Do the actual work for this status


Index: Builder.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/Builder.py,v
retrieving revision 1.43
retrieving revision 1.44
diff -u -r1.43 -r1.44
--- Builder.py	20 May 2006 05:10:08 -0000	1.43
+++ Builder.py	29 May 2006 20:26:38 -0000	1.44
@@ -20,12 +20,13 @@
 import os
 import urllib
 import threading
+import shutil
 from plague import Commands
 from plague import XMLRPCServerProxy
 from plague import FileDownloader
+from plague import FileUploader
 from plague import FileTransfer
 import OpenSSL
-import ArchJob
 import EmailUtils
 from plague import DebugUtils
 
@@ -314,12 +315,6 @@
             self._free_slots = cmd.free_slots()
             self._num_slots = cmd.max_slots()
             self._lock.release()
-        elif isinstance(cmd, Commands.PlgCommandTargets):
-            self._lock.acquire()
-            self._target_list = cmd.targets()
-            self._lock.release()
-        elif isinstance(cmd, Commands.PlgCommandNewJobAck):
-            self._handle_new_job_ack(cmd)
         elif isinstance(cmd, Commands.PlgCommandBuildingJobs):
             status_reqs = self._handle_building_jobs(cmd)
             # Add any additional status requests onto our pending command queue
@@ -329,6 +324,8 @@
                 self._lock.release()
         elif isinstance(cmd, Commands.PlgCommandJobStatusAck):
             self._handle_job_status_ack(cmd)
+        elif isinstance(cmd, Commands.PlgCommandNewJobAck):
+            self._handle_new_job_ack(cmd)
         else:
             handled = False
         return handled
@@ -390,7 +387,7 @@
                 print "PassiveBuilder(%s) Error in request(): '%s'" % (self._address, exc)
         self.done = True
 
-    def close(self):
+    def cancel(self):
         try:
             self._server.close()
         except:
@@ -413,6 +410,7 @@
         Builder.__init__(self, manager, cfg, address, weight, TYPE_PASSIVE)
         # Builder will get pinged immediately since self._last_contact == 0
         self._ping_interval = self._BUILDER_UNAVAIL_PING_INTERVAL
+        self._upload_url = None
 
         self._certs = None
         if self._server_cfg.get_bool("Builders", "use_ssl"):
@@ -421,6 +419,33 @@
             self._certs['ca_cert'] = self._server_cfg.get_str("SSL", "ca_cert")
             self._certs['peer_ca_cert'] = self._server_cfg.get_str("SSL", "ca_cert")
 
+    def _upload_cb(self, result, cb_data, msg):
+        """Call the archjob's upload callback with the upload result."""
+        (archjob_id, srpm_path, upload_cb, user_data) = cb_data
+
+        # Notify the builder that the SRPM is uploaded
+        if result == FileTransfer.FT_RESULT_SUCCESS:
+            url = "file:///%s" % os.path.basename(srpm_path)
+            cmd = Commands.PlgCommandJobSRPM(archjob_id, url, self._seq_gen.next())
+            self._lock.acquire()
+            self._cmd_queue.append(cmd)
+            self._lock.release()
+
+        # Call the archjob's upload callback
+        upload_cb(self, result, msg, user_data)
+
+    def request_srpm_upload(self, archjob, upload_cb, user_data, srpm_path):
+        """Called by the archjob to request an upload of the SRPM to the builder."""
+        archjob_id = archjob.archjob_id()
+
+        # Start uploading the job to the builder
+        data = [("archjob_id", archjob_id)]
+        uploader = FileUploader.FileUploader(self._upload_url, [srpm_path], 'filedata', data,
+                self._certs)
+        uploader.set_callback(self._upload_cb, (archjob_id, srpm_path, upload_cb, user_data))
+        uploader.start()
+        return uploader
+
     def _download_cb(self, result, (archjob, urls), msg):
         """Notify archjob of the result of its download request."""
         if result == FileTransfer.FT_RESULT_FAILED:
@@ -526,8 +551,8 @@
             # right now.  Think server restart here.
             for item in cmd.jobs():
                 (uniqid, status) = cmd.get_job(item)
-                cmd = Commands.PlgCommandKillJob(uniqid, self._seq_gen.next())
-                self._cmd_queue.append(cmd)
+                new_cmd = Commands.PlgCommandKillJob(uniqid, self._seq_gen.next())
+                self._cmd_queue.append(new_cmd)
             handled = True
 
         # Let the superclass handle what's left
@@ -542,7 +567,9 @@
             elif isinstance(cmd, Commands.PlgCommandTargets):
                 if not self._target_list:
                     self._target_list = cmd.targets()
-                    
+                    self._upload_url = cmd.upload_url()
+                handled = True
+
         if not handled:
             print "Builder Error (%s): unhandled command '%s'" % (self._address, cmd.name())
 
@@ -610,6 +637,44 @@
     def __init__(self, manager, cfg, address, weight):
         Builder.__init__(self, manager, cfg, address, weight, TYPE_ACTIVE)
 
+    def _request_srpm_upload(self, archjob, upload_cb, user_data, srpm_path):
+        """Called by the archjob to request that an SRPM be made available to the builder."""
+        # Copy the SRPM to the correct upload location
+        http_dir = self._manager.http_dir()
+        srpm_dest_dir = archjob.parent().make_stage_dir(http_dir, delete=False)
+        srpm_dest = os.path.join(srpm_dest_dir, os.path.basename(srpm_path))
+
+        err_msg = "Failed"
+        result = FileTransfer.FT_RESULT_FAILED
+        if srpm_dest != srpm_path:
+            # Copy file if it's not already in the HTTP server's download dir
+            try:
+                shutil.copyfile(srpm_path, srpm_dest)
+                err_msg = "Success"
+                result = FileTransfer.FT_RESULT_SUCCESS
+            except IOError, exc:
+                err_msg = str(exc)
+        else:
+            # Make sure it's where it's supposed to be
+            if os.path.exists(srpm_dest):
+                err_msg = "Success"
+                result = FileTransfer.FT_RESULT_SUCCESS
+            else:
+                err_msg = "Candidate SRPM file %s didn't exist." % srpm_dest
+
+        if result == FileTransfer.FT_RESULT_SUCCESS:
+            # Construct the download URL
+            archjob_id = archjob.archjob_id()
+            url = self._manager.get_http_url_base() + srpm_dest[len(http_dir):]
+            cmd = Commands.PlgCommandJobSRPM(archjob_id, url, self._seq_gen.next())
+            self._lock.acquire()
+            self._cmd_queue.append(cmd)
+            self._lock.release()
+
+        # Call the archjob's upload callback
+        upload_cb(self, result, err_msg, user_data)
+        return None
+
     def _handle_job_files_ack(self, cmd):
         (archjob, urls) = self._decompose_job_files_ack(cmd)
         if not archjob:


Index: BuilderManager.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/BuilderManager.py,v
retrieving revision 1.26
retrieving revision 1.27
diff -u -r1.26 -r1.27
--- BuilderManager.py	16 May 2006 15:49:56 -0000	1.26
+++ BuilderManager.py	29 May 2006 20:26:38 -0000	1.27
@@ -115,7 +115,7 @@
     Can't block the main BuilderManager object by sitting in
     serve_forever().
     """
-    def __init__(self, cfg, bm):
+    def __init__(self, bm, cfg, use_ssl, certs):
         self._cfg = cfg
         self._bm = bm
         self._stopped = False
@@ -125,11 +125,7 @@
         hostname = cfg.get_str("General", "hostname")
         port = cfg.get_int("Active Builders", "xmlrpc_server_port")
         try:
-            if cfg.get_bool("Builders", "use_ssl") == True:
-                certs = {}
-                certs['key_and_cert'] = cfg.get_str("SSL", "server_key_and_cert")
-                certs['ca_cert'] = cfg.get_str("SSL", "ca_cert")
-                certs['peer_ca_cert'] = cfg.get_str("SSL", "ca_cert")
+            if use_ssl:
                 self._server = AuthedSSLBuilderServer((hostname, port), certs, self._bm)
             else:
                 self._server = AuthedBuilderServer((hostname, port), self._bm)
@@ -168,30 +164,43 @@
         self._queue_lock = threading.Lock()
         self._queue = []
 
+        self._certs = None
+        self._use_ssl = cfg.get_bool("Builders", "use_ssl")
+        if self._use_ssl:
+            self._certs = {}
+            self._certs['key_and_cert'] = cfg.get_str("SSL", "server_key_and_cert")
+            self._certs['ca_cert'] = cfg.get_str("SSL", "ca_cert")
+            self._certs['peer_ca_cert'] = cfg.get_str("SSL", "ca_cert")
+
         self._xmlrpc_server = None
         if any_active:
             # Builder XMLRPC server
             # Only start it when there are active-type builders listed
             # in the config file
-            self._xmlrpc_server = BuilderServerThread(cfg, self)
+            self._xmlrpc_server = BuilderServerThread(self, cfg, self._use_ssl, self._certs)
             self._xmlrpc_server.start()
 
         # Builder HTTP fileserver
-        hostname = cfg.get_str("General", "hostname")
-        port = cfg.get_int("Active Builders", "file_server_port")
-        http_dir = os.path.join(cfg.get_str("Directories", "server_work_dir"), "srpm_http_dir")
-        certs = {}
-        if cfg.get_bool("Builders", "use_ssl"):
-            certs['key_and_cert'] = cfg.get_str("SSL", "server_key_and_cert")
-            certs['ca_cert'] = cfg.get_str("SSL", "ca_cert")
-            certs['peer_ca_cert'] = cfg.get_str("SSL", "ca_cert")
-        self._fileserver = HTTPServer.PlgHTTPServerManager((hostname, port), http_dir, certs)
+        self._hostname = cfg.get_str("General", "hostname")
+        self._http_port = cfg.get_int("Active Builders", "file_server_port")
+        self._http_dir = os.path.join(cfg.get_str("Directories", "server_work_dir"), "srpm_http_dir")
+        self._fileserver = HTTPServer.PlgHTTPServerManager((self._hostname, self._http_port), self._http_dir, self._certs)
         if any_active:
             self._fileserver.set_POST_handler('/upload', self.upload_callback)
         self._fileserver.start()
 
         self._print_builders()
 
+    def get_http_url_base(self):
+        """Return the base HTTP server URL, taking port and SSL into account."""
+        method = "http"
+        if self._use_ssl:
+            method = "https"
+        return "%s://%s:%d/" % (method, self._hostname, self._http_port)
+
+    def http_dir(self):
+        return self._http_dir
+
     def upload_callback(self, request_handler, fs):
         # Ensure we know this builder
         ip = request_handler.client_address[0]


Index: PackageJob.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/PackageJob.py,v
retrieving revision 1.50
retrieving revision 1.51
diff -u -r1.50 -r1.51
--- PackageJob.py	14 May 2006 05:43:07 -0000	1.50
+++ PackageJob.py	29 May 2006 20:26:38 -0000	1.51
@@ -44,9 +44,11 @@
     if DEBUG:
         print stuff
 
-class PrepError(exceptions.Exception): pass
+class PrepError(exceptions.Exception):
+    pass
 
-class DepError(exceptions.Exception): pass
+class DepError(exceptions.Exception):
+    pass
         
 class BuildError(exceptions.Exception):
     def __init__(self, msg, arch):
@@ -164,8 +166,7 @@
         self._killer = None
         self._die = False
 
-        self.http_dir = os.path.join(self._server_cfg.get_str("Directories",
-                "server_work_dir"), "srpm_http_dir")
+        self._base_work_dir = self._server_cfg.get_str("Directories", "server_work_dir")
 
         first_stage = 'initialize'
         if self.use_cvs == False:
@@ -210,7 +211,7 @@
     def get_uid(self):
         return self.uid
         
-    def arch_handling(self, hdr):
+    def arch_handling(self, ba, exclusive, exclude):
         # Grab additional allowed arches for this package, using
         # wildcard matching if needed
         addl_arches = self._target_cfg.addl_arches_for_pkg(self.name)
@@ -230,13 +231,6 @@
                 if master_addl_arch not in base_arches:
                     addl_arches.remove(arch)
 
-        # Grab arches the SRPM does/does not want to build for.  If the package
-        # does use one or more of these tags in the specfile, hdr['<tag>'] will
-        # return an empty list, and we'll use defaults instead
-        ba = hdr['buildarchs']
-        exclusive = hdr['exclusivearch'] 
-        exclude = hdr['excludearch']
-        
         build_arches = {}
 
         # If the SRPM is noarch, there's nothing left to do, since
@@ -392,14 +386,14 @@
         if not srpmpath:
             msg = "Error: could not find srpm for %s - output was:\n\n%s" % (self._source, o)
             raise PrepError(msg)
-
+        
         self._srpm_path = srpmpath
 
         self._set_cur_stage('prep')
         return False
 
     def _stage_prep(self):
-        # In SRPM-only mode, cvs_tag is path to the SRPM to build
+        # In SRPM-only mode, 'source' is path to the SRPM to build
         if self.use_cvs == False:
             self._srpm_path = self._source
 
@@ -417,9 +411,24 @@
             self.epoch = '0'
         self.ver = hdr['version']
         self.release = hdr['release']
-        (self._archjobs, pkg_arches, allowed_arches) = self.arch_handling(hdr)
-        del hdr
-        del ts
+
+        buildarchs = hdr['buildarchs']
+        exclusive = hdr['exclusivearch'] 
+        exclude = hdr['excludearch']
+        del hdr, ts
+
+        # copy the SRPM to our work directory
+        self._result_dir = self.make_stage_dir(self._base_work_dir)
+        dest = os.path.join(self._result_dir, os.path.basename(self._srpm_path))
+        shutil.copy(self._srpm_path, dest)
+        self._srpm_path = dest
+
+        # Remove CVS checkout dirs
+        if self.use_cvs == True:
+            shutil.rmtree(self.checkout_tmpdir, ignore_errors=True)
+
+        # Find out what arches we're going to be building on
+        (self._archjobs, pkg_arches, allowed_arches) = self.arch_handling(buildarchs, exclusive, exclude)
 
         if len(self._archjobs) == 0:
             msg = """Package %s does not build on any architectures this build system supports.
@@ -510,7 +519,8 @@
                 raise DepError(str(exc))
 
             for dep in srpm.requiresList():
-                if dep.startswith("rpmlib("): continue
+                if dep.startswith("rpmlib("):
+                    continue
                 try:
                     pkg = base.returnPackageByDep(dep)
                 except repomd.mdErrors.PackageSackError, exc:
@@ -547,8 +557,7 @@
 
         # Create the depsolve metadata cache dir
         if not self._depsolve_dir:
-            server_work_dir = self._server_cfg.get_str("Directories", "server_work_dir")
-            self._depsolve_dir = os.path.join(server_work_dir, "depsolve", "%s-%s" % (self.uid, self.name))
+            self._depsolve_dir = os.path.join(self._base_work_dir, "depsolve", "%s-%s" % (self.uid, self.name))
             if os.path.exists(self._depsolve_dir):
                 shutil.rmtree(self._depsolve_dir, ignore_errors=True)
             os.makedirs(self._depsolve_dir)
@@ -605,60 +614,43 @@
 
     def _prepare_and_wait(self):
         # Make some directories we need
-        work_dir = self._server_cfg.get_str("Directories", "server_work_dir")
-        self._result_dir = self._make_stage_dir(work_dir)
         for arch in self._archjobs.keys():
             thisdir = os.path.join(self._result_dir, arch)
             if not os.path.exists(thisdir):
                 os.makedirs(thisdir)
 
-        # Copy SRPM to where the builder can access it
-        http_pkg_path = self._make_stage_dir(self.http_dir)
-        self._srpm_http_path = os.path.join(http_pkg_path, os.path.basename(self._srpm_path))
-        shutil.copy(self._srpm_path, self._srpm_http_path)
-        self._srpm_path = None
-
-        # Remove CVS checkout and make_srpm dirs
-        if self.use_cvs == True:
-            shutil.rmtree(self.checkout_tmpdir, ignore_errors=True)
+        self._set_cur_stage('waiting')
 
         # Queue up archjobs
-        self._set_cur_stage('waiting')
-        self._create_arch_jobs()
+        self._archjobs_lock.acquire()
+        for arch in self._archjobs.keys():
+            # Create and queue a single archjob for one architecture
+            target_dict = self._target_cfg.target_dict(arch)
+            archjob = ArchJob.ArchJob(self, target_dict, self._srpm_path)
+            self._archjobs[arch] = archjob
+            self.bm.builder_manager.request_arch_job(archjob)
+        self._archjobs_lock.release()
+
         return False
 
     def _stage_depsolve_wait(self):
         pass
 
-    def _make_stage_dir(self, rootdir):
+    def make_stage_dir(self, rootdir, delete=True):
+        """Given a root, creates a unique job-specific holding directory for
+        job files."""
         # The dir will look like this:
         # <rootdir>/devel/95-foo-1.1.0-23
         pkgsubdir = '%d-%s-%s-%s' % (self.uid, self.name, self.ver, self.release)
         stage_dir = os.path.join(rootdir, self._target_str, pkgsubdir)
-        if os.path.exists(stage_dir):
+        if os.path.exists(stage_dir) and delete:
             shutil.rmtree(stage_dir, ignore_errors=True)
-        os.makedirs(stage_dir)
+        try:
+            os.makedirs(stage_dir)
+        except OSError, exc:
+            pass
         return stage_dir
 
-    def _create_arch_jobs(self):
-        self._archjobs_lock.acquire()
-        for arch in self._archjobs.keys():
-            # Construct the SRPM URL
-            srpm_http_base = self._srpm_http_path[len(self.http_dir):]
-            method = "http"
-            if self._server_cfg.get_bool("Builders", "use_ssl") == True:
-                method = "https"
-            hostname = self._server_cfg.get_str("General", "hostname")
-            port = self._server_cfg.get_int("Active Builders", "file_server_port")
-            srpm_url = "%s://%s:%d/%s" % (method, hostname, port, srpm_http_base)
-
-            # Create and queue the archjob
-            target_dict = self._target_cfg.target_dict(arch)
-            archjob = ArchJob.ArchJob(self, target_dict, srpm_url)
-            self._archjobs[arch] = archjob
-            self.bm.builder_manager.request_arch_job(archjob)
-        self._archjobs_lock.release()
-
     def archjob_started_cb(self, archjob):
         # If this is the first archjob, that means we are now building.
         # So we start up the second PackageJobController thread.
@@ -725,7 +717,8 @@
                     self._event.wait()
                 self._event.clear()
         except PrepError, exc:
-            if self.use_cvs == True:
+            # Clean up CVS checkout directory if it exists
+            if self.use_cvs == True and os.path.exists(self.checkout_tmpdir):
                 shutil.rmtree(self.checkout_tmpdir, ignore_errors=True)
             msg = str(exc)
             subj = 'Prep Error (Job %s): %s on %s' % (self.uid, self._source, self._target_str)
@@ -766,7 +759,8 @@
                 continue
 
             completed_jobs = completed_jobs + 1
-            if job.builder_failed() or job.download_failed() or job.internal_failure():
+            failure_type = job.failure_type()
+            if failure_type != ArchJob.AJ_FAILURE_NONE:
                 failed_jobs = failed_jobs + 1
 
                 # Normal jobs will just stop when a single archjob fails, but
@@ -777,12 +771,16 @@
                     job.set_failure_noticed()
                     jobarch = job.arch()
                     msg = "Job failed."
-                    if job.builder_failed():
+                    if failure_type == ArchJob.AJ_FAILURE_BUILDER:
                         msg = "Job failed on arch %s\n" % jobarch
-                    elif job.download_failed():
+                    elif failure_type == ArchJob.AJ_FAILURE_DOWNLOAD:
+                        addr = None
+                        builder = job.builder()
+                        if builder:
+                            addr = job.builder().address()
                         msg = "Job failed on arch %s: couldn't download result files from builder '%s'.\n " \
-                        "Please contact the build system administrator." % (jobarch, job.builder().address())
-                    elif job.internal_failure():
+                        "Please contact the build system administrator." % (jobarch, addr)
+                    elif failure_type == ArchJob.AJ_FAILURE_INTERNAL:
                         msg = "Job failed on arch %s: there was an internal build system failure.\n " \
                         "Please contact the build system administrator." % jobarch
                     self._archjobs_lock.release()
@@ -901,13 +899,11 @@
     def _get_log_tail(self, arch):
         """ Returns the last 30 lines of the most relevant log file """
 
-        pkg_dir = "%s-%s-%s-%s" % (self.uid, self.name, self.ver, self.release)
-        work_dir = self._server_cfg.get_str("Directories", "server_work_dir")
-        log_dir = os.path.join(work_dir, self._target_str, pkg_dir, arch)
+        log_dir = os.path.join(self._result_dir, arch)
+        build_log = os.path.join(log_dir, "build.log")
+        root_log = os.path.join(log_dir, "root.log")
+        job_log = os.path.join(log_dir, "job.log")
         final_log = None
-        build_log = "%s/%s" % (log_dir, "build.log")
-        root_log = "%s/%s" % (log_dir, "root.log")
-        job_log = "%s/%s" % (log_dir, "job.log")
 
         # Try the most relevant log file first
         if os.path.exists(build_log) and os.path.getsize(build_log) > 0:




More information about the fedora-extras-commits mailing list