extras-buildsys/server ArchJob.py, NONE, 1.1 Builder.py, NONE, 1.1 BuilderManager.py, NONE, 1.1 PackageJob.py, NONE, 1.1 main.py, NONE, 1.1 BuildMaster.py, 1.9, 1.10 CONFIG.py, 1.14, 1.15 Makefile, 1.4, 1.5 Repo.py, 1.4, 1.5 UserInterface.py, 1.14, 1.15 BuildJob.py, 1.15, NONE buildserver.py, 1.12, NONE client_manager.py, 1.27, NONE

Daniel Williams (dcbw) fedora-extras-commits at redhat.com
Tue Jul 5 21:08:05 UTC 2005


Author: dcbw

Update of /cvs/fedora/extras-buildsys/server
In directory cvs-int.fedora.redhat.com:/tmp/cvs-serv3768/server

Modified Files:
	BuildMaster.py CONFIG.py Makefile Repo.py UserInterface.py 
Added Files:
	ArchJob.py Builder.py BuilderManager.py PackageJob.py main.py 
Removed Files:
	BuildJob.py buildserver.py client_manager.py 
Log Message:
2005-07-05  Dan Williams <dcbw at redhat.com>

    * Rework a bunch of stuff so the build server doesn't use quite as much CPU,
        also split out stuff from server/client_manager.py

    * Generalize the common/ classes to provide both SSL and non-SSL facilities,
        renaming a lot of those files in the process

    * Fix non-SSL builder/server and client/server communication

    Note: At this time, SSL may be broken.




--- NEW FILE ArchJob.py ---
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2005 Dan Williams <dcbw at redhat.com> and Red Hat, Inc.

import time
import string
import xmlrpclib
import sys
import socket
import os
import threading
import urllib
from plague import FileDownloader
from plague import CommonErrors
from M2Crypto import SSL

# Load in the config
# (executes CONFIG.py in this module's namespace, defining the
# 'config_opts' dictionary used throughout this file)
execfile("/etc/plague/server/CONFIG.py")


# SSL certificate and key filenames
# Handed to FileDownloader when 'ssl_builders' is enabled; the single CA
# cert serves both as our CA cert and to validate the peer's certificate.
certs = {}
certs['key_and_cert'] = config_opts['server_key_and_cert']
certs['ca_cert'] = config_opts['ca_cert']
certs['peer_ca_cert'] = config_opts['ca_cert']


class ArchJob:
    """ Tracks a single build instance for a single arch on a builder """

    def __init__(self, builder, server, par_job, jobid, target, arch):
        """ builder - the Builder instance this job runs on
            server  - XML-RPC proxy to the remote builder
            par_job - parent PackageJob
            jobid   - builder-side job id
            target  - distro target (e.g. 'devel')
            arch    - architecture being built
        """
        self.par_job = par_job
        self.builder = builder
        self._server = server
        self.jobid = jobid
        self.status = 'running'
        self.builder_status = ''
        self.target = target
        self.arch = arch
        self._builder_gone = False
        # Maps result file URL -> download state:
        #   0: waiting   1: in progress   2: error   3: done
        self.downloads = {}
        self.starttime = time.time()

    def _builder_finished(self):
        """Has the builder-side job reached a terminal state?"""
        if self.builder_status == 'done' or self.builder_status == 'killed' or self.builder_status == 'failed':
            return True
        return False

    def builder_failed(self):
        """Did the builder-side job end unsuccessfully?"""
        if self.builder_status == 'killed' or self.builder_status == 'failed':
            return True
        return False

    def builder_prepping(self):
        """Is the builder currently in the 'prepping' state for this job?"""
        if self.builder_status == 'prepping':
            return True
        return False

    def set_builder_status(self, status):
        """ Record the status the builder last reported for this job.
            'idle' reports carry no job information and are ignored.
        """
        if status != 'idle':
            self.builder_status = status
        # Wake the parent job immediately so it notices the failure
        if status == 'killed' or status == 'failed':
            self.par_job.wake()

    def _send_repo_unlocked(self):
        """Tell the builder the target's repo is unlocked so it may proceed to prep."""
        try:
            self._server.repo_unlocked(self.jobid)
        except socket.error, e:
            if not CommonErrors.canIgnoreSocketError(e):
                # BUGFIX: was self.bci.address(); ArchJob has no 'bci'
                # attribute, so this error path raised AttributeError
                print "%s (%s/%s): [ %s ] Unknown error when signalling repo unlocked: '%s'" % (self.par_job.uid,
                            self.par_job.package, self.arch, self.builder.address(), e)
        except xmlrpclib.ProtocolError, e:
            pass

    def _dl_files(self):
        """Ask the builder for the list of result file URLs for this job."""
        files = []
        try:
            files = self._server.files(self.jobid)
        except socket.error, e:
            if not CommonErrors.canIgnoreSocketError(e):
                # BUGFIX: message was copy & pasted from _send_repo_unlocked
                # (claimed "signalling repo unlocked"), and self.bci doesn't exist
                print "%s (%s/%s): [ %s ] Unknown error when retrieving file list: '%s'" % (self.par_job.uid,
                            self.par_job.package, self.arch, self.builder.address(), e)
        except xmlrpclib.ProtocolError, e:
            pass
        return files

    def process(self):
        """ Drive this job's state machine ('running' -> 'downloading' ->
            'done').  Called periodically from the Builder's polling thread.
        """
        if self.status == 'done':
            return
        elif self.status == 'running':
            # Clients pause before they enter the 'prep' state (which accesses
            # the repo for this target), and wait for the server to allow them
            # to proceed when the repo is unlocked.
            if self.builder_status == 'downloaded':
                if not self.par_job.repo.locked():
                    self._send_repo_unlocked()

            # if the builder is done, grab list of files to download
            if self._builder_finished():
                self.status = 'downloading'
                for f in self._dl_files():
                    uf = urllib.unquote(f)
                    self.downloads[uf] = 0
        elif self.status == 'downloading':
            # Start grabbing the next undownloaded file, but only
            # if we aren't already pulling one down
            undownloaded = False
            for url in self.downloads.keys():
                dl_status = self.downloads[url]
                if dl_status == 0:
                    # spawn the download
                    target_dir = os.path.join(self.par_job.get_stage_dir(), self.arch)
                    if not os.path.exists(target_dir):
                        os.makedirs(target_dir)
                    if config_opts['ssl_builders']:
                        dl_thread = FileDownloader.FileDownloader(self.dl_callback, url, url,
                                    target_dir, ['.rpm', '.log'], certs)
                    else:
                        dl_thread = FileDownloader.FileDownloader(self.dl_callback, url, url,
                                    target_dir, ['.rpm', '.log'], None)
                    dl_thread.start()
                    undownloaded = True
                    self.downloads[url] = 1
                    break
                elif dl_status == 1:
                    # in progress
                    undownloaded = True
                    break
                elif dl_status == 2 or dl_status == 3:
                    # error or done; nothing more to do for this file
                    continue

            # All done downloading?
            if not undownloaded:
                self._print_downloaded_files()
                self.status = 'done'
                self.par_job.wake()

    def _print_downloaded_files(self):
        """Log the result files we tried to download, flagging failures."""
        descriptions = []
        for url in self.downloads.keys():
            # Renamed local (was 'string', shadowing the string module)
            desc = "'" + os.path.basename(url) + "'"
            if self.downloads[url] == 2:
                desc = desc + " (failed)"
            descriptions.append(desc)
        file_string = ", ".join(descriptions)

        print "%s (%s/%s): Build result files - [ %s ]" % (self.par_job.uid,
                    self.par_job.package, self.arch, file_string)

    def dl_callback(self, status, cb_data):
        """FileDownloader completion callback; cb_data is the file's URL."""
        url = cb_data
        if status == 'done':
            self.downloads[url] = 3
        elif status == 'failed':
            self.downloads[url] = 2

    def get_status(self):
        """Return this job's state: 'running', 'downloading' or 'done'."""
        return self.status

    def get_files(self):
        """Return base filenames of all successfully downloaded result files."""
        files = []
        for url in self.downloads.keys():
            fname = FileDownloader.get_base_filename_from_url(url, ['.rpm', '.log'])
            if fname and self.downloads[url] == 3:
                files.append(fname)
        return files

    def set_download_status(self, url, status):
        """Externally override the download state for one URL."""
        if url in self.downloads:
            self.downloads[url] = status

    def builder_gone(self):
        """ The builder vanished; if this job hadn't finished, remove it
            from the parent job so the arch can be handled again.
        """
        if self.status != 'done':
            # BUGFIX: was self._par_job, but the attribute set in __init__
            # is self.par_job -- this raised AttributeError
            self.par_job.remove_arch_job(self)

    def die(self):
        """Kill the job on the builder and mark it done locally."""
        if self.status == 'initialize' or self.status == 'running':
            self._server.die(self.jobid)
            self.status = 'done'




--- NEW FILE Builder.py ---
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2005 Dan Williams <dcbw at redhat.com> and Red Hat, Inc.

import time
import string
import xmlrpclib
import sys
import socket
import os
import threading
from plague import XMLRPCServerProxy
from plague import CommonErrors
from M2Crypto import SSL
import ArchJob

# Load in the config
# (executes CONFIG.py in this module's namespace, defining the
# 'config_opts' dictionary used throughout this file)
execfile("/etc/plague/server/CONFIG.py")


# SSL certificate and key filenames
# Handed to XMLRPCServerProxy when 'ssl_builders' is enabled; the single CA
# cert serves both as our CA cert and to validate the peer's certificate.
certs = {}
certs['key_and_cert'] = config_opts['server_key_and_cert']
certs['ca_cert'] = config_opts['ca_cert']
certs['peer_ca_cert'] = config_opts['ca_cert']


class Builder(threading.Thread):
    """ Tracks all jobs on a builder instance.

    One Builder thread exists per remote builder.  The thread polls the
    builder over XML-RPC every few seconds, pushing status into the
    ArchJobs running on it and detecting when the builder stops responding.
    """

    def __init__(self, manager, address):
        """ manager: the BuilderManager that owns this builder
            address: XML-RPC URL of the remote builder

            Raises RuntimeError if the builder cannot be contacted for
            its supported arches, so callers only keep reachable builders.
        """
        self._cur_jobid = None
        self._manager = manager
        # Maps builder-side jobid -> ArchJob for jobs started on this builder
        self._jobs = {}
        self._address = address
        if config_opts['ssl_builders']:
            self._server = XMLRPCServerProxy.XMLRPCServerProxy(self._address, certs)
        else:
            self._server = XMLRPCServerProxy.XMLRPCServerProxy(self._address, None)
        # Consecutive failed XML-RPC polls; > 2 means the builder is gone
        self._unavail_count = 0
        self._arches = []
        try:
            self._arches = self._server.supported_arches()
        except (socket.error, SSL.SSLError):
            raise RuntimeError
        self._alive = True
        # Every builder can build noarch packages
        self._arches.append('noarch')
        # Serializes XML-RPC calls to the builder across threads
        self._server_lock = threading.Lock()
        self._stop = False
        self._prepping_jobs = False
        threading.Thread.__init__(self)

    def arches(self):
        """Return the list of arches this builder supports (includes 'noarch')."""
        return self._arches

    def can_build_arch(self, arch):
        """Return True if this builder supports building 'arch'."""
        if arch in self._arches:
            return True
        return False

    def address(self):
        """Return the builder's XML-RPC address string."""
        return self._address

    def alive(self):
        """
        Is the builder responding to requests?
        """
        return self._alive

    def start_job(self, par_job, target, arch, srpm_url):
        """ Ask the builder to start building srpm_url for target/arch.
            Returns the new ArchJob; raises RuntimeError if this builder
            can't take the job or the request fails.
        """
        if not arch in self._arches or not self.available():
            raise RuntimeError

        self._server_lock.acquire()
        try:
            jobid = self._server.start(target, arch, srpm_url)
        except (socket.error, SSL.SSLError, xmlrpclib.ProtocolError):
            jobid = 0
        self._server_lock.release()

        # jobid 0 means the builder refused the job or the call failed
        if jobid == 0:
            raise RuntimeError

        job = ArchJob.ArchJob(self, self._server, par_job, jobid, target, arch)
        self._jobs[jobid] = job
        self._update_cur_job()

        return job

    def _update_cur_job(self):
        """ Poll the builder for its current job and status, tracking how
            many consecutive polls have failed.
        """
        cur_jobid = None

        self._server_lock.acquire()
        try:
            (jobid, status) = self._server.get_cur_job()
        except (socket.error, SSL.SSLError, xmlrpclib.ProtocolError):
            self._unavail_count = self._unavail_count + 1
        else:
            self._unavail_count = 0
        self._server_lock.release()

        # Update the current job's status.  jobid/status are only bound
        # when the call above succeeded, i.e. when _unavail_count == 0.
        if self._unavail_count == 0:
            try:
                job = self._jobs[jobid]
                job.set_builder_status(status)
            except KeyError:
                pass
            self._cur_jobid = jobid

            if status == 'prepping':
                self._prepping_jobs = True
            else:
                self._prepping_jobs = False

    def stop(self):
        """Ask the polling thread to exit after its current iteration."""
        self._stop = True

    def run(self):
        """Polling loop: refresh builder/job status every 3 seconds until stopped."""
        while not self._stop:
            self._update_cur_job()

            # Kill all jobs on the client if it went away
            if self._unavail_count > 2:
                # NOTE: in Python 2, .keys() returns a list copy, so
                # deleting entries during this loop is safe
                for jobid in self._jobs.keys():
                    job = self._jobs[jobid]
                    job.builder_gone()
                    del self._jobs[jobid]
                self._alive = False
                self._stop = True
                self._manager.builder_gone()
                continue

            # Update status of all jobs
            for j in self._jobs.values():
                j.process()

            time.sleep(3)

    def available(self):
        """
        Can the builder start a new job right now?
        """
        if self._unavail_count > 2 or not self._alive:
            return False
        if self._cur_jobid:
            return False
        return True

    def any_prepping_jobs(self):
        """Return True if the builder's current job is in the 'prepping' state."""
        return self._prepping_jobs

    def to_dict(self):
        """Return a summary dict (address, arches, status) for UI/clients."""
        client_dict = {}
        client_dict['address'] = self._address
        client_dict['arches'] = self._arches
        if self._cur_jobid:
            client_dict['status'] = 'building'
        else:
            client_dict['status'] = 'idle'
        return client_dict



--- NEW FILE BuilderManager.py ---
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2005 Dan Williams <dcbw at redhat.com> and Red Hat, Inc.

import time
import string
import xmlrpclib
import sys
import socket
import os
import threading
import Builder

# Load in the config
# (executes CONFIG.py in this module's namespace, defining the
# 'config_opts' dictionary used throughout this file)
execfile("/etc/plague/server/CONFIG.py")


class BuilderManager:
    """
    Tracks individual builder instances and hands queued arch job
    requests to available builders.
    """

    def __init__(self):
        self._builders_lock = threading.Lock()

        # List of addresses of possible builders
        self.possible_builders = config_opts['builders']
        self.running_builders = []
        builder_list = self.update_builders()

        # Print out builder list when starting up
        print("\nBuilders:")
        print("-" * 60)
        for builder in builder_list:
            # Renamed local (was 'string', shadowing the string module)
            line = "  " + builder['address']
            line = line + " " * (40 - len(builder['address']))
            for arch in builder['arches']:
                line = line + arch + " "
            print(line)
        print("")

        self._queue_lock = threading.Lock()
        self._queue = []

        self._have_work = False

    def __del__(self):
        for builder in self.running_builders:
            builder.stop()
        # Give builder threads a chance to notice the stop flag, but
        # don't sleep pointlessly when there are none to wait for
        if len(self.running_builders) > 0:
            time.sleep(2)

    def set_build_master(self, build_master):
        self._build_master = build_master

    def update_builders(self):
        """ Contact any configured builders we aren't already running,
            start a Builder thread for each reachable one, and return a
            list of their to_dict() summaries.
        """
        self._builders_lock.acquire()
        try:
            builder_list = []
            for address in self.possible_builders:
                # If the address is "https" but we aren't set up for SSL, exit
                if address.startswith('https') and not config_opts['ssl_builders']:
                    print("Builder address (%s) starts with 'https', but the 'ssl_builders' option is set to False." % address)
                    os._exit(1)

                # If the address is already in our running_builders list, skip it
                skip = False
                for builder in self.running_builders:
                    if address == builder.address():
                        skip = True
                if skip:
                    continue

                # Try to connect to builder and add it to our builder
                # list if we can
                try:
                    builder = Builder.Builder(self, address)
                except RuntimeError:
                    pass
                else:
                    builder_list.append(builder.to_dict())
                    builder.start()
                    self.running_builders.append(builder)
        finally:
            # BUGFIX: release the lock even if something above raises
            self._builders_lock.release()
        return builder_list

    def list_builders(self):
        """Return to_dict() summaries for all running builders."""
        builder_list = []
        for builder in self.running_builders:
            builder_list.append(builder.to_dict())
        return builder_list

    def have_work(self):
        """ True when process() has something to do: a queued request with
            at least one available builder, or a builder recently went away.
        """
        avail = False
        for builder in self.running_builders:
            if builder.available():
                avail = True
        if len(self._queue) > 0 and avail:
            return True
        return self._have_work

    def builder_gone(self):
        """Called from a Builder thread when its builder stops responding."""
        self._have_work = True

    def process(self):
        """ One scheduling pass: drop dead builders, assign queued arch
            job requests to free builders, and notify the parent jobs.
        """
        self._have_work = False

        # Deal with dead/unreachable builders.
        # BUGFIX: iterate over a copy -- removing items from a list while
        # iterating over it skips elements.
        for builder in self.running_builders[:]:
            if not builder.alive():
                print("Removing builder '%s' because it timed out." % builder.address())
                builder.stop()
                self.running_builders.remove(builder)

        # Deal with new arch jobs
        self._queue_lock.acquire()
        try:
            new_jobs = {}
            # BUGFIX: iterate over a copy here too (entries are removed below)
            for req in self._queue[:]:
                parent = req['parent']
                stage = parent.get_cur_stage()
                if (stage != 'prep') and (stage != 'building'):
                    self._queue.remove(req)
                    continue
                # Find a free builder for this request
                for builder in self.running_builders:
                    if builder.available() and builder.can_build_arch(req['arch']):
                        try:
                            job = builder.start_job(parent, req['target'], req['arch'], req['srpm_url'])
                        except RuntimeError:
                            pass
                        else:
                            if parent not in new_jobs:
                                new_jobs[parent] = []
                            new_jobs[parent].append(job)
                            self._queue.remove(req)
                            break
        finally:
            self._queue_lock.release()

        # Notify the parent jobs of their new archjobs.  Have to do this outside _queue_lock
        # for locking reasons
        for parent in new_jobs.keys():
            for job in new_jobs[parent]:
                parent.add_arch_job(job)

        if len(self._queue) > 0:
            time.sleep(0.25)

    def request_arch_job(self, par_job, target, arch, srpm_url):
        """Queue a request to build srpm_url for target/arch on some builder."""
        req = {}
        req['parent'] = par_job
        req['target'] = target
        req['arch'] = arch
        req['srpm_url'] = srpm_url

        self._queue_lock.acquire()
        self._queue.append(req)
        self._queue_lock.release()

    def any_prepping_builders(self):
        """Query each builder for any jobs that are in the 'prepping' state."""
        for builder in self.running_builders:
            if builder.alive() and builder.any_prepping_jobs():
                return True
        return False




--- NEW FILE PackageJob.py ---
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Copyright 2005 Dan Williams <dcbw at redhat.com> and Red Hat, Inc.


import os
import os.path
import sys
import commands
import threading
import time
import popen2
import rpmUtils
import exceptions
import shutil
import tempfile
import smtplib
from email.MIMEText import MIMEText
import string
import SimpleXMLRPCServer
import xmlrpclib
import socket
import BuilderManager
import ArchJob

# Load in the config
# (executes CONFIG.py in this module's namespace, defining the
# 'config_opts' dictionary used throughout this file)
execfile("/etc/plague/server/CONFIG.py")

# Export the CVS environment that the 'cvs' command-line tool expects
os.environ['CVSROOT'] = config_opts['pkg_cvs_root']
if len(config_opts['pkg_cvs_rsh']) > 0:
    os.environ['CVS_RSH'] = config_opts['pkg_cvs_rsh']

DEBUG = False
def debugprint(stuff=''):
    """Write 'stuff' to stdout, but only when the module DEBUG flag is on."""
    if DEBUG:
        print(stuff)

def log(stuff=''):
    """Unconditionally write 'stuff' to stdout."""
    print(stuff)

class PrepError(exceptions.Exception):
    """Exception carrying an errno and message args for prep-stage failures."""
    def __init__(self, errno=0, args=None):
        exceptions.Exception.__init__(self)
        self.errno = errno
        self.args = args

    def __str__(self):
        return self.args
        
class BuildError(exceptions.Exception):
    """Exception carrying an errno and message args for build-stage failures."""
    def __init__(self, errno=0, args=None):
        exceptions.Exception.__init__(self)
        self.errno = errno
        self.args = args

    def __str__(self):
        return self.args

        

# Root of the directory tree from which SRPMs are served to builders over HTTP
http_dir = os.path.join(config_opts['server_work_dir'], "srpm_http_dir")



def is_build_job_stage_valid(stage):
    """
    Validate a job stage.

    Returns True when 'stage' is one of the recognized build job stage
    names, False otherwise.
    """
    valid_stages = ['initialize', 'checkout', 'make_srpm', 'prep', 'building',
                    'cleanup', 'failed', 'addtorepo', 'repodone', 'needsign',
                    'finished', 'killed']
    return stage in valid_stages



class PackageJob(threading.Thread):
    """ Controller object for building 1 SRPM on multiple arches """

    def __init__(self, uid, username, package, cvs_tag, repo, buildmaster, hostname):
        """ Controller for building one SRPM on all needed arches.

            uid         - unique job id
            username    - user who submitted the job
            package     - package name
            cvs_tag     - CVS tag to build (or, in SRPM-only mode, the SRPM path)
            repo        - Repo object for the build target
            buildmaster - the BuildMaster that owns this job
            hostname    - hostname builders use to fetch the SRPM from us
        """
        self.curstage = 'initialize'
        self.bm = buildmaster
        self.hostname = hostname
        self.uid = uid
        self.username = username
        self.starttime = time.time()
        self.endtime = None
        self.package = package
        self.name = None
        self.target = repo.target()
        self.repo = repo
        self.failed = False
        self.no_cvs = config_opts['use_srpm_not_cvs']
        self.cvs_tag = cvs_tag
        self.stage_dir = None
        self.srpm_path = None
        self.srpm_http_path = None
        # Deal with straight SRPM builds
        # BUGFIX: compare stage strings with '==', not 'is'; the identity
        # test only worked by accident of CPython string interning
        if self.no_cvs and self.curstage == 'initialize':
            self._set_cur_stage('make_srpm')
        self.repofiles = {}
        self.archjobs = {}
        self._archjobs_lock = threading.Lock()
        self._event = threading.Event()
        threading.Thread.__init__(self)

    def get_cur_stage(self):
        """Return the job's current stage string."""
        return self.curstage

    def _set_cur_stage(self, stage):
        """ Update our internal job stage, and notify the BuildMaster that
            we've changed as well (only when the stage actually changed).
        """
        prev_stage = self.curstage
        self.curstage = stage
        if prev_stage != stage:
            self.bm.queue_job_status_update(self.uid, stage)

    def get_uid(self):
        """Return this job's unique id."""
        return self.uid
        
    def arch_handling(self, hdr):
        # Associate sub-architectures with their "master" architecture.
        # This is only used to determine which arches to build on by default,
        # so that if we have an Additional Package Arches file that specifies
        # 'sparcv9' for a package that we don't try to build sparcv9 for that
        # package unless 'sparc' is listed in our 'targets' config option.
        sub_arches = {
                        'athlon'    : 'i386',
                        'i686'      : 'i386',
                        'i586'      : 'i386',
                        'i486'      : 'i386',
                        'amd64'     : 'x86_64',
                        'ia32e'     : 'x86_64',
                        'ppc32'     : 'ppc',
                        'sparcv8'   : 'sparc',
                        'sparcv9'   : 'sparc'
                     }

        # Grab additional build arches out of the Additional Package
        # Arches file
        apa_file_name = self.target + "addl-arches"
        apa_file = os.path.join(config_opts['addl_package_arches_dir'], apa_file_name)
        addl_arches = []
        try:
            f = open(apa_file, "r")
        except IOError, e:
            pass
        else:
            for line in f.readlines():
                line = line.strip()
                tmp_split = line.split(':')
                if len(tmp_split) == 2:
                    package = tmp_split[0]
                    if package == self.name:
                        tmp_arches = tmp_split[1]
                        tmp_arches = tmp_arches.strip()
                        addl_arches = tmp_arches.split(' ')
                        break

        targets = config_opts['targets']
        buildable_arches = targets[self.target]

        target_opt_arches = config_opts['target_optional_arches']
        opt_arches = []
        if target_opt_arches.has_key(self.target):
            opt_arches = target_opt_arches[self.target]

        # Remove arches we don't support from addl_arches
        for arch in addl_arches:
            if sub_arches.has_key(arch):
                master_addl_arch = sub_arches[arch]
                if master_addl_arch not in buildable_arches:
                    if master_addl_arch not in opt_arches:
                        addl_arches.remove(arch)

        ba = hdr['buildarchs']
        exclusive = hdr['exclusivearch'] 
        exclude = hdr['excludearch']
        
        arches = {}

        if ba == ['noarch']:
            arches['noarch'] = None
            return arches

        # default to building all base arches the 'target'
        # supports, and any additional arches from the
        # Additional Package Arches file whose "master" arch
        # is enabled for this target
        tmparchs = []
        allowed_arches = []
        for arch in buildable_arches:
            tmparchs.append(arch)
            allowed_arches.append(arch)
        for arch in addl_arches:
            tmparchs.append(arch)
            allowed_arches.append(arch)
        # Optional arches don't get built by default but are "allowed"
        for arch in opt_arches:
            allowed_arches.append(arch)

        if ba:
            tmparchs = ba
        else:
            if exclusive:
                tmparchs = exclusive
                
        if exclude:
            for arch in exclude:
                if arch in tmparchs:
                    tmparchs.remove(arch)

        # we probably need to check the archs, and make sure they are what 
        # we can build for
        for thisarch in tmparchs:
            if thisarch in allowed_arches:
                arches[thisarch] = None

        return arches


    def _make_stage_dir(self, rootdir):
        """ Create a clean staging directory for this job under 'rootdir'.

            The layout is <rootdir>/<target>/<uid>-<name>-<ver>-<release>,
            e.g. <rootdir>/devel/95-foo-1.1.0-23.  Any pre-existing
            directory of the same name is removed first.
        """
        job_subdir = '%d-%s-%s-%s' % (self.uid, self.name, self.ver, self.release)
        full_path = os.path.join(rootdir, self.target, job_subdir)
        if os.path.exists(full_path):
            shutil.rmtree(full_path, ignore_errors=True)
        os.makedirs(full_path)
        return full_path

        
    def _checkout(self):
        """ Check the package module (and the 'common' directory if missing)
            out of CVS at self.cvs_tag into a fresh temporary directory.
            On failure: emails the result, marks the job failed/finished,
            and removes the temporary checkout.
        """
        self._set_cur_stage('checkout')
        dir_prefix = self.cvs_tag + "-"
        self.checkout_tmpdir = tempfile.mkdtemp(prefix=dir_prefix, dir=config_opts['tmpdir'])
        os.chdir(self.checkout_tmpdir)

        # Checkout the module
        cmd = '%s co -r %s %s' % (config_opts['cvs_cmd'], self.cvs_tag, self.package)
        debugprint("%d: Running %s" % (self.uid, cmd))
        s, o = commands.getstatusoutput(cmd)
        if s != 0:
            subj = 'Prep Error: %s on %s' % (self.cvs_tag, self.target)
            msg = "could not check out %s from %s - output was:\n %s" % (self.cvs_tag, self.target, o)
            self.email_result(resultstring=msg, subject=subj)
            self._set_cur_stage('finished')
            self.failed = True
            shutil.rmtree(self.checkout_tmpdir, True)
            return

        # Just in case the 'common' directory didn't come along for the ride,
        # get it from CVS
        pkg_path = os.path.join(self.checkout_tmpdir, self.package)
        if not os.path.exists(os.path.join(pkg_path, "common")):
            os.chdir(pkg_path)
            cmd = '%s co common' % config_opts['cvs_cmd']
            debugprint("%d: Running %s" % (self.uid, cmd))
            s, o = commands.getstatusoutput(cmd)
            os.chdir(self.checkout_tmpdir)
            if s != 0:
                subj = 'Prep Error: %s on %s' % (self.cvs_tag, self.target)
                # BUGFIX: the format string had a single %s but was given a
                # 3-tuple, raising TypeError instead of reporting the error
                msg = "could not check out common directory for %s on %s - output was:\n %s" % (self.cvs_tag, self.target, o)
                self.email_result(resultstring=msg, subject=subj)
                self._set_cur_stage('finished')
                self.failed = True
                shutil.rmtree(self.checkout_tmpdir, True)
                return

    def _make_srpm(self):
        """ Run 'make srpm' in the checked-out package directory and record
            the path of the SRPM it wrote in self.srpm_path.  On failure:
            emails the result, marks the job failed/finished, and removes
            the checkout directory.
        """
        self._set_cur_stage('make_srpm')
        self.srpm_path = None
        srpm_dir = os.path.join(self.checkout_tmpdir, self.package, self.target)
        if not os.path.exists(srpm_dir):
            subj = 'Prep Error: %s on %s' % (self.cvs_tag, self.target)
            msg = "could not find path %s for %s." % (srpm_dir, self.cvs_tag)
            self.email_result(resultstring=msg, subject=subj)
            self._set_cur_stage('finished')
            self.failed = True
            shutil.rmtree(self.checkout_tmpdir, True)
            return

        os.chdir(srpm_dir)

        cmd = '%s srpm' % config_opts['make_cmd']
        debugprint("%d: Running %s in %s" % (self.uid, cmd, srpm_dir))
        s, o = commands.getstatusoutput(cmd)
        if s != 0:
            subj = 'Prep Error: %s on %s' % (self.cvs_tag, self.target)
            msg = "could not make srpm for %s - output was:\n %s" % (self.cvs_tag, o)
            self.email_result(resultstring=msg, subject=subj)
            self._set_cur_stage('finished')
            self.failed = True
            shutil.rmtree(self.checkout_tmpdir, True)
            return

        # Find the "Wrote: <path>" line in the make output
        srpmpath = None
        for line in o.split("\n"):
            if line.startswith("Wrote:"):
                # BUGFIX: split only on the first ':' so paths that contain
                # a colon no longer raise ValueError on tuple unpacking.
                # Also dropped a no-op line.replace() whose result was discarded.
                srpmpath = line.split(':', 1)[1].strip()
                break
        if not srpmpath:
            subj = 'Prep Error: %s on %s' % (self.cvs_tag, self.target)
            msg = "could not find srpm for %s - output was:\n %s" % (self.cvs_tag, o)
            self.email_result(resultstring=msg, subject=subj)
            self._set_cur_stage('finished')
            self.failed = True
            shutil.rmtree(self.checkout_tmpdir, True)
            return
        self.srpm_path = srpmpath

    def _prep(self):
        """ Read package metadata from the SRPM, decide which arches to
            build, and stage the SRPM both in the work dir and where
            builders can fetch it over HTTP.
        """
        self._set_cur_stage('prep')

        # In SRPM-only mode, cvs_tag is path to the SRPM to build
        if self.no_cvs:
            self.srpm_path = self.cvs_tag

        # NOTE(review): relies on the bare 'import rpmUtils' above exposing
        # the .transaction and .miscutils submodules -- verify
        ts = rpmUtils.transaction.initReadOnlyTransaction()
        hdr = rpmUtils.miscutils.hdrFromPackage(ts, self.srpm_path)
        self.name = hdr['name']
        self.ver = hdr['version']
        self.release = hdr['release']
        self.archjobs = self.arch_handling(hdr)
        del hdr
        del ts

        # No arches to build means the job fails immediately
        if len(self.archjobs) == 0:
            subj = 'Prep Error: %s on %s' % (self.cvs_tag, self.target)
            msg = "could not find any architectures to build for %s - output was:\n" % (self.cvs_tag)
            self.email_result(resultstring=msg, subject=subj)
            self.failed = True
            self._set_cur_stage('finished')
            return

        # Create per-arch staging directories for build results
        self.stage_dir = self._make_stage_dir(config_opts['server_work_dir'])
        for arch in self.archjobs.keys():
            thisdir = os.path.join(self.stage_dir, arch)
            if not os.path.exists(thisdir):
                os.makedirs(thisdir)

        # Copy the SRPM to the final package product dir
        srpm = os.path.basename(self.srpm_path)
        srpm_in_dir = os.path.join(self.stage_dir, srpm)
        if os.path.exists(srpm_in_dir):
            os.unlink(srpm_in_dir)
        shutil.copy(self.srpm_path, self.stage_dir)

        # Must also copy SRPM to where the build client can get it over HTTP
        http_pkg_path = self._make_stage_dir(http_dir)
        self.srpm_http_path = os.path.join(http_pkg_path, srpm)
        shutil.copy(self.srpm_path, self.srpm_http_path)
        self.srpm_path = srpm_in_dir

        # Remove CVS checkout and make_srpm dirs
        if not self.no_cvs:
            shutil.rmtree(self.checkout_tmpdir, ignore_errors=True)

        self._request_arch_jobs()

    def _request_one_arch_job(self, arch):
        """Ask the builder manager to start one arch build of this package.

        The SRPM URL handed to the builder points back at this server's
        SRPM fileserver on port 8886, using HTTPS when 'ssl_builders' is set.
        """
        # Construct the SRPM URL: path relative to the HTTP document root
        srpm_rel_path = self.srpm_http_path[len(http_dir):]
        if config_opts['ssl_builders'] == True:
            method = "https://"
        else:
            method = "http://"
        srpm_url = "%s%s:8886/%s" % (method, self.hostname, srpm_rel_path)
        self.bm.builder_manager.request_arch_job(self, self.target, arch, srpm_url)

    def _request_arch_jobs(self):
        # Queue requests for build jobs
        self._archjobs_lock.acquire()
        for arch in self.archjobs.keys():
            if self.archjobs[arch]:
                continue
            self._request_one_arch_job(arch)
        self._archjobs_lock.release()

    def add_arch_job(self, job):
        """ Called by the BuilderManager when it's started a new arch job for us """
        self._archjobs_lock.acquire()
        existing = self.archjobs[job.arch]
        if existing is not None:
            # Shouldn't happen: two builders assigned to the same arch
            log("%s (%s/%s): Already have archjob for this arch (%s).  New job UID is %s." % (self.uid, self.package, job.arch, existing.jobid, job.jobid))
        self.archjobs[job.arch] = job
        self._archjobs_lock.release()
        log("%s (%s/%s): Builder UID is %s" % (self.uid, self.package, job.arch, job.jobid))

    def remove_arch_job(self, job):
        """ Removes an arch job when its builder is no longer responding """
        self._archjobs_lock.acquire()
        # Log through log() like add_arch_job does, rather than a bare print
        log("%s (%s/%s): Builder disappeared.  Requeuing arch..." % (self.uid, self.package, job.arch))
        # Clear the slot and ask for a fresh builder for this arch
        self.archjobs[job.arch] = None
        self._request_one_arch_job(job.arch)
        self._archjobs_lock.release()

    def is_done(self):
        """Return True once the job has reached a terminal stage."""
        return self.curstage in ('needsign', 'failed', 'killed')

    def die(self, username):
        """Kill this build: stop all running arch jobs, mail the owner,
        and tell the BuildMaster the job is finished.

        username -- the user who requested the kill (recorded in the email)
        """
        self._set_cur_stage('killed')

        # Tear down any builder jobs that are still running, then forget them
        self._archjobs_lock.acquire()
        for archjob in self.archjobs.values():
            if archjob:
                archjob.die()
        self.archjobs = {}
        self._archjobs_lock.release()

        resultstring = "%s (%s): Build on target %s was killed by %s." % (self.uid, self.name, self.target, username)
        self.email_result(resultstring)
        self.bm.notify_job_done(self)

    def wake(self):
        # Kick the job thread out of its wait loop (see run())
        self._event.set()

    def run(self):
        """Job thread main loop: a simple state machine driven by curstage.

        Each pass advances the job one stage; stages that depend on outside
        events ('building', 'addtorepo') block on self._event until wake()
        is called.  The loop exits once is_done() reports a terminal stage.
        """
        while not self.is_done():
            stage = self.curstage
            block = False
            if stage == 'initialize':
                self._checkout()
            elif stage == 'checkout':
                self._make_srpm()
            elif stage == 'make_srpm':
                self._prep()
            elif stage in ('prep', 'building'):
                # _monitor() returns True when we should wait for builders
                block = self._monitor()
            elif stage == 'finished':
                self._cleanup()
            elif stage == 'cleanup':
                if self.failed:
                    self._failed()
                else:
                    self._add_to_repo()
            elif stage == 'addtorepo':
                # Waiting for the Repo thread's repo_add_callback()
                block = True
            elif stage == 'repodone':
                self._succeeded()

            if block:
                # Sleep until wake() sets the event, then clear it for reuse
                while not self._event.isSet():
                    self._event.wait()
                self._event.clear()

    def _monitor(self):
        self._set_cur_stage('building')

        # Count failed and completed jobs
        completed_jobs = 0
        for job in self.archjobs.values():
            if not job:
                continue
            if job.get_status() is 'done':
                if job.builder_failed():
                    self.failed = True
                else:
                    completed_jobs = completed_jobs + 1

        # If any jobs have failed, or if all jobs have completed successfully, advance
        if self.failed or (completed_jobs == len(self.archjobs)):
            self._set_cur_stage('finished')
            return False  # Don't want to wait

        return True

    def _cleanup(self):
        self._set_cur_stage('cleanup')
        if self.failed:
            # Kill remaining jobs on other arches
            for job in self.archjobs.values():
                if job:
                    job.die()

    def get_stage_dir(self):
        # Staging directory where per-arch build results are collected
        # (created in _prep)
        return self.stage_dir

    def _failed(self):
        self._set_cur_stage('failed')

        resultstring = """
   %s (%s): %s on %s failed to complete on one or more archs.
""" % (self.uid, self.package, self.cvs_tag, self.target)
        resultstring = resultstring + "\n"

        # Add each arch job and its result:
        for job in self.archjobs.values():
            if job and job.builder_status != 'killed':
                resultstring = resultstring + "     " + "%s - %s: %s" % (job.arch, job.jobid, job.builder_status) + "\n"

        resultstring = resultstring + "\n"
        self.email_result(resultstring)

        self.bm.notify_job_done(self)
        
    def _add_to_repo(self):
        """Build the list of RPMs to publish and ask the Repo thread to
        copy them; the Repo reads self.repofiles from this object when the
        copy actually happens, then calls repo_add_callback()."""
        self._set_cur_stage('addtorepo')

        # Map each staged .rpm file to its destination directory in the repo
        verrel = "%s-%s" % (self.ver, self.release)
        for job in self.archjobs.values():
            if not job:
                continue
            for fname in job.get_files():
                if not fname.endswith(".rpm"):
                    continue
                src_file = os.path.join(self.stage_dir, job.arch, fname)
                # Source RPMs land in an "SRPM" subdir, binaries per-arch
                if fname.endswith(".src.rpm"):
                    arch_subdir = "SRPM"
                else:
                    arch_subdir = job.arch
                dst_path = os.path.join(config_opts['repo_dir'], self.target, self.name, verrel, arch_subdir)
                self.repofiles[src_file] = dst_path

        self._event.clear()

        # Request the copy only if there is actually something to publish
        if len(self.repofiles):
            self.repo.request_copy(self)

    def repo_add_callback(self):
        # Called by the Repo thread once our files have been copied into
        # the repository; advance the stage and wake run() to finish up
        self._set_cur_stage('repodone')
        self.wake()

    def _succeeded(self):
        # Terminal success stage ('needsign'): mail the owner and let the
        # BuildMaster know this job is finished
        self._set_cur_stage('needsign')
        resultstring = "%s (%s): Build on target %s succeeded." % (self.uid, self.name, self.target)
        self.email_result(resultstring)
        self.bm.notify_job_done(self)

    def email_result(self, resultstring, subject=None):
        """Send 'resultstring' to self.username from config_opts['email_from'].

        subject -- optional subject line; when omitted, one is built from
                   the job uid, package name and target
        """
        msg = MIMEText(resultstring)
        if not subject:
            name = self.name
            if not name:
                # self.name isn't known until _prep() parses the SRPM header
                name = self.package
            subject = 'Build Result: %d - %s on %s' % (self.uid, name, self.target)
        msg['Subject'] = subject
        msg['From'] = config_opts['email_from']
        msg['To'] = self.username
        s = smtplib.SMTP()
        s.connect()
        s.sendmail(config_opts['email_from'], [self.username], msg.as_string())
        # quit() sends the SMTP QUIT command before closing; close() just
        # drops the socket and leaves the server with a half-open session
        s.quit()



--- NEW FILE main.py ---
#!/usr/bin/python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# copyright 2005 Duke University
# written by Seth Vidal


import sys
import os
from plague import AuthedXMLRPCServer
from plague import lighttpdManager
from plague import HTTPServer
import SimpleXMLRPCServer
from M2Crypto import threading as m2thread

sys.path.append('/usr/share/plague/server')

import User
import BuildMaster
import BuilderManager
from UserInterface import UserInterfaceSSLAuth
from UserInterface import UserInterfaceNoAuth


# Load in the config
execfile("/etc/plague/server/CONFIG.py")

use_lighttpd = True


class AuthenticatedSSLXMLRPCServer(AuthedXMLRPCServer.AuthedSSLXMLRPCServer):
    """
    SSL XMLRPC server that authenticates clients based on their certificate.
    """

    def __init__(self, address, certs):
        AuthedXMLRPCServer.AuthedSSLXMLRPCServer.__init__(self, address, self.auth_cb, certs)
        # Maps certificate email addresses to authenticated User objects
        self.authenticator = User.Authenticator()

    def auth_cb(self, request, client_address):
        """
        Authenticate the user and determine user's privs.
        TODO: pull user privs from a DB
        """
        subject = request.get_peer_cert().get_subject()

        # m2crypto 0.9 only supports 'Email', so fall back to that
        # when the 'emailAddress' attribute isn't available
        try:
            email = subject.emailAddress
        except AttributeError:
            email = subject.Email

        # Any failure to authenticate yields no user (access denied)
        try:
            return self.authenticator.new_authed_user(email, client_address)
        except Exception:
            return None


#################################################################

if __name__ == '__main__':
    if len(sys.argv) < 2:
        print "Usage:\n"
        print "   %s <hostname>\n" % sys.argv[0]
        sys.exit(1)

    hostname = sys.argv[1]

    # M2Crypto's threading support must be initialized before SSL
    # objects are used from multiple threads
    m2thread.init()

    builder_manager = BuilderManager.BuilderManager()

    # Create the BuildMaster thread
    bm = BuildMaster.BuildMaster(hostname, builder_manager)
    bm.start()

    # Create the BuildMaster XMLRPC server (the user-facing frontend)
    UI_PORT = 8887
    ui = None
    if config_opts['ssl_frontend'] == True:
        # SSL frontend: clients are authenticated by their certificates
        ui_certs = {}
        ui_certs['key_and_cert'] = config_opts['server_key_and_cert']
        ui_certs['ca_cert'] = config_opts['ca_cert']
        ui_certs['peer_ca_cert'] = config_opts['ui_ca_cert']
        ui = UserInterfaceSSLAuth(builder_manager, bm)
        bm_server = AuthenticatedSSLXMLRPCServer((hostname, UI_PORT), ui_certs)
    else:
        # Plain HTTP frontend: no client authentication at all
        ui = UserInterfaceNoAuth(builder_manager, bm)
        bm_server = AuthedXMLRPCServer.AuthedXMLRPCServer((hostname, UI_PORT))
    bm_server.register_instance(ui)

    # SRPM fileserver: serves SRPMs to builders over HTTP(S) on port 8886
    SRPM_SERVER_PORT = 8886
    http_dir = os.path.join(config_opts['server_work_dir'], "srpm_http_dir")
    if use_lighttpd:
        # Preferred path: hand file serving off to a managed lighttpd instance
        http_cnf_file = "/var/tmp/plague-server.conf"
        if config_opts['ssl_builders']:
            key_and_cert = config_opts['server_key_and_cert']
        else:
            key_and_cert = None
        srpm_server = lighttpdManager.lighttpdManager(http_cnf_file, hostname, SRPM_SERVER_PORT, http_dir, False, key_and_cert)
    else:
        # Fallback: built-in HTTP server, optionally with SSL certificates
        if config_opts['ssl_builders']:
            srpm_server_certs = {}
            srpm_server_certs['key_and_cert'] = config_opts['server_key_and_cert']
            srpm_server_certs['ca_cert'] = config_opts['ca_cert']
            srpm_server_certs['peer_ca_cert'] = config_opts['ca_cert']
            srpm_server = HTTPServer.HTTPServer((hostname, SRPM_SERVER_PORT), http_dir, srpm_server_certs)
        else:
            srpm_server = HTTPServer.HTTPServer((hostname, SRPM_SERVER_PORT), http_dir, None)
    srpm_server.start()

    print "Build Server accepting requests on %s:%d.\n" % (hostname, UI_PORT)
    try:
        # Blocks here serving frontend requests until interrupted
        bm_server.serve_forever()
    except KeyboardInterrupt:
        # Make sure the BuildMaster thread shuts down
        print "Shutting down..."
        bm.stop()
        srpm_server.stop()
        del bm

    print "Done."
    m2thread.cleanup()
    # os._exit() rather than sys.exit() so lingering non-daemon threads
    # cannot keep the process alive after shutdown
    os._exit(0)




Index: BuildMaster.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/BuildMaster.py,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -r1.9 -r1.10
--- BuildMaster.py	29 Jun 2005 05:23:00 -0000	1.9
+++ BuildMaster.py	5 Jul 2005 21:08:03 -0000	1.10
@@ -1,4 +1,3 @@
-#!/usr/bin/python
 # This program is free software; you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
 # the Free Software Foundation; either version 2 of the License, or
@@ -13,15 +12,17 @@
 # along with this program; if not, write to the Free Software
 # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 # copyright 2005 Duke University
-# written by Seth Vidal
+#
+# Copyright 2005 Dan Williams <dcbw at redhat.com> and Red Hat, Inc.
 
 
 import time
-import BuildJob
+import PackageJob
 import sqlite
 import threading
 import os
 import Repo
+import copy
 
 
 # Load in the config
@@ -69,13 +70,13 @@
 
 
 class BuildMaster(threading.Thread):
-    def __init__(self, hostname, client_manager):
-        self.bcm = client_manager
+    def __init__(self, hostname, builder_manager):
+        self.builder_manager = builder_manager
         self.hostname = hostname
         self.should_stop = False
         self.repos = {}
         for target in config_opts['targets'].keys():
-            repo = Repo.Repo(target, client_manager)
+            repo = Repo.Repo(target, builder_manager)
             self.repos[target] = repo
             repo.start()
 
@@ -106,7 +107,6 @@
 
     def __del__(self):
         self.dbcx.close()
-        del self.bcm
 
     def stop(self):
         self.should_stop = True
@@ -142,7 +142,9 @@
 
     def queue_job_status_update(self, uid, status):
         self._status_updates_lock.acquire()
-        self._status_updates[uid] = status
+        lcl_uid = copy.copy(uid)
+        lcl_status = copy.copy(status)
+        self._status_updates[lcl_uid] = lcl_status
         self._status_updates_lock.release()
 
     def notify_job_done(self, job):
@@ -164,9 +166,6 @@
         self._done_queue = []
         self._done_queue_lock.release()
 
-    def getClientManager(self):
-        return self.bcm
-
     def _write_status_to_db(self, uid, status):
         try:
             self.curs.execute('UPDATE jobs SET status="%s" WHERE uid=%d' \
@@ -218,7 +217,7 @@
             # to make sure we pick the last result to get the correct one
             row = data[len(data) - 1]
             repo = self.repos[item['target']]
-            job = BuildJob.BuildJob(row['uid'], item['email'], item['package'],
+            job = PackageJob.PackageJob(row['uid'], item['email'], item['package'],
                     locator, repo, self, self.hostname)
 
             print "%s (%s): Starting tag '%s' on target '%s'" % (row['uid'], \
@@ -278,6 +277,9 @@
         if have_work:
             return True
 
+        if self.builder_manager.have_work():
+            return True
+
         return False
 
     def get_job(self, jobid):
@@ -295,8 +297,8 @@
             # Write update status for jobs to the database
             self._save_job_status()
 
-            # Update all build clients and known jobs
-            self.bcm.process()
+            if self.builder_manager.have_work():
+                self.builder_manager.process()
 
             # Clean up jobs that have finished
             self._process_finished_jobs()


Index: CONFIG.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/CONFIG.py,v
retrieving revision 1.14
retrieving revision 1.15
diff -u -r1.14 -r1.15
--- CONFIG.py	1 Jul 2005 11:51:33 -0000	1.14
+++ CONFIG.py	5 Jul 2005 21:08:03 -0000	1.15
@@ -16,7 +16,7 @@
 config_opts['ssl_frontend'] = True
 # ssl_buildclients: True = all communication between server & build client
 # be over an SSL connecction
-config_opts['ssl_buildclients'] = True
+config_opts['ssl_builders'] = False
 
 
 SERVER_BASE_DIR = "/etc/plague/server"
@@ -87,6 +87,6 @@
 
 
 # Builder Clients
-config_opts['builders'] = [ 'https://127.0.0.1:8888' ]
+config_opts['builders'] = [ 'http://127.0.0.1:8888' ]
 
 


Index: Makefile
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/Makefile,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- Makefile	28 Jun 2005 16:46:13 -0000	1.4
+++ Makefile	5 Jul 2005 21:08:03 -0000	1.5
@@ -10,9 +10,11 @@
 	rm -f *.pyc *.pyo *~ *.bak
 
 FILES = \
-	BuildJob.py \
+    ArchJob.py \
+    BuilderManager.py \
+    Builder.py \
 	BuildMaster.py \
-	client_manager.py \
+    PackageJob.py \
 	Repo.py \
 	UserInterface.py \
 	User.py
@@ -22,7 +24,7 @@
 
 install:
 	$(MKDIR) -p $(DESTDIR)/$(BINDIR)
-	$(INSTALL) -m 755 buildserver.py $(DESTDIR)/$(BINDIR)/$(PKGNAME)-server
+	$(INSTALL) -m 755 main.py $(DESTDIR)/$(BINDIR)/$(PKGNAME)-server
 	$(MKDIR) -p $(OTHERINSTDIR)
 	for file in $(FILES); do $(INSTALL) -m 644 $$file $(OTHERINSTDIR)/$$file; done
 	$(MKDIR) -p $(CONFIGDIR)


Index: Repo.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/Repo.py,v
retrieving revision 1.4
retrieving revision 1.5
diff -u -r1.4 -r1.5
--- Repo.py	29 Jun 2005 18:06:12 -0000	1.4
+++ Repo.py	5 Jul 2005 21:08:03 -0000	1.5
@@ -28,8 +28,8 @@
 class Repo(threading.Thread):
     """ Represents an on-disk repository of RPMs and manages updates to the repo. """
 
-    def __init__(self, target, client_manager):
-        self._bcm = client_manager
+    def __init__(self, target, builder_manager):
+        self._builder_manager = builder_manager
         self._target = target
         self._repodir = os.path.join(config_opts['repo_dir'], target)
         if not os.path.exists(self._repodir):
@@ -79,7 +79,7 @@
             # Notify the build job that we've copied its files to the repo
             buildjob.repo_add_callback()
 
-        s, o = commands.getstatusoutput('/usr/bin/createrepo -q %s' % self._repodir)
+        (s, o) = commands.getstatusoutput('/usr/bin/createrepo -q %s' % self._repodir)
         if s != 0:
             print "createrepo failed with exit status %d!" % s
 
@@ -92,7 +92,7 @@
             # 2 until all clients have finished their 'prep' state.  Only then do we
             # copy RPMs to the repo and run createrepo on it.
 
-            prepping_clients = self._bcm.any_prepping_clients()
+            prepping_builders = self._builder_manager.any_prepping_builders()
 
             self._lock.acquire()
 
@@ -105,7 +105,7 @@
 
             # Enter lock level 2 if there are no build clients in the
             # 'prep' state and we are already at lock level 1
-            if not prepping_clients and self._lock_count == 1:
+            if not prepping_builders and self._lock_count == 1:
                 self._lock_count = 2
 
             self._lock.release()


Index: UserInterface.py
===================================================================
RCS file: /cvs/fedora/extras-buildsys/server/UserInterface.py,v
retrieving revision 1.14
retrieving revision 1.15
diff -u -r1.14 -r1.15
--- UserInterface.py	29 Jun 2005 21:32:33 -0000	1.14
+++ UserInterface.py	5 Jul 2005 21:08:03 -0000	1.15
@@ -23,7 +23,7 @@
 import os
 import copy
 import BuildMaster
-import BuildJob
+import PackageJob
 
 # Load in the config
 execfile("/etc/plague/server/CONFIG.py")
@@ -50,8 +50,8 @@
     Base UserInterface class. NO AUTHENTICATION.  Subclass this to provide some.
     """
 
-    def __init__(self, client_manager, build_master):
-        self._cm = client_manager
+    def __init__(self, builder_manager, build_master):
+        self._builder_manager = builder_manager
         self._bm = build_master
 
 
@@ -92,6 +92,12 @@
                     "%s: try using a shorter path to the SRPM (< 255 chars)." % (srpm_file, target))
             return (-1, "Pathname to SRPM is limited to 255 characters.")
 
+        srpm_file = os.path.abspath(srpm_file)
+        if not srpm_file or not os.access(srpm_file, os.R_OK):
+            email_result(email, srpm_file, "Error setting up build for %s on "\
+                    "%s: The SRPM does not exist, or is not accessible.  Remember to use absolute paths." % (srpm_file, target))
+            return (-1, "SRPM does not exist or is not accessible, remember to use absolute paths.")
+
         print "Request to enqueue '%s' file '%s' for target '%s' (user '%s')" \
                 % (package, srpm_file, target, email)
         targets = config_opts['targets']
@@ -131,7 +137,7 @@
 
         if args_dict.has_key('status') and args_dict['status']:
             status = args_dict['status']
-            if BuildJob.is_build_job_stage_valid(status):
+            if PackageJob.is_build_job_stage_valid(status):
                 if first:
                     search = search +  " WHERE "
                     first = False
@@ -164,16 +170,16 @@
         return (ret, msg, job_list)
 
 
-    def update_clients(self):
+    def update_builders(self):
         execfile("/etc/plague/server/CONFIG.py")
-        client_list = self._cm.update_clients()
-        return (0, "Success.", client_list)
+        builder_list = self._builder_manager.update_builders()
+        return (0, "Success.", builder_list)
 
 
-    def list_clients(self):
+    def list_builders(self):
         execfile("/etc/plague/server/CONFIG.py")
-        client_list = self._cm.list_clients()
-        return (0, "Success.", client_list)
+        builder_list = self._builder_manager.list_builders()
+        return (0, "Success.", builder_list)
 
 
 class UserInterfaceSSLAuth(UserInterface):
@@ -209,15 +215,15 @@
         return UserInterface.list_jobs(self, args_dict)
 
 
-    def update_clients(self, user):
+    def update_builders(self, user):
         if not user.server_admin:
             return (-1, "Insufficient privileges.")
-        return UserInterface.update_clients(self)
+        return UserInterface.update_builders(self)
 
-    def list_clients(self, user):
+    def list_builders(self, user):
         if not user.server_admin:
             return (-1, "Insufficient privileges.")
-        return UserInterface.list_clients(self)
+        return UserInterface.list_builders(self)
 
 
 class UserInterfaceNoAuth(UserInterface):


--- BuildJob.py DELETED ---


--- buildserver.py DELETED ---


--- client_manager.py DELETED ---




More information about the fedora-extras-commits mailing list