def run(self):
    """Thread main loop: poll the source repository and create snapshot
    tarballs for every configured package until cancelled.

    Reads (must be set on the instance before the thread starts):
      self.pack_objs         -- dict of package name -> package object
      self.src_repo          -- repository accessor (latest_tree_revision /
                                latest_path_revision)
      self.distfiles         -- archive of already-built tarballs
      self.sequential        -- if true, build every recent checkin, not
                                just the newest one
      self.num_sequential    -- how many trailing revisions to consider
                                when building sequentially
      self.max_poll_interval -- target seconds per poll cycle
      self.cancelled()       -- cooperative shutdown flag
    """
    tarball_log.log("Tarball creator starting...\n")
    while not self.cancelled():
        start_time = utils.get_time()
        # routinely check for updates (sleep every so often)
        for pack_name, pack_obj in self.pack_objs.iteritems():
            # We are reloading pack_objs, but this for loop won't notice
            # it until we enter it again
            if self.cancelled():
                # 'continue' (not 'break') so the loop drains quickly
                # without doing repo work, then the while exits
                continue
            # get latest version from the tree
            latest_tree_rev = self.src_repo.latest_tree_revision()
            # print "Latest tree rev: %d (%s)" % (latest_tree_rev, pack_name)
            if not latest_tree_rev:
                tarball_log.log("Error getting latest tree rev, trying later... (%s)\n" % pack_name)
                # Restart for loop over...
                break
            # Only do for the last couple of commits, rather than constantly
            # updating a base revision
            if latest_tree_rev <= self.num_sequential:
                starting_rev = 1
            else:
                starting_rev = latest_tree_rev - self.num_sequential
            # If we're not building each and every checkin, only build the latest
            if not self.sequential:
                starting_rev = latest_tree_rev
            # Pretty much do every commit (for binary search on regressions)
            # (should be adjustable)
            # The + 1 is so that the latest tree revision will be checked
            # (range func does not include the last number in the sequence)
            for i in range(starting_rev, latest_tree_rev + 1):
                latest_for_package = self.src_repo.latest_path_revision(pack_obj.info["HEAD_PATH"], revision=i)
                if not latest_for_package:
                    tarball_log.log("Error getting revision %d, trying later... (%s)\n" % (i, pack_name))
                    # Skip to next pack...
                    break
                # Only build if this exact snapshot is not already archived
                if not self.distfiles.contains("HEAD", pack_name, str(latest_for_package)) and not self.cancelled():
                    command = "cd %s; ./mktarball --snapshot %s %d" % (
                        config.packaging_dir,
                        pack_name,
                        latest_for_package,
                    )
                    tarball_log.log("Executing: %s\n" % (command))
                    # TODO: the system needs to be smarter about reinstalling
                    # the same rpms over and over...
                    # This will show console output, but not write to the log
                    # Log will be for brief info, and the console will watch
                    # what's currently going on
                    # (For some reason my signal gets ignored if I'm using
                    # os.system... seems to work with popen)
                    (code, output) = utils.launch_process(command, print_output=0)
                    tarball_log.log("Exit code: %d (%s)\n" % (code, pack_name))
                    # handle jail busy errors (exit code of 2)
                    if code == 2:
                        tarball_log.log("Jail busy, retrying later... (%s)\n" % pack_name)
                    # handle svn timeouts
                    elif code == utils.KILLED_EXIT_CODE:
                        tarball_log.log("svn commands killed, retrying later... (%s)\n" % pack_name)
                    # Handle failed tarballs...
                    elif code:
                        tarball_log.log("Tarball creation failed...(%s)\n" % pack_name)
                        # Send out the log with the tarball, or at least a link... ?
                        link = "http://mono.ximian.com/monobuild/tarball_logs/HEAD/%s/%d.log" % (
                            pack_name,
                            latest_for_package,
                        )
                        utils.send_mail(
                            "*****@*****.**",
                            "*****@*****.**",
                            "mktarball failed (%s %d)" % (pack_name, latest_for_package),
                            "mktarball has failed for package %s revision %d\n\n%s" % (pack_name, latest_for_package, link),
                        )
        # time_duration_asc presumably returns minutes; * 60 converts to
        # seconds -- TODO confirm against utils
        time_duration = utils.time_duration_asc(start_time, utils.get_time()) * 60
        # Only sleep if this loop was shorter than max_poll_interval
        # and if we do sleep, discount the time_duration
        if not self.cancelled() and time_duration < self.max_poll_interval:
            # tarball_log.log("Sleeping for %d seconds...\n" % (self.max_poll_interval - time_duration) )
            time.sleep(self.max_poll_interval - time_duration)
    # Exiting because we've been cancelled
    tarball_log.log("Tarball creator shutting down...\n")
def run(self):
    """Thread main loop: poll the source repository and build snapshot
    tarballs for each configured package until cancelled.

    Instance attributes read: pack_objs (name -> package object),
    src_repo, distfiles, sequential, num_sequential, max_poll_interval,
    and the cancelled() shutdown flag.
    """
    tarball_log.log("Tarball creator starting...\n")
    while not self.cancelled():
        start_time = utils.get_time()
        # routinely check for updates (sleep every so often)
        for pack_name, pack_obj in self.pack_objs.iteritems():
            # We are reloading pack_objs, but this for loop won't notice
            # it until we enter it again
            if self.cancelled():
                # drain the loop without doing repo work; while exits next
                continue
            # get latest version from the tree
            latest_tree_rev = self.src_repo.latest_tree_revision()
            #print "Latest tree rev: %d (%s)" % (latest_tree_rev, pack_name)
            if not latest_tree_rev:
                tarball_log.log(
                    "Error getting latest tree rev, trying later... (%s)\n" % pack_name)
                # Restart for loop over...
                break
            # Only do for the last couple of commits, rather than constantly
            # updating a base revision
            if latest_tree_rev <= self.num_sequential:
                starting_rev = 1
            else:
                starting_rev = latest_tree_rev - self.num_sequential
            # If we're not building each and every checkin, only build the latest
            if not self.sequential:
                starting_rev = latest_tree_rev
            # Pretty much do every commit (for binary search on regressions)
            # (should be adjustable)
            # The + 1 is so that the latest tree revision will be checked
            # (range func does not include the last number in the sequence)
            for i in range(starting_rev, latest_tree_rev + 1):
                latest_for_package = self.src_repo.latest_path_revision(
                    pack_obj.info['HEAD_PATH'], revision=i)
                if not latest_for_package:
                    tarball_log.log(
                        "Error getting revision %d, trying later... (%s)\n" % (i, pack_name))
                    # Skip to next pack...
                    break
                # Only build if this exact snapshot is not already archived
                if not self.distfiles.contains(
                        'HEAD', pack_name, str(latest_for_package)) and not self.cancelled():
                    command = "cd %s; ./mktarball --snapshot %s %d" % (
                        config.packaging_dir, pack_name, latest_for_package)
                    tarball_log.log("Executing: %s\n" % (command))
                    # TODO: the system needs to be smarter about reinstalling
                    # the same rpms over and over...
                    # This will show console output, but not write to the log
                    # Log will be for brief info, and the console will watch
                    # what's currently going on
                    # (For some reason my signal gets ignored if I'm using
                    # os.system... seems to work with popen)
                    (code, output) = utils.launch_process(command, print_output=0)
                    tarball_log.log("Exit code: %d (%s)\n" % (code, pack_name))
                    # handle jail busy errors (exit code of 2)
                    if code == 2:
                        tarball_log.log(
                            "Jail busy, retrying later... (%s)\n" % pack_name)
                    # handle svn timeouts
                    elif code == utils.KILLED_EXIT_CODE:
                        tarball_log.log(
                            "svn commands killed, retrying later... (%s)\n" % pack_name)
                    # Handle failed tarballs...
                    elif code:
                        tarball_log.log(
                            "Tarball creation failed...(%s)\n" % pack_name)
                        # Send out the log with the tarball, or at least a link... ?
                        link = "http://mono.ximian.com/monobuild/tarball_logs/HEAD/%s/%d.log" % (
                            pack_name, latest_for_package)
                        utils.send_mail(
                            '*****@*****.**', '*****@*****.**',
                            'mktarball failed (%s %d)' % (pack_name, latest_for_package),
                            "mktarball has failed for package %s revision %d\n\n%s" % (pack_name, latest_for_package, link))
        # time_duration_asc presumably returns minutes; * 60 converts to
        # seconds -- TODO confirm against utils
        time_duration = utils.time_duration_asc(start_time, utils.get_time()) * 60
        # Only sleep if this loop was shorter than max_poll_interval
        # and if we do sleep, discount the time_duration
        if not self.cancelled() and time_duration < self.max_poll_interval:
            #tarball_log.log("Sleeping for %d seconds...\n" % (self.max_poll_interval - time_duration) )
            time.sleep(self.max_poll_interval - time_duration)
    # Exiting because we've been cancelled
    tarball_log.log("Tarball creator shutting down...\n")
def run(self):
    """Thread main loop: schedule package builds for one distro jail
    until cancelled.

    Each cycle: compute the packages buildable on this distro, find the
    newest HEAD tarball for each, and launch ./build for any
    (distro, package, version) combination with no existing build record.

    Instance attributes read: self.distro and the cancelled() shutdown
    flag; everything else comes from module-level config/helpers.
    """
    distro = self.distro
    scheduler_log.log("%s:\tStarting scheduler\n" % (distro))
    while not self.cancelled():
        # Select the packages that can be built on this platform
        packages_to_build = []
        for pack_def in config.sd_latest_build_packages:
            pack_obj = packaging.package("", pack_def)
            if pack_obj.valid_build_platform(distro):
                packages_to_build.append(pack_def)
        num_started_builds = 0
        start_time = utils.get_time()
        # Build each package for this jail
        for package_name in packages_to_build:
            # Skip builds so we can exit
            if self.cancelled():
                continue
            # Check to see what the latest tarball is
            # The src_file_repo class is not threadsafe, so provide a mutex here
            tarball_lock.acquire()
            try:
                tarball_filename = tarballs.get_latest_tarball("HEAD", package_name)
            # Narrowed from a bare 'except:' (which also swallowed
            # SystemExit/KeyboardInterrupt); catch this in case the
            # filename is being edited by hand
            except Exception:
                tarball_filename = ""
            finally:
                # Always release, even on an unexpected exception,
                # so the lock can never be leaked
                tarball_lock.release()
            if not tarball_filename:
                # scheduler_log.log("%s:\t*** Error getting latest tarball (%s) (Probably doesn't exist...)!!!\n" % (distro, package_name) )
                pass
            else:
                # print "Latest tarball: " + tarball_filename
                # Get version
                version, ext = version_re.search(tarball_filename).groups()
                info = datastore.build_info("HEAD", distro, package_name, version)
                # Build if the build doesn't exist already
                if not info.exists:
                    command = "cd %s; ./build --suppress_output %s %s %s" % (
                        config.packaging_dir,
                        distro,
                        package_name,
                        version,
                    )
                    scheduler_log.log("%s:\t%s\n" % (distro, command))
                    num_started_builds += 1
                    # TODO: hmm... is this not blocking? Seems this code continues before being able to run tests?
                    (code, output) = utils.launch_process(command, print_output=0)
                    # Testing...
                    # code = 2
                    # Is the jail busy? if so, just repeat this loop (and
                    # select a new tarball if a newer one exists)
                    # Hmm... this really shouldn't happen, as much at least
                    if code == 2:
                        # scheduler_log.log("%s:\tJail is busy or offline... will retry again (%s)\n" % (distro, package_name) )
                        num_started_builds -= 1
                    # 'elif' (was a second 'if'): exit codes 2 and 5 are
                    # mutually exclusive; this matches the elif chain used
                    # by the tarball creator's exit-code handling
                    elif code == 5:
                        scheduler_log.log(
                            "%s:\tbuild info is missing, but packages exist... ?? will retry again (%s)\n"
                            % (distro, package_name)
                        )
                        num_started_builds -= 1
                else:
                    # scheduler_log.log("%s:\tSkipping existing build (%s, %s)\n" % (distro, package_name, version) )
                    pass
        # time_duration_asc presumably returns minutes; * 60 converts to
        # seconds -- TODO confirm against utils
        time_duration = utils.time_duration_asc(start_time, utils.get_time()) * 60
        # Only sleep if nothing was started and the cycle was shorter than
        # the wakeup interval; discount the time already spent
        if num_started_builds == 0 and time_duration < config.sd_wakeup_interval:
            # scheduler_log.log("%s:\tSleeping %d seconds...\n" % (distro, config.sd_wakeup_interval - time_duration) )
            time.sleep(config.sd_wakeup_interval - time_duration)
    # Exiting because we've been removed from the configuration
    scheduler_log.log("%s:\tExiting upon user request...\n" % distro)
def run(self):
    """Thread main loop: schedule package builds for one distro jail
    until cancelled.

    Each cycle: compute the packages buildable on this distro, find the
    newest HEAD tarball for each, and launch ./build for any
    (distro, package, version) combination with no existing build record.

    Instance attributes read: self.distro and the cancelled() shutdown
    flag; everything else comes from module-level config/helpers.
    """
    distro = self.distro
    scheduler_log.log("%s:\tStarting scheduler\n" % (distro))
    while not self.cancelled():
        # Select the packages that can be built on this platform
        packages_to_build = []
        for pack_def in config.sd_latest_build_packages:
            pack_obj = packaging.package("", pack_def)
            if pack_obj.valid_build_platform(distro):
                packages_to_build.append(pack_def)
        num_started_builds = 0
        start_time = utils.get_time()
        # Build each package for this jail
        for package_name in packages_to_build:
            # Skip builds so we can exit
            if self.cancelled():
                continue
            # Check to see what the latest tarball is
            # The src_file_repo class is not threadsafe, so provide a mutex here
            tarball_lock.acquire()
            try:
                tarball_filename = tarballs.get_latest_tarball(
                    "HEAD", package_name)
            # Narrowed from a bare 'except:' (which also swallowed
            # SystemExit/KeyboardInterrupt); catch this in case the
            # filename is being edited by hand
            except Exception:
                tarball_filename = ""
            finally:
                # Always release, even on an unexpected exception,
                # so the lock can never be leaked
                tarball_lock.release()
            if not tarball_filename:
                #scheduler_log.log("%s:\t*** Error getting latest tarball (%s) (Probably doesn't exist...)!!!\n" % (distro, package_name) )
                pass
            else:
                #print "Latest tarball: " + tarball_filename
                # Get version
                version, ext = version_re.search(tarball_filename).groups()
                info = datastore.build_info("HEAD", distro, package_name, version)
                # Build if the build doesn't exist already
                if not info.exists:
                    command = "cd %s; ./build --suppress_output %s %s %s" % (
                        config.packaging_dir, distro, package_name, version)
                    scheduler_log.log("%s:\t%s\n" % (distro, command))
                    num_started_builds += 1
                    # TODO: hmm... is this not blocking? Seems this code continues before being able to run tests?
                    (code, output) = utils.launch_process(command, print_output=0)
                    # Testing...
                    #code = 2
                    # Is the jail busy? if so, just repeat this loop (and
                    # select a new tarball if a newer one exists)
                    # Hmm... this really shouldn't happen, as much at least
                    if code == 2:
                        #scheduler_log.log("%s:\tJail is busy or offline... will retry again (%s)\n" % (distro, package_name) )
                        num_started_builds -= 1
                    # 'elif' (was a second 'if'): exit codes 2 and 5 are
                    # mutually exclusive; this matches the elif chain used
                    # by the tarball creator's exit-code handling
                    elif code == 5:
                        scheduler_log.log(
                            "%s:\tbuild info is missing, but packages exist... ?? will retry again (%s)\n"
                            % (distro, package_name))
                        num_started_builds -= 1
                else:
                    #scheduler_log.log("%s:\tSkipping existing build (%s, %s)\n" % (distro, package_name, version) )
                    pass
        # time_duration_asc presumably returns minutes; * 60 converts to
        # seconds -- TODO confirm against utils
        time_duration = utils.time_duration_asc(start_time, utils.get_time()) * 60
        # Only sleep if nothing was started and the cycle was shorter than
        # the wakeup interval; discount the time already spent
        if num_started_builds == 0 and time_duration < config.sd_wakeup_interval:
            #scheduler_log.log("%s:\tSleeping %d seconds...\n" % (distro, config.sd_wakeup_interval - time_duration) )
            time.sleep(config.sd_wakeup_interval - time_duration)
    # Exiting because we've been removed from the configuration
    scheduler_log.log("%s:\tExiting upon user request...\n" % distro)