def buildIndex(self):
    """Generate the PiSi package index for the binary repository.

    Returns:
        0 on success, 1 if another build already holds the build lock.
    """
    try:
        # timeout=0 -> fail immediately instead of waiting for the lock
        self.locks["build"].lock(timeout=0)
    except Exception:
        # Lock is held by a running build; report "busy" to the client.
        return 1

    try:
        logger.raw()
        logger.info(_("Generating PiSi Index..."))

        current = os.getcwd()
        os.chdir(config.binaryPath)
        try:
            os.system("/usr/bin/pisi index %s %s --skip-signing --skip-sources" % (config.localPspecRepo, config.binaryPath))
            logger.info(_("PiSi Index generated..."))

            #FIXME: will be enabled after some internal tests
            #os.system("rsync -avze ssh --delete . pisi.pardus.org.tr:/var/www/paketler.uludag.org.tr/htdocs/pardus-1.1/")

            # Check packages containing binaries and libraries broken by any package update
            os.system("/usr/bin/revdep-rebuild --force")
            # FIXME: if there is any broken package, mail /root/.revdep-rebuild.4_names file
        finally:
            # Always restore the original working directory, even on error.
            os.chdir(current)

        # FIXME: handle indexing errors
        return 0
    finally:
        # Release the lock even if indexing raised; the original leaked it.
        self.locks["build"].unlock()
def show_logo():
    """Print the logo header and the current quote to the terminal.

    On terminals at least 120 columns wide the quote is drawn inside a
    border; on narrower terminals (or when the width cannot be
    determined) it is printed plain.
    """
    logger.raw('', terminal=True)
    # print nice header thing :)
    if config.get('general.display_header', True):
        logoheader.header("")

    # os.get_terminal_size() raises OSError when stdout is not a TTY
    # (e.g. output piped to a file); treat that as a narrow terminal
    # instead of crashing.
    try:
        columns = os.get_terminal_size().columns
    except OSError:
        columns = 0

    # Append the author attribution only when the quote has one.
    if QUOTE[1]:
        text = QUOTE[0] + '\n -' + QUOTE[1]
    else:
        text = QUOTE[0]

    if columns >= 120:
        # Wide terminal: draw a border around the quote.
        logger.info("\u001b[33m\033[F" + bordered(text), terminal=True)
    else:
        logger.info("\u001b[33m\033[F" + text, terminal=True)
def buildPackages(self):
    """Compile every source package in the work queue and install the results.

    Return values are interpreted by the client:
        0: Successful
        1: Buildfarm is busy (build lock already held)
        2: Empty work queue
        3: Finished with errors (wait queue is non-empty)
    """
    try:
        # timeout=0 -> fail immediately if another build is running
        self.locks["build"].lock(timeout=0)
    except Exception:
        return 1

    sys.excepthook = self.__handle_exception__
    self.__checkQueues__()

    queue = shallowCopy(self.getWorkQueue())
    if not queue:
        logger.info(_("Work queue is empty..."))
        self.locks["build"].unlock()
        return 2

    logger.raw(_("QUEUE"))
    logger.info(_("Work Queue: %s") % (queue))
    sortedQueue = sorted(queue)
    # mailer.info(_("I'm starting to compile following packages:\n\n%s") % "\n".join(sortedQueue))
    logger.raw()

    # enumerate() replaces the original O(n^2) queue.index(pspec) lookup.
    for position, pspec in enumerate(queue, 1):
        # Package name is the directory containing the pspec file.
        packagename = os.path.basename(os.path.dirname(pspec))
        build_output = open(os.path.join(config.outputDir, "%s.log" % packagename), "w")

        logger.info(_("Compiling source %s (%d of %d)") % (packagename, position, len(queue)))
        logger.raw()

        # Initialise PiSi API; build output is captured into the log file.
        pisi = pisiinterface.PisiApi(config.workDir)
        pisi.init(stdout=build_output, stderr=build_output)

        try:
            try:
                # Builds pspec and returns a tuple containing 2 lists
                # of new and old package names,
                # e.g. newBinaryPackages=['package-1-2-1.pisi']
                (newBinaryPackages, oldBinaryPackages) = pisi.build(pspec)

                # Delta package generation
                deltaPackages = []
                if oldBinaryPackages:
                    deltaPackages = pisi.delta(oldBinaryPackages, newBinaryPackages)
                    packagesToInstall = deltaPackages[:]
                else:
                    packagesToInstall = list(newBinaryPackages)
                logger.info("packagesToInstall[]: %s" % packagesToInstall)
            except Exception as e:
                # Build Error
                # Transfers the pspec to the wait queue and logs the error
                self.transferToWaitQueue(pspec)
                errmsg = _("Error occured for '%s' in BUILD process:\n %s") % (pspec, e)
                logger.error(errmsg)
                # mailer.error(errmsg, pspec)
            else:
                try:
                    for p in packagesToInstall:
                        # For every new binary package generated, this snippet
                        # installs them on the system.
                        # TODO: Install delta packages here
                        logger.info(_("Installing: %s" % os.path.join(config.workDir, p)))
                        pisi.install(os.path.join(config.workDir, p))
                except Exception as e:
                    self.transferToWaitQueue(pspec)
                    errmsg = _("Error occured for '%s' in INSTALL process: %s") % (os.path.join(config.workDir, p), e)
                    logger.error(errmsg)
                    # mailer.error(errmsg, pspec)
                    # Drop the failed package from the result set and WorkDir.
                    newBinaryPackages.remove(p)
                    self.__removeBinaryPackageFromWorkDir__(p)
                else:
                    self.removeFromWorkQueue(pspec)
                    # Move the packages to the repository directories.
                    self.__movePackages__(newBinaryPackages, oldBinaryPackages, deltaPackages)
        finally:
            pisi.finalize()
            # The original leaked one log file handle per package.
            build_output.close()

    logger.raw(_("QUEUE"))
    logger.info(_("Wait Queue: %s") % (self.getWaitQueue()))
    logger.info(_("Work Queue: %s") % (self.getWorkQueue()))

    if self.getWaitQueue():
        # mailer.info(_("Queue finished with problems and those packages couldn't be compiled:\n\n%s\n") % "\n".join(self.getWaitQueue()))
        self.locks["build"].unlock()
        return 3
    else:
        self.locks["build"].unlock()
        # mailer.info(_("Queue finished without a problem!..."))
        return 0
def buildPackages(): qmgr = qmanager.QueueManager() qmgr.transferAllPackagesToWorkQueue() #move all packages to workQueue for compilation queue = copy.copy(qmgr.workQueue) # We'll set home variable just after buidl process to make revdep work right homeDir = os.environ['HOME'] packageList = [] deltaPackageList = [] isopackages = {} if len(queue) == 0: logger.info("Work Queue is empty...") sys.exit(1) # FIXME: Use fcntl.flock f = open("/var/run/buildfarm", 'w') f.close() # Unpickle and load ISO package list here if config.generateDelta is true. if config.generateDelta: try: isopackages = cPickle.Unpickler(open("data/packages.db", "rb")).load() except: logger.error("You have to create packages.db in data/ for delta generation.") os.unlink("/var/run/buildfarm") sys.exit(1) # Compiling current workQueue logger.raw("QUEUE") logger.info("*** All packages to be compiled : %s" % qmgr.workQueue) mailer.info("*** I'm starting to compile following packages (in the order below):\n\n%s" % "\n".join(queue)) logger.raw() for pspec in queue: packagename = getPackageNameFromPath(pspec) build_output = open(os.path.join(config.outputDir, "%s.txt" % packagename), "w") logger.raw() logger.info( "*** Compiling source %s (%d of %d)" % ( packagename, int(queue.index(pspec) + 1), len(queue) ) ) # This is here because farm captures the build output pisi = pisiinterface.PisiApi(stdout = build_output, stderr = build_output, outputDir = config.workDir) try: try: # Save current *.pisi file list in /var/pisi for further cleanup pisiList = glob.glob1(config.workDir, "*.pisi") # Build source package # Returned values can also contain -dbginfo- packages. 
(newBinaryPackages, oldBinaryPackages) = pisi.build(pspec) # Reduce to filenames newBinaryPackages = map(lambda x: os.path.basename(x), newBinaryPackages) oldBinaryPackages = map(lambda x: os.path.basename(x), oldBinaryPackages) # Filter debug packages because we don't need to build delta packages # for debug packages newDebugPackages = [p for p in newBinaryPackages if isdebug(p)] oldDebugPackages = [p for p in oldBinaryPackages if isdebug(p)] newBinaryPackages = list(set(newBinaryPackages).difference(newDebugPackages)) oldBinaryPackages = list(set(oldBinaryPackages).difference(oldDebugPackages)) newBinaryPackages.sort() oldBinaryPackages.sort() # Delta package generation using delta interface # If the return value is None, delta generation is disabled ret = pisi.delta(isopackages, oldBinaryPackages, newBinaryPackages) if ret: (deltasToInstall, deltaPackages, blacklistedPackages) = ret else: (deltasToInstall, deltaPackages, blacklistedPackages) = ([], [], []) # Reduce to filenames deltasToInstall = map(lambda x: os.path.basename(x), deltasToInstall) deltaPackages = map(lambda x: os.path.basename(x), deltaPackages) # If there exists incremental delta packages, install them. if deltasToInstall: packagesToInstall = deltasToInstall[:] if len(newBinaryPackages) > len(oldBinaryPackages): logger.info("*** There are new binaries, the package is probably splitted.") # There exists some first builds, install them because they don't have delta. 
packagesToInstall.extend(newBinaryPackages[len(oldBinaryPackages):]) logger.debug("(splitted package), packagesToInstall: %s" % packagesToInstall) else: # No delta, install full packages packagesToInstall = newBinaryPackages[:] if blacklistedPackages: # Merge the blacklisted packages and unify the list logger.debug("blacklistedPackages: %s" % blacklistedPackages) packagesToInstall.extend(blacklistedPackages) packagesToInstall = list(set(packagesToInstall)) logger.debug("packagesToInstall after merge: %s" % packagesToInstall) # Merge the package lists deltaPackages = deltaPackages + deltasToInstall logger.debug("All delta packages: %s" % deltaPackages) except Exception, e: # Transfer source package to wait queue in case of a build error qmgr.transferToWaitQueue(pspec) # If somehow some binary packages could have been build, they'll stay in /var/pisi # We should remove them here. for p in set(glob.glob1(config.workDir, "*.pisi")).difference(pisiList): logger.info("*** Removing stale package '%s' from '%s'" % (p, config.workDir)) removeBinaryPackageFromWorkDir(p) errmsg = "Error occured for '%s' in BUILD process:\n %s" % (pspec, e) logger.error(errmsg) mailer.error(errmsg, pspec) else: try: # If there exists multiple packages, reorder them in order to # correctly install interdependent packages. 
if len(packagesToInstall) > 1: # packagesToInstall doesn't contain full paths logger.info("*** Reordering packages to satisfy inner runtime dependencies...") packagesToInstall = pisi.getInstallOrder(packagesToInstall) logger.info("*** Installation order is: %s" % packagesToInstall) for p in packagesToInstall: # Install package logger.info("*** Installing: %s" % os.path.join(config.workDir, p)) pisi.install(os.path.join(config.workDir, p)) except Exception, e: # Transfer source package to wait queue in case of an install error qmgr.transferToWaitQueue(pspec) # FIXME: The packages before packagesToInstall[p] are already installed and therefore need to be # uninstalled because p can't be installed. if isdelta(p) and "no attribute 'old_files'" in str(e): logger.info("*** %s was probably not installed on the system and the delta installation failed." % getName(p)) errmsg = "Error occured for '%s' in INSTALL process: %s" % (os.path.join(config.workDir, p), e) logger.error(errmsg) mailer.error(errmsg, pspec) # The package should be removed from the related lists and WorkDir in case of an # installation problem for pa in deltaPackages+newBinaryPackages+newDebugPackages: if pa in deltasToInstall: deltasToInstall.remove(pa) elif pa in newBinaryPackages: newBinaryPackages.remove(pa) logger.info("*** (Cleanup) Removing %s from %s" % (pa, config.workDir)) removeBinaryPackageFromWorkDir(pa) else:
deltasToInstall.remove(pa) elif pa in newBinaryPackages: newBinaryPackages.remove(pa) logger.info("*** (Cleanup) Removing %s from %s" % (pa, config.workDir)) removeBinaryPackageFromWorkDir(pa) else: qmgr.removeFromWorkQueue(pspec) movePackages(newBinaryPackages, oldBinaryPackages, deltaPackages, newDebugPackages) packageList += (map(lambda x: os.path.basename(x), newBinaryPackages)) deltaPackageList += (map(lambda x: os.path.basename(x), deltaPackages)) finally: pisi.close() os.environ['HOME'] = homeDir logger.raw("QUEUE") logger.info("*** Wait Queue: %s" % (qmgr.waitQueue)) if qmgr.waitQueue: mailer.info("Queue finished with problems and those packages couldn't be compiled:\n\n%s\n\n\nNew binary packages are;\n\n%s\n\nnow in repository" % ("\n".join(qmgr.waitQueue), "\n".join(packageList))) else: mailer.info("Queue finished without a problem!...\n\n\nNew binary packages are:\n\n%s\n\n" "New delta packages are:\n\n%s\n\nnow in repository..." % ("\n".join(packageList), "\n".join(deltaPackageList))) logger.raw() logger.raw() # Save current path current = os.getcwd() # Set index paths paths = [config.binaryPath, config.testPath] if config.debugSupport:
def daemon():
    ''' Starts the Onionr communication daemon '''
    # Refuse to start at all without Tor; clean up run files so a stale
    # state is not left behind.
    if not hastor.has_tor():
        logger.error("Tor is not present in system path or Onionr directory", terminal=True)
        cleanup.delete_run_files()
        sys.exit(1)

    # remove runcheck if it exists
    if os.path.isfile(filepaths.run_check_file):
        logger.debug('Runcheck file found on daemon start, deleting in advance.')
        os.remove(filepaths.run_check_file)

    # Create shared objects
    shared_state = toomanyobjs.TooMany()

    # Start the client and public HTTP API servers as daemon threads so
    # they die with the main process.
    Thread(target=shared_state.get(apiservers.ClientAPI).start, daemon=True, name='client HTTP API').start()
    Thread(target=shared_state.get(apiservers.PublicAPI).start, daemon=True, name='public HTTP API').start()

    # Init run time tester (ensures Onionr is running right, for testing purposes)
    shared_state.get(runtests.OnionrRunTestManager)
    shared_state.get(serializeddata.SerializedData)

    shared_state.share_object()  # share the parent object to the threads

    # Poll until the public API server has written its bind host to disk;
    # the file is created by the PublicAPI thread started above.
    apiHost = ''
    while apiHost == '':
        try:
            with open(filepaths.public_API_host_file, 'r') as hostFile:
                apiHost = hostFile.read()
        except FileNotFoundError:
            pass
        time.sleep(0.5)

    logger.raw('', terminal=True)
    # print nice header thing :)
    if config.get('general.display_header', True):
        logoheader.header()
    version.version(verbosity=5, function=logger.info)
    logger.debug('Python version %s' % platform.python_version())

    if onionrvalues.DEVELOPMENT_MODE:
        logger.warn('Development mode enabled', timestamp=False, terminal=True)

    # Bring up Tor on the configured public port; abort (and tell the
    # client API to shut down) if it fails to bootstrap.
    net = NetController(config.get('client.public.port', 59497), apiServerIP=apiHost)
    shared_state.add(net)
    logger.info('Tor is starting...', terminal=True)
    if not net.startTor():
        localcommand.local_command('shutdown')
        cleanup.delete_run_files()
        sys.exit(1)

    # security_level 0 appears to be the only mode with a .onion service
    # — NOTE(review): confirm the semantics of general.security_level.
    if len(net.myID) > 0 and config.get('general.security_level', 1) == 0:
        logger.debug('Started .onion service: %s' % (logger.colors.underline + net.myID))
    else:
        logger.debug('.onion service disabled')

    logger.info('Using public key: %s' % (logger.colors.underline + getourkeypair.get_keypair()[0][:52]))

    try:
        time.sleep(1)
    except KeyboardInterrupt:
        pass

    events.event('init', threaded=False)
    events.event('daemon_start')

    # Blocks until the communicator exits; everything after this is teardown.
    communicator.startCommunicator(shared_state)

    localcommand.local_command('shutdown')
    net.killTor()
    try:
        time.sleep(5)  # Time to allow threads to finish, if not any "daemon" threads will be slaughtered http://docs.python.org/library/threading.html#threading.Thread.daemon
    except KeyboardInterrupt:
        pass
    cleanup.delete_run_files()
def buildPackages(self):
    """Compile every source package in the work queue and install the results.

    Return values are interpreted by the client:
        0: Successful
        1: Buildfarm is busy (build lock already held)
        2: Empty work queue
        3: Finished with errors (wait queue is non-empty)
    """
    try:
        # timeout=0 -> fail immediately if another build is running
        self.locks["build"].lock(timeout=0)
    except Exception:
        return 1

    sys.excepthook = self.__handle_exception__
    self.__checkQueues__()

    queue = shallowCopy(self.getWorkQueue())
    if not queue:
        logger.info(_("Work queue is empty..."))
        self.locks["build"].unlock()
        return 2

    logger.raw(_("QUEUE"))
    logger.info(_("Work Queue: %s") % (queue))
    sortedQueue = sorted(queue)
    # mailer.info(_("I'm starting to compile following packages:\n\n%s") % "\n".join(sortedQueue))
    logger.raw()

    # enumerate() replaces the original O(n^2) queue.index(pspec) lookup.
    for position, pspec in enumerate(queue, 1):
        # Package name is the directory containing the pspec file.
        packagename = os.path.basename(os.path.dirname(pspec))
        build_output = open(os.path.join(config.outputDir, "%s.log" % packagename), "w")

        logger.info(_("Compiling source %s (%d of %d)") % (packagename, position, len(queue)))
        logger.raw()

        # Initialise PiSi API; build output is captured into the log file.
        pisi = pisiinterface.PisiApi(config.workDir)
        pisi.init(stdout=build_output, stderr=build_output)

        try:
            try:
                # Builds pspec and returns a tuple containing 2 lists
                # of new and old package names,
                # e.g. newBinaryPackages=['package-1-2-1.pisi']
                (newBinaryPackages, oldBinaryPackages) = pisi.build(pspec)

                # Delta package generation
                deltaPackages = []
                if oldBinaryPackages:
                    deltaPackages = pisi.delta(oldBinaryPackages, newBinaryPackages)
                    packagesToInstall = deltaPackages[:]
                else:
                    packagesToInstall = list(newBinaryPackages)
                logger.info("packagesToInstall[]: %s" % packagesToInstall)
            except Exception as e:
                # Build Error
                # Transfers the pspec to the wait queue and logs the error
                self.transferToWaitQueue(pspec)
                errmsg = _("Error occured for '%s' in BUILD process:\n %s") % (pspec, e)
                logger.error(errmsg)
                # mailer.error(errmsg, pspec)
            else:
                try:
                    for p in packagesToInstall:
                        # For every new binary package generated, this snippet
                        # installs them on the system.
                        # TODO: Install delta packages here
                        logger.info(_("Installing: %s" % os.path.join(config.workDir, p)))
                        pisi.install(os.path.join(config.workDir, p))
                except Exception as e:
                    self.transferToWaitQueue(pspec)
                    errmsg = _("Error occured for '%s' in INSTALL process: %s") % (os.path.join(config.workDir, p), e)
                    logger.error(errmsg)
                    # mailer.error(errmsg, pspec)
                    # Drop the failed package from the result set and WorkDir.
                    newBinaryPackages.remove(p)
                    self.__removeBinaryPackageFromWorkDir__(p)
                else:
                    self.removeFromWorkQueue(pspec)
                    # Move the packages to the repository directories.
                    self.__movePackages__(newBinaryPackages, oldBinaryPackages, deltaPackages)
        finally:
            pisi.finalize()
            # The original leaked one log file handle per package.
            build_output.close()

    logger.raw(_("QUEUE"))
    logger.info(_("Wait Queue: %s") % (self.getWaitQueue()))
    logger.info(_("Work Queue: %s") % (self.getWorkQueue()))

    if self.getWaitQueue():
        # mailer.info(_("Queue finished with problems and those packages couldn't be compiled:\n\n%s\n") % "\n".join(self.getWaitQueue()))
        self.locks["build"].unlock()
        return 3
    else:
        self.locks["build"].unlock()
        # mailer.info(_("Queue finished without a problem!..."))
        return 0