def _install_app_from_ostree_ref(installation, source_repo_path, ostree_ref, remote_name):
    """Install one flatpak app into *installation* from a local OSTree repo.

    installation: Flatpak.Installation to install into.
    source_repo_path: path of the source repository (used in messages only).
    ostree_ref: full ref of the form "app/<app-id>/<arch>/<branch>".
    remote_name: name of the flatpak remote pointing at the source repo.

    Dies when the ref is malformed or the installation fails.
    """
    def _progress_callback(status, progress, estimating, user_data):
        # We rely on the progress status message from flatpak
        # here, no need to worry about the other parameters.
        print("Progress: {}".format(status), end='\r')

    # A valid app ref has four "/"-separated components; validate before
    # indexing.  Fix: the original error message referenced app_id before
    # it was assigned, raising NameError on malformed refs.
    target_ref_parts = ostree_ref.split('/')
    if len(target_ref_parts) < 4:
        die("Could not determine branch of {} app".format(ostree_ref))

    app_id = target_ref_parts[1]
    app_arch = target_ref_parts[2]
    app_branch = target_ref_parts[3]
    logging.debug("App ID: {} / Arch: {} / Branch: {}".format(app_id, app_arch, app_branch))

    try:
        print("Restoring {} app from {}...".format(app_id, source_repo_path))
        installation.install(remote_name,
                             Flatpak.RefKind.APP,
                             app_id, app_arch, app_branch,
                             _progress_callback,
                             None,
                             None)
        print("\nFinished restoring {}".format(app_id))
    except GLib.Error as e:
        # Fix: removed a stray quote from the original message ("app':").
        die("Could not restore {} app: {}".format(app_id, e.message))
Exemplo n.º 2
0
    def __init__( self, **args ):
        """Set up PyQt4 package options, the qmake spec and configure flags."""
        self.subinfo = subinfo()
        CMakePackageBase.__init__(self)
        # jom reports missing moc_translator.xxx
        self.subinfo.options.make.supportsMultijob = False

        # add support for other location based on pythonpath
        embeddedPythonDir = os.path.join(self.rootdir, 'emerge', 'python')
        usingEmbeddedPython = os.path.exists(embeddedPythonDir)

        # pick the qmake mkspec matching the active compiler
        if compiler.isMSVC2008():
            mkspecName = "win32-msvc2008"
        elif compiler.isMSVC2010():
            mkspecName = "win32-msvc2010"
        elif compiler.isMinGW():
            mkspecName = "win32-g++"
        else:
            utils.die("compiler %s not supported for PyQt4" % compiler.COMPILER)

        mkspecRoot = self.mergeDestinationDir() if usingEmbeddedPython else self.rootdir
        os.putenv("QMAKESPEC", os.path.join(mkspecRoot, "mkspecs", mkspecName))

        if usingEmbeddedPython:
            self.subinfo.options.merge.destinationPath = "emerge/python"
        self.subinfo.options.configure.defines = " --confirm-license --verbose"

        # Debug builds get the additional -u switch appended
        if self.buildType() == "Debug":
            self.subinfo.options.configure.defines += " -u"
Exemplo n.º 3
0
    def replicate_data(self):
        """Pull distros from the remote cobbler server into the local API.

        Optionally rsyncs the distribution trees first, then imports every
        remote distro whose kernel content is already present locally.
        Dies when the remote server cannot be contacted.
        """
        # distros
        self.logger.info("Copying Distros")
        # NOTE(review): local_distros appears unused below -- kept because
        # api.distros() may have caching side effects; confirm and remove.
        local_distros = self.api.distros()
        try:
            remote_distros = self.remote.get_distros()
        except Exception:
            # narrowed from a bare "except:" so SystemExit/KeyboardInterrupt
            # are no longer swallowed
            utils.die(self.logger, "Failed to contact remote server")

        if self.sync_all or self.sync_trees:
            self.logger.info("Rsyncing Distribution Trees")
            self.rsync_it(os.path.join(self.settings.webdir, "ks_mirror"), self.settings.webdir)

        for distro in remote_distros:
            self.logger.info("Importing remote distro %s." % distro["name"])
            # only import distros whose kernel is already on this machine
            if os.path.exists(distro["kernel"]):
                if self.should_add_or_replace(distro, "distros"):
                    new_distro = self.api.new_distro()
                    new_distro.from_datastruct(distro)
                    try:
                        self.api.add_distro(new_distro)
                        self.logger.info("Copied distro %s." % distro["name"])
                    except Exception:
                        # keep going: log the traceback and report the failure
                        utils.log_exc(self.logger)
                        self.logger.error("Failed to copy distro %s" % distro["name"])
                else:
                    # FIXME: force logic
                    self.logger.info("Not copying distro %s, sufficiently new mtime" % distro["name"])
            else:
                self.logger.error("Failed to copy distro %s, content not here yet." % distro["name"])
Exemplo n.º 4
0
def GetSmoothingFactor(points, hRefmethod, modifier, proportionAmount):
    """Compute the kernel smoothing factor h for a set of points.

    hRefmethod selects the reference bandwidth estimator ("worton",
    "tufto", "silverman" or "gaussian"; empty/None/"#" falls back to
    Worton).  modifier then optionally scales hRef ("proportion") or
    minimizes the LSCV/BCV2 score; anything else keeps hRef itself.
    Dies when no valid hRef could be computed.
    """
    hRef = 0
    # Fix: the fallback branch referenced an undefined name "hrefmethod"
    # (case mismatch with the parameter), raising NameError.  Lower-case
    # once, tolerating a None method.
    method = (hRefmethod or "").lower()
    if method == "worton":
        hRef = HrefWorton(points)
    elif method == "tufto":
        hRef = HrefTufto(points)
    elif method == "silverman":
        hRef = HrefSilverman(points)
    elif method == "gaussian":
        hRef = HrefGaussianApproximation(points)
    elif not method or method == "#":
        hRef = HrefWorton(points)

    if hRef == 0:
        utils.die("No valid hRef method was provided. Quitting.")

    if modifier.lower() == "proportion":
        h = proportionAmount * hRef
    elif modifier.lower() == "lscv":
        h = Minimize(LSCV, hRef, points)
    elif modifier.lower() == "bcv2":
        h = Minimize(BCV2, hRef, points)
    else:
        h = hRef

    utils.info("hRef (" + (hRefmethod or "") + ") = " + str(hRef))
    utils.info("Using h = " + str(h))
    return h
    def install( self ):
        """Install the build results via *make install* under the MSYS lock."""
        with utils.LockFile(utils.LockFileName("MSYS")):
            # install from the source tree or the build tree
            if self.buildInSource:
                self.enterSourceDir()
                workDir = self.sourceDir()
            else:
                self.enterBuildDir()
                workDir = self.buildDir()

            makeCmd = self.makeProgram
            makeArgs = "install"

            # kept as an explicit "== True" comparison on purpose
            if self.subinfo.options.install.useDestDir == True:
                makeArgs += " DESTDIR=%s prefix=" % self.shell.toNativePath( self.installDir() )
            if self.subinfo.options.make.ignoreErrors:
                makeArgs += " -i"
            if self.subinfo.options.make.makeOptions:
                makeArgs += " %s" % self.subinfo.options.make.makeOptions

            self.shell.execute(workDir, makeCmd, makeArgs) or utils.die( "while installing. cmd: %s %s" % (makeCmd, makeArgs) )

            # drop libtool archives from the image dir if present
            libDir = os.path.join(self.imageDir(), "lib")
            if os.path.exists(libDir):
                return self.shell.execute(libDir, "rm", " -Rf *.la")
            return True
Exemplo n.º 6
0
    def kdeConfigureInternal( self, buildType, kdeCustomDefines ):
        """Using cmake"""
        builddir = "%s" % ( self.COMPILER )

        if( not buildType == None ):
            buildtype = "-DCMAKE_BUILD_TYPE=%s" % buildType
            builddir = "%s-%s" % ( builddir, buildType )

        if( not self.buildNameExt == None ):
            builddir = "%s-%s" % ( builddir, self.buildNameExt )

        os.chdir( self.workdir )
        if ( not os.path.exists( builddir) ):
            os.mkdir( builddir )

        if not self.noClean:
            utils.cleanDirectory( builddir )
        os.chdir( builddir )

        command = r"""cmake -G "%s" %s %s %s""" % \
              ( self.cmakeMakefileGenerator, \
                self.kdeDefaultDefines(), \
                kdeCustomDefines, \
                buildtype )

        if utils.verbose() > 0:
            print "configuration command: %s" % command
        if not utils.system(command):
            utils.die( "while CMake'ing. cmd: %s" % command )
        return True
Exemplo n.º 7
0
    def __init__(self, *args, **kwargs):
        """Bind a feature to its project and collect its arguments.

        kwargs:
          project  -- owning project (required; dies when missing/falsy)
          fargs    -- creation-time args (optional)
          nodeploy -- when True, deploy/unlink become no-ops
        """
        self.project = kwargs['project']
        if not self.project:
            die('consistency error: each feature must have a related project')

        self.args = []

        # we can keep track of feature args in two ways:
        # - project args parsed from project description file
        #                               -> self.args
        # - fargs argument (used in creation process, retrieved from
        #       comma-separated args)   -> self.cargs
        #
        #   XXX: self.cargs - used by create(),
        #        self.args  - used by everything else

        if self.fid in self.project.args:
            self.args = self.project.args[self.fid]

        # Fix: removed a stray no-op expression statement ("self.cargs")
        # that followed the assignment; use identity comparison with None.
        if 'fargs' in kwargs and kwargs['fargs'] is not None:
            self.cargs = kwargs['fargs']

        self.project.tree.update(self.tree)

        self.configs = {}

        # nodeploy disables deployment: replace deploy/unlink with no-ops
        self.nodeploy = kwargs.get('nodeploy', False)
        if self.nodeploy:
            self.deploy = lambda *args: None
            self.unlink = lambda *args: None
Exemplo n.º 8
0
    def rsync_sync(self, repo):
        """
        Handle copying of rsync:// and rsync-over-ssh repos.

        Mirrors repo.mirror into the local web directory with rsync and
        runs createrepo over the result.  Dies when the repo cannot be
        mirrored locally or rsync fails.
        """
        # Fix: removed an unused local ("repo_mirror = repo.mirror") that
        # was never referenced afterwards.
        if not repo.mirror_locally:
            utils.die(self.logger,"rsync:// urls must be mirrored locally, yum cannot access them directly")

        if repo.rpm_list != "" and repo.rpm_list != []:
            self.logger.warning("--rpm-list is not supported for rsync'd repositories")

        # FIXME: don't hardcode
        dest_path = os.path.join(self.settings.webdir+"/repo_mirror", repo.name)

        # plain paths and rsync:// urls go direct; everything else over ssh
        spacer = ""
        if not repo.mirror.startswith("rsync://") and not repo.mirror.startswith("/"):
            spacer = "-e ssh"
        # rsync needs the trailing slash to copy directory *contents*
        if not repo.mirror.endswith("/"):
            repo.mirror = "%s/" % repo.mirror

        # FIXME: wrapper for subprocess that logs to logger
        cmd = "rsync -rltDv --copy-unsafe-links --delete-after %s --delete --exclude-from=/etc/cobbler/rsync.exclude %s %s" % (spacer, repo.mirror, dest_path)
        rc = utils.subprocess_call(self.logger, cmd)

        if rc != 0:
            utils.die(self.logger,"cobbler reposync failed")
        os.path.walk(dest_path, self.createrepo_walker, repo)
        self.create_local_file(dest_path, repo)
Exemplo n.º 9
0
    def run(self, path, name, network_root=None, autoinstall_file=None, arch=None, breed=None, os_version=None):
        """
        path: the directory we are scanning for files
        name: the base name of the distro
        network_root: the remote path (nfs/http/ftp) for the distro files
        autoinstall_file: user-specified response file, which will override the default
        arch: user-specified architecture
        breed: user-specified breed
        os_version: user-specified OS version
        """
        # some fixups for the XMLRPC interface, which does not use "None":
        # empty strings are normalized to None while assigning
        self.name = name if name != "" else None
        self.network_root = network_root if network_root != "" else None
        self.autoinstall_file = autoinstall_file if autoinstall_file != "" else None
        self.arch = arch if arch != "" else None
        self.breed = breed
        self.os_version = os_version if os_version != "" else None

        self.path = path
        self.rootdir = path
        self.pkgdir = path

        if self.os_version and not self.breed:
            utils.die(self.logger, "OS version can only be specified when a specific breed is selected")

        self.signature = self.scan_signatures()
        if not self.signature:
            msg = "No signature matched in %s" % path
            self.logger.error(msg)
            raise CX(msg)

        # now walk the filesystem looking for distributions that match certain patterns
        self.logger.info("Adding distros from path %s:" % self.path)
        found_distros = []
        os.path.walk(self.path, self.distro_adder, found_distros)

        if not found_distros:
            self.logger.warning("No distros imported, bailing out")
            return

        # find out if we can auto-create any repository records from the install tree
        if self.network_root is None:
            self.logger.info("associating repos")
            # FIXME: this automagic is not possible (yet) without mirroring
            self.repo_finder(found_distros)
Exemplo n.º 10
0
    def run(self, report_what = None, report_name = None, report_type = None, report_fields = None, report_noheaders = None):
        """
        Get remote profiles and distros and sync them locally

        1. Handles original report output
        2. Handles all fields of report outputs as table given a format
        3. Handles specific fields of report outputs as table given a format
        """
        collections = ["distro","profile","system","repo","network","image","mgmtclass","package","file"]

        def _wanted(collection_name):
            # report_what may be "all", the collection name, or a plural form
            return (report_what == "all"
                    or report_what == collection_name
                    or report_what == "%ss" % collection_name
                    or report_what == "%ses" % collection_name)

        if report_type == 'text' and report_fields == 'all':
            # plain text report of every field
            for collection_name in collections:
                if _wanted(collection_name):
                    if report_name:
                        self.reporting_list_names2(self.api.get_items(collection_name), report_name)
                    else:
                        self.reporting_print_sorted(self.api.get_items(collection_name))

        elif report_type == 'text' and report_fields != 'all':
            utils.die(self.logger,"The 'text' type can only be used with field set to 'all'")

        elif report_type != 'text' and report_fields == 'all':
            # formatted table with every field
            for collection_name in collections:
                if _wanted(collection_name):
                    self.reporting_print_all_fields(self.api.get_items(collection_name), report_name, report_type, report_noheaders)

        else:
            # formatted table restricted to the requested fields
            for collection_name in collections:
                if _wanted(collection_name):
                    self.reporting_print_x_fields(self.api.get_items(collection_name), report_name, report_type, report_fields, report_noheaders)
Exemplo n.º 11
0
    def filter_systems_or_profiles(self, selected_items, list_type):
        """
        Return a list of valid profile or system objects selected from all profiles
        or systems by name, or everything if selected_items is empty
        """
        if list_type == 'profile':
            candidates = list(self.api.profiles())
        elif list_type == 'system':
            candidates = list(self.api.systems())
        else:
            utils.die(self.logger, "Invalid list_type argument: " + list_type)

        candidates.sort(self.sort_name)

        # no profiles/systems selection is made, let's process everything
        if not selected_items:
            return candidates

        wanted_names = utils.input_string_or_list(selected_items)
        matches = [obj for obj in candidates if obj.name in wanted_names]

        if not matches:
            utils.die(self.logger, "No valid systems or profiles were specified.")

        return matches
Exemplo n.º 12
0
    def run(self):
        """
        Returns True if there are no errors, otherwise False.

        Validates the kickstart of every profile and system with
        ksvalidator; dies when ksvalidator is not installed.
        """
        if not os.path.exists("/usr/bin/ksvalidator"):
            utils.die(self.logger, "ksvalidator not installed, please install pykickstart")

        # profiles and systems share the same validation flow; only the
        # is_profile flag passed to checkfile differs
        failed = self._check_collection(self.config.profiles(), True)
        failed = self._check_collection(self.config.systems(), False) or failed

        if failed:
            self.logger.warning("*** potential errors detected in kickstarts ***")
        else:
            self.logger.info("*** all kickstarts seem to be ok ***")

        return not failed

    def _check_collection(self, objs, is_profile):
        """Validate each object's kickstart; return True when any failed."""
        failed = False
        for x in objs:
            (result, errors) = self.checkfile(x, is_profile)
            if not result:
                failed = True
            if len(errors) > 0:
                self.log_errors(errors)
        return failed
Exemplo n.º 13
0
    def configure( self, configureDefines="" ):
        """Implements the configure step for Qt projects.

        configureDefines: extra arguments appended to the configure call.
        Returns the result of running the detected configure command.
        """

        self.enterBuildDir()

        # here follows some automatic configure tool detection
        # 1. search for configure.exe in the order
        #      a. provided by method call
        #      b. in source directory
        # 2. if qmake is available search for a pro-file named as the package
        # 3. if a pro-file is available through configureOptions, run it with qmake
        # 4. otherwise run qmake without any pro file given
        configTool = os.path.join(self.configureSourceDir(), "configure.exe")
        qmakeTool = os.path.join(self.mergeDestinationDir(), "bin", "qmake.exe")
        # count the top-level .pro files: qmake is only pointed at one
        # automatically when the choice is unambiguous (exactly one found)
        topLevelProFilesFound = 0
        topLevelProFile = ""
        for fileListed in os.listdir(self.configureSourceDir()):
            if fileListed.endswith(".pro"):
                if topLevelProFilesFound == 0:
                    topLevelProFile = os.path.join(self.configureSourceDir(), fileListed)
                topLevelProFilesFound += 1
        # NOTE(review): the explicit "!= None and != False" test treats an
        # empty-string tool as set -- do not simplify to truthiness
        if self.subinfo.options.configure.tool != None and self.subinfo.options.configure.tool != False:
            command = "%s %s" % (self.subinfo.options.configure.tool, self.configureOptions(configureDefines))
        elif os.path.exists(configTool):
            command = "%s %s" % (configTool, self.configureOptions(configureDefines))
        elif os.path.exists(qmakeTool) and os.path.exists(topLevelProFile) and topLevelProFilesFound == 1:
            command = "qmake -makefile %s %s" % (topLevelProFile, self.configureOptions(configureDefines))
        elif os.path.exists(qmakeTool):
            command = "qmake %s" % self.configureOptions(configureDefines)
        else:
            utils.die("could not find configure.exe or top level pro-file, please take a look into the source and setup the config process.")

        return self.system( command, "configure" )
Exemplo n.º 14
0
    def msysCompile( self, bOutOfSource = True ):
        """Run configure and make for Autotools based stuff.

        bOutOfSource: when True, configure is run from a separate
        "<srcdir>-build" directory (cleaned first); otherwise directly in
        the source directory.  Dies when the compile fails.
        """
        config = os.path.join( self.workdir, self.instsrcdir, "configure" )
        build  = os.path.join( self.workdir )
        if( bOutOfSource ):
            # otherwise $srcdir is very long and a conftest may fail (like it's the
            # case in libgmp-4.2.4)
            config = os.path.join( "..", self.instsrcdir, "configure" )
            build  = os.path.join( build, self.instsrcdir + "-build" )
            utils.cleanDirectory( build )
        else:
            build  = os.path.join( build, self.instsrcdir )

        # everything is run through the MSYS login shell
        sh = os.path.join( self.msysdir, "bin", "sh.exe" )

        # todo use msysexecute
        cmd = "%s --login -c \"cd %s && %s %s && make -j2" % \
              ( sh, self.__toMSysPath( build ), self.__toMSysPath( config ), \
                self.msysConfigureFlags() )
        if utils.verbose() > 1:
            cmd += " VERBOSE=1"
        # closes the quoted shell command opened in the template above
        cmd += "\""
        if utils.verbose() > 0:
            print "msys compile: %s" % cmd
        if not utils.system(cmd):
            utils.die( "msys compile failed. cmd: %s" % cmd )
        return True
Exemplo n.º 15
0
def run_remote(cmd):
    """Execute a sequence of remote-control commands.

    cmd is a sequence of argument lists; the first element of each entry
    names a handler, the remaining elements are passed through as
    positional arguments.  Aborts via utils.die() when a handler returns
    a non-zero int.
    """
    r = remote.Remote()
    dispatch = {
            'set_url': r.set_url,
            'run': r.run,
            'script': r.run_script,
            'sftp': r.sftp,
            'pull': r.pull,
            'push': r.push,
            'rsync': r.rsync,
            'git': r.git,
            'hg': r.hg,
            'upload': r.upload,
            'confirm': local.confirm,
            'call': lambda x: run_command(x, True),
            'echo': lambda x: sys.stdout.write('{0}\n'.format(x)),
            'echo_error': lambda x: sys.stderr.write('{0}\n'.format(x)),
            'set_verbose': r.set_verbose,
            'key': r.key
            }
    for entry in cmd:
        name, extra = entry[0], entry[1:]
        handler = dispatch.get(name)
        if handler is None:
            # NOTE(review): this stops processing at the first unsupported
            # command rather than skipping it (the original comment said
            # "ignore") -- behavior deliberately kept as-is
            return
        if extra:
            result = handler(*tuple(extra))
        else:
            result = handler()
        if isinstance(result, int) and result != 0:
            utils.die('Abort.')
Exemplo n.º 16
0
    def make( self ):
        """Implements the make step for cmake projects.

        Depending on the cmake options this opens the IDE, builds through
        the IDE (vcbuild), drives a CTest nightly run, or simply invokes
        the make program.  Returns the result of self.system().
        """
        self.enterBuildDir()
        utils.prependPath(self.rootdir, self.envPath)

        if self.subinfo.options.cmake.openIDE:
            if compiler.isMSVC2008():
                command = "start %s" % self.__slnFileName()
            elif compiler.isMSVC2010():
                command = "start vcexpress %s" % self.__slnFileName()
            else:
                # Fix: "command" was left unbound for other compilers,
                # raising NameError at the self.system() call below
                utils.die("opening the IDE is only supported for MSVC 2008/2010")
        elif self.subinfo.options.cmake.useIDE:
            if compiler.isMSVC2008():
                if self.isTargetBuild():
                    command = "vcbuild /M1 %s \"%s|Windows Mobile 6 Professional SDK (ARMV4I)\"" % (self.__slnFileName(), self.buildType())
                else:
                    command = "vcbuild /M1 %s \"%s|WIN32\"" % (self.__slnFileName(), self.buildType())
            elif compiler.isMSVC2010():
                utils.die("has to be implemented")
            else:
                # Fix: same unbound-"command" hazard as the openIDE branch
                utils.die("IDE builds are only supported for MSVC 2008/2010")
        elif self.subinfo.options.cmake.useCTest:
            # first make clean
            self.system( self.makeProgramm + " clean", "make clean" )
            command = "ctest -M " + "Nightly" + " -T Start -T Update -T Configure -T Build -T Submit"
        else:
            command = ' '.join([self.makeProgramm, self.makeOptions()])

        if self.isTargetBuild():
            self.setupTargetToolchain()

        return self.system( command, "make" )
Exemplo n.º 17
0
    def run(self, cobbler_master=None, distro_patterns=None, profile_patterns=None, system_patterns=None, repo_patterns=None, image_patterns=None,
            mgmtclass_patterns=None, package_patterns=None, file_patterns=None, prune=False, omit_data=False, sync_all=False, use_ssl=False):
        """
        Get remote profiles and distros and sync them locally

        Dies when no cobbler master can be determined from the argument
        or the settings.
        """
        # Fix: the *_patterns arguments default to None but were fed
        # straight into .split(), which raised AttributeError whenever a
        # pattern was omitted; treat a missing pattern as empty.
        self.distro_patterns     = (distro_patterns or "").split()
        self.profile_patterns    = (profile_patterns or "").split()
        self.system_patterns     = (system_patterns or "").split()
        self.repo_patterns       = (repo_patterns or "").split()
        self.image_patterns      = (image_patterns or "").split()
        self.mgmtclass_patterns  = (mgmtclass_patterns or "").split()
        self.package_patterns    = (package_patterns or "").split()
        self.file_patterns       = (file_patterns or "").split()
        self.omit_data           = omit_data
        self.prune               = prune
        self.sync_all            = sync_all
        self.use_ssl             = use_ssl

        if self.use_ssl:
            protocol = 'https'
        else:
            protocol = 'http'

        # master precedence: explicit argument, then settings, else die
        if cobbler_master is not None:
            self.master = cobbler_master
        elif len(self.settings.cobbler_master) > 0:
            self.master = self.settings.cobbler_master
        else:
            utils.die('No cobbler master specified, try --master.')

        self.uri = '%s://%s/cobbler_api' % (protocol,self.master)

        self.logger.info("cobbler_master      = %s" % cobbler_master)
        self.logger.info("distro_patterns     = %s" % self.distro_patterns)
        self.logger.info("profile_patterns    = %s" % self.profile_patterns)
        self.logger.info("system_patterns     = %s" % self.system_patterns)
        self.logger.info("repo_patterns       = %s" % self.repo_patterns)
        self.logger.info("image_patterns      = %s" % self.image_patterns)
        self.logger.info("mgmtclass_patterns  = %s" % self.mgmtclass_patterns)
        self.logger.info("package_patterns    = %s" % self.package_patterns)
        self.logger.info("file_patterns       = %s" % self.file_patterns)
        self.logger.info("omit_data           = %s" % self.omit_data)
        self.logger.info("sync_all            = %s" % self.sync_all)
        self.logger.info("use_ssl             = %s" % self.use_ssl)

        self.logger.info("XMLRPC endpoint: %s" % self.uri)
        # NOTE(review): "test ALPHA"/"test BETA" look like leftover debug
        # lines -- confirm and remove
        self.logger.debug("test ALPHA")
        self.remote = xmlrpclib.Server(self.uri)
        self.logger.debug("test BETA")
        self.remote.ping()
        self.local = xmlrpclib.Server("http://127.0.0.1/cobbler_api")
        self.local.ping()

        self.replicate_data()
        self.link_distros()
        self.logger.info("Syncing")
        self.api.sync(logger=self.logger)
        self.logger.info("Done")
        return True
Exemplo n.º 18
0
def import_factory(kerneldir,path,cli_breed,logger):
    """
    Given a directory containing a kernel, return an instance of an Importer
    that can be used to complete the import.
    """
    breed, rootdir, pkgdir = guess_breed(kerneldir, path, cli_breed, logger)
    # NOTE : The guess_breed code should be included in the factory, in order to make
    # the real root directory available, so allowing kernels at different levels within
    # the same tree (removing the isolinux rejection from distro_adder) -- JP

    # log where the content was found, joining the sub-path when present
    if rootdir[1]:
        content_location = os.path.join(rootdir[0], rootdir[1])
    else:
        content_location = rootdir[0]
    logger.info("found content (breed=%s) at %s" % (breed, content_location))

    # a user-requested breed must agree with the detected one
    if cli_breed:
        if cli_breed != breed:
            utils.die(logger, "Requested breed (%s); breed found is %s" % ( cli_breed , breed ) )
        breed = cli_breed

    if breed == "redhat":
        return RedHatImporter(logger,rootdir,pkgdir)
    # disabled for 2.0
    #elif breed == "debian":
    #    return DebianImporter(logger,rootdir,pkgdir)
    #elif breed == "ubuntu":
    #    return UbuntuImporter(logger,rootdir,pkgdir)
    if breed:
        utils.die(logger, "Unsupported OS breed %s" % breed)
Exemplo n.º 19
0
 def update_system_netboot_status(self,name):
     # Regenerate the PXE boot files for a single system (e.g. after its
     # netboot flag changed).  Dies when no system with that name exists.
     system = self.systems.find(name=name)
     if system is None:
         utils.die(self.logger,"error in system lookup for %s" % name)
     self.sync.pxegen.write_all_system_files(system)
     # generate any templates listed in the system
     self.sync.pxegen.write_templates(system)
def run_command(command, path, interactive=True):
    """Look up *command* in SUPPORTED_COMMANDS and invoke it on *path*."""
    logging.info("Running '{}' command...".format(command))

    handler = SUPPORTED_COMMANDS.get(command)
    if handler is None:
        die('Invalid command: {}'.format(command))
    handler(path, interactive)
Exemplo n.º 21
0
def SourceFactory(settings):
    """Return a sourceBase derived instance for recent settings.

    Archive targets yield an ArchiveSource; a VCS target yields
    Svn/Hg/GitSource depending on the url's detected VCS type.  Dies when
    no supported source system is configured.
    """
    utils.trace( "SourceFactory called", 1 )
    source = None
    # defensive init: url stays None when no target of any kind is set
    url = None

    if settings.hasTarget():
        if settings.hasMultipleTargets():
            url = settings.targetAt(0)
        else:
            url = settings.target()
        source = ArchiveSource(settings)

    ## \todo move settings access into info class
    if settings.hasSvnTarget():
        url = settings.svnTarget()
        sourceType = utils.getVCSType( url )
        if sourceType == "svn":
            source = SvnSource(settings)
        elif sourceType == "hg":
            source = HgSource(settings)
        elif sourceType == "git":
            source = GitSource(settings)

    # identity comparison instead of "== None"
    if source is None:
        utils.die("none or unsupported source system set")
    if not source.subinfo:
        source.subinfo = settings
    source.url = url
    return source
Exemplo n.º 22
0
def getDependencies(category, package, version, runtimeOnly=False):
    """returns the dependencies of this package as list of strings:
    category/package

    Each entry is [category, package, newestVersion, dependencyTag].
    When runtimeOnly is True, build dependencies are excluded.
    Dies when the package script file does not exist.
    """
    if not os.path.isfile(getFilename(category, package, version)):
        utils.die("package name %s/%s-%s unknown" % (category, package, version))

    # NOTE: "package" is rebound here -- for a subpackage this becomes the
    # parent package name
    package, subpackage = getSubPackage(category, package)
    print "getDependencies:", package, subpackage
    if subpackage:
        utils.debug(
            "solving package %s/%s/%s-%s %s"
            % (category, subpackage, package, version, getFilename(category, package, version)),
            2,
        )
    else:
        utils.debug(
            "solving package %s/%s-%s %s" % (category, package, version, getFilename(category, package, version)), 2
        )
    # uses the file-local __import__ override that can load from a path
    mod = __import__(getFilename(category, package, version))

    deps = []
    if hasattr(mod, "subinfo"):
        info = mod.subinfo()
        # NOTE(review): depDict aliases info.hardDependencies, so the
        # updates below mutate that dict in place -- confirm intended
        depDict = info.hardDependencies
        depDict.update(info.dependencies)
        depDict.update(info.runtimeDependencies)
        if not runtimeOnly:
            depDict.update(info.buildDependencies)

        # loop variables deliberately rebind category/package/version
        for line in depDict.keys():
            (category, package) = line.split("/")
            version = PortageInstance.getNewestVersion(category, package)
            deps.append([category, package, version, depDict[line]])

    return deps
Exemplo n.º 23
0
def __import__(module):  # pylint: disable=W0622
    """Import either a regular module name or a .py file given by path.

    Returns the loaded module, or None when the import fails (a warning
    is emitted in that case).
    """
    utils.debug("module to import: %s" % module, 2)

    if not os.path.isfile(module):
        # not a file on disk -- fall back to the normal import machinery
        try:
            return __builtin__.__import__(module)
        except ImportError as e:
            utils.warning("import failed for module %s: %s" % (module, e.message))
            return None

    sys.path.append(os.path.dirname(module))
    modulename = os.path.basename(module).replace(".py", "")

    # find the (suffix, mode, type) description tuple for plain .py sources
    description = None
    for candidate in imp.get_suffixes():
        if candidate[0] == ".py":
            description = candidate
            break

    if description is None:
        utils.die("no .py suffix found")

    with open(module) as fileHdl:
        try:
            # dots in the name would be read as package separators
            return imp.load_module(modulename.replace(".", "_"), fileHdl, module, description)
        except ImportError as e:
            utils.warning("import failed for file %s: %s" % (module, e.message))
            return None
Exemplo n.º 24
0
    def run(self):
        """
        Validate the kickstarts of all profiles and systems with ksvalidator.

        Returns True if any kickstart failed validation, False when all of
        them are ok.  (The CLI usage is "cobbler check" before "cobbler sync")

        Dies when /usr/bin/ksvalidator is not installed.
        """

        # ksvalidator ships with pykickstart; without it nothing can be checked
        if not os.path.exists("/usr/bin/ksvalidator"):
            utils.die(self.logger,"ksvalidator not installed, please install pykickstart")

        failed = False
        # second checkfile() argument flags whether the object is a
        # profile (True) or a system (False)
        for x in self.config.profiles():
            (result, errors) = self.checkfile(x, True)
            if not result:
                failed = True
            if len(errors) > 0:
                self.log_errors(errors)
        for x in self.config.systems():
            (result, errors) = self.checkfile(x, False)
            if not result:
                failed = True
            if len(errors) > 0:
                self.log_errors(errors)

        if failed:
            self.logger.warning("*** potential errors detected in kickstarts ***")
        else:
            self.logger.info("*** all kickstarts seem to be ok ***")

        return failed
Exemplo n.º 25
0
	def __init__(self, *args, **kwargs):
		"""Wrap a YASRA `make` run; dies if no Makefile exists in self.cwd."""
		self.init('make', **kwargs)
		self.name = 'yasra'
		makefile = os.path.join(self.cwd, 'Makefile')
		if not os.path.exists(makefile):
			utils.die("couldn't find YASRA Makefile in dir '%s'" % self.cwd)
		self.args += args
		self.run()
Exemplo n.º 26
0
	def __init__(self, inputs, db, *args, **kwargs):
		"""Run bowtie2 against index `db` with 1 (SE) or 2 (PE) inputs.

		`inputs` may be a single path, a 1-element list (single-end) or a
		2-element list (paired-end).  Extra positional args are passed
		through to the bowtie2 command line.
		"""
		self.init('bowtie2', **kwargs)
		self.version('--version', config.get_command('bowtie2-align'))
		if isinstance(inputs, basestring):
			self.check_input('-U', inputs)
		elif len(inputs) == 1:
			self.check_input('-U', inputs[0])
		elif len(inputs) == 2:
			# paired-end: also cap the insert size (-X), defaulting to the
			# value recorded by diagnostics.lookup_insert_size()
			self.check_input('-1', inputs[0])
			self.check_input('-2', inputs[1])
			self.args.append('-X')
			self.args.append(
				kwargs.get('max_insert', diagnostics.lookup_insert_size().max))
		else:
			utils.die("Bowtie2 wrapper expects either 1 (SE) or 2 (PE) inputs")
		self.args += ('-x', db)
		self.add_threading('-p')
		self.args += args
		# regex patterns for read/alignment counts -- presumably matched
		# against bowtie2's log output by the base class; TODO confirm
		self.output_patterns = map(diagnostics.OutputPattern._make, [
			(r"(\d+) reads; of these:$",0,"nreads"),
			(r"  (\d+) \S+ were paired; of these:$",0,"npairs"),
			(r"    (\d+) \S+ aligned concordantly 0 times$",0,"nconcord0"),
			(r"    (\d+) \S+ aligned concordantly exactly 1 time$",0,"nconcord1"),
			(r"    (\d+) \S+ aligned concordantly >1 times$",0,"nconcord2"),
			(r"      (\d+) \S+ aligned discordantly 1 time$",0,"ndiscord1"),
			(r"      (\d+) mates make up the pairs; of these:$",0,"nunpaired"),
			(r"        (\d+) \S+ aligned 0 times$",0,"nunpaired0"),
			(r"        (\d+) \S+ aligned exactly 1 time$",0,"nunpaired1"),
			(r"        (\d+) \S+ aligned >1 times$",0,"nunpaired2")])
		self.run()
Exemplo n.º 27
0
    def createPortablePackage( self ):
        """create portable 7z package with digest files located in the manifest subdir"""
        if not self.packagerExe:
            utils.die("could not find 7za in your path!")

        # strip a trailing "-package" suffix from the package name
        if self.package.endswith( "-package" ):
            shortPackage = self.package[ : -8 ]
        else:
            shortPackage = self.package

        # default setupname: <package>[-<arch>]-<target>.7z
        if not "setupname" in self.defines or not self.defines[ "setupname" ]:
            setupname = shortPackage
            if self.subinfo.options.package.withArchitecture:
                setupname += "-" + os.getenv("EMERGE_ARCHITECTURE")
            setupname += "-" + self.buildTarget + ".7z"
            self.defines[ "setupname" ] = setupname
        if not "srcdir" in self.defines or not self.defines[ "srcdir" ]:
            self.defines[ "srcdir" ] = self.imageDir()
        for script in self.scriptnames:
            utils.copyFile(script, os.path.join(self.defines[ "srcdir" ], os.path.split(script)[1]))

        # make absolute path for output file
        if not os.path.isabs( self.defines[ "setupname" ] ):
            self.defines[ "setupname" ] = os.path.join( self.packageDestinationDir(), self.defines[ "setupname" ] )

        # remove any stale archive, then let 7za recurse over the image dir
        utils.deleteFile(self.defines[ "setupname" ])
        cmd = "cd %s && %s a -r %s %s" % (self.defines[ "srcdir" ], self.packagerExe,self.defines[ "setupname" ], '*')
        if not utils.system(cmd):
            utils.die( "while packaging. cmd: %s" % cmd )
Exemplo n.º 28
0
    def power(self, desired_state):
        """
        state is either "on" or "off".  Rebooting is implemented at the api.py
        level.

        The user and password need not be supplied.  If not supplied they
        will be taken from the environment, COBBLER_POWER_USER and COBBLER_POWER_PASS.
        If provided, these will override any other data and be used instead.  Users
        interested in maximum security should take that route.

        Returns the exit code of the last power-command attempt (0 on success);
        dies if all attempts fail.
        """

        template = self.get_command_template()
        template_file = open(template, "r")

        # blender() flattens system/profile/distro data into one dict for templating
        meta = utils.blender(self.api, False, self.system)
        meta["power_mode"] = desired_state

        # allow command line overrides of the username/password 
        if self.force_user is not None:
           meta["power_user"] = self.force_user
        if self.force_pass is not None:
           meta["power_pass"] = self.force_pass

        tmp = templar.Templar(self.api._config)
        cmd = tmp.render(template_file, meta, None, self.system)
        template_file.close()

        cmd = cmd.strip()

        self.logger.info("cobbler power configuration is:")

        self.logger.info("      type   : %s" % self.system.power_type)
        self.logger.info("      address: %s" % self.system.power_address)
        self.logger.info("      user   : %s" % self.system.power_user)
        self.logger.info("      id     : %s" % self.system.power_id)

        # if no username/password data, check the environment

        # NOTE(review): this env fallback updates `meta` AFTER the template has
        # already been rendered into `cmd`, so it seemingly cannot affect the
        # command actually run -- confirm intended ordering
        if meta.get("power_user","") == "":
            meta["power_user"] = os.environ.get("COBBLER_POWER_USER","")
        if meta.get("power_pass","") == "":
            meta["power_pass"] = os.environ.get("COBBLER_POWER_PASS","")

        # now reprocess the command so we don't feed it through the shell
        cmd = cmd.split(" ")

        # Try the power command 5 times before giving up.
        # Some power switches are flakey
        for x in range(0,5):
            rc = utils.subprocess_call(self.logger, cmd, shell=False)
            if rc == 0:
                break
            else:
                time.sleep(2)

        if not rc == 0:
           utils.die(self.logger,"command failed (rc=%s), please validate the physical setup and cobbler config" % rc)

        return rc
Exemplo n.º 29
0
 def __init__( self ):
     self.subinfo = subinfo()
     CMakePackageBase.__init__( self )
     # bail out early on MSVC: the upstream source uses in-class
     # initialization of a static member, which these compilers reject
     if compiler.isMSVC2008() or compiler.isMSVC2010():
         utils.die("""
         src/Interfaces/GraphScene.h contains the static member kBorder
         and sets the initial value in the class statement. 
         This is not supported by msvc compilers, please fix it""")
Exemplo n.º 30
0
def load_user_config():
    """Load the user configuration, running setup() first when it is missing.

    Dies when the loaded config lacks the required "url"/"apikey" keys.
    """
    if not os.path.exists(CONFIG_PATH):
        info("Warning: {0} does not exist.".format(CONFIG_PATH))
        setup()
    config = load(CONFIG_PATH)
    if not ("url" in config and "apikey" in config):
        die('Configuration file not valid. Please run "dotcloud setup" to create it.')
    return config
Exemplo n.º 31
0
def process_allowlists(db_file):
    """ prompt for and process allowlists

    Entries come from a known remote list, a local file, or pasted text.
    Dies when no valid entries are found and asks for confirmation before
    writing anything to the gravity database at db_file.
    """
    source = inquirer.ask_allowlist()

    import_list = []

    if source in whiteLists:
        url_source = whiteLists[source]
        resp = requests.get(url_source["url"])
        import_list = utils.process_lines(resp.text, url_source["comment"],
                                          False)
        # This breaks if we add a new whitelist setup
        if source != ANUDEEP_ALLOWLIST:
            resp = requests.get(ANUDEEP_ALLOWLIST)
            import_list += utils.process_lines(resp.text,
                                               url_source["comment"], False)

    if source == constants.FILE:
        fname = inquirer.ask_import_file()
        # context manager closes the handle (it was leaked before)
        with open(fname) as import_file:
            import_list = utils.process_lines(import_file.read(),
                                              f"File: {fname}", False)

    if source == constants.PASTE:
        import_list = inquirer.ask_paste()
        import_list = utils.process_lines(import_list, "Pasted content",
                                          utils.validate_host)

    if len(import_list) == 0:
        utils.die("No valid urls found, try again")

    if not inquirer.confirm(
            f"Add {len(import_list)} white lists to {db_file}?"):
        utils.warn("Nothing changed. Bye!")
        sys.exit(0)

    conn = sqlite3.connect(db_file)
    sqldb = conn.cursor()
    added = 0
    exists = 0

    for item in import_list:
        sqldb.execute("SELECT COUNT(*) FROM domainlist WHERE domain = ?",
                      (item["url"], ))

        cnt = sqldb.fetchone()

        if cnt[0] > 0:
            exists += 1
        else:
            # 0 = exact whitelist
            # 2 = regex whitelist
            domain_type = 0
            if item["type"] == constants.REGEX:
                domain_type = 2

            vals = (item["url"], domain_type, item["comment"])
            sqldb.execute(
                "INSERT OR IGNORE INTO domainlist (domain, type, comment) VALUES (?,?,?)",
                vals,
            )
            added += 1

    # one commit for the whole batch instead of one per inserted row
    conn.commit()
    sqldb.close()
    conn.close()

    utils.success(f"{added} whitelists added! {exists} already existed.")
Exemplo n.º 32
0
    # parse CLI crawl settings (args/verbose/url_list_path/experiment are
    # bound earlier in this function, outside this excerpt)
    no_of_instances = int(args.instance)
    start_line = int(args.start) if args.start else 1
    stop_line = int(args.stop) if args.stop else 999999999999
    xvfb = args.xvfb
    capture_screen = args.capture_screen
    if verbose:
        wl_log.setLevel(logging.DEBUG)
    else:
        wl_log.setLevel(logging.INFO)

    # Validate the given arguments
    # Read urls
    url_list = []
    # NOTE(review): mid-function import; `os` should be imported at module level
    import os
    if not url_list_path or not os.path.isfile(url_list_path):
        # NOTE(review): adjacent literals concatenate without a space ->
        # "given!Run"; a space is probably missing here
        ut.die("ERROR: No URL list given!"
               "Run the following to get help: python main --help")
    else:
        try:
            # slice is 1-based on the CLI, hence start_line - 1
            with open(url_list_path) as f:
                url_list = f.read().splitlines()[start_line - 1:stop_line]
        except Exception as e:
            ut.die("Error opening file: {} \n{}".format(
                e, traceback.format_exc()))

    # pick the torrc template matching the requested experiment type
    if experiment == cm.EXP_TYPE_WANG_AND_GOLDBERG:
        torrc_dict = cm.TORRC_WANG_AND_GOLDBERG
    elif experiment == cm.EXP_TYPE_MULTITAB_ALEXA:
        torrc_dict = cm.TORRC_DEFAULT
    else:
        ut.die("Experiment type is not recognized."
               " Use --help to see the possible values.")
Exemplo n.º 33
0
        return False, str(sys.exc_info()[1])


def NormalizeRaster(raster, bins):
    """Classify raster into `bins` equal-interval classes and invert the order.

    Slice() yields classes 1..bins; subtracting from (1 + bins) flips them so
    high input values map to low class numbers (0..bins -> bins..1).
    Returns (True, raster) on success or (False, error_message) on failure.
    """
    try:
        raster = (1 + bins) - arcpy.sa.Slice(raster, bins, "EQUAL_INTERVAL")
        return True, raster
    except Exception:
        # was a bare `except:`, which would also trap SystemExit/KeyboardInterrupt
        return False, str(sys.exc_info()[1])


if __name__ == "__main__":

    # the raster operations below need the Spatial Analyst license
    if arcpy.CheckOutExtension("Spatial") != "CheckedOut":
        utils.die(
            "Unable to checkout the Spatial Analyst Extension.  Quitting.")

    # positional script-tool parameters as wired up in the toolbox
    locationLayer = arcpy.GetParameterAsText(0)
    rasterName = arcpy.GetParameterAsText(1)
    smoothingFactor = arcpy.GetParameterAsText(2)
    cellSize = arcpy.GetParameterAsText(3)
    spatialReference = arcpy.GetParameter(4)

    # hard-coded inputs for interactive debugging; keep test = False in production
    test = False
    if test:
        #locationLayer = r"C:\tmp\test.gdb\fix_ll"
        locationLayer = r"C:\tmp\test.gdb\fix_a_c96"
        rasterName = r"C:\tmp\test.gdb\kde_4"
        smoothingFactor = "8000"
        cellSize = "700"
        spatialReference = arcpy.SpatialReference()
Exemplo n.º 34
0
    def add_entry(self,dirname,kernel,initrd):
        """
        When we find a directory with a valid kernel/initrd in it, create the distribution objects
        as appropriate and save them.  This includes creating xen and rescue distros/profiles
        if possible.

        Returns the list of distro objects added (one per detected arch).
        """

        proposed_name = self.get_proposed_name(dirname,kernel)
        proposed_arch = self.get_proposed_arch(dirname)

        # a --arch given on the command line must agree with the pathname
        if self.arch and proposed_arch and self.arch != proposed_arch:
            utils.die(self.logger,"Arch from pathname (%s) does not match with supplied one %s"%(proposed_arch,self.arch))

        archs = self.learn_arch_from_tree()
        if not archs:
            if self.arch:
                archs.append( self.arch )
        else:
            if self.arch and self.arch not in archs:
                utils.die(self.logger, "Given arch (%s) not found on imported tree %s"%(self.arch,self.get_pkgdir()))
        if proposed_arch:
            if archs and proposed_arch not in archs:
                self.logger.warning("arch from pathname (%s) not found on imported tree %s" % (proposed_arch,self.get_pkgdir()))
                return

            # the pathname arch is the most specific signal, so it wins
            archs = [ proposed_arch ]

        if len(archs)>1:
            if self.breed in [ "redhat" ]:
                self.logger.warning("directory %s holds multiple arches : %s" % (dirname, archs))
                return
            self.logger.warning("- Warning : Multiple archs found : %s" % (archs))

        distros_added = []

        for pxe_arch in archs:
            name = proposed_name + "-" + pxe_arch
            existing_distro = self.distros.find(name=name)

            if existing_distro is not None:
                self.logger.warning("skipping import, as distro name already exists: %s" % name)
                continue

            # NOTE(review): the `else` is redundant after `continue` above
            else:
                self.logger.info("creating new distro: %s" % name)
                distro = self.config.new_distro()

            # NOTE(review): this check runs after new_distro() above, so the
            # freshly created object is simply discarded for -autoboot names
            if name.find("-autoboot") != -1:
                # this is an artifact of some EL-3 imports
                continue

            distro.set_name(name)
            distro.set_kernel(kernel)
            distro.set_initrd(initrd)
            distro.set_arch(pxe_arch)
            distro.set_breed(self.breed)
            # If a version was supplied on command line, we set it now
            if self.os_version:
                distro.set_os_version(self.os_version)

            self.distros.add(distro,save=True)
            distros_added.append(distro)

            existing_profile = self.profiles.find(name=name)

            # see if the profile name is already used, if so, skip it and
            # do not modify the existing profile

            if existing_profile is None:
                self.logger.info("creating new profile: %s" % name)
                #FIXME: The created profile holds a default kickstart, and should be breed specific
                profile = self.config.new_profile()
            else:
                self.logger.info("skipping existing profile, name already exists: %s" % name)
                continue

            # save our minimal profile which just points to the distribution and a good
            # default answer file

            profile.set_name(name)
            profile.set_distro(name)
            profile.set_kickstart(self.kickstart_file)

            # depending on the name of the profile we can define a good virt-type
            # for usage with koan

            if name.find("-xen") != -1:
                profile.set_virt_type("xenpv")
            elif name.find("vmware") != -1:
                profile.set_virt_type("vmware")
            else:
                profile.set_virt_type("qemu")

            # save our new profile to the collection

            self.profiles.add(profile,save=True)

            # Create a rescue image as well, if this is not a xen distro
            # but only for red hat profiles

            # this code disabled as it seems to be adding "-rescue" to
            # distros that are /not/ rescue related, which is wrong.
            # left as a FIXME for those who find this feature interesting.
            #if name.find("-xen") == -1 and self.breed == "redhat":
            #    rescue_name = 'rescue-' + name
            #    existing_profile = self.profiles.find(name=rescue_name)
            #
            #    if existing_profile is None:
            #        self.logger.info("creating new profile: %s" % rescue_name)
            #        profile = self.config.new_profile()
            #    else:
            #        continue
            #
            #    profile.set_name(rescue_name)
            #    profile.set_distro(name)
            #    profile.set_virt_type("qemu")
            #    profile.kernel_options['rescue'] = None
            #    profile.kickstart = '/var/lib/cobbler/kickstarts/pxerescue.ks'
            #
            #    self.profiles.add(profile,save=True)

        return distros_added
Exemplo n.º 35
0
    def run(self,pkgdir,mirror,mirror_name,network_root=None,kickstart_file=None,rsync_flags=None,arch=None,breed=None,os_version=None):
        """
        Import an install tree: validate the arguments, mirror (or reference)
        the content, then scan it for distributions, repos and kickstarts.

        Dies on invalid argument combinations; returns True on success.
        """
        self.pkgdir = pkgdir
        self.mirror = mirror
        self.mirror_name = mirror_name
        self.network_root = network_root
        self.kickstart_file = kickstart_file
        self.rsync_flags = rsync_flags
        self.arch = arch
        self.breed = breed
        self.os_version = os_version

        # some fixups for the XMLRPC interface, which does not use "None"
        if self.arch == "":           self.arch           = None
        if self.mirror == "":         self.mirror         = None
        if self.mirror_name == "":    self.mirror_name    = None
        if self.kickstart_file == "": self.kickstart_file = None
        if self.os_version == "":     self.os_version     = None
        if self.rsync_flags == "":    self.rsync_flags    = None
        if self.network_root == "":   self.network_root   = None

        # If no breed was specified on the command line, set it to "redhat" for this module
        if self.breed == None:
            self.breed = "redhat"

        # debug log stuff for testing
        #self.logger.info("self.pkgdir = %s" % str(self.pkgdir))
        #self.logger.info("self.mirror = %s" % str(self.mirror))
        #self.logger.info("self.mirror_name = %s" % str(self.mirror_name))
        #self.logger.info("self.network_root = %s" % str(self.network_root))
        #self.logger.info("self.kickstart_file = %s" % str(self.kickstart_file))
        #self.logger.info("self.rsync_flags = %s" % str(self.rsync_flags))
        #self.logger.info("self.arch = %s" % str(self.arch))
        #self.logger.info("self.breed = %s" % str(self.breed))
        #self.logger.info("self.os_version = %s" % str(self.os_version))

        # both --import and --name are required arguments

        if self.mirror is None:
            utils.die(self.logger,"import failed.  no --path specified")
        if self.mirror_name is None:
            utils.die(self.logger,"import failed.  no --name specified")

        # if --arch is supplied, validate it to ensure it's valid

        if self.arch is not None and self.arch != "":
            self.arch = self.arch.lower()
            if self.arch == "x86":
                # be consistent
                self.arch = "i386"
            if self.arch not in self.get_valid_arches():
                utils.die(self.logger,"arch must be one of: %s" % string.join(self.get_valid_arches(),", "))

        # if we're going to do any copying, set where to put things
        # and then make sure nothing is already there.

        self.path = os.path.normpath( "%s/ks_mirror/%s" % (self.settings.webdir, self.mirror_name) )
        if os.path.exists(self.path) and self.arch is None:
            # FIXME : Raise exception even when network_root is given ?
            utils.die(self.logger,"Something already exists at this import location (%s).  You must specify --arch to avoid potentially overwriting existing files." % self.path)

        # import takes a --kickstart for forcing selection that can't be used in all circumstances

        if self.kickstart_file and not self.breed:
            utils.die(self.logger,"Kickstart file can only be specified when a specific breed is selected")

        if self.os_version and not self.breed:
            utils.die(self.logger,"OS version can only be specified when a specific breed is selected")

        if self.breed and self.breed.lower() not in self.get_valid_breeds():
            utils.die(self.logger,"Supplied import breed is not supported by this module")

        # if --arch is supplied, make sure the user is not importing a path with a different
        # arch, which would just be silly.

        if self.arch:
            # append the arch path to the name if the arch is not already
            # found in the name.
            for x in self.get_valid_arches():
                if self.path.lower().find(x) != -1:
                    if self.arch != x :
                        utils.die(self.logger,"Architecture found on pathname (%s) does not fit the one given in command line (%s)"%(x,self.arch))
                    break
            else:
                # FIXME : This is very likely removed later at get_proposed_name, and the guessed arch appended again
                self.path += ("-%s" % self.arch)

        # make the output path and mirror content but only if not specifying that a network
        # accessible support location already exists (this is --available-as on the command line)

        if self.network_root is None:
            # we need to mirror (copy) the files

            utils.mkdir(self.path)

            # prevent rsync from creating the directory name twice
            # if we are copying via rsync

            if not self.mirror.endswith("/"):
                self.mirror = "%s/" % self.mirror

            if self.mirror.startswith("http://") or self.mirror.startswith("ftp://") or self.mirror.startswith("nfs://"):

                # http mirrors are kind of primative.  rsync is better.
                # that's why this isn't documented in the manpage and we don't support them.
                # TODO: how about adding recursive FTP as an option?

                utils.die(self.logger,"unsupported protocol")

            else:

                # good, we're going to use rsync..
                # we don't use SSH for public mirrors and local files.
                # presence of user@host syntax means use SSH

                spacer = ""
                if not self.mirror.startswith("rsync://") and not self.mirror.startswith("/"):
                    spacer = ' -e "ssh" '
                rsync_cmd = RSYNC_CMD
                if self.rsync_flags:
                    rsync_cmd = rsync_cmd + " " + self.rsync_flags

                # kick off the rsync now

                utils.run_this(rsync_cmd, (spacer, self.mirror, self.path), self.logger)

        else:

            # rather than mirroring, we're going to assume the path is available
            # over http, ftp, and nfs, perhaps on an external filer.  scanning still requires
            # --mirror is a filesystem path, but --available-as marks the network path

            if not os.path.exists(self.mirror):
                utils.die(self.logger, "path does not exist: %s" % self.mirror)

            # find the filesystem part of the path, after the server bits, as each distro
            # URL needs to be calculated relative to this.

            if not self.network_root.endswith("/"):
                self.network_root = self.network_root + "/"
            self.path = os.path.normpath( self.mirror )
            valid_roots = [ "nfs://", "ftp://", "http://" ]
            for valid_root in valid_roots:
                if self.network_root.startswith(valid_root):
                    break
            else:
                utils.die(self.logger, "Network root given to --available-as must be nfs://, ftp://, or http://")
            if self.network_root.startswith("nfs://"):
                try:
                    # nfs roots look like nfs://server:/path -- require the colon
                    (a,b,rest) = self.network_root.split(":",3)
                except:
                    utils.die(self.logger, "Network root given to --available-as is missing a colon, please see the manpage example.")

        # now walk the filesystem looking for distributions that match certain patterns

        self.logger.info("adding distros")
        distros_added = []
        # FIXME : search below self.path for isolinux configurations or known directories from TRY_LIST
        os.path.walk(self.path, self.distro_adder, distros_added)

        # find out if we can auto-create any repository records from the install tree

        if self.network_root is None:
            self.logger.info("associating repos")
            # FIXME: this automagic is not possible (yet) without mirroring
            self.repo_finder(distros_added)

        # find the most appropriate answer files for each profile object

        self.logger.info("associating kickstarts")
        self.kickstart_finder(distros_added)

        # ensure bootloaders are present
        self.api.pxegen.copy_bootloaders()

        return True
Exemplo n.º 36
0
    def kickstart_finder(self, distros_added):
        """
        For all of the profiles in the config w/o a kickstart, use the
        given kickstart file, or look at the kernel path, from that,
        see if we can guess the distro, and if we can, assign a kickstart
        if one is available for it.
        """

        # FIXME: this is bass-ackwards... why do we loop through all
        # profiles to find distros we added when we already have the list
        # of distros we added??? It would be easier to loop through the
        # distros_added list and modify all child profiles

        for profile in self.profiles:
            distro = self.distros.find(
                name=profile.get_conceptual_parent().name)
            if distro is None or not (distro in distros_added):
                continue

            # NOTE(review): kdir is assigned but never used below -- confirm
            # whether it is leftover dead code
            kdir = os.path.dirname(distro.kernel)
            release_files = self.get_release_files()
            for release_file in release_files:
                # parse flavor/version info out of the release package filename
                results = self.scan_pkg_filename(release_file)
                if results is None:
                    continue
                (flavor, major, minor, release, update) = results
                version, ks = self.set_variance(flavor, major, minor, release,
                                                update, distro.arch)
                # a version forced on the CLI must agree with the detected tree
                if self.os_version:
                    if self.os_version != version:
                        utils.die(
                            self.logger,
                            "CLI version differs from tree : %s vs. %s" %
                            (self.os_version, version))
                ds = self.get_datestamp()
                distro.set_comment("%s.%s.%s update %s" %
                                   (version, minor, release, update))
                distro.set_os_version(version)
                if ds is not None:
                    distro.set_tree_build_time(ds)
                # only auto-assign a kickstart when none was forced via --kickstart
                if self.kickstart_file == None:
                    profile.set_kickstart(ks)
                boot_files = ''
                if version == "esxi4":
                    self.logger.info(
                        "This is an ESXi4 distro - adding extra PXE files to boot-files list"
                    )
                    # add extra files to boot_files in the distro
                    for file in ('vmkernel.gz', 'sys.vgz', 'cim.vgz',
                                 'ienviron.vgz', 'install.vgz'):
                        boot_files += '$img_path/%s=%s/%s ' % (file, self.path,
                                                               file)
                elif version == "esxi5":
                    self.logger.info(
                        "This is an ESXi5 distro - copying all files to boot-files list"
                    )
                    #for file in glob.glob(os.path.join(self.path,"*.*")):
                    #   file_name = os.path.basename(file)
                    #   boot_files += '$img_path/%s=%s ' % (file_name,file)
                    boot_files = '$img_path/=%s' % os.path.join(
                        self.path, "*.*")
                distro.set_boot_files(boot_files.strip())
                self.profiles.add(profile, save=True)
                # we found the correct details above, we can stop looping
                break

            self.configure_tree_location(distro)
            self.distros.add(distro, save=True)  # re-save
            self.api.serialize()
Exemplo n.º 37
0
    def run(self,
            iso=None,
            buildisodir=None,
            profiles=None,
            systems=None,
            distro=None,
            standalone=None,
            source=None,
            exclude_dns=None,
            mkisofs_opts=None):
        """
        Build a bootable ISO: assemble an isolinux tree in buildisodir,
        populate it (standalone or netboot mode) and run mkisofs.

        Dies on invalid argument combinations, missing syslinux files or
        mkisofs failure; returns True on success.
        """

        # the distro option is for stand-alone builds only
        if not standalone and distro is not None:
            utils.die(
                self.logger,
                "The --distro option should only be used when creating a standalone ISO"
            )
        # if building standalone, we only want --distro,
        # profiles/systems are disallowed
        if standalone:
            if profiles is not None or systems is not None:
                utils.die(
                    self.logger,
                    "When building a standalone ISO, use --distro only instead of --profiles/--systems"
                )
            elif distro is None:
                utils.die(
                    self.logger,
                    "When building a standalone ISO, you must specify a --distro"
                )
            if source != None and not os.path.exists(source):
                utils.die(self.logger,
                          "The source specified (%s) does not exist" % source)

        # if iso is none, create it in . as "kickstart.iso"
        if iso is None:
            iso = "kickstart.iso"

        if buildisodir is None:
            buildisodir = self.settings.buildisodir
        else:
            if not os.path.isdir(buildisodir):
                utils.die(self.logger,
                          "The --tempdir specified is not a directory")

            # always work inside a "buildiso" subdirectory so the rmtree
            # below cannot wipe an arbitrary user-supplied directory
            (buildisodir_head,
             buildisodir_tail) = os.path.split(os.path.normpath(buildisodir))
            if buildisodir_tail != "buildiso":
                buildisodir = os.path.join(buildisodir, "buildiso")

        self.logger.info("using/creating buildisodir: %s" % buildisodir)
        if not os.path.exists(buildisodir):
            os.makedirs(buildisodir)
        else:
            # start from a clean tree on every build
            shutil.rmtree(buildisodir)
            os.makedirs(buildisodir)

        # if base of buildisodir does not exist, fail
        # create all profiles unless filtered by "profiles"

        imagesdir = os.path.join(buildisodir, "images")
        isolinuxdir = os.path.join(buildisodir, "isolinux")

        self.logger.info("building tree for isolinux")
        if not os.path.exists(imagesdir):
            os.makedirs(imagesdir)
        if not os.path.exists(isolinuxdir):
            os.makedirs(isolinuxdir)

        self.logger.info("copying miscellaneous files")

        # syslinux file locations vary between distributions; try both
        isolinuxbin = "/usr/share/syslinux/isolinux.bin"
        if not os.path.exists(isolinuxbin):
            isolinuxbin = "/usr/lib/syslinux/isolinux.bin"

        menu = "/usr/share/syslinux/menu.c32"
        if not os.path.exists(menu):
            menu = "/var/lib/cobbler/loaders/menu.c32"

        chain = "/usr/share/syslinux/chain.c32"
        if not os.path.exists(chain):
            chain = "/usr/lib/syslinux/chain.c32"

        files = [isolinuxbin, menu, chain]
        for f in files:
            if not os.path.exists(f):
                utils.die(self.logger, "Required file not found: %s" % f)
            else:
                utils.copyfile(f, os.path.join(isolinuxdir,
                                               os.path.basename(f)), self.api)

        if standalone:
            self.generate_standalone_iso(imagesdir, isolinuxdir, distro,
                                         source)
        else:
            self.generate_netboot_iso(imagesdir, isolinuxdir, profiles,
                                      systems, exclude_dns)

        if mkisofs_opts == None:
            mkisofs_opts = ""
        else:
            mkisofs_opts = mkisofs_opts.strip()

        # removed --quiet
        cmd = "mkisofs -o %s %s -r -b isolinux/isolinux.bin -c isolinux/boot.cat" % (
            iso, mkisofs_opts)
        cmd = cmd + " -no-emul-boot -boot-load-size 4"
        # NOTE(review): "Cobbler\ Install" is not a Python escape -- the
        # backslash passes through literally and the shell (shell=True below)
        # unescapes it; a raw/quoted form would be clearer
        cmd = cmd + " -boot-info-table -V Cobbler\ Install -R -J -T %s" % buildisodir

        rc = utils.subprocess_call(self.logger, cmd, shell=True)
        if rc != 0:
            utils.die(self.logger, "mkisofs failed")

        self.logger.info("ISO build complete")
        self.logger.info("You may wish to delete: %s" % buildisodir)
        self.logger.info("The output file is: %s" % iso)

        return True
Exemplo n.º 38
0
    def apt_sync(self, repo):
        """
        Handle copying of http:// and ftp:// debian repos.

        Mirrors the repo locally with debmirror (unless mirror_locally is
        off), building the debmirror command line from the repo's mirror
        URL, dists, components, arch and yumopts.  Dies via utils.die on
        any unsupported configuration or on a failed mirror run.
        """

        # debmirror is the external tool that does the actual mirroring;
        # bail out early if it is not installed.
        mirror_program = "/usr/bin/debmirror"
        if not os.path.exists(mirror_program):
            utils.die(self.logger,
                      "no %s found, please install it" % (mirror_program))

        cmd = ""  # command to run

        # detect cases that require special handling
        if repo.rpm_list != "" and repo.rpm_list != []:
            utils.die(self.logger,
                      "has_rpm_list not yet supported on apt repos")

        if not repo.arch:
            utils.die(self.logger,
                      "Architecture is required for apt repositories")

        # built destination path for the repo
        dest_path = os.path.join("/var/www/cobbler/repo_mirror", repo.name)

        if repo.mirror_locally:
            # NOTE: Dropping @@suite@@ replace as it is also dropped from
            # from manage_import_debian_ubuntu.py due that repo has no os_version
            # attribute. If it is added again it will break the Web UI!
            # mirror = repo.mirror.replace("@@suite@@",repo.os_version)
            mirror = repo.mirror

            # split the mirror URL into method (http/ftp), host and root path
            idx = mirror.find("://")
            method = mirror[:idx]
            mirror = mirror[idx + 3:]

            idx = mirror.find("/")
            host = mirror[:idx]
            mirror = mirror[idx:]

            dists = ",".join(repo.apt_dists)
            components = ",".join(repo.apt_components)

            mirror_data = "--method=%s --host=%s --root=%s --dist=%s --section=%s" % (
                pipes.quote(method), pipes.quote(host), pipes.quote(mirror),
                pipes.quote(dists), pipes.quote(components))

            # extra debmirror flags come from the repo's yumopts dict
            rflags = "--nocleanup"
            for x in repo.yumopts:
                if repo.yumopts[x]:
                    rflags += " %s %s" % (x, repo.yumopts[x])
                else:
                    rflags += " %s" % x
            # BUGFIX: the command was previously built twice, first with an
            # unquoted dest_path that was immediately overwritten; keep only
            # the properly quoted version.
            cmd = "%s %s %s %s" % (mirror_program, rflags, mirror_data,
                                   pipes.quote(dest_path))
            if repo.arch == "src":
                cmd = "%s --source" % cmd
            else:
                arch = repo.arch
                if arch == "x86":
                    arch = "i386"  # FIX potential arch errors
                if arch == "x86_64":
                    arch = "amd64"  # FIX potential arch errors
                cmd = "%s --nosource -a %s" % (cmd, arch)

            # debmirror requires HOME to be set; export it via os.environ so
            # both this process and the child process see it (os.putenv alone
            # does not update os.environ).
            os.environ["HOME"] = "/var/lib/cobbler"

            rc = utils.subprocess_call(self.logger, cmd)
            if rc != 0:
                utils.die(self.logger, "cobbler reposync failed")
Exemplo n.º 39
0
    def run(self, name=None, verbose=True):
        """
        Syncs the current repo configuration file with the filesystem.

        :param name: if given, only the repo with this name is synced;
                     otherwise every repo with keep_updated set is processed.
        :param verbose: stored on the instance for use by the sync helpers.
        """

        self.logger.info("run, reposync, run!")

        try:
            self.tries = int(self.tries)
        except (TypeError, ValueError):
            utils.die(self.logger, "retry value must be an integer")

        self.verbose = verbose

        report_failure = False
        for repo in self.repos:
            if name is not None and repo.name != name:
                # invoked to sync only a specific repo, this is not the one
                continue
            elif name is None and not repo.keep_updated:
                # invoked to run against all repos, but this one is off
                self.logger.info("%s is set to not be updated" % repo.name)
                continue

            repo_mirror = os.path.join(self.settings.webdir, "repo_mirror")
            repo_path = os.path.join(repo_mirror, repo.name)

            # rhn:// mirrors manage their own directory layout, so only
            # pre-create the mirror directory for the other schemes
            if not os.path.isdir(repo_path) and not repo.mirror.lower(
            ).startswith("rhn://"):
                os.makedirs(repo_path)

            # set the environment keys specified for this repo and save
            # any values they shadow so they can be restored afterwards
            env = repo.environment
            old_env = {}

            for k in env.keys():
                self.logger.debug("setting repo environment: %s=%s" %
                                  (k, env[k]))
                if env[k] is not None:
                    if os.getenv(k):
                        old_env[k] = os.getenv(k)
                    # BUGFIX: apply the repo's value even when the variable
                    # already existed; previously the old value was saved
                    # but the new one was never set, so per-repo settings
                    # were silently ignored for pre-existing variables.
                    os.environ[k] = env[k]

            # which may actually NOT reposync if the repo is set to not mirror locally
            # but that's a technicality

            # BUGFIX: initialize before the loop; with tries == 0 the loop
            # body never ran and "success" was unbound below (NameError).
            success = False
            for x in range(self.tries + 1, 1, -1):
                try:
                    self.sync(repo)
                    success = True
                    break
                except Exception:
                    utils.log_exc(self.logger)
                    self.logger.warning("reposync failed, tries left: %s" %
                                        (x - 2))

            # cleanup/restore any environment variables that were
            # added or changed above
            for k in env.keys():
                if env[k] is not None:
                    if k in old_env:
                        self.logger.debug("resetting repo environment: %s=%s" %
                                          (k, old_env[k]))
                        os.environ[k] = old_env[k]
                    else:
                        self.logger.debug("removing repo environment: %s=%s" %
                                          (k, env[k]))
                        del os.environ[k]

            if not success:
                report_failure = True
                if not self.nofail:
                    utils.die(
                        self.logger,
                        "reposync failed, retry limit reached, aborting")
                else:
                    self.logger.error(
                        "reposync failed, retry limit reached, skipping")

            self.update_permissions(repo_path)

        if report_failure:
            utils.die(
                self.logger,
                "overall reposync failed, at least one repo failed to synchronize"
            )
Exemplo n.º 40
0
    def run(self,
            pkgdir,
            name,
            path,
            network_root=None,
            kickstart_file=None,
            rsync_flags=None,
            arch=None,
            breed=None,
            os_version=None):
        """
        Entry point for the VMware distro import.

        Walks the tree under ``path`` looking for kernel/initrd pairs,
        creates distro/profile objects for anything found, associates
        kickstarts and copies bootloaders.

        :returns: True when at least one distro was imported, False otherwise.
        """
        self.pkgdir = pkgdir
        self.network_root = network_root
        self.kickstart_file = kickstart_file
        self.rsync_flags = rsync_flags
        self.arch = arch
        self.breed = breed
        self.os_version = os_version
        self.name = name
        self.path = path
        self.rootdir = path

        # some fixups for the XMLRPC interface, which does not use "None"
        if self.arch == "":
            self.arch = None
        if self.kickstart_file == "":
            self.kickstart_file = None
        if self.os_version == "":
            self.os_version = None
        if self.rsync_flags == "":
            self.rsync_flags = None
        if self.network_root == "":
            self.network_root = None

        # If no breed was specified on the command line, set it to "vmware"
        # for this module.  (The old comment incorrectly said "redhat".)
        if self.breed is None:
            self.breed = "vmware"

        # import takes a --kickstart for forcing selection that can't be used in all circumstances
        if self.kickstart_file and not self.breed:
            utils.die(
                self.logger,
                "Kickstart file can only be specified when a specific breed is selected"
            )

        if self.os_version and not self.breed:
            utils.die(
                self.logger,
                "OS version can only be specified when a specific breed is selected"
            )

        if self.breed and self.breed.lower() not in self.get_valid_breeds():
            utils.die(self.logger,
                      "Supplied import breed is not supported by this module")

        # now walk the filesystem looking for distributions that match certain patterns
        self.logger.info("adding distros")
        distros_added = []
        # FIXME : search below self.path for isolinux configurations or known directories from TRY_LIST
        os.path.walk(self.path, self.distro_adder, distros_added)

        if len(distros_added) == 0:
            self.logger.warning("No distros imported, bailing out")
            return False

        # find the most appropriate answer files for each profile object
        self.logger.info("associating kickstarts")
        self.kickstart_finder(distros_added)

        # ensure bootloaders are present
        self.api.pxegen.copy_bootloaders()

        return True
Exemplo n.º 41
0
    def add_entry(self,dirname,kernel,initrd):
        """
        When we find a directory with a valid kernel/initrd in it, create the
        distribution objects as appropriate and save them.

        One distro (and a matching profile, unless one with the same name
        already exists) is created per architecture detected in the tree.
        Returns the list of newly created distro objects.
        """

        # build a proposed name based on the directory structure
        proposed_name = self.get_proposed_name(dirname,kernel)

        # build a list of arches found in the packages directory
        archs = self.learn_arch_from_tree()
        if not archs and self.arch:
            # nothing detected in the tree: trust the user-supplied --arch
            archs.append( self.arch )
        else:
            # NOTE(review): this branch only dies when archs is non-empty
            # and the requested arch is missing; with archs empty and no
            # --arch given we fall through to the len(archs) == 0 error below.
            if self.arch and self.arch not in archs:
                utils.die(self.logger, "Given arch (%s) not found on imported tree %s"%(self.arch,self.path))

        if len(archs) == 0:
            self.logger.error("No arch could be detected in %s, and none was specified via the --arch option" % dirname)
            return []
        elif len(archs) > 1:
            self.logger.warning("- Warning : Multiple archs found : %s" % (archs))

        distros_added = []
        for pxe_arch in archs:
            name = proposed_name + "-" + pxe_arch
            existing_distro = self.distros.find(name=name)

            if existing_distro is not None:
                self.logger.warning("skipping import, as distro name already exists: %s" % name)
                continue
            else:
                self.logger.info("creating new distro: %s" % name)
                distro = self.config.new_distro()

            if name.find("-autoboot") != -1:
                # this is an artifact of some EL-3 imports
                # NOTE(review): the freshly created (unsaved) distro object
                # above is simply discarded in this case
                continue

            # populate the distro from the detected kernel/initrd and the
            # breed signature's kernel options / template / boot files
            distro.set_name(name)
            distro.set_kernel(kernel)
            distro.set_initrd(initrd)
            distro.set_arch(pxe_arch)
            distro.set_breed(self.breed)
            distro.set_os_version(self.os_version)
            distro.set_kernel_options(self.signature.get("kernel_options",""))
            distro.set_kernel_options_post(self.signature.get("kernel_options_post",""))
            distro.set_template_files(self.signature.get("template_files",""))

            # boot files are expressed as "$local_img_path/<file>=<tree>/<file>"
            # pairs, space separated
            boot_files = ''
            for boot_file in self.signature["boot_files"]:
                boot_files += '$local_img_path/%s=%s/%s ' % (boot_file,self.path,boot_file)
            distro.set_boot_files(boot_files.strip())

            self.configure_tree_location(distro)

            self.distros.add(distro,save=True)
            distros_added.append(distro)

            # see if the profile name is already used, if so, skip it and
            # do not modify the existing profile

            existing_profile = self.profiles.find(name=name)

            if existing_profile is None:
                self.logger.info("creating new profile: %s" % name)
                profile = self.config.new_profile()
            else:
                self.logger.info("skipping existing profile, name already exists: %s" % name)
                continue

            profile.set_name(name)
            profile.set_distro(name)
            profile.set_kickstart(self.kickstart_file)

            # depending on the name of the profile we can
            # define a good virt-type for usage with koan
            if name.find("-xen") != -1:
                profile.set_virt_type("xenpv")
            elif name.find("vmware") != -1:
                profile.set_virt_type("vmware")
            else:
                profile.set_virt_type("kvm")

            self.profiles.add(profile,save=True)

        return distros_added
Exemplo n.º 42
0
    def generate_standalone_iso(self, imagesdir, isolinuxdir, distname,
                                filesource):
        """
        Create a bootable CD image to be used for hands-off CD installations.

        Writes an isolinux.cfg with one boot entry per descendant
        (profile/system) of the given distro, plus one kickstart/preseed
        file per descendant, and rsyncs the distro install tree next to it.
        """
        # Get the distro object for the requested distro
        # and then get all of its descendants (profiles/sub-profiles/systems)
        distro = self.api.find_distro(distname)
        if distro is None:
            utils.die(self.logger,
                      "distro %s was not found, aborting" % distname)
        descendants = distro.get_descendants()

        if filesource is None:
            # Try to determine the source from the distro kernel path by
            # walking upwards until we hit the ks_mirror root
            self.logger.debug("trying to locate source for distro")
            found_source = False
            (source_head, source_tail) = os.path.split(distro.kernel)
            while source_tail != '':
                if source_head == os.path.join(self.api.settings().webdir,
                                               "ks_mirror"):
                    filesource = os.path.join(source_head, source_tail)
                    found_source = True
                    self.logger.debug("found source in %s" % filesource)
                    break
                (source_head, source_tail) = os.path.split(source_head)
            # Can't find the source, raise an error
            if not found_source:
                utils.die(
                    self.logger,
                    "Error, no installation source found. When building a standalone ISO, you must specify a --source if the distro install tree is not hosted locally"
                )

        self.logger.info("copying kernels and initrds for standalone distro")
        self.copy_boot_files(distro, isolinuxdir, None)

        cmd = "rsync -rlptgu --exclude=boot.cat --exclude=TRANS.TBL --exclude=isolinux/ %s/ %s/../" % (
            filesource, isolinuxdir)
        self.logger.info("- copying distro %s files (%s)" % (distname, cmd))
        rc = utils.subprocess_call(self.logger, cmd, shell=True)
        if rc:
            utils.die(self.logger, "rsync of files failed")

        self.logger.info("generating a isolinux.cfg")
        isolinuxcfg = os.path.join(isolinuxdir, "isolinux.cfg")
        # use context managers so the files are closed even if kickstart
        # generation raises
        with open(isolinuxcfg, "w+") as cfg:
            cfg.write(self.iso_template)

            for descendant in descendants:
                data = utils.blender(self.api, False, descendant)

                cfg.write("\n")
                cfg.write("LABEL %s\n" % descendant.name)
                cfg.write("  MENU LABEL %s\n" % descendant.name)
                cfg.write("  kernel %s\n" % os.path.basename(distro.kernel))

                append_line = "  append initrd=%s" % os.path.basename(
                    distro.initrd)
                if distro.breed == "redhat":
                    append_line += " ks=cdrom:/isolinux/%s.cfg" % descendant.name
                if distro.breed == "suse":
                    append_line += " autoyast=file:///isolinux/%s.cfg install=cdrom:///" % descendant.name
                    if "install" in data["kernel_options"]:
                        del data["kernel_options"]["install"]
                if distro.breed in ["ubuntu", "debian"]:
                    append_line += " auto-install/enable=true preseed/file=/cdrom/isolinux/%s.cfg" % descendant.name

                # add remaining kernel_options to append_line
                append_line += self.add_remaining_kopts(data["kernel_options"])
                cfg.write(append_line)

                if descendant.COLLECTION_TYPE == 'profile':
                    kickstart_data = self.api.kickgen.generate_kickstart_for_profile(
                        descendant.name)
                elif descendant.COLLECTION_TYPE == 'system':
                    kickstart_data = self.api.kickgen.generate_kickstart_for_system(
                        descendant.name)
                else:
                    # BUGFIX: previously an unexpected collection type left
                    # kickstart_data unbound (NameError) or silently reused
                    # the previous descendant's data; skip it instead.
                    continue

                if distro.breed == "redhat":
                    # force installation from the CD rather than a url
                    cdregex = re.compile("url .*\n", re.IGNORECASE)
                    kickstart_data = cdregex.sub("cdrom\n", kickstart_data)

                ks_name = os.path.join(isolinuxdir, "%s.cfg" % descendant.name)
                with open(ks_name, "w+") as ks_file:
                    ks_file.write(kickstart_data)

            self.logger.info("done writing config")
            cfg.write("\n")
            cfg.write("MENU END\n")

        return
Exemplo n.º 43
0
    def generate_standalone_iso(self, imagesdir, isolinuxdir, distname,
                                filesource, airgapped, profiles):
        """
        Create a bootable CD image to be used for hands-off CD installations.

        Writes an isolinux.cfg with one boot entry per descendant
        (profile/system) of the given distro plus one autoinstall file per
        descendant.  With airgapped=True, also copies every locally
        mirrored repo referenced by the selected descendants onto the ISO
        and rewrites their baseurls to point at the CD.
        """
        # Get the distro object for the requested distro
        # and then get all of its descendants (profiles/sub-profiles/systems)
        # with sort=True for profile/system heirarchy to allow menu indenting
        distro = self.api.find_distro(distname)
        if distro is None:
            utils.die(self.logger,
                      "distro %s was not found, aborting" % distname)
        descendants = distro.get_descendants(sort=True)
        profiles = utils.input_string_or_list(profiles)

        if filesource is None:
            # Try to determine the source from the distro kernel path by
            # walking upwards until we hit the distro_mirror root
            self.logger.debug("trying to locate source for distro")
            found_source = False
            (source_head, source_tail) = os.path.split(distro.kernel)
            while source_tail != '':
                if source_head == os.path.join(self.api.settings().webdir,
                                               "distro_mirror"):
                    filesource = os.path.join(source_head, source_tail)
                    found_source = True
                    self.logger.debug("found source in %s" % filesource)
                    break
                (source_head, source_tail) = os.path.split(source_head)
            # Can't find the source, raise an error
            if not found_source:
                utils.die(
                    self.logger,
                    "Error, no installation source found. When building a standalone ISO, you must specify a --source if the distro install tree is not hosted locally"
                )

        self.logger.info("copying kernels and initrds for standalone distro")
        self.copy_boot_files(distro, isolinuxdir, None)

        self.logger.info("generating an isolinux.cfg")
        isolinuxcfg = os.path.join(isolinuxdir, "isolinux.cfg")
        cfg = open(isolinuxcfg, "w+")
        cfg.write(self.iso_template)

        if airgapped:
            # repo name -> local mirror dir, collected below and copied
            # onto the ISO after the config is written
            repo_names_to_copy = {}

        for descendant in descendants:
            # if a list of profiles was given, skip any others and their systems
            if (profiles and ((descendant.COLLECTION_TYPE == 'profile'
                               and descendant.name not in profiles) or
                              (descendant.COLLECTION_TYPE == 'system'
                               and descendant.profile not in profiles))):
                continue

            # systems are indented under their profile in the boot menu
            menu_indent = 0
            if descendant.COLLECTION_TYPE == 'system':
                menu_indent = 4

            data = utils.blender(self.api, False, descendant)

            cfg.write("\n")
            cfg.write("LABEL %s\n" % descendant.name)
            if menu_indent:
                cfg.write("  MENU INDENT %d\n" % menu_indent)
            cfg.write("  MENU LABEL %s\n" % descendant.name)
            cfg.write("  kernel %s\n" % os.path.basename(distro.kernel))

            # per-breed kernel arguments pointing at the autoinstall file
            # that is written onto the CD below
            append_line = "  append initrd=%s" % os.path.basename(
                distro.initrd)
            if distro.breed == "redhat":
                append_line += " ks=cdrom:/isolinux/%s.cfg" % descendant.name
            if distro.breed == "suse":
                append_line += " autoyast=file:///isolinux/%s.cfg install=cdrom:///" % descendant.name
                if "install" in data["kernel_options"]:
                    del data["kernel_options"]["install"]
            if distro.breed in ["ubuntu", "debian"]:
                append_line += " auto-install/enable=true preseed/file=/cdrom/isolinux/%s.cfg" % descendant.name

            # add remaining kernel_options to append_line
            append_line += self.add_remaining_kopts(data["kernel_options"])
            cfg.write(append_line)

            # NOTE(review): descendants of any other collection type would
            # leave autoinstall_data unbound below — presumably only
            # profiles/systems can occur here; verify against get_descendants
            if descendant.COLLECTION_TYPE == 'profile':
                autoinstall_data = self.api.autoinstallgen.generate_autoinstall_for_profile(
                    descendant.name)
            elif descendant.COLLECTION_TYPE == 'system':
                autoinstall_data = self.api.autoinstallgen.generate_autoinstall_for_system(
                    descendant.name)

            if distro.breed == "redhat":
                # replace the first "url ..." directive so installation
                # comes from the CD instead of the network
                cdregex = re.compile("^\s*url .*\n",
                                     re.IGNORECASE | re.MULTILINE)
                autoinstall_data = cdregex.sub("cdrom\n",
                                               autoinstall_data,
                                               count=1)

            if airgapped:
                descendant_repos = data['repos']
                for repo_name in descendant_repos:
                    repo_obj = self.api.find_repo(repo_name)
                    error_fmt = (descendant.COLLECTION_TYPE + " " +
                                 descendant.name + " refers to repo " +
                                 repo_name +
                                 ", which %%s; cannot build airgapped ISO")

                    # every referenced repo must exist and be mirrored
                    # locally, otherwise the ISO cannot be self-contained
                    if repo_obj is None:
                        utils.die(self.logger, error_fmt % "does not exist")
                    if not repo_obj.mirror_locally:
                        utils.die(
                            self.logger, error_fmt %
                            "is not configured for local mirroring")
                    # FIXME: don't hardcode
                    mirrordir = os.path.join(self.settings.webdir,
                                             "repo_mirror", repo_obj.name)
                    if not os.path.exists(mirrordir):
                        utils.die(
                            self.logger,
                            error_fmt % "has a missing local mirror directory")

                    repo_names_to_copy[repo_obj.name] = mirrordir

                    # update the baseurl in autoinstall_data to use the cdrom copy of this repo
                    reporegex = re.compile(
                        "^(\s*repo --name=" + repo_obj.name + " --baseurl=).*",
                        re.MULTILINE)
                    autoinstall_data = reporegex.sub(
                        r"\1" + "file:///mnt/source/repo_mirror/" +
                        repo_obj.name, autoinstall_data)

                # rewrite any split-tree repos, such as in redhat, to use cdrom
                srcreporegex = re.compile(
                    "^(\s*repo --name=\S+ --baseurl=).*/cobbler/ks_mirror/" +
                    distro.name + "/?(.*)", re.MULTILINE)
                autoinstall_data = srcreporegex.sub(
                    r"\1" + "file:///mnt/source" + r"\2", autoinstall_data)

            # write the (possibly rewritten) autoinstall file for this entry
            autoinstall_name = os.path.join(isolinuxdir,
                                            "%s.cfg" % descendant.name)
            autoinstall_file = open(autoinstall_name, "w+")
            autoinstall_file.write(autoinstall_data)
            autoinstall_file.close()

        self.logger.info("done writing config")
        cfg.write("\n")
        cfg.write("MENU END\n")
        cfg.close()

        if airgapped:
            # copy any repos found in profiles or systems to the iso build
            repodir = os.path.abspath(
                os.path.join(isolinuxdir, "..", "repo_mirror"))
            if not os.path.exists(repodir):
                os.makedirs(repodir)

            for repo_name in repo_names_to_copy:
                src = repo_names_to_copy[repo_name]
                dst = os.path.join(repodir, repo_name)
                self.logger.info(" - copying repo '" + repo_name +
                                 "' for airgapped ISO")

                ok = utils.rsync_files(
                    src,
                    dst,
                    "--exclude=TRANS.TBL --exclude=cache/ --no-g",
                    logger=self.logger,
                    quiet=True)
                if not ok:
                    utils.die(self.logger,
                              "rsync of repo '" + repo_name + "' failed")

        # copy distro files last, since they take the most time
        cmd = "rsync -rlptgu --exclude=boot.cat --exclude=TRANS.TBL --exclude=isolinux/ %s/ %s/../" % (
            filesource, isolinuxdir)
        self.logger.info("- copying distro %s files (%s)" % (distname, cmd))
        rc = utils.subprocess_call(self.logger, cmd, shell=True)
        if rc:
            utils.die(self.logger, "rsync of distro files failed")
Exemplo n.º 44
0
    def run(self,
            cobbler_master=None,
            distro_patterns=None,
            profile_patterns=None,
            system_patterns=None,
            repo_patterns=None,
            image_patterns=None,
            mgmtclass_patterns=None,
            package_patterns=None,
            file_patterns=None,
            prune=False,
            omit_data=False,
            sync_all=False,
            use_ssl=False):
        """
        Get remote profiles and distros and sync them locally.

        The *_patterns arguments are whitespace-separated pattern strings
        selecting which objects to replicate; the master is taken from
        cobbler_master or, failing that, from settings.
        """

        # BUGFIX: the pattern arguments default to None, and calling
        # .split() on None raised AttributeError; treat None as "".
        self.distro_patterns = (distro_patterns or "").split()
        self.profile_patterns = (profile_patterns or "").split()
        self.system_patterns = (system_patterns or "").split()
        self.repo_patterns = (repo_patterns or "").split()
        self.image_patterns = (image_patterns or "").split()
        self.mgmtclass_patterns = (mgmtclass_patterns or "").split()
        self.package_patterns = (package_patterns or "").split()
        self.file_patterns = (file_patterns or "").split()
        self.omit_data = omit_data
        self.prune = prune
        self.sync_all = sync_all
        self.use_ssl = use_ssl

        if self.use_ssl:
            protocol = 'https'
        else:
            protocol = 'http'

        # explicit --master wins over the configured cobbler_master setting
        if cobbler_master is not None:
            self.master = cobbler_master
        elif len(self.settings.cobbler_master) > 0:
            self.master = self.settings.cobbler_master
        else:
            utils.die(self.logger,
                      'No cobbler master specified, try --master.')

        self.uri = '%s://%s/cobbler_api' % (protocol, self.master)

        self.logger.info("cobbler_master      = %s" % cobbler_master)
        self.logger.info("distro_patterns     = %s" % self.distro_patterns)
        self.logger.info("profile_patterns    = %s" % self.profile_patterns)
        self.logger.info("system_patterns     = %s" % self.system_patterns)
        self.logger.info("repo_patterns       = %s" % self.repo_patterns)
        self.logger.info("image_patterns      = %s" % self.image_patterns)
        self.logger.info("mgmtclass_patterns  = %s" % self.mgmtclass_patterns)
        self.logger.info("package_patterns    = %s" % self.package_patterns)
        self.logger.info("file_patterns       = %s" % self.file_patterns)
        self.logger.info("omit_data           = %s" % self.omit_data)
        self.logger.info("sync_all            = %s" % self.sync_all)
        self.logger.info("use_ssl             = %s" % self.use_ssl)

        self.logger.info("XMLRPC endpoint: %s" % self.uri)
        # (removed leftover "test ALPHA"/"test BETA" debug statements)
        self.remote = xmlrpclib.Server(self.uri)
        self.remote.ping()
        self.local = xmlrpclib.Server("http://127.0.0.1/cobbler_api")
        self.local.ping()

        # pull the remote data, relink distros, then run a full sync so
        # the local filesystem reflects the replicated configuration
        self.replicate_data()
        self.link_distros()
        self.logger.info("Syncing")
        self.api.sync(logger=self.logger)
        self.logger.info("Done")
Exemplo n.º 45
0
    def yum_sync(self, repo):
        """
        Handle copying of http:// and ftp:// yum repos.

        Mirrors the repo with reposync (or yumdownloader/dnf download when
        an rpm_list restricts the pull), then fetches any extra repodata
        files listed in repomd.xml and rebuilds the index with createrepo.
        """

        # create the config file the hosts will use to access the repository.
        repo_mirror = repo.mirror.strip()
        dest_path = os.path.join(self.settings.webdir + "/repo_mirror",
                                 repo.name.strip())
        self.create_local_file(dest_path, repo)

        if not repo.mirror_locally:
            return

        cmd = self.reposync_cmd()  # command to run
        has_rpm_list = False  # flag indicating not to pull the whole repo

        # detect cases that require special handling

        if repo.rpm_list != "" and repo.rpm_list != []:
            has_rpm_list = True

        # create yum config file for use by reposync
        temp_path = os.path.join(dest_path, ".origin")

        if not os.path.isdir(temp_path):
            # FIXME: there's a chance this might break the RHN D/L case
            os.makedirs(temp_path)

        temp_file = self.create_local_file(temp_path, repo, output=False)

        if not has_rpm_list:
            # if we have not requested only certain RPMs, use reposync
            cmd = "%s %s --config=%s --repoid=%s --download_path=%s" % (
                cmd, self.rflags, temp_file, pipes.quote(repo.name),
                pipes.quote(self.settings.webdir + "/repo_mirror"))
            if repo.arch != "":
                if repo.arch == "x86":
                    repo.arch = "i386"  # FIX potential arch errors
                if repo.arch == "i386":
                    # counter-intuitive, but we want the newish kernels too
                    cmd = "%s -a i686" % (cmd)
                else:
                    cmd = "%s -a %s" % (cmd, repo.arch)

        else:

            # create the output directory if it doesn't exist
            if not os.path.exists(dest_path):
                os.makedirs(dest_path)

            use_source = ""
            if repo.arch == "src":
                use_source = "--source"

            # older yumdownloader sometimes explodes on --resolvedeps
            # if this happens to you, upgrade yum & yum-utils
            extra_flags = self.settings.yumdownloader_flags
            cmd = ""
            if os.path.exists("/usr/bin/dnf"):
                cmd = "/usr/bin/dnf download"
            else:
                cmd = "/usr/bin/yumdownloader"
            cmd = "%s %s %s --disablerepo=* --enablerepo=%s -c %s --destdir=%s %s" % (
                cmd, extra_flags, use_source, pipes.quote(repo.name),
                temp_file, pipes.quote(dest_path), " ".join(repo.rpm_list))

        # now regardless of whether we're doing yumdownloader or reposync
        # or whether the repo was http://, ftp://, or rhn://, execute all queued
        # commands here.  Any failure at any point stops the operation.

        rc = utils.subprocess_call(self.logger, cmd)
        if rc != 0:
            utils.die(self.logger, "cobbler reposync failed")

        repodata_path = os.path.join(dest_path, "repodata")

        # grab repomd.xml and use it to download any metadata we can use
        proxies = None
        if repo.proxy == '<<inherit>>':
            proxies = {'http': self.settings.proxy_url_ext}
        elif repo.proxy != '<<None>>' and repo.proxy != '':
            proxies = {'http': repo.proxy, 'https': repo.proxy}
        src = repo_mirror + "/repodata/repomd.xml"
        dst = temp_path + "/repomd.xml"
        urlgrab_ssl_opts = self.gen_urlgrab_ssl_opts(repo.yumopts)
        try:
            urlgrabber.grabber.urlgrab(src,
                                       filename=dst,
                                       proxies=proxies,
                                       **urlgrab_ssl_opts)
        except Exception as e:
            # BUGFIX: concatenating e.args (a tuple) to a str raised
            # TypeError and masked the real download error; use str(e).
            utils.die(self.logger, "failed to fetch " + src + " " + str(e))

        # create our repodata directory now, as any extra metadata we're
        # about to download probably lives there
        if not os.path.isdir(repodata_path):
            os.makedirs(repodata_path)
        rmd = yum.repoMDObject.RepoMD('', "%s/repomd.xml" % (temp_path))
        for mdtype in rmd.repoData.keys():
            # don't download metadata files that are created by default
            if mdtype not in [
                    "primary", "primary_db", "filelists", "filelists_db",
                    "other", "other_db"
            ]:
                mdfile = rmd.getData(mdtype).location[1]
                src = repo_mirror + "/" + mdfile
                dst = dest_path + "/" + mdfile
                try:
                    urlgrabber.grabber.urlgrab(src,
                                               filename=dst,
                                               proxies=proxies,
                                               **urlgrab_ssl_opts)
                except Exception as e:
                    # BUGFIX: same str + tuple TypeError as above.
                    utils.die(self.logger,
                              "failed to fetch " + src + " " + str(e))

        # now run createrepo to rebuild the index
        if repo.mirror_locally:
            os.path.walk(dest_path, self.createrepo_walker, repo)
Exemplo n.º 46
0
from PyQt5.QtCore import QLocale
from PyQt5.QtCore import QTranslator
from PyQt5.QtWidgets import QApplication

from utils import excepthook, Options, die
from views.main_window import MainWindow
from views.login_form import LoginForm
import resources

if __name__ == '__main__':
    # Bug fix: 'sys' is used below (excepthook, argv) but is never imported
    # in this module's import block; a local import keeps the script runnable
    # and is harmless if an outer import exists.
    import sys

    # setting exception hook for pycharm
    sys.excepthook = excepthook

    app = QApplication(sys.argv)
    # Force the Russian locale before installing the matching Qt translation.
    QLocale().setDefault(QLocale(QLocale.Russian, QLocale.RussianFederation))
    qt_translator = QTranslator()
    qt_translator.load(':/qtbase_ru.qm')
    app.installTranslator(qt_translator)

    # Require a successful login before the main window is created.
    login_form = LoginForm()
    if login_form.exec() != LoginForm.Accepted:
        die()

    window = MainWindow()
    window.show()
    app.exec()

    # Persist user options after the event loop exits, then terminate.
    Options.dump()
    die()
Exemplo n.º 47
0
    def generate_netboot_iso(self,
                             imagesdir,
                             isolinuxdir,
                             profiles=None,
                             systems=None,
                             exclude_dns=None):
        """
        Copy kernels/initrds for the selected profiles/systems into the ISO
        tree and write an isolinux.cfg with one boot entry per object.

        imagesdir: staging directory for images (appears unused in this
            method -- TODO confirm it is consumed elsewhere)
        isolinuxdir: directory receiving the copied kernels/initrds and the
            generated isolinux.cfg
        profiles: optional comma-separated string of profile names to
            include; None includes all profiles
        systems: when not None, also emit a per-system section; used both as
            a membership collection and as a comma-separated string below
        exclude_dns: when truthy, omit the dns= kernel argument even when
            name servers are known

        NOTE(review): uses Python 2 only constructs (cmp-based list.sort,
        dict.has_key, iteritems) -- this module cannot run on Python 3 as-is.
        """
        self.logger.info("copying kernels and initrds for profiles")
        # copy all images in included profiles to images dir
        for profile in self.api.profiles():
            use_this = True
            if profiles is not None:
                which_profiles = profiles.split(",")
                if not profile.name in which_profiles:
                    use_this = False

            if use_this:
                dist = profile.get_conceptual_parent()
                # Xen kernels are not bootable from isolinux; skip them.
                if dist.name.lower().find("-xen") != -1:
                    self.logger.info("skipping Xen distro: %s" % dist.name)
                    continue
                distname = self.make_shorter(dist.name)
                # tempdir/isolinux/$distro/vmlinuz, initrd.img
                # FIXME: this will likely crash on non-Linux breeds
                f1 = os.path.join(isolinuxdir, "%s.krn" % distname)
                f2 = os.path.join(isolinuxdir, "%s.img" % distname)
                if not os.path.exists(dist.kernel):
                    utils.die(self.logger,
                              "path does not exist: %s" % dist.kernel)
                if not os.path.exists(dist.initrd):
                    utils.die(self.logger,
                              "path does not exist: %s" % dist.initrd)
                shutil.copyfile(dist.kernel, f1)
                shutil.copyfile(dist.initrd, f2)

        if systems is not None:
            self.logger.info("copying kernels and initrds for systems")
            # copy all images in included profiles to images dir
            for system in self.api.systems():
                if system.name in systems:
                    profile = system.get_conceptual_parent()
                    dist = profile.get_conceptual_parent()
                    if dist.name.find("-xen") != -1:
                        continue
                    distname = self.make_shorter(dist.name)
                    # tempdir/isolinux/$distro/vmlinuz, initrd.img
                    # FIXME: this will likely crash on non-Linux breeds
                    shutil.copyfile(
                        dist.kernel,
                        os.path.join(isolinuxdir, "%s.krn" % distname))
                    shutil.copyfile(
                        dist.initrd,
                        os.path.join(isolinuxdir, "%s.img" % distname))

        self.logger.info("generating a isolinux.cfg")
        isolinuxcfg = os.path.join(isolinuxdir, "isolinux.cfg")
        cfg = open(isolinuxcfg, "w+")
        cfg.write(HEADER)  # fixme, use template

        self.logger.info("generating profile list")
        # sort the profiles by name (Python 2 cmp-based sort)
        profile_list = [profile for profile in self.profiles]

        def sort_name(a, b):
            return cmp(a.name, b.name)

        profile_list.sort(sort_name)

        for profile in profile_list:
            use_this = True
            if profiles is not None:
                which_profiles = profiles.split(",")
                if not profile.name in which_profiles:
                    use_this = False

            if use_this:
                dist = profile.get_conceptual_parent()
                if dist.name.find("-xen") != -1:
                    continue
                data = utils.blender(self.api, True, profile)
                distname = self.make_shorter(dist.name)

                cfg.write("\n")
                cfg.write("LABEL %s\n" % profile.name)
                cfg.write("  MENU LABEL %s\n" % profile.name)
                cfg.write("  kernel %s.krn\n" % distname)

                # Local kickstart paths are rewritten to be served over HTTP
                # from the cobbler server.
                if data["kickstart"].startswith("/"):
                    data[
                        "kickstart"] = "http://%s/cblr/svc/op/ks/profile/%s" % (
                            data["server"], profile.name)

                append_line = "  append initrd=%s.img" % distname
                append_line = append_line + " ks=%s " % data["kickstart"]
                append_line = append_line + " %s\n" % data["kernel_options"]

                # isolinux append lines longer than 255 chars get truncated;
                # warn (but still write) when we exceed that.
                length = len(append_line)
                if length > 254:
                    self.logger.warning(
                        "append line length is greater than 254 chars: (%s chars)"
                        % length)

                cfg.write(append_line)

        if systems is not None:
            self.logger.info("generating system list")

            cfg.write("\nMENU SEPARATOR\n")

            # sort the systems by name (Python 2 cmp-based sort)
            system_list = [system for system in self.systems]

            def sort_name(a, b):
                return cmp(a.name, b.name)

            system_list.sort(sort_name)

            for system in system_list:
                use_this = False
                if systems is not None:
                    which_systems = systems.split(",")
                    if system.name in which_systems:
                        use_this = True

                if use_this:
                    profile = system.get_conceptual_parent()
                    dist = profile.get_conceptual_parent()
                    if dist.name.find("-xen") != -1:
                        continue
                    data = utils.blender(self.api, True, system)
                    distname = self.make_shorter(dist.name)

                    cfg.write("\n")
                    cfg.write("LABEL %s\n" % system.name)
                    cfg.write("  MENU LABEL %s\n" % system.name)
                    cfg.write("  kernel %s.krn\n" % distname)

                    if data["kickstart"].startswith("/"):
                        data[
                            "kickstart"] = "http://%s/cblr/svc/op/ks/system/%s" % (
                                data["server"], system.name)

                    append_line = "  append initrd=%s.img" % distname
                    append_line = append_line + " ks=%s" % data["kickstart"]
                    append_line = append_line + " %s" % data["kernel_options"]

                    # add network info to avoid DHCP only if it is available

                    if data.has_key("bonding_master_eth0"
                                    ) and data["bonding_master_eth0"] != "":
                        primary_interface = data["bonding_master_eth0"]
                    else:
                        primary_interface = "eth0"

                    # check if ksdevice entry exists and use that for network info

                    blended = utils.blender(self.api, False,
                                            system)  # don't collapse

                    if blended["kernel_options"].has_key(
                            "ksdevice"
                    ) and blended["kernel_options"]["ksdevice"] != "":
                        ksdevice = blended["kernel_options"]["ksdevice"]
                        self.logger.info(" - ksdevice %s set for system %s" %
                                         (ksdevice, system.name))

                        if data.has_key("ip_address_" +
                                        ksdevice) and data["ip_address_" +
                                                           ksdevice] != "":
                            primary_interface = ksdevice
                        else:
                            # ksdevice may be a MAC address; match it against
                            # the system's interfaces to recover the name.
                            for (obj_iname, obj_interface
                                 ) in data['interfaces'].iteritems():
                                mac = obj_interface["mac_address"].upper()
                                ksdevice_mac = ksdevice.upper()

                                if mac == ksdevice_mac:
                                    primary_interface = obj_iname

                    if data.has_key("ip_address_" + primary_interface
                                    ) and data["ip_address_" +
                                               primary_interface] != "":
                        append_line = append_line + " ip=%s" % data[
                            "ip_address_" + primary_interface]

                    if data.has_key("subnet_" + primary_interface) and data[
                            "subnet_" + primary_interface] != "":
                        append_line = append_line + " netmask=%s" % data[
                            "subnet_" + primary_interface]

                    if data.has_key("gateway") and data["gateway"] != "":
                        append_line = append_line + " gateway=%s" % data[
                            "gateway"]

                    if not exclude_dns and data.has_key(
                            "name_servers") and data["name_servers"]:
                        append_line = append_line + " dns=%s\n" % ",".join(
                            data["name_servers"])

                    length = len(append_line)
                    if length > 254:
                        self.logger.warning(
                            "append line length is greater than 254 chars: (%s chars)"
                            % length)

                    cfg.write(append_line)

        self.logger.info("done writing config")
        cfg.write("\n")
        cfg.write("MENU END\n")
        cfg.close()
Exemplo n.º 48
0
    def run(self,
            path,
            name,
            network_root=None,
            autoinstall_file=None,
            arch=None,
            breed=None,
            os_version=None):
        """
        Scan *path* for installable distribution trees and import them.

        path: the directory we are scanning for files
        name: the base name of the distro
        network_root: the remote path (nfs/http/ftp) for the distro files
        autoinstall_file: user-specified response file, which will override the default
        arch: user-specified architecture
        breed: user-specified breed
        os_version: user-specified OS version
        """
        self.name = name
        self.network_root = network_root
        self.autoinstall_file = autoinstall_file
        self.arch = arch
        self.breed = breed
        self.os_version = os_version

        self.path = path
        self.rootdir = path
        self.pkgdir = path

        # The XMLRPC interface cannot transport None, so empty strings stand
        # in for it; normalize them back here.
        for attr in ("arch", "name", "autoinstall_file", "os_version",
                     "network_root"):
            if getattr(self, attr) == "":
                setattr(self, attr, None)

        if self.os_version and not self.breed:
            utils.die(
                self.logger,
                "OS version can only be specified when a specific breed is selected"
            )

        # A matching signature is mandatory; refuse to guess at the breed.
        self.signature = self.scan_signatures()
        if not self.signature:
            error_msg = "No signature matched in %s" % path
            self.logger.error(error_msg)
            raise CX(error_msg)

        # now walk the filesystem looking for distributions that match certain patterns
        self.logger.info("Adding distros from path %s:" % self.path)
        distros_added = []
        os.path.walk(self.path, self.distro_adder, distros_added)

        if not distros_added:
            self.logger.warning("No distros imported, bailing out")
            return

        # find out if we can auto-create any repository records from the install tree
        if self.network_root is None:
            self.logger.info("associating repos")
            # FIXME: this automagic is not possible (yet) without mirroring
            self.repo_finder(distros_added)
Exemplo n.º 49
0
def check_login(options):
    """
    Probe the target URL, detect the login mechanism and return the parsed
    login information (or False when none could be determined).

    options: shared options object; this function mutates it, setting
        options.login_url / options.panel_url and options.attack_mode.

    NOTE(review): verbosity is read both as options.run_options["--verbose"]
    and options.verbose below -- confirm which one is authoritative.
    """
    try:
        from libs.mbrowser import Browser

        proc = Browser()

        resp = proc.open_url(options.url)
        """
			Check URL type. If Website directs to other URL,
			options.url is website's panel
			else: it is login url.
			Example: options.url = site.com/wp-admin/ -> panel
				site directs user to wp-login -> login URL
				options.url = site.com/wp-login.php -> login URL
		"""
        if proc.url() != options.url:
            utils.printf("[*] Website moves to: ['%s']" % (proc.url()), "norm")
            options.panel_url, options.login_url = options.url, proc.url()
        else:
            options.login_url = options.url

        # utils.printf("[*] Connect success!", "good")
        options.attack_mode = "--loginbrute"
        if options.run_options["--verbose"]:
            utils.printf("[*] %s" % (proc.get_title()), "norm")
        # utils.printf("[+] Analyzing login form....")
        # A 401 with a WWW-Authenticate header means HTTP basic/digest auth
        # rather than a form-based login.
        if resp.status_code == 401:
            if "WWW-Authenticate" in resp.headers:
                loginID = checkHTTPGetLogin(resp.headers)
                loginInfo = (loginID, ["Password", "User Name"])
                if options.verbose:
                    utils.printf("[+] Using HTTP GET Authentication mode",
                                 "norm")
                options.attack_mode = "--httpget"
            else:
                loginInfo = False
        else:
            loginInfo = parseLoginForm(proc.forms())
            # if not loginInfo:
            # 	from libs.sbrowser import sBrowser
            # 	jscheck = sBrowser()
            # 	jscheck.open_url(options.url)
            # 	loginInfo = parseLoginForm(jscheck.forms())
            # 	if loginInfo:
            # 		options.tech = "selenium"

        return loginInfo

    except Exception as error:
        loginInfo = False
        utils.die("[x] Target check:", error)

    except KeyboardInterrupt:
        loginInfo = False

    finally:
        # Best-effort cleanup of any browser instances we may have opened.
        # NOTE(review): the return inside finally suppresses any in-flight
        # exception (including KeyboardInterrupt) -- confirm this is intended.
        try:
            proc.close()
        except:
            pass
        try:
            jscheck.close()
        except:
            pass
        return loginInfo
Exemplo n.º 50
0
    def add_entry(self, dirname, kernel, initrd):
        """
        When we find a directory with a valid kernel/initrd in it, create the distribution objects
        as appropriate and save them.  This includes creating xen and rescue distros/profiles
        if possible.

        dirname: directory the kernel/initrd pair was found in
        kernel: path to the kernel file
        initrd: path to the initrd file

        Returns the list of newly created distro objects (one per detected
        architecture), or None when the path-derived arch is not present in
        the imported tree.
        """

        proposed_name = self.get_proposed_name(dirname, kernel)
        proposed_arch = self.get_proposed_arch(dirname)

        # A user-supplied arch that contradicts the arch encoded in the
        # pathname is almost certainly a mistake; bail out loudly.
        if self.arch and proposed_arch and self.arch != proposed_arch:
            utils.die(
                self.logger,
                "Arch from pathname (%s) does not match with supplied one %s" %
                (proposed_arch, self.arch))

        archs = self.learn_arch_from_tree()
        if not archs:
            # Tree scan found nothing; fall back to the user-supplied arch.
            if self.arch:
                archs.append(self.arch)
        else:
            if self.arch and self.arch not in archs:
                utils.die(
                    self.logger,
                    "Given arch (%s) not found on imported tree %s" %
                    (self.arch, self.get_pkgdir()))
        if proposed_arch:
            if archs and proposed_arch not in archs:
                self.logger.warning(
                    "arch from pathname (%s) not found on imported tree %s" %
                    (proposed_arch, self.get_pkgdir()))
                return

            # The path-derived arch wins over anything learned from the tree.
            archs = [proposed_arch]

        if len(archs) > 1:
            self.logger.warning("- Warning : Multiple archs found : %s" %
                                (archs))

        distros_added = []

        for pxe_arch in archs:
            name = proposed_name + "-" + pxe_arch
            existing_distro = self.distros.find(name=name)

            if existing_distro is not None:
                self.logger.warning(
                    "skipping import, as distro name already exists: %s" %
                    name)
                continue

            else:
                self.logger.info("creating new distro: %s" % name)
                distro = self.config.new_distro()

            if name.find("-autoboot") != -1:
                # this is an artifact of some EL-3 imports
                continue

            distro.set_name(name)
            distro.set_kernel(kernel)
            distro.set_initrd(initrd)
            distro.set_arch(pxe_arch)
            distro.set_breed(self.breed)
            # If a version was supplied on command line, we set it now
            if self.os_version:
                distro.set_os_version(self.os_version)

            self.distros.add(distro, save=True)
            distros_added.append(distro)

            existing_profile = self.profiles.find(name=name)

            # see if the profile name is already used, if so, skip it and
            # do not modify the existing profile

            if existing_profile is None:
                self.logger.info("creating new profile: %s" % name)
                # FIXME: The created profile holds a default kickstart, and should be breed specific
                profile = self.config.new_profile()
            else:
                self.logger.info(
                    "skipping existing profile, name already exists: %s" %
                    name)
                continue

            # save our minimal profile which just points to the distribution and a good
            # default answer file

            profile.set_name(name)
            profile.set_distro(name)
            profile.set_kickstart(self.kickstart_file)

            # We just set the virt type to vmware for these
            # since newer VMwares support running ESX as a guest for testing

            profile.set_virt_type("vmware")

            # save our new profile to the collection

            self.profiles.add(profile, save=True)

        return distros_added
Exemplo n.º 51
0
        action="store",
        dest="map_name",
        default="logistic",
        choices=["logistic", "cubic", "sine"],
        help="select the desired map (logistic, cubic, or sine)")

    return parser.parse_args()


def main():
    """Parse CLI arguments and plot the requested bifurcation diagram."""
    args = parse_args()
    mapobj = Map(args.map_name)

    def to_range(spec, lo, hi):
        # "1:4" -> [1.0, 4.0]; an empty spec means the full [lo, hi] range.
        if spec:
            return [float(part) for part in spec.split(':')]
        return [lo, hi]

    # Plot the entire diagram by default
    Bifurcation(to_range(args.r, mapobj.map_rmin, mapobj.map_rmax),
                to_range(args.y, mapobj.map_ymin, mapobj.map_ymax),
                args.n, args.s, args.map_name).plot()


if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C is a normal way to quit; exit with a distinct status (3).
        die(3, 'Exiting on user request')

    sys.exit()
Exemplo n.º 52
0
    def generate_standalone_iso(self, imagesdir, isolinuxdir, distname,
                                filesource):
        """
        Build the isolinux config and payload for a standalone (self-contained)
        install ISO of one distro and all of its profiles/systems.

        imagesdir: staging directory (appears unused in this method -- TODO
            confirm it is consumed by the caller)
        isolinuxdir: directory receiving isolinux.cfg, kernel/initrd and the
            per-descendant kickstart files
        distname: name of the distro to build the ISO for
        filesource: root of the install tree to copy onto the ISO; when None
            we try to derive it from the distro kernel path under
            webdir/ks_mirror
        """

        # Get the distro object for the requested distro
        # and then get all of its descendants (profiles/sub-profiles/systems)
        distro = self.api.find_distro(distname)
        if distro is None:
            utils.die(self.logger,
                      "distro %s was not found, aborting" % distname)
        descendants = distro.get_descendants()

        if filesource is None:
            # Try to determine the source from the distro kernel path
            self.logger.debug("trying to locate source for distro")
            found_source = False
            (source_head, source_tail) = os.path.split(distro.kernel)
            # Walk upward until we hit webdir/ks_mirror; the component just
            # below it is the install tree root.
            while source_tail != '':
                if source_head == os.path.join(self.api.settings().webdir,
                                               "ks_mirror"):
                    filesource = os.path.join(source_head, source_tail)
                    found_source = True
                    self.logger.debug("found source in %s" % filesource)
                    break
                (source_head, source_tail) = os.path.split(source_head)
            # Can't find the source, raise an error
            if not found_source:
                utils.die(
                    self.logger,
                    " Error, no installation source found. When building a standalone ISO, you must specify a --source if the distro install tree is not hosted locally"
                )

        self.logger.info("copying kernels and initrds for standalone distro")
        # tempdir/isolinux/$distro/vmlinuz, initrd.img
        # FIXME: this will likely crash on non-Linux breeds
        f1 = os.path.join(isolinuxdir, "vmlinuz")
        f2 = os.path.join(isolinuxdir, "initrd.img")
        if not os.path.exists(distro.kernel):
            utils.die(self.logger, "path does not exist: %s" % distro.kernel)
        if not os.path.exists(distro.initrd):
            utils.die(self.logger, "path does not exist: %s" % distro.initrd)
        shutil.copyfile(distro.kernel, f1)
        shutil.copyfile(distro.initrd, f2)

        # Mirror the install tree next to the isolinux dir, skipping boot
        # metadata that the ISO build step regenerates.
        cmd = "rsync -rlptgu --exclude=boot.cat --exclude=TRANS.TBL --exclude=isolinux/ %s/ %s/../" % (
            filesource, isolinuxdir)
        self.logger.info("- copying distro %s files (%s)" % (distname, cmd))
        rc = utils.subprocess_call(self.logger, cmd, shell=True)
        if rc:
            utils.die(self.logger, "rsync of files failed")

        self.logger.info("generating a isolinux.cfg")
        isolinuxcfg = os.path.join(isolinuxdir, "isolinux.cfg")
        cfg = open(isolinuxcfg, "w+")
        cfg.write(HEADER)  # fixme, use template

        for descendant in descendants:
            data = utils.blender(self.api, True, descendant)

            cfg.write("\n")
            cfg.write("LABEL %s\n" % descendant.name)
            cfg.write("  MENU LABEL %s\n" % descendant.name)
            cfg.write("  kernel vmlinuz\n")

            # Kickstarts are carried on the ISO itself, one per descendant.
            data["kickstart"] = "cdrom:/isolinux/ks-%s.cfg" % descendant.name

            append_line = "  append initrd=initrd.img"
            append_line = append_line + " ks=%s " % data["kickstart"]
            append_line = append_line + " %s\n" % data["kernel_options"]

            cfg.write(append_line)

            # NOTE(review): kickstart_data would be unbound here if a
            # descendant is neither a profile nor a system -- confirm the
            # descendant list can only contain those two collection types.
            if descendant.COLLECTION_TYPE == 'profile':
                kickstart_data = self.api.kickgen.generate_kickstart_for_profile(
                    descendant.name)
            elif descendant.COLLECTION_TYPE == 'system':
                kickstart_data = self.api.kickgen.generate_kickstart_for_system(
                    descendant.name)

            # Rewrite any network "url ..." install directive to install
            # from the CD instead.
            cdregex = re.compile("url .*\n", re.IGNORECASE)
            kickstart_data = cdregex.sub("cdrom\n", kickstart_data)

            ks_name = os.path.join(isolinuxdir, "ks-%s.cfg" % descendant.name)
            ks_file = open(ks_name, "w+")
            ks_file.write(kickstart_data)
            ks_file.close()

        self.logger.info("done writing config")
        cfg.write("\n")
        cfg.write("MENU END\n")
        cfg.close()

        return
Exemplo n.º 53
0
    def run(self,pkgdir,name,path,network_root=None,kickstart_file=None,rsync_flags=None,arch=None,breed=None,os_version=None):
        """
        Import a Red Hat style distribution tree rooted at *path*.

        pkgdir: package directory within the tree
        name: base name for the imported distro
        path: local directory being scanned
        network_root: remote (nfs/http/ftp) root of the distro files
        kickstart_file: user-supplied kickstart overriding the default
        rsync_flags: extra flags for mirroring
        arch / breed / os_version: user-supplied overrides

        Returns True on completion.
        """
        self.pkgdir = pkgdir
        self.name = name
        self.network_root = network_root
        self.kickstart_file = kickstart_file
        self.rsync_flags = rsync_flags
        self.arch = arch
        self.breed = breed
        self.os_version = os_version

        self.path = path
        self.rootdir = path

        # The XMLRPC interface cannot transport None, so empty strings stand
        # in for it; normalize them back here.
        for attr in ("arch", "name", "kickstart_file", "os_version",
                     "rsync_flags", "network_root"):
            if getattr(self, attr) == "":
                setattr(self, attr, None)

        # If no breed was specified on the command line, set it to "redhat" for this module
        if self.breed == None:
            self.breed = "redhat"

        # import takes a --kickstart for forcing selection that can't be used in all circumstances

        if self.kickstart_file and not self.breed:
            utils.die(self.logger,"Kickstart file can only be specified when a specific breed is selected")

        if self.os_version and not self.breed:
            utils.die(self.logger,"OS version can only be specified when a specific breed is selected")

        if self.breed and self.breed.lower() not in self.get_valid_breeds():
            utils.die(self.logger,"Supplied import breed is not supported by this module")

        # if --arch is supplied, make sure the user is not importing a path with a different
        # arch, which would just be silly.

        if self.arch:
            # validate it first
            if self.arch not in self.get_valid_arches():
                utils.die(self.logger,"arch must be one of: %s" % ", ".join(self.get_valid_arches()))

        # now walk the filesystem looking for distributions that match certain patterns

        self.logger.info("adding distros")
        distros_added = []
        # FIXME : search below self.path for isolinux configurations or known directories from TRY_LIST
        os.path.walk(self.path, self.distro_adder, distros_added)

        # find out if we can auto-create any repository records from the install tree

        if self.network_root is None:
            self.logger.info("associating repos")
            # FIXME: this automagic is not possible (yet) without mirroring
            self.repo_finder(distros_added)

        # find the most appropriate answer files for each profile object

        self.logger.info("associating kickstarts")
        self.kickstart_finder(distros_added)

        # ensure bootloaders are present
        self.api.pxegen.copy_bootloaders()

        return True
Exemplo n.º 54
0
    def rhn_sync(self, repo):
        """
        Handle mirroring of RHN repos.

        repo: the repo object being mirrored; repo.mirror is expected to be
            an rhn:// URL and repo.name must equal the channel name (the
            part after rhn://), otherwise we abort.

        Dies when --mirror-locally is disabled, since rhn:// content cannot
        be referenced remotely.
        """

        cmd = self.reposync_cmd()  # reposync command

        has_rpm_list = False  # flag indicating not to pull the whole repo

        # detect cases that require special handling

        if repo.rpm_list != "" and repo.rpm_list != []:
            has_rpm_list = True

        # create yum config file for use by reposync
        # FIXME: don't hardcode
        dest_path = os.path.join(self.settings.webdir + "/repo_mirror",
                                 repo.name)
        temp_path = os.path.join(dest_path, ".origin")

        if not os.path.isdir(temp_path):
            # FIXME: there's a chance this might break the RHN D/L case
            os.makedirs(temp_path)

        # how we invoke reposync depends on whether this is RHN content or not.

        # this is the somewhat more-complex RHN case.
        # NOTE: this requires that you have entitlements for the server and you give the mirror as rhn://$channelname
        if not repo.mirror_locally:
            utils.die(self.logger,
                      "rhn:// repos do not work with --mirror-locally=1")

        if has_rpm_list:
            self.logger.warning(
                "warning: --rpm-list is not supported for RHN content")
        rest = repo.mirror[6:]  # everything after rhn://
        # shell-quote the channel and download path before embedding them in
        # the reposync command line
        cmd = "%s %s --repo=%s --download_path=%s" % (
            cmd, self.rflags, pipes.quote(rest),
            pipes.quote(self.settings.webdir + "/repo_mirror"))
        if repo.name != rest:
            args = {"name": repo.name, "rest": rest}
            utils.die(
                self.logger,
                "ERROR: repository %(name)s needs to be renamed %(rest)s as the name of the cobbler repository must match the name of the RHN channel"
                % args)

        if repo.arch == "i386":
            # counter-intuitive, but we want the newish kernels too
            repo.arch = "i686"

        if repo.arch != "":
            cmd = "%s -a %s" % (cmd, repo.arch)

        # now regardless of whether we're doing yumdownloader or reposync
        # or whether the repo was http://, ftp://, or rhn://, execute all queued
        # commands here.  Any failure at any point stops the operation.

        if repo.mirror_locally:
            utils.subprocess_call(self.logger, cmd)

        # some more special case handling for RHN.
        # create the config file now, because the directory didn't exist earlier

        self.create_local_file(temp_path, repo, output=False)

        # now run createrepo to rebuild the index

        if repo.mirror_locally:
            os.path.walk(dest_path, self.createrepo_walker, repo)

        # create the config file the hosts will use to access the repository.

        self.create_local_file(dest_path, repo)
Exemplo n.º 55
0
def parsedsf(folder, secondary, missing, nobackup, names, f, parent):
    # Parse an X-Plane DSF scenery tile and classify every resource it
    # references (terrain, objects, polygons, networks):
    #   secondary[path] -> list of files that use that resource (in-place)
    #   missing[path]   -> list of files referencing a resource not on disk
    #   nobackup[path]  -> library-supplied resources that need no backup
    # "names" maps known library resource names to a truthy value when the
    # library actually provides them.  "parent" is unused here; the argument
    # is presumably kept so all parse* helpers share one signature.
    # NOTE: Python 2 code (file(), print statement); the reads and seeks
    # below walk a binary layout in order -- do not reorder them.
    try:
        h = file(join(folder, f), 'rb')
        # File header: magic "XPLNEDSF", little-endian version word 1, then
        # the HEAD atom (atom ids are stored byte-reversed, hence 'DAEH').
        if h.read(8) != 'XPLNEDSF' or unpack(
                '<I', h.read(4)) != (1, ) or h.read(4) != 'DAEH':
            raise IOError
        # Atom length includes the 8-byte id+length prefix itself.
        (l, ) = unpack('<I', h.read(4))
        headend = h.tell() + l - 8
        if h.read(4) != 'PORP':
            raise IOError
        h.seek(headend)  # skip the remainder of the header atom

        # Definitions Atom ('NFED' == "DEFN" byte-reversed)
        if h.read(4) != 'NFED':
            raise IOError
        (l, ) = unpack('<I', h.read(4))
        defnend = h.tell() + l - 8
        while h.tell() < defnend:
            c = h.read(4)  # sub-atom id (byte-reversed)
            (l, ) = unpack('<I', h.read(4))
            if l == 8:
                pass  # empty
            elif c in ['TRET', 'TJBO', 'YLOP', 'WTEN']:
                # TERT/OBJT/POLY/NETW: NUL-separated list of resource paths.
                objs = h.read(l - 9).split('\0')
                for o in objs:
                    # normalise Mac ':' and Windows '\' separators to '/'
                    obj = unicodeify(o.replace(':', '/').replace('\\', '/'))
                    if c == 'TJBO':
                        seq = ['', 'custom objects']  # v7 style for objs only
                    else:
                        seq = ['']
                    # try each candidate location; for-else fires when the
                    # resource was found in none of them
                    for d in seq:
                        obj2 = casepath(folder, join(d, obj))
                        if not obj2.startswith('..') and exists(
                                join(folder, obj2)):
                            if obj2 not in secondary:
                                # first sighting: recurse to register the
                                # object's own dependencies (textures etc.)
                                parseobj(folder, secondary, missing, nobackup,
                                         names, obj2, f)
                            elif f not in secondary[obj2]:
                                secondary[obj2].append(f)
                            break
                    else:
                        if obj in names:  # terrain_Water or library obj
                            if names[obj]:
                                # provided by a library: no backup required
                                if obj not in nobackup:
                                    nobackup[obj] = [f]
                                elif f not in nobackup[obj]:
                                    nobackup[obj].append(f)
                        elif obj not in missing:
                            missing[obj] = [f]
                        elif f not in missing[obj]:
                            missing[obj].append(f)

                h.read(1)  # consume the list's trailing NUL byte
            else:
                h.seek(l - 8, 1)  # unknown sub-atom: skip its payload

        h.close()
    except:
        # any parse failure (bad magic, truncated file, ...) is fatal
        if __debug__:
            print f
            print_exc()
        die("Can't read %s" % f)
Exemplo n.º 56
0
    def yum_sync(self, repo):

        """
        Handle copying of http:// and ftp:// yum repos.

        Mirrors the repo into <webdir>/repo_mirror/<name> using reposync
        (whole repo) or yumdownloader (when an explicit rpm_list limits the
        pull), fetches any extra repodata referenced by repomd.xml, rebuilds
        the index via createrepo, and writes the client-facing config file.
        Fatal problems are reported through utils.die().
        """

        repo_mirror = repo.mirror

        # warn about not having yum-utils.  We don't want to require it in the package because
        # RHEL4 and RHEL5U0 don't have it.
        if not os.path.exists("/usr/bin/reposync"):
            utils.die(self.logger, "no /usr/bin/reposync found, please install yum-utils")

        cmd = ""                  # command to run
        has_rpm_list = False      # flag indicating not to pull the whole repo

        # detect cases that require special handling
        if repo.rpm_list != "" and repo.rpm_list != []:
            has_rpm_list = True

        # create yum config file for use by reposync
        dest_path = os.path.join(self.settings.webdir + "/repo_mirror", repo.name)
        temp_path = os.path.join(dest_path, ".origin")

        if not os.path.isdir(temp_path) and repo.mirror_locally:
            # FIXME: there's a chance this might break the RHN D/L case
            os.makedirs(temp_path)

        # create the config file that yum will use for the copying
        if repo.mirror_locally:
            temp_file = self.create_local_file(temp_path, repo, output=False)

        # NOTE(review): cmd/cmd2/cmd3 below interpolate repo fields without
        # shell quoting (unlike the rhn path, which uses pipes.quote) --
        # presumably repo names/mirrors are trusted config; verify upstream.
        if not has_rpm_list and repo.mirror_locally:
            # if we have not requested only certain RPMs, use reposync
            cmd = "/usr/bin/reposync %s --config=%s --repoid=%s --download_path=%s" % (self.rflags, temp_file, repo.name, self.settings.webdir + "/repo_mirror")
            if repo.arch != "":
                if repo.arch == "x86":
                    repo.arch = "i386"      # FIX potential arch errors
                if repo.arch == "i386":
                    # counter-intuitive, but we want the newish kernels too
                    cmd = "%s -a i686" % (cmd)
                else:
                    cmd = "%s -a %s" % (cmd, repo.arch)

        elif repo.mirror_locally:

            # create the output directory if it doesn't exist
            if not os.path.exists(dest_path):
                os.makedirs(dest_path)

            use_source = ""
            if repo.arch == "src":
                use_source = "--source"

            # older yumdownloader sometimes explodes on --resolvedeps
            # if this happens to you, upgrade yum & yum-utils
            extra_flags = self.settings.yumdownloader_flags
            cmd = "/usr/bin/yumdownloader %s %s --disablerepo=* --enablerepo=%s -c %s --destdir=%s %s" % (extra_flags, use_source, repo.name, temp_file, dest_path, " ".join(repo.rpm_list))

        # now regardless of whether we're doing yumdownloader or reposync
        # or whether the repo was http://, ftp://, or rhn://, execute all queued
        # commands here.  Any failure at any point stops the operation.

        if repo.mirror_locally:
            rc = utils.subprocess_call(self.logger, cmd)
            if rc != 0:
                utils.die(self.logger, "cobbler reposync failed")

        repodata_path = os.path.join(dest_path, "repodata")

        if not os.path.exists("/usr/bin/wget"):
            utils.die(self.logger, "no /usr/bin/wget found, please install wget")

        # grab repomd.xml and use it to download any metadata we can use
        cmd2 = "/usr/bin/wget -q %s/repodata/repomd.xml -O %s/repomd.xml" % (repo_mirror, temp_path)
        rc = utils.subprocess_call(self.logger, cmd2)
        if rc == 0:
            # create our repodata directory now, as any extra metadata we're
            # about to download probably lives there
            if not os.path.isdir(repodata_path):
                os.makedirs(repodata_path)
            rmd = yum.repoMDObject.RepoMD('', "%s/repomd.xml" % (temp_path))
            for mdtype in rmd.repoData.keys():
                # don't download metadata files that are created by default
                if mdtype not in ["primary", "primary_db", "filelists", "filelists_db", "other", "other_db"]:
                    mdfile = rmd.getData(mdtype).location[1]
                    cmd3 = "/usr/bin/wget -q %s/%s -O %s/%s" % (repo_mirror, mdfile, dest_path, mdfile)
                    # BUGFIX: capture this wget's exit status.  Previously the
                    # stale "rc" from the repomd.xml fetch (always 0 inside
                    # this branch) was tested, so failures here were silent.
                    rc3 = utils.subprocess_call(self.logger, cmd3)
                    if rc3 != 0:
                        utils.die(self.logger, "wget failed")

        # now run createrepo to rebuild the index

        if repo.mirror_locally:
            os.path.walk(dest_path, self.createrepo_walker, repo)

        # create the config file the hosts will use to access the repository.

        self.create_local_file(dest_path, repo)
Exemplo n.º 57
0
    if verbose:
        # verbose flag switches the crawl logger to DEBUG level
        wl_log.setLevel(logging.DEBUG)
    else:
        wl_log.setLevel(logging.INFO)

    # Validate the given arguments
    # Read urls
    url_list = np.loadtxt(url_list_path, delimiter='\n', dtype=str)
    url_list = url_list.tolist()
    # keep only the requested slice (command-line line numbers are 1-based)
    url_list = url_list[start_line - 1:stop_line]
    torrc_dict = cm.TORRC_DEFAULT

    if not tbb_version:
        tbb_version = cm.TBB_DEFAULT_VERSION
    elif tbb_version not in cm.TBB_KNOWN_VERSIONS:
        # unsupported Tor Browser Bundle version: abort
        ut.die('Version of Tor browser is not recognized.')

    crawler = Crawler(torrc_dict, url_list, tbb_version, xvfb, capture_screen)
    wl_log.info('Command line parameters: %s' % sys.argv)

    # Run the crawl
    try:
        crawler.crawl(no_of_batches,
                      no_of_instances,
                      start_line=start_line - 1)
    except KeyboardInterrupt:
        wl_log.warning('Keyboard interrupt! Quitting...')
    except Exception as e:
        # log the full traceback but keep going to the cleanup below
        wl_log.error('Exception: \n%s' % (traceback.format_exc()))
    finally:
        # always tear down browser/tor processes, even after an error
        crawler.stop_crawl()
Exemplo n.º 58
0
    elif options.options["-U"]:
        # -U: load usernames from a file, de-duplicated via set()
        options.username = list(set(lread(options.options["-U"])))
    else:
        if options.options["-u"] in options.WORDLISTS:
            # NOTE(review): eval() builds a data.<name>_user() call from the
            # wordlist name; tolerable only because the name was checked
            # against the closed options.WORDLISTS set above -- a dict of
            # callables would be safer.
            if options.options["-u"] == "sqli":
                options.username = tuple(
                    eval("data.%s_user()" % (options.options["-u"])))
            else:
                options.username = tuple(
                    eval("data.%s_user()" % (options.options["-u"])).replace(
                        "\t", "").split("\n"))
        else:
            # -u is a path to a username file; drop empty lines
            options.username = tuple(fread(options.options["-u"]).split("\n"))
            options.username = filter(None, options.username)

    # CHECK passlist option
    if options.options["-p"] in options.WORDLISTS:
        # built-in password list via data.<name>_pass() (same eval caveat)
        options.passwd = tuple(
            eval("data.%s_pass()" % (options.options["-p"])).replace(
                "\t", "").split("\n"))
    else:
        # -p is a path to a password file; drop empty lines
        options.passwd = tuple(fread(options.options["-p"]).split("\n"))
        options.passwd = filter(None, options.passwd)

    options.report = options.run_options["--report"]
    options.verbose = options.run_options["--verbose"]


if __name__ == "__main__":
    # this module is not meant to be executed directly
    die("Oops! Wrong place", "Find other place")
Exemplo n.º 59
0
        else:
            check.check_options(options)

            if "--getproxy" in options.extras:
                # fetch a fresh proxy list before doing anything else
                getproxy.getnew(options)
                if not options.target:
                    utils.printf("[!] No URL provided! or just use tor .-. ",
                                 "good")
                    sys.exit(0)
                else:
                    if not options.run_options["--proxy"]:
                        # running unproxied: warn but continue
                        utils.printf(
                            "[!] WARNING!!! Program runs without proxy! either use tor or use \"--proxy\"!",
                            "bad")
            if not options.target:
                utils.die("[!] URL error", "A URL is required to run!")

            else:

                # To fix some stupid SSL errors.. check out --> https://stackoverflow.com/a/35960702

                try:
                    _create_unverified_https_context = ssl._create_unverified_context

                except AttributeError:
                    # Legacy Python that does not verify HTTPS certificates
                    # by default: nothing to patch
                    pass
                else:

                    # NOTE(review): this disables HTTPS certificate
                    # verification process-wide; deliberate for this tool,
                    # but unsafe to copy into general-purpose code.
                    ssl._create_default_https_context = _create_unverified_https_context
0
def parseacf(folder, secondary, missing, nobackup, names, f, parent):
    # Parse an X-Plane aircraft (.acf) file and record its referenced
    # airfoils, weapons and attached objects using the same in-place
    # bookkeeping dicts as parsedsf/parseobj (secondary / missing / nobackup).
    # Three on-disk formats are handled:
    #   * binary, first byte 'a' -> big-endian, 'i' -> little-endian
    #     (v7 and v8/v9 share the magic but use different field offsets)
    #   * v10+ text: "I"/"A" header line, then "P <key> <value>" properties
    # NOTE: Python 2 code; the hex constants below are fixed byte offsets
    # into the binary layouts -- do not reorder the reads/seeks.
    newacf = {}  # property map; only populated for the v10+ text format
    try:
        h = file(join(folder, f), 'rb')
        c = h.read(1)
        if c == 'a':
            fmt = '>'  # big-endian binary
        elif c == 'i':
            fmt = '<'  # little-endian binary
        elif c in ['I', 'A']:
            # >1000 style?
            h.seek(0)
            try:
                if not h.readline().strip()[0] in ['I', 'A']: raise IOError
                version = int(h.readline().split()[0])
                if version <= 1000 or h.readline().split()[0] != 'ACF':
                    raise IOError
                # slurp every "P <key> <value>" property; '#' starts a comment
                for line in h:
                    line = line.split('#')[0].strip()
                    if not line: continue
                    line = line.split(None, 2)
                    if line[0] == 'P':
                        newacf[line[1]] = line[2].strip()
            except:
                die("%s isn't a v7, v8, v9 or v10 X-Plane file! " % f)
        else:
            die("%s isn't a v7, v8, v9 or v10 X-Plane file! " % f)

        if newacf:
            # v10 text format: counts come from properties, and the *afl
            # names below are property-key suffixes rather than byte offsets.
            aflDIM = int(newacf['_wing/count'])
            Rafl0 = '_afl_file_R0'
            Rafl1 = '_afl_file_R1'
            Tafl0 = '_afl_file_T0'
            Tafl1 = '_afl_file_T1'
            wpnDIM = int(newacf['_wpna/count'])
            objDIM = int(newacf['_obja/count'])
        else:
            # binary format: read the version word and pick the matching
            # table of fixed byte offsets/strides for each record array
            (version, ) = unpack(fmt + 'i', h.read(4))
            if version == 8000:
                version = 800  # v8.00 format was labelled 8000
            elif version < 600 or version > 1000:
                die("%s isn't a v7, v8, v9 or v10 X-Plane file! " % f)
            elif version < 740:
                die("%s is in X-Plane %4.2f format! \n\nPlease re-save it using Plane-Maker 7.63. "
                    % (f, version / 100.0))
            elif version not in [
                    740, 810, 815, 830, 840, 860, 900, 901, 902, 920, 941
            ]:
                die("%s is in X-Plane %4.2f format! \n\nI can't read %4.2f format planes. "
                    % (f, version / 100.0, version / 100.0))

            txtLEN = 40  # fixed-width, NUL-padded filename fields
            if version < 800:
                # v7 binary layout
                aflDIM = 44
                partSTRIDE = 4
                part = 0x2acd
                aflSTRIDE = 0x28
                Rafl0 = 0x02bf1
                Rafl1 = 0x03759
                Tafl0 = 0x042c1
                Tafl1 = 0x04e29
                wpnDIM = 24
                wpnSTRIDE = 500
                wpn = 0x098c29
            elif version == 800:
                # v8.00 binary layout
                version = 800
                aflDIM = 56
                aflSTRIDE = 0x6cc
                Rafl0 = 0x26ae9
                Rafl1 = 0x26b11
                Tafl0 = 0x26b39
                Tafl1 = 0x26b61
                partSTRIDE = 0x2ee4
                part = 0x3e77d
                wpnDIM = 24
                wpnSTRIDE = 0x48
                wpn = 0x155251
            else:  # version>800
                # v8.10+ binary layout; objects array exists from 8.40 on
                aflDIM = 56
                aflSTRIDE = 0x8b4
                Rafl0 = 0x26ae9
                Rafl1 = 0x26b11
                Tafl0 = 0x26b39
                Tafl1 = 0x26b61
                partSTRIDE = 0x2ee4
                part = 0x4523d
                wpnDIM = 24
                wpnSTRIDE = 0x48
                wpn = 0x15bd11
                objDIM = 24
                objSTRIDE = 0x48  # 8.40 and later
                obj = 0x15ee31

        # --- airfoils: root/tip foil files for each wing/part -------------
        for i in range(aflDIM):
            if newacf:
                eq = '_wing/%d/' % i
            else:
                h.seek(part + i * partSTRIDE)
                (eq, ) = unpack(fmt + 'i', h.read(4))
                if not eq:
                    continue  # airfoil names not cleared if doesn't exist
            for b in [Rafl0, Rafl1, Tafl0, Tafl1]:
                if newacf:
                    if not eq + b in newacf: continue
                    name = newacf[eq + b]
                else:
                    h.seek(b + i * aflSTRIDE)
                    name = unicodeify(h.read(txtLEN).split('\0')[0].strip())
                if not name: continue
                thing = casepath(folder, join('airfoils', name))
                if exists(join(folder, thing)):
                    if not thing in secondary:
                        secondary[thing] = [f]
                    elif f not in secondary[thing]:
                        secondary[thing].append(f)
                elif name.lower() in names:
                    pass  # supplied by the simulator's own library
                elif thing not in missing:
                    missing[thing] = [f]
                elif f not in missing[thing]:
                    missing[thing].append(f)

        # --- weapons: attached weapon .wpn files --------------------------
        for i in range(wpnDIM):
            if newacf:
                eq = '_wpna/%d/_v10_att_file_stl' % i
                if not eq in newacf: continue
                name = newacf[eq]
            else:
                h.seek(wpn + i * wpnSTRIDE)
                name = unicodeify(h.read(txtLEN).split('\0')[0].strip())
            if not name: continue
            thing = casepath(folder, join('weapons', name))
            if exists(join(folder, thing)):
                if not thing in secondary:
                    secondary[thing] = [f]
                    # XXX weapon airfoils!
                    # a weapon's texture shares its base name; look for any
                    # known texture extension alongside it
                    found = False
                    for ext in textypes:
                        tex2 = casepath(folder, join('weapons',
                                                     name[:-4] + ext))
                        if exists(join(folder, tex2)):
                            found = True
                            secondary[tex2] = [thing]
                    if not found:
                        missing[join('weapons', name[:-4] + '.png')] = [thing]
                elif f not in secondary[thing]:
                    secondary[thing].append(f)
            elif name.lower() in names:
                pass
            elif thing not in missing:
                missing[thing] = [f]
            elif f not in missing[thing]:
                missing[thing].append(f)

        # attached objects only exist from 8.40 on; earlier binary formats
        # never set objDIM/obj, so bail out here for them
        if version < 840: return

        # --- attached objects: external .obj models -----------------------
        for i in range(objDIM):
            if newacf:
                eq = '_obja/%d/_v10_att_file_stl' % i
                if not eq in newacf: continue
                name = newacf[eq]
            else:
                h.seek(obj + i * objSTRIDE)
                name = unicodeify(h.read(txtLEN).split('\0')[0].strip())
            if not name: continue
            thing = casepath(folder, join('objects', name))
            if exists(join(folder, thing)):
                if not thing in secondary:
                    secondary[thing] = [f]
                    # first sighting: recurse to pick up the obj's textures
                    parseobj(folder, secondary, missing, nobackup, {}, thing,
                             f)
                elif f not in secondary[thing]:
                    secondary[thing].append(f)
            elif thing not in missing:
                missing[thing] = [f]
            elif f not in missing[thing]:
                missing[thing].append(f)

    except:
        # any parse failure (bad magic, truncated file, ...) is fatal
        if __debug__:
            print f
            print_exc()
        die("Can't read %s" % f)