Example 1
def get_metadata_from_archive(kit_archive_path: str) -> dict:
    """
    Extracts and validates kit metadata from a kit archive file.

    :param str kit_archive_path: the path to the kit archive

    :return dict: the validated kit metadata

    """
    cmd = r'tar -xjOf {} \*/kit.json'.format(kit_archive_path)
    p = TortugaSubprocess(cmd)
    p.run()

    try:
        meta_dict: dict = json.loads(p.getStdOut().decode())
        errors = KitMetadataSchema().validate(meta_dict)
        if errors:
            raise TortugaException(
                'Incomplete kit metadata: {}'.format(meta_dict)
            )

    except json.JSONDecodeError:
        raise TortugaException(
            'Invalid JSON for kit metadata: {}'.format(p.getStdOut()))

    return meta_dict
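
A minimal usage sketch (not from the source); the archive path is a placeholder, and the metadata keys match the schema used in Example 6:

meta = get_metadata_from_archive('/tmp/kit-sample-1.0-0.tar.bz2')  # placeholder path
print(meta['name'], meta['version'], meta['iteration'])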
Example 2
def unpack_archive(kit_archive_path, dest_root_dir):
    """
    :raises InvalidArgument:

    """
    logger = logging.getLogger('tortuga.kit.utils')
    logger.addHandler(logging.NullHandler())

    kit_name, kit_version, kit_iteration = \
        getKitNameVersionIteration(kit_archive_path)

    destdir = os.path.join(
        dest_root_dir, 'kit-{}'.format(
            format_kit_descriptor(kit_name, kit_version, kit_iteration)))

    if not os.path.exists(destdir):
        os.mkdir(destdir)

    logger.debug('[utils.unpack_archive()] Unpacking [%s] into [%s]',
                 kit_archive_path, destdir)

    cmd = 'tar --extract --bzip2 --strip-components 1 --file %s -C %s' % (
        kit_archive_path, destdir)

    p = TortugaSubprocess(cmd)

    p.run()

    logger.debug('[utils.unpack_archive()] Unpacked [%s] into [%s]',
                 kit_archive_path, destdir)

    return kit_name, kit_version, kit_iteration
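
A hypothetical call with placeholder paths; the function returns the kit's (name, version, iteration) tuple:

kit_name, kit_version, kit_iteration = unpack_archive(
    '/tmp/kit-sample-1.0-0.tar.bz2',  # placeholder archive path
    '/opt/tortuga/kits')              # placeholder destination root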
Example 3
    def _login_navops(self, config: TortugaScriptConfig):
        cmd = '{} login'.format(config.navops_cli)
        p = TortugaSubprocess(cmd,
                              stdin=sys.stdin,
                              stdout=sys.stdout,
                              stderr=sys.stderr)
        p.run()

        if p.getExitStatus() != 0:
            sys.exit(1)
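
Since the login shells out to the configured CLI, a pre-flight check can fail early with a clearer message. A hedged sketch, not from the source; shutil.which is standard library, and the caller would pass config.navops_cli from the example above:

import shutil
import sys

def ensure_cli(path: str) -> None:
    """Fail early if the configured CLI binary cannot be found."""
    if shutil.which(path) is None:
        sys.exit('navops CLI not found: {}'.format(path))

# e.g. ensure_cli(config.navops_cli) just before building the login command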
Example 4
File: rpm.py Project: ilumb/tortuga
    def extract_license_file(self, pkgFile, path, license_fulldir,
                             txtfile):  # pylint: disable=no-self-use
        '''
        Extract it into the license_fulldir, changing all
        slashes to dashes, removing any leading punctuation,
        and adding an extension that makes browsers happy.
        '''

        p = TortugaSubprocess('rpm2cpio %s | cpio -i --to-stdout %s > %s/%s' %
                              (pkgFile, path, license_fulldir, txtfile))

        p.run()
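
A hypothetical call, assuming an rpm_helper instance of the enclosing class; all names and paths below are placeholders. The txtfile argument carries the slash-to-dash naming convention the docstring describes:

rpm_helper.extract_license_file(
    '/tmp/example-1.0-1.x86_64.rpm',          # placeholder source RPM
    'usr/share/doc/example-1.0/COPYING',      # member path inside the cpio archive
    '/var/www/html/licenses',                 # placeholder destination directory
    'usr-share-doc-example-1.0-COPYING.txt')  # browser-friendly output name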
Example 5
File: rpm.py Project: ilumb/tortuga
    def get_package_license(self, pkgFile):  # pylint: disable=no-self-use
        '''
        Returns the package's license (BSD, GPL, etc.)
        '''

        p = TortugaSubprocess(
            'rpm -qp --queryformat "%%{LICENSE}" %s 2>/dev/null' % (pkgFile))

        p.run()

        licensetxt = p.getStdOut()

        return licensetxt
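
A hypothetical call; rpm_helper and the package path are placeholders:

license_text = rpm_helper.get_package_license('/tmp/example-1.0-1.x86_64.rpm')
print(license_text)  # e.g. 'GPLv2+', whatever the RPM's LICENSE header holds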
Example 6
def unpack_archive(kit_archive_path: str,
                   dest_root_dir: str) -> Tuple[str, str, str]:
    """
    Unpacks a kit archive into a directory.

    :param str kit_archive_path: the path to the kit archive
    :param str dest_root_dir:    the destination directory in which the
                                 archive will be extracted

    :return Tuple[str, str, str]: the kit (name, version, iteration)

    """
    meta_dict = get_metadata_from_archive(kit_archive_path)

    destdir = os.path.join(
        dest_root_dir,
        'kit-{}'.format(
            format_kit_descriptor(meta_dict['name'],
                                  meta_dict['version'],
                                  meta_dict['iteration'])
        )
    )

    if not os.path.exists(destdir):
        os.mkdir(destdir)

    logger.debug(
        '[utils.unpack_archive()] Unpacking [%s] into [%s]',
        kit_archive_path, destdir)

    #
    # Extract the file
    #
    cmd = 'tar --extract --bzip2 --strip-components 1 --file {} -C {}'.format(
        kit_archive_path, destdir)
    TortugaSubprocess(cmd).run()

    #
    # Remove world write permissions, if any
    #
    cmd = 'chmod -R a-w {}'.format(destdir)
    TortugaSubprocess(cmd).run()

    logger.debug(
        '[utils.unpack_archive()] Unpacked [%s] into [%s]',
        kit_archive_path, destdir)

    return meta_dict['name'], meta_dict['version'], meta_dict['iteration']
Example 7
File: rpm.py Project: ilumb/tortuga
    def get_rpm_license_files(self, pkgFile):  # pylint: disable=no-self-use
        '''
        Returns a list of license files found in the package
        '''

        p = TortugaSubprocess(
            'rpm2cpio %s | cpio -it | grep -e COPYING -e LICENSE || true' %
            (pkgFile))

        p.run()

        lines = p.getStdOut().split("\n")

        # cpio's listing ends with a newline, so drop trailing blank entries
        while lines and lines[-1] == '':
            lines.pop()

        return lines
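
A hedged sketch (not from the source) tying Examples 4 and 7 together: list the license files in a package, then extract each under the browser-friendly name the Example 4 docstring describes. rpm_helper and the paths are placeholders.

pkg = '/tmp/example-1.0-1.x86_64.rpm'  # placeholder package path
for member in rpm_helper.get_rpm_license_files(pkg):
    # Per the extract_license_file docstring: slashes become dashes,
    # leading punctuation is stripped, and '.txt' keeps browsers happy
    txtfile = member.replace('/', '-').lstrip('-.') + '.txt'
    rpm_helper.extract_license_file(pkg, member, '/var/www/html/licenses',
                                    txtfile)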
Example 8
    def __runClusterUpdate(self, opts=None):
        """ Run cluster update. """
        # Avoid a mutable default argument; fall back to an empty dict
        opts = opts or {}
        self._logger.debug('Update timer running, opts={}'.format(opts))

        updateCmd = os.path.join(self._cm.getBinDir(), 'run_cluster_update.sh')

        delay = 0
        updateCnt = 0
        while self.__resetIsUpdateScheduled():
            self._isUpdateRunning = True

            self._logger.debug('New cluster update delay: %s seconds' %
                               (delay))

            time.sleep(delay)
            delay += SyncManager.CLUSTER_UPDATE_DELAY_INCREASE

            # Log warning if timer has been running for too many times.
            updateCnt += 1
            self._logger.debug('Cluster update timer count: %s' % (updateCnt))

            if updateCnt > SyncManager.CLUSTER_UPDATE_WARNING_LIMIT:
                self._logger.warning(
                    'Cluster updated more than %s times using the same'
                    ' timer (possible configuration problem)' %
                    (SyncManager.CLUSTER_UPDATE_WARNING_LIMIT))

            self._logger.debug('Starting cluster update using: %s' %
                               (updateCmd))

            # Since we might sleep for a while, we need to
            # reset update flag just before we run update to avoid
            # unnecessary syncs.

            self.__resetIsUpdateScheduled()

            if 'node' in opts:
                node_update = opts['node']
                env = {
                    **os.environ, 'FACTER_node_tags_update':
                    json.dumps(node_update)
                }
                self._logger.debug('FACTER_node_tags_update={}'.format(
                    env['FACTER_node_tags_update']))
                p = TortugaSubprocess(updateCmd, env=env)
            elif 'software_profile' in opts:
                sp_update = opts['software_profile']
                env = {
                    **os.environ, 'FACTER_softwareprofile_tags_update':
                    json.dumps(sp_update)
                }
                self._logger.debug(
                    'FACTER_softwareprofile_tags_update={}'.format(
                        env['FACTER_softwareprofile_tags_update']))
                p = TortugaSubprocess(updateCmd, env=env)
            else:
                p = TortugaSubprocess(updateCmd)

            try:
                p.run()
                self._logger.debug('Cluster update successful')
                self._logger.debug('stdout: {}'.format(
                    p.getStdOut().decode().rstrip()))
                self._logger.debug('stderr: {}'.format(
                    p.getStdErr().decode().rstrip()))
            except CommandFailed:
                if p.getExitStatus() == tortugaStatus.\
                        TORTUGA_ANOTHER_INSTANCE_OWNS_LOCK_ERROR:
                    self._logger.debug(
                        'Another cluster update is already running, will'
                        ' try to reschedule it')

                    self._isUpdateRunning = False

                    self.scheduleClusterUpdate(
                        updateReason='another update already running',
                        delay=60,
                        opts=opts)

                    break
                else:
                    self._logger.error(
                        'Update command "%s" failed (exit status: %s):'
                        ' %s' % (updateCmd, p.getExitStatus(), p.getStdErr()))

            self._logger.debug('Done with cluster update')

        self._isUpdateRunning = False

        self._logger.debug('Update timer exiting')
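
For reference, the tag-update environment injection used above, in isolation. The payload and script path are placeholders; variables prefixed with FACTER_ surface as Facter facts inside the child process, which is presumably how the update script picks them up:

import json
import os

node_update = {'name': 'compute-01', 'tags': {'role': 'worker'}}  # placeholder payload
env = {
    **os.environ,
    'FACTER_node_tags_update': json.dumps(node_update),
}
TortugaSubprocess('/opt/tortuga/bin/run_cluster_update.sh', env=env).run()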
Example 9
    def __runClusterUpdate(self):
        """ Run cluster update. """
        self.getLogger().debug('Update timer running')

        updateCmd = '%s %s' % (self._sudoCmd,
                               os.path.join(self._cm.getRoot(),
                                            'bin/run_cluster_update.sh'))

        delay = 0
        updateCnt = 0
        while self.__resetIsUpdateScheduled():
            self._isUpdateRunning = True

            self.getLogger().debug('New cluster update delay: %s seconds' %
                                   (delay))

            time.sleep(delay)
            delay += SyncManager.CLUSTER_UPDATE_DELAY_INCREASE

            # Log warning if timer has been running for too many times.
            updateCnt += 1
            self.getLogger().debug('Cluster update timer count: %s' %
                                   (updateCnt))

            if updateCnt > SyncManager.CLUSTER_UPDATE_WARNING_LIMIT:
                self.getLogger().warning(
                    'Cluster updated more than %s times using the same'
                    ' timer (possible configuration problem)' %
                    (SyncManager.CLUSTER_UPDATE_WARNING_LIMIT))

            self.getLogger().debug('Starting cluster update using: %s' %
                                   (updateCmd))

            # Since we might sleep for a while, we need to
            # reset update flag just before we run update to avoid
            # unnecessary syncs.

            self.__resetIsUpdateScheduled()

            p = TortugaSubprocess(updateCmd)

            try:
                p.run()

                self.getLogger().debug('Cluster update successful')
            except CommandFailed:
                if p.getExitStatus() == tortugaStatus.\
                        TORTUGA_ANOTHER_INSTANCE_OWNS_LOCK_ERROR:
                    self.getLogger().debug(
                        'Another cluster update is already running, will'
                        ' try to reschedule it')

                    self._isUpdateRunning = False

                    self.scheduleClusterUpdate(
                        updateReason='another update already running',
                        delay=60)

                    break
                else:
                    self.getLogger().error(
                        'Update command "%s" failed (exit status: %s):'
                        ' %s' % (updateCmd, p.getExitStatus(), p.getStdErr()))

            self.getLogger().debug('Done with cluster update')

        self._isUpdateRunning = False

        self.getLogger().debug('Update timer exiting')
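
The retry loop in both variants is a simple linear backoff: the first pass runs immediately, and each rescheduled pass waits CLUSTER_UPDATE_DELAY_INCREASE seconds longer than the last. A minimal runnable sketch; the constant value and the helpers are placeholders, not the SyncManager API:

import time

CLUSTER_UPDATE_DELAY_INCREASE = 30  # placeholder; the real value lives on SyncManager

def run_update():
    """Hypothetical stand-in for the TortugaSubprocess call above."""
    print('running cluster update')

pending = 3  # pretend three updates were scheduled

delay = 0
while pending:               # stands in for __resetIsUpdateScheduled()
    pending -= 1
    time.sleep(delay)        # 0s on the first pass, then 30s, 60s, ...
    delay += CLUSTER_UPDATE_DELAY_INCREASE
    run_update()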