Example #1
 def _call_function(self, function, arglist, kwargs={}):
     succeed = 0
     while succeed == 0:
         try:
             ret = function(*arglist, **kwargs)
         except rpclib.InvalidRedirectionError:
             raise
         except xmlrpclib.Fault:
             e = sys.exc_info()[1]
             save_traceback = sys.exc_info()[2]
             try:
                 self._failover()
             except NoMoreServers:
                 f = sys.exc_info()[1]
                 raise_with_tb(e, save_traceback)  #Don't raise the NoMoreServers error, raise the error that triggered the failover.
             continue
         except (error, sslerror, herror, gaierror, timeout):
             e = sys.exc_info()[1]
             save_traceback = sys.exc_info()[2]
             try:
                 self._failover()
             except NoMoreServers:
                 raise_with_tb(e, save_traceback)
             continue
         succeed = 1 #If we get here then the function call eventually succeeded and we don't need to try again.
     return ret
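Every snippet in this collection funnels its re-raising through raise_with_tb(), the Python 2/3 compatibility helper from the Spacewalk/Uyuni usix module. As a rough, illustrative sketch of the Python 3 side of that helper (the import path and the Python 2 branch are omitted, so treat this as an assumption rather than the real implementation):

def raise_with_tb(exc, tb=None):
    # Re-raise `exc` so it carries the traceback `tb` of the original failure
    # instead of starting a fresh one here.  The real helper also copes with
    # the Python 2 three-argument raise form.
    raise exc.with_traceback(tb)

# Typical call site, mirroring the examples in this file:
#     raise_with_tb(SomeError("wrapped"), sys.exc_info()[2])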
Example #2
def _extract_pstamp_as_release(pstamp):
    """
    This function converts a PSTAMP in the format

        nameYYMMDDHHMMSS

    into a release number of the format

        YYYY.MM.DD.HH.MM

    If the PSTAMP is of an unknown format, PStampParseException is raised.
    Otherwise, the release number is returned.
    """

    if pstamp is None:
        raise PStampParseException("PSTAMP is null")

    # Extract the last 12 characters from the pstamp.  This will represent the
    # date and time.
    date_time_stamp = pstamp[-12:]
    if len(date_time_stamp) != 12:
        raise PStampParseException("Date stamp is not 12 characters.")

    # Now break the date/time stamp into a time structure.
    date_time_struct = None
    try:
        date_time_struct = time.strptime(date_time_stamp, "%y%m%d%H%M%S")
    except ValueError:
        ve = sys.exc_info()[1]
        raise_with_tb(PStampParseException("Error parsing date/time: %s" % str(ve)), sys.exc_info()[2])

    # Convert the structure into a string in the release number format.
    release_number = time.strftime("%Y.%m.%d.%H.%M", date_time_struct)

    return release_number
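For a concrete feel of the conversion, a hypothetical call (the PSTAMP value below is invented; only its trailing 12 characters matter):

# "091006143055" parses as YYMMDDHHMMSS, i.e. 2009-10-06 14:30:55,
# and is reformatted down to minute precision.
print(_extract_pstamp_as_release("solbuild091006143055"))  # -> 2009.10.06.14.30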
Example #3
def _prepare_guest_kernel_and_ramdisk(config):
    """
    Use PyGrub to extract the kernel and ramdisk from the given disk image.
    """

    disk_image = config.getConfigItem(DomainConfig.DISK_IMAGE_PATH)

    # Use pygrub to extract the initrd and the kernel from the disk image.
    (status, output) = \
        commands.getstatusoutput("%s -q %s" % (PYGRUB, disk_image))
    if status != 0:
        raise VirtualizationException("Error occurred while executing '%s' (status=%d). Output=%s" %
                                      (PYGRUB, status, output))

    # Now analyze the output and extract the names of the new kernel and initrd
    # images from it.
    (pygrub_kernel_path, pygrub_initrd_path) = \
        _extract_image_paths_from_pygrub_output(output)

    # Rename the extracted images to the names we are pointing to in the
    # configuration file.
    runtime_kernel_path = config.getConfigItem(DomainConfig.KERNEL_PATH)
    runtime_initrd_path = config.getConfigItem(DomainConfig.RAMDISK_PATH)

    try:
        os.rename(pygrub_kernel_path, runtime_kernel_path)
        os.rename(pygrub_initrd_path, runtime_initrd_path)
    except OSError:
        oe = sys.exc_info()[1]
        raise_with_tb(VirtualizationException("Error occurred while renaming runtime image paths: %s" % str(oe)),
                      sys.exc_info()[2])
Example #4
 def connect(self, reconnect=1):
     log_debug(1, "Connecting to database", self.dbtxt)
     self._fix_environment_vars()
     try:
         self.dbh = self._connect()
     except self.OracleError:
         e = sys.exc_info()[1]
         ret = self._get_oracle_error_info(e)
         if isinstance(ret, usix.StringType):
             raise_with_tb(sql_base.SQLConnectError(self.dbtxt, -1,
                                            "Unable to connect to database", ret), sys.exc_info()[2])
         (errno, errmsg) = ret[:2]
         log_error("Connection attempt failed", errno, errmsg)
         if reconnect:
             # we don't try to reconnect blindly.  We have a list of
             # known "good" failure codes that warrant a reconnect
             # attempt
             if errno in [12547]:  # lost contact
                 return self.connect(reconnect=0)
             err_args = [self.dbtxt, errno, errmsg]
             err_args.extend(list(ret[2:]))
             raise_with_tb(sql_base.SQLConnectError(*err_args), sys.exc_info()[2])
         # else, this is a reconnect attempt
         raise sql_base.SQLConnectError(*(
             [self.dbtxt, errno, errmsg,
              "Attempting Re-Connect to the database failed", ] + ret[2:])).with_traceback(sys.exc_info()[2])
     dbh_id = id(self.dbh)
     # Reset the statement cache for this database connection
     self._cursor_class._cursor_cache[dbh_id] = {}
Example #5
def _to_db_timestamp(s):
    """Convert common Solaris date convention to a unix timestamp"""

    arr = s.split('.', 2)
    if len(arr) != 3:
        return None
    y, m, d = arr

    try:
        m = int(m)
    except ValueError:
        for i, item in enumerate(_months):
            if m == item:
                break
        else:
            raise_with_tb(Exception("unknown month %s" % arr[1]), sys.exc_info()[2])
        m = i + 1

    d = int(d)
    y = int(y)
    if y < 30:
        y = 2000 + y
    elif y < 100:
        y = 1900 + y

    return time.strftime("%Y-%m-%d %H:%M:%S", (y, m, d, 0, 0, 0, 0, 1, -1))
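A couple of hypothetical calls, assuming _months is the usual list of English month abbreviations ('Jan', 'Feb', 'Mar', ...) defined elsewhere in this module:

print(_to_db_timestamp("09.10.06"))   # numeric month  -> 2009-10-06 00:00:00
print(_to_db_timestamp("97.Mar.15"))  # month by name  -> 1997-03-15 00:00:00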
Example #6
    def __getV2(self, action, dry_run=0):
        """ Fetches queued actions for the clients version 2+. """
        log_debug(3, self.server_id)
        # Get the root dir of this install
        try:
            method = getMethod.getMethod(action['method'],
                                         'server.action')
        except getMethod.GetMethodException:
            Traceback("queue.get V2")
            raise_with_tb(EmptyAction("Could not get a valid method for %s" % (
                action['method'],)), sys.exc_info()[2])
        # Call the method
        result = method(self.server_id, action['id'], dry_run)
        if result is None:
            # None are mapped to the empty list
            result = ()
        elif not isinstance(result, TupleType):
            # Everything other than a tuple is wrapped in a tuple
            result = (result, )

        xmlblob = xmlrpclib.dumps(result, methodname=action['method'])
        log_debug(5, "returning xmlblob for action", xmlblob)
        return {
            'id': action['id'],
            'action': xmlblob,
            'version': action['version'],
        }
Example #7
    def connect(self, reconnect=1):
        try:
            dsndata = {
                'dbname': self.database,
                'user': self.username,
                'password': self.password}
            if self.host is not None:
                dsndata['host'] = self.host
                dsndata['port'] = self.port
            if self.sslmode is not None and self.sslmode == 'verify-full' and self.sslrootcert is not None:
                dsndata['sslmode'] = self.sslmode
                dsndata['sslrootcert'] = self.sslrootcert
            elif self.sslmode is not None:
                raise AttributeError("Only sslmode=\"verify-full\" (or None) is supported.")
            if self.sslmode is not None and self.sslrootcert is None:
                raise AttributeError("Attribute sslrootcert needs to be set if sslmode is set.")

            self.dbh = psycopg2.connect(" ".join("%s=%s" % (k, re.escape(str(v))) for k, v in dsndata.items()))

            # convert all DECIMAL types to float (let Python choose one)
            DEC2INTFLOAT = psycopg2.extensions.new_type(psycopg2._psycopg.DECIMAL.values,
                                                        'DEC2INTFLOAT', decimal2intfloat)
            psycopg2.extensions.register_type(DEC2INTFLOAT)
        except psycopg2.Error:
            e = sys.exc_info()[1]
            if reconnect > 0:
                # Try one more time:
                return self.connect(reconnect=reconnect - 1)

            # Failed reconnect, time to error out:
            raise_with_tb(sql_base.SQLConnectError(
                self.database, e.pgcode, e.pgerror,
                "All attempts to connect to the database failed"), sys.exc_info()[2])
Example #8
    def management_remove_channel(self, dict):
        log_debug(1)
        self._get_and_validate_session(dict)

        config_channel = dict.get('config_channel')
        # XXX Validate the namespace

        row = rhnSQL.fetchone_dict(self._query_config_channel_by_label,
                                   org_id=self.org_id, label=config_channel)

        if not row:
            raise rhnFault(4009, "Channel not found")

        delete_call = rhnSQL.Procedure('rhn_config.delete_channel')

        try:
            delete_call(row['id'])
        except rhnSQL.SQLError:
            e = sys.exc_info()[1]
            errno = e.args[0]
            if errno == 2292:
                raise_with_tb(rhnFault(4005, "Cannot remove non-empty channel %s" %
                               config_channel, explain=0), sys.exc_info()[2])
            raise

        log_debug(5, "Removed:", config_channel)
        rhnSQL.commit()
        return ""
Example #9
    def get(self, name, modified=None):
        pickled = self.cache.get(name, modified)

        try:
            return cPickle.loads(pickled)
        except cPickle.UnpicklingError:
            raise_with_tb(KeyError(name), sys.exc_info()[2])
Example #10
    def _repodata_taskomatic(self, file_name):
        log_debug(3, 'repodata', file_name)

        content_type = "application/x-gzip"

        if file_name in ["repomd.xml", "comps.xml"]:
            content_type = "text/xml"
        elif file_name not in ["primary.xml.gz", "other.xml.gz",
                               "filelists.xml.gz", "updateinfo.xml.gz", "Packages.gz"]:
            log_debug(2, "Unknown repomd file requested: %s" % file_name)
            raise rhnFault(6)

        # XXX this won't be repconned or CDNd
        if file_name == "comps.xml":
            return self._repodata_python(file_name)

        file_path = "%s/%s/%s" % (CFG.REPOMD_PATH_PREFIX, self.channelName, file_name)
        rhnFlags.set('Content-Type', content_type)
        try:
            rhnFlags.set('Download-Accelerator-Path', file_path)
            return self._getFile(CFG.REPOMD_CACHE_MOUNT_POINT + "/" + file_path)
        except IOError:
            e = sys.exc_info()[1]
            # For file not found, queue up a regen, and return 404
            if e.errno == 2 and file_name != "comps.xml":
                taskomatic.add_to_repodata_queue(self.channelName,
                                                 "repodata request", file_name, bypass_filters=True)
                rhnSQL.commit()
                # This returns 404 to the client
                raise_with_tb(rhnFault(6), sys.exc_info()[2])
            raise
Example #11
def load(filename=None, file_obj=None, fd=None):
    """ Loads an MPM and returns its header and its payload """
    if filename is None and file_obj is None and fd is None:
        raise ValueError("No parameters passed")

    if filename is not None:
        f = open(filename)
    elif file_obj is not None:
        f = file_obj
    else:  # fd is not None
        f = os.fdopen(os.dup(fd), "r")

    f.seek(0, 0)

    p = MPM_Package()
    try:
        p.load(f)
    except InvalidPackageError:
        e = sys.exc_info()[1]
        try:
            return load_rpm(f)
        except InvalidPackageError:
            raise_with_tb(e, sys.exc_info()[2])
        except:
            raise_with_tb(e, sys.exc_info()[2])

    return p.header, p.payload_stream
Example #12
    def getAnyChecksum(self, info, username=None, password=None, session=None, is_source=0):
        """ returns checksum info of available packages
            also does an existence check on the filesystem.
        """
        log_debug(3)

        pkg_infos = info.get('packages')
        channels = info.get('channels', [])
        force = info.get('force', 0)
        orgid = info.get('org_id')

        if orgid == 'null':
            null_org = 1
        else:
            null_org = None

        if not session:
            org_id, force = rhnPackageUpload.authenticate(username, password,
                                                          channels=channels,
                                                          null_org=null_org,
                                                          force=force)
        else:
            try:
                org_id, force = rhnPackageUpload.authenticate_session(
                    session, channels=channels, null_org=null_org, force=force)
            except rhnSession.InvalidSessionError:
                raise_with_tb(rhnFault(33), sys.exc_info()[2])
            except rhnSession.ExpiredSessionError:
                raise_with_tb(rhnFault(34), sys.exc_info()[2])

        if is_source:
            ret = self._getSourcePackageChecksum(org_id, pkg_infos)
        else:
            ret = self._getPackageChecksum(org_id, pkg_infos)
        return ret
Example #13
def start_domain(uuid):
    """
    Boots the domain for the first time after installation is complete.
    """
    # Load the configuration file for this UUID.
    domain = DomainDirectory()
    config = domain.load_config(uuid)

    # Connect to the hypervisor.
    connection = libvirt.open(None)

    # We will attempt to determine if the domain is configured to use a
    # bootloader.  If not, we'll have to explicitly use the kernel and initrd
    # data provided in the config to start the domain.
    try:
        config.getConfigItem(DomainConfig.BOOTLOADER)
    except DomainConfigError:
        dce = sys.exc_info()[1]
        # No bootloader tag present.  Use pygrub to extract the kernel from
        # the disk image if it's Xen.  For fully virtualized guests we don't have
        # pygrub; the hypervisor directly emulates the BIOS loading the first
        # sector of the boot disk.
        if connection.getType() == 'Xen':
            # This uses pygrub which comes only with xen
            _prepare_guest_kernel_and_ramdisk(config)

    # Now, we'll restart the instance, this time using the re-create XML.
    try:
        domain = connection.createLinux(config.toXML(), 0)
    except Exception:
        e = sys.exc_info()[1]
        raise_with_tb(VirtualizationException("Error occurred while attempting to recreate domain %s: %s" %
                                              (uuid, str(e))), sys.exc_info()[2])
Example #14
def token_server_groups(server_id, tokens_obj):
    """ Handle server group subscriptions for the registration token """
    assert(isinstance(tokens_obj, ActivationTokens))
    h = rhnSQL.prepare(_query_token_server_groups)
    server_groups = {}
    for token in tokens_obj.tokens:
        token_id = token['token_id']
        h.execute(token_id=token_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            server_group_id = row['server_group_id']
            server_groups[server_group_id] = row

    # Now try to subscribe server to group
    ret = []
    for server_group_id, sg in server_groups.items():
        log_debug(4, "token server group", sg)

        try:
            join_server_group(server_id, server_group_id)
        except rhnSQL.SQLError:
            e = sys.exc_info()[1]
            log_error("Failed to add server to group", server_id,
                      server_group_id, sg["name"])
            raise_with_tb(rhnFault(80, _("Failed to add server to group %s") %
                           sg["name"]), sys.exc_info()[2])
        else:
            ret.append("Subscribed to server group '%s'" % sg["name"])
    return ret
Example #15
 def read_header(self):
     self._stream_copy(self.input_stream, self.header_data)
     try:
         self.header_data.seek(0, 0)
         self.header = deb_Header(self.header_data)
     except:
         raise_with_tb(InvalidPackageError, sys.exc_info()[2])
Example #16
def store_rhnCryptoKey(description, cert, org_id, verbosity=0):
    """ stores cert in rhnCryptoKey
        uses:
            _checkCertMatch_rhnCryptoKey
            _delete_rhnCryptoKey - not currently used
            _insertPrep_rhnCryptoKey
            _lobUpdate_rhnCryptoKey
    """
    try:
        # look for a cert match in the database
        rhn_cryptokey_id = _checkCertMatch_rhnCryptoKey(cert, description,
                                                        org_id, deleteRowYN=1,
                                                        verbosity=verbosity)
        if rhn_cryptokey_id is None:
            # nothing to do - cert matches
            return
        # insert into the database
        if rhn_cryptokey_id == -1:
            rhn_cryptokey_id = _insertPrep_rhnCryptoKey(rhn_cryptokey_id,
                                                        description, org_id)
        # write/update
        _lobUpdate_rhnCryptoKey(rhn_cryptokey_id, cert)
        rhnSQL.commit()
    except rhnSQL.sql_base.SQLError:
        raise_with_tb(CaCertInsertionError(
            "...the traceback: %s" % fetchTraceback()), sys.exc_info()[2])
Example #17
 def diff_file_revisions(self, path, config_channel_src, revision_src,
         config_channel_dst, revision_dst):
     log_debug(4)
     params = {
         'session'           : self.session,
         'path'              : path,
         'config_channel_src': config_channel_src,
         'revision_src'      : revision_src,
     }
     if config_channel_dst is not None:
         params['config_channel_dst'] = config_channel_dst
     if revision_dst is not None:
         params['revision_dst'] = revision_dst
     try:
         ret = self.rpc_call('config.management.diff', params)
     except xmlrpclib.Fault:
         e = sys.exc_info()[1]
         if e.faultCode == -4011:
             # File not present
             raise_with_tb(cfg_exceptions.RepositoryFileMissingError(e.faultString), sys.exc_info()[2])
         if e.faultCode == -4004:
             # Binary file requested
             raise_with_tb(cfg_exceptions.BinaryFileDiffError(e.faultString), sys.exc_info()[2])
         raise
     return ret
Example #18
def check_password(username, password, service):
    global __username, __password
    auth = PAM.pam()
    auth.start(service, username, __pam_conv)

    # Save the username and password in the globals; the conversation
    # function needs access to them
    __username = username
    __password = password

    try:
        try:
            auth.authenticate()
            auth.acct_mgmt()
        finally:
            # Something to be always executed - cleanup
            __username = __password = None
    except PAM.error:
        e = sys.exc_info()[1]
        resp, code = e.args[:2]
        log_error("Password check failed (%s): %s" % (code, resp))
        return 0
    except:
        raise_with_tb(rhnException('Internal PAM error'), sys.exc_info()[2])
    else:
        # Good password
        return 1
Example #19
def parse_byteranges(byterange_header, file_size=None):
    log_debug(4, "Parsing byte range", byterange_header)
    regexp = re.compile(r"^bytes\s*=\s*(.*)$")
    mo = regexp.match(byterange_header)
    if not mo:
        raise InvalidByteRangeException

    arr = mo.groups()[0].split(",")
    regexp = re.compile(r"^([^-]*)-([^-]*)$")

    if len(arr) > 1:
        # We don't support very fancy byte ranges yet
        raise UnsatisfyableByteRangeException

    mo = regexp.match(arr[0])
    if not mo:
        # Invalid byterange
        raise InvalidByteRangeException
    try:
        start, end = list(map(_str2int, mo.groups()))
    except ValueError:
        # Invalid
        raise_with_tb(InvalidByteRangeException, sys.exc_info()[2])
    if start is not None:
        if start < 0:
            # Invalid
            raise InvalidByteRangeException
        if file_size is not None:
            if start >= file_size:
                raise UnsatisfyableByteRangeException
        if end is not None:
            if start > end:
                # Invalid
                raise InvalidByteRangeException
            end = end + 1
        else:
            if file_size:
                end = file_size
    else:
        # No start specified
        if end is None:
            # Invalid
            raise InvalidByteRangeException
        if end <= 0:
            # Invalid
            raise InvalidByteRangeException
        if file_size:
            if end > file_size:
                raise UnsatisfyableByteRangeException
            start = file_size - end
            end = file_size
        else:
            start = -end
            end = None

    byteranges = (start, end)

    log_debug(4, "Request byterange", byteranges)
    return byteranges
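To illustrate the half-open (start, end) tuples this returns, a few hypothetical calls, assuming _str2int() maps an empty string to None and a digit string to an int:

parse_byteranges("bytes=0-499")                # -> (0, 500): the first 500 bytes
parse_byteranges("bytes=500-", file_size=800)  # -> (500, 800): from offset 500 to EOF
parse_byteranges("bytes=-200", file_size=800)  # -> (600, 800): the last 200 bytes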
Example #20
 def _function(self, name, ret_type):
     try:
         c = self.dbh.cursor()
     except cx_Oracle.DatabaseError:
         error = sys.exc_info()[1]
         e = error[0]
         raise_with_tb(sql_base.SQLSchemaError(e.code, e.message, e.context), sys.exc_info()[2])
     return Function(name, c, ret_type)
Example #21
    def _processFile(filename, relativeDir=None, source=None, nosig=None):
        """ Processes a file
            Returns a hash containing:
              header
              packageSize
              checksum
              relativePath
              nvrea
         """

        # Is this a file?
        if not os.access(filename, os.R_OK):
            raise UploadError("Could not stat the file %s" % filename)
        if not os.path.isfile(filename):
            raise UploadError("%s is not a file" % filename)

        # Size
        size = os.path.getsize(filename)

        try:
            a_pkg = package_from_filename(filename)
            a_pkg.read_header()
            a_pkg.payload_checksum()
            assert a_pkg.header
        except:
            raise_with_tb(UploadError("%s is not a valid package" % filename), sys.exc_info()[2])

        if nosig is None and not a_pkg.header.is_signed():
            raise UploadError("ERROR: %s: unsigned rpm (use --nosig to force)"
                              % filename)

        # Get the name, version, release, epoch, arch
        lh = []
        for k in ['name', 'version', 'release', 'epoch']:
            if k == 'epoch' and not a_pkg.header[k]:
                # Fix the epoch
                lh.append(sstr(""))
            else:
                lh.append(sstr(a_pkg.header[k]))

        if source:
            lh.append('src')
        else:
            lh.append(sstr(a_pkg.header['arch']))

        # Build the header hash to be sent
        info = {'header': Binary(a_pkg.header.unload()),
                'checksum_type': a_pkg.checksum_type,
                'checksum': a_pkg.checksum,
                'packageSize': size,
                'header_start': a_pkg.header_start,
                'header_end': a_pkg.header_end}
        if relativeDir:
            # Append the relative dir too
            info["relativePath"] = "%s/%s" % (relativeDir,
                                              os.path.basename(filename))
        info['nvrea'] = tuple(lh)
        return info
Example #22
def initDB(backend=None, host=None, port=None, username=None,
           password=None, database=None, sslmode=None, sslrootcert=None, initsecond=False):
    """
    Initialize the database.

    Either we get the backend and all parameters, which means the caller
    knows what they are doing, or we populate everything from the
    config files.

    initsecond: If set to True, initialize a second DB connection.
                By default only one DB connection is needed.
    """

    if backend is None:
        if CFG is None or not CFG.is_initialized():
            initCFG('server')
        backend = CFG.DB_BACKEND
        host = CFG.DB_HOST
        port = CFG.DB_PORT
        database = CFG.DB_NAME
        username = CFG.DB_USER
        password = CFG.DB_PASSWORD
        sslmode = None
        sslrootcert = None
        if CFG.DB_SSL_ENABLED:
            sslmode = 'verify-full'
            sslrootcert = CFG.DB_SSLROOTCERT

    if backend not in SUPPORTED_BACKENDS:
        raise rhnException("Unsupported database backend", backend)

    if port:
        port = int(port)

    # Hide the password
    add_to_seclist(password)
    try:
        if initsecond == False:
            __init__DB(backend, host, port, username, password, database, sslmode, sslrootcert)
        else:
            __init__DB2(backend, host, port, username, password, database, sslmode, sslrootcert)
#    except (rhnException, SQLError):
#        raise  # pass on, we know those ones
#    except (KeyboardInterrupt, SystemExit):
#        raise
    except SQLConnectError:
        e = sys.exc_info()[1]
        try:
            closeDB()
        except NameError:
            pass
        raise_with_tb(e, sys.exc_info()[2])
    except:
        raise
        #e_type, e_value = sys.exc_info()[:2]
        # raise rhnException("Could not initialize Oracle database connection",
        #                   str(e_type), str(e_value))
    return 0
Example #23
 def _get_item_id(prefix, name, errnum, errmsg):
     prefix_len = len(prefix)
     if name[:prefix_len] != prefix:
         raise rhnFault(errnum, errmsg % name)
     try:
         uuid = int(name[prefix_len:])
     except ValueError:
         raise_with_tb(rhnFault(errnum, errmsg % name), sys.exc_info()[2])
     return uuid
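A hypothetical call with made-up prefix and fault arguments, just to show what the helper extracts:

# Strips the prefix and converts the remainder to an int; anything that does
# not parse is turned into an rhnFault carrying errnum/errmsg instead.
_get_item_id("system-", "system-1000010000", 1003, "Invalid system id %s")  # -> 1000010000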
Example #24
def import_channels(channels, orgid=None, master=None):
    collection = ChannelCollection()
    batch = []
    org_map = None
    my_backend = diskImportLib.get_backend()
    if master:
        org_map = my_backend.lookupOrgMap(master)['master-id-to-local-id']
    for c in channels:
        try:
            timestamp = collection.get_channel_timestamp(c)
        except KeyError:
            raise_with_tb(Exception("Could not find channel %s" % c), sys.exc_info()[2])
        c_obj = collection.get_channel(c, timestamp)
        if c_obj is None:
            raise Exception("Channel not found in cache: %s" % c)

        # Check to see if we're asked to sync to an orgid,
        # make sure the org from the export is not null org,
        # finally if the orgs differ so we might wanna use
        # requested org's channel-family.
        # TODO: Move these checks somewhere more appropriate
        if not orgid and c_obj['org_id'] is not None:
            # If the src org is not present default to org 1
            orgid = DEFAULT_ORG
        if orgid is not None and c_obj['org_id'] is not None and \
                c_obj['org_id'] != orgid:
            # If we know the master this is coming from and the master org
            # has been mapped to a local org, transform org_id to the local
            # org_id. Otherwise just put it in the default org.
            if (org_map and c_obj['org_id'] in list(org_map.keys())
                    and org_map[c_obj['org_id']]):
                c_obj['org_id'] = org_map[c_obj['org_id']]
            else:
                c_obj['org_id'] = orgid
                if 'trust_list' in c_obj:
                    del c_obj['trust_list']
            for family in c_obj['families']:
                family['label'] = 'private-channel-family-' + \
                    str(c_obj['org_id'])
        # If there's a trust list on the channel, transform the org ids to
        # the local ones
        if 'trust_list' in c_obj and c_obj['trust_list']:
            trusts = []
            for trust in c_obj['trust_list']:
                if trust['org_trust_id'] in org_map:
                    trust['org_trust_id'] = org_map[trust['org_trust_id']]
                    trusts.append(trust)
            c_obj['trust_list'] = trusts

        syncLib.log(6, "Syncing Channel %s to Org %s " % (c_obj['label'], c_obj['org_id']))
        batch.append(c_obj)

    importer = channelImport.ChannelImport(batch, my_backend)
    # Don't commit just yet
    importer.will_commit = 0
    importer.run()
    return importer
Example #25
 def procedure(self, name):
     try:
         c = self.dbh.cursor()
     except cx_Oracle.DatabaseError:
         error = sys.exc_info()[1]
         e = error[0]
         raise_with_tb(sql_base.SQLSchemaError(e.code, e.message, e.context), sys.exc_info()[2])
     # Pass the cursor in so we can close it after execute()
     return self._procedure_class(name, c)
Example #26
def check_password(username, password, service):
    try:
        auth = pam.pam()
        if not auth.authenticate(username, password, service=service):
            log_error("Password check failed (%s): %s" % (auth.code, auth.reason))
            return 0
        else:
            return 1
    except:
        raise_with_tb(rhnException('Internal PAM error'), sys.exc_info()[2])
Example #27
    def get_fd(self, name, user, group, mode):
        try:
            fd = _safe_create(self.fname, user, group, mode)
        except UnreadableFileError:
            raise_with_tb(OSError("cache entry exists, but is not accessible: %s" % \
                name), sys.exc_info()[2])

        # now we have the fd open, lock it
        fcntl.lockf(fd, fcntl.LOCK_EX)
        return os.fdopen(fd, 'w')
Example #28
def action(action_name, query, server_id, action_id, dry_run=0):
    log_debug(3, action_name, dry_run)
    try:
        uuid = _get_uuid(query, action_id)
    except NoRowFoundException:
        raise_with_tb(InvalidAction("No %s actions found." % action_name.lower()), sys.exc_info()[2])
    except NoUUIDException:
        raise_with_tb(InvalidAction("%s action %s has no uuid associated with it." %
                            (action_name, str(action_id))), sys.exc_info()[2])
    return (uuid,)
Example #29
    def __init__(self, stream):
        self.packaging = 'deb'
        self.signatures = []
        self.is_source = 0
        self.deb = None

        try:
            self.deb = debfile.DebFile(stream.name)
        except Exception:
            e = sys.exc_info()[1]
            raise_with_tb(InvalidPackageError(e), sys.exc_info()[2])

        try:
            # Fill info about package
            debcontrol = self.deb.debcontrol()
            self.hdr = {
                'name': debcontrol.get_as_string('Package'),
                'arch': debcontrol.get_as_string('Architecture') + '-deb',
                'summary': debcontrol.get_as_string('Description').splitlines()[0],
                'vendor': debcontrol.get_as_string('Maintainer'),
                'package_group': debcontrol.get_as_string('Section'),
                'epoch':   '',
                'version': 0,
                'release': 0,
                'description': debcontrol.get_as_string('Description'),
            }
            for hdr_k, deb_k in [('requires', 'Depends'),
                                 ('provides', 'Provides'),
                                 ('conflicts', 'Conflicts'),
                                 ('obsoletes', 'Replaces'),
                                 ('recommends', 'Recommends'),
                                 ('suggests', 'Suggests'),
                                 ('breaks', 'Breaks'),
                                 ('predepends', 'Pre-Depends'),
                                 ('payload_size', 'Installed-Size')]:
                if deb_k in debcontrol:
                    self.hdr[hdr_k] = debcontrol.get_as_string(deb_k)
            for k in debcontrol.keys():
                if k not in self.hdr:
                    self.hdr[k] = debcontrol.get_as_string(k)

            version = debcontrol.get_as_string('Version')
            if version.find(':') != -1:
                self.hdr['epoch'], version = version.split(':')
                self.hdr['version'] = version
            if version.find('-') != -1:
                version_tmpArr = version.split('-')
                self.hdr['version'] = '-'.join(version_tmpArr[:-1])
                self.hdr['release'] = version_tmpArr[-1]
            else:
                self.hdr['version'] = version
                self.hdr['release'] = 'X'
        except Exception:
            e = sys.exc_info()[1]
            raise_with_tb(InvalidPackageError(e), sys.exc_info()[2])
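The Debian version handling at the end of the constructor is easy to miss; here is a standalone sketch of the same epoch/version/release split (the function name is invented for illustration):

def split_deb_version(version):
    # Optional "epoch:" prefix, optional "-release" suffix, 'X' when no release.
    epoch, release = '', 'X'
    if ':' in version:
        epoch, version = version.split(':', 1)
    if '-' in version:
        version, release = version.rsplit('-', 1)
    return epoch, version, release

print(split_deb_version('2:1.19.7-3'))  # -> ('2', '1.19.7', '3')
print(split_deb_version('5.10'))        # -> ('', '5.10', 'X')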
Example #30
    def put_files(self, action_id, files, upload_contents=1):
        """Inserts a set of files into the repo, as a result of a scheduled
        action"""
        log_debug(4)
        missing_files = []
        files_too_large = []
        failed_due_to_quota = []

        max_file_size = self.get_maximum_file_size()

        for file in files:
            try:
                params = self._make_file_info(file, local_path=None,
                    load_contents=upload_contents)
            except cfg_exceptions.RepositoryLocalFileError:
                missing_files.append(file)
                continue

            if upload_contents and (params['size'] > max_file_size):
                files_too_large.append(file)
                continue

            try:
                self.rpc_call('config.client.upload_file',
                    self.system_id, action_id, params)
            except xmlrpclib.Fault:
                e = sys.exc_info()[1]
                fault_code, fault_string = e.faultCode, e.faultString
                # deal with particular faults
                if fault_code == -4003:
                    # File too large
                    files_too_large.append(file)
                elif fault_code == -4014:
                    # Ran out of org quota space
                    failed_due_to_quota.append(file)
                else:
                    raise_with_tb(cfg_exceptions.RepositoryFilePushError(fault_code,
                        fault_string), sys.exc_info()[2])
            except Exception:
                traceback.print_exc()
                raise

        result = {}
        # If there are files too large to be pushed, result will have a key
        # `files_too_large'
        if len(files_too_large) > 0:
            result['files_too_large'] = files_too_large

        if len(failed_due_to_quota) > 0:
            result['failed_due_to_quota'] = failed_due_to_quota

        if len(missing_files) > 0:
            result['missing_files'] = missing_files

        return result
Example #31
def parse_file(filename, single_key=0):
    """
    parse a config file (read it in, parse its lines)
    """
    lines = read_file(filename)
    # the base case, an empty tuple component, is always present.
    ret = {(): {}}
    lineno = 0
    # okay, read the file, parse the lines one by one
    for line in lines:
        # lineno is 1-based
        lineno = lineno + 1
        try:
            (keys, values) = parse_line(line)
        except:
            raise_with_tb(
                ConfigParserError("Parse Error: <%s:%s>: '%s'" %
                                  (filename, lineno, line)),
                sys.exc_info()[2])
        if keys is None:  # We don't care about this line
            continue
        # now process the parsed line
        if single_key and len(keys) > 1:
            # Error, we should not have more than one key in this
            # config file
            #            raise ConfigParserError("Parse Error: <%s:%s>: too many keys"
            #              % (filename, lineno))
            # let's fix the faulty config=file setup...
            # XXX: needs more testing!!! (2003-04-17)
            del keys[:-1]
        # Store this line in a dictionary filled by component
        comp = tuple(keys[:-1])
        key = keys[-1]
        if comp not in ret:
            # Don't make it a UserDictCase since we know exactly we
            # already used string.lower
            ret[comp] = {}
        ret[comp][key] = (values, lineno)
    return ret
Example #32
 def get_raw_file_info(self, config_channel, repopath, revision=None):
     """ given a namepath, return the raw data  passed by the server """
     log_debug(5)
     params = {
         'session': self.session,
         'config_channel': config_channel,
         'path': repopath,
     }
     if revision is not None:
         params['revision'] = revision
     try:
         result = self.rpc_call('config.management.get_file', params)
     except xmlrpclib.Fault:
         e = sys.exc_info()[1]
         if e.faultCode == -4011:
             # File not present
             raise_with_tb(
                 cfg_exceptions.RepositoryFileMissingError(
                     config_channel, repopath),
                 sys.exc_info()[2])
         raise
     return result
Example #33
def read_file(filename):
    """
    reads a text config file and returns its lines in a list
    """
    try:
        lines = open(filename, 'rb').readlines()
        new_lines = []
        combined = ''
        for line in lines:
            # if the line isn't part of a multiline, lets add it
            if line.find('\\\n') < 0:
                combined = combined + line
                new_lines.append(combined)
                combined = ''
            else:
                combined = combined + line.replace('\\\n', ' ')
        return new_lines
    except (IOError, OSError):
        e = sys.exc_info()[1]
        raise_with_tb(
            ConfigParserError("Can not read config file", filename, e.args[1]),
            sys.exc_info()[2])
Example #34
    def send_http_headers(self, method, content_length=None):
        try:
            self.connection.connect()
        except socket.error:
            e = sys.exc_info()[1]
            raise_with_tb(ConnectionError("Error connecting", str(e)), sys.exc_info()[2])

        # Add content_length
        if 'Content-Length' not in self.headers and \
                content_length is not None:
            self.set_header('Content-Length', content_length)
        self.connection.putrequest(method)

        # Additional headers
        for hname, hval in self.headers.items():
            if not isinstance(hval, (ListType, TupleType)):
                hval = [hval]

            for v in hval:
                self.connection.putheader(str(hname), str(v))

        self.connection.endheaders()
Example #35
    def _repodata_taskomatic(self, file_name):
        log_debug(3, 'repodata', file_name)

        content_type = "application/x-gzip"

        if file_name in ["repomd.xml", "comps.xml"]:
            content_type = "text/xml"
        elif file_name not in [
                "primary.xml.gz", "other.xml.gz", "filelists.xml.gz",
                "updateinfo.xml.gz", "Packages.gz", "modules.yaml",
                "InRelease", "Release", "Release.gpg"
        ]:
            log_debug(2, "Unknown repomd file requested: %s" % file_name)
            raise rhnFault(6)

        # XXX this won't be repconned or CDNd
        if file_name in ["comps.xml", "modules.yaml"]:
            return self._repodata_python(file_name)

        file_path = "%s/%s/%s" % (CFG.REPOMD_PATH_PREFIX, self.channelName,
                                  file_name)
        rhnFlags.set('Content-Type', content_type)
        try:
            rhnFlags.set('Download-Accelerator-Path', file_path)
            return self._getFile(CFG.REPOMD_CACHE_MOUNT_POINT + "/" +
                                 file_path)
        except IOError:
            e = sys.exc_info()[1]
            # For file not found, queue up a regen, and return 404
            if e.errno == 2 and file_name != "comps.xml" and file_name != "modules.yaml":
                taskomatic.add_to_repodata_queue(self.channelName,
                                                 "repodata request",
                                                 file_name,
                                                 bypass_filters=True)
                rhnSQL.commit()
                # This returns 404 to the client
                raise_with_tb(rhnFault(6), sys.exc_info()[2])
            raise
Example #36
def load_package(package_stream):
    if package_stream.name.endswith('.deb'):
        try:
            header, payload_stream = rhn_deb.load(filename=package_stream.name)
        except:
            raise_with_tb(rhnFault(50, "Unable to load package", explain=0),
                          sys.exc_info()[2])
    else:
        try:
            header, payload_stream = rhn_mpm.load(file=package_stream)
        except:
            raise_with_tb(rhnFault(50, "Unable to load package", explain=0),
                          sys.exc_info()[2])

    payload_stream.seek(0, 0)
    if header.packaging == "mpm" or header.packaging == "deb":
        header.header_start = header.header_end = 0
        (header_start, header_end) = (0, 0)
    else:
        (header_start, header_end) = get_header_byte_range(payload_stream)
        payload_stream.seek(0, 0)

    return header, payload_stream, header_start, header_end
Example #37
 def get_file_revisions(self, config_channel, repopath):
     """
     Fetch the file's revisions
     """
     log_debug(4)
     params = {
         'session': self.session,
         'config_channel': config_channel,
         'path': repopath,
     }
     try:
         revisions = self.rpc_call('config.management.list_file_revisions',
                                   params)
     except xmlrpclib.Fault:
         e = sys.exc_info()[1]
         if e.faultCode == -4011:
             # File not present
             raise_with_tb(
                 cfg_exceptions.RepositoryFileMissingError(
                     config_channel, repopath),
                 sys.exc_info()[2])
         raise
     return revisions
Example #38
    def process_extra_data(self,
                           server_id,
                           action_id,
                           data={},
                           action_type=None):
        log_debug(4, server_id, action_id, action_type)

        if not action_type:
            # Shouldn't happen
            return

        try:
            method = getMethod.getMethod(action_type,
                                         'server.action_extra_data')
        except getMethod.GetMethodException:
            Traceback("queue.get V2")
            raise_with_tb(
                EmptyAction("Could not get a valid method for %s" %
                            action_type),
                sys.exc_info()[2])
        # Call the method
        result = method(self.server_id, action_id, data=data)
        return result
Example #39
def _extract_pstamp_as_release(pstamp):
    """
    This function converts a PSTAMP in the format

        nameYYMMDDHHMMSS

    into a release number of the format

        YYYY.MM.DD.HH.MM

    If the PSTAMP is of an unknown format, PStampParseException is raised.
    Otherwise, the release number is returned.
    """

    if pstamp is None:
        raise PStampParseException("PSTAMP is null")

    # Extract the last 12 characters from the pstamp.  This will represent the
    # date and time.
    date_time_stamp = pstamp[-12:]
    if len(date_time_stamp) != 12:
        raise PStampParseException("Date stamp is not 12 characters.")

    # Now break the date/time stamp into a time structure.
    date_time_struct = None
    try:
        date_time_struct = time.strptime(date_time_stamp, "%y%m%d%H%M%S")
    except ValueError:
        ve = sys.exc_info()[1]
        raise_with_tb(
            PStampParseException("Error parsing date/time: %s" % str(ve)),
            sys.exc_info()[2])

    # Convert the structure into a string in the release number format.
    release_number = time.strftime("%Y.%m.%d.%H.%M", date_time_struct)

    return release_number
Example #40
 def connect(self, reconnect=1):
     log_debug(1, "Connecting to database", self.dbtxt)
     self._fix_environment_vars()
     try:
         self.dbh = self._connect()
     except self.OracleError:
         e = sys.exc_info()[1]
         ret = self._get_oracle_error_info(e)
         if isinstance(ret, usix.StringType):
             raise_with_tb(
                 sql_base.SQLConnectError(self.dbtxt, -1,
                                          "Unable to connect to database",
                                          ret),
                 sys.exc_info()[2])
         (errno, errmsg) = ret[:2]
         log_error("Connection attempt failed", errno, errmsg)
         if reconnect:
             # we don't try to reconnect blindly.  We have a list of
             # known "good" failure codes that warrant a reconnect
             # attempt
             if errno in [12547]:  # lost contact
                 return self.connect(reconnect=0)
             err_args = [self.dbtxt, errno, errmsg]
             err_args.extend(list(ret[2:]))
             raise_with_tb(sql_base.SQLConnectError(*err_args),
                           sys.exc_info()[2])
         # else, this is a reconnect attempt
         raise sql_base.SQLConnectError(*([
             self.dbtxt,
             errno,
             errmsg,
             "Attempting Re-Connect to the database failed",
         ] + ret[2:])).with_traceback(sys.exc_info()[2])
     dbh_id = id(self.dbh)
     # Reset the statement cache for this database connection
     self._cursor_class._cursor_cache[dbh_id] = {}
Example #41
    def _getHeaderFromFile(self, filePath, stat_info=None):
        """ Wraps around common.rhnRepository's method, adding a caching layer
        If stat_info was already passed, don't re-stat the file
        """
        log_debug(3, filePath)
        if not CFG.CACHE_PACKAGE_HEADERS:
            return rhnRepository.Repository._getHeaderFromFile(self, filePath,
                                                               stat_info=stat_info)
        # Ignore stat_info for now - nobody sets it anyway
        stat_info = None
        try:
            stat_info = os.stat(filePath)
        except:
            raise_with_tb(rhnFault(17, "Unable to read package %s"
                               % os.path.basename(filePath)), sys.exc_info()[2])
        lastModified = stat_info[stat.ST_MTIME]

        # OK, file exists, check the cache
        cache_key = os.path.normpath("headers/" + filePath)
        header = rhnCache.get(cache_key, modified=lastModified, raw=1,
                              compressed=1)
        if header:
            # We're good to go
            log_debug(2, "Header cache HIT for %s" % filePath)
            extra_headers = {
                'X-RHN-Package-Header': os.path.basename(filePath),
            }
            self._set_last_modified(lastModified, extra_headers=extra_headers)
            return header
        log_debug(3, "Header cache MISS for %s" % filePath)
        header = rhnRepository.Repository._getHeaderFromFile(self, filePath,
                                                             stat_info=stat_info)
        if header:
            rhnCache.set(cache_key, header, modified=lastModified, raw=1,
                         compressed=1)
        return header
Example #42
    def set_cached_token(self, token):
        """ Caches current token in the auth cache.
        """
        log_debug(3)
        # Try to connect to the token-cache.
        shelf = get_auth_shelf()
        # Cache the token.
        try:
            shelf[self.__cache_proxy_key()] = token
        except:
            text = _("""\
Caching of authentication token for proxy id %s failed!
Either the authentication caching daemon is experiencing
problems, isn't running, or the token is somehow corrupt.
""") % self.__serverid
            Traceback("ProxyAuth.set_cached_token", extra=text)
            raise_with_tb(
                rhnFault(
                    1000,
                    _("SUSE Manager Proxy error (auth caching issue). "
                      "Please contact your system administrator.")),
                sys.exc_info()[2])
        log_debug(4, "successfully returning")
        return token
Example #43
def schedule_virt_host_pkg_install(server_id, action_id, dry_run=0):
    """
        ShadowAction that schedules a package installation action for the
        rhn-virtualization-host and osad packages.
    """
    log_debug(3)

    virt_host_package_name = "mgr-virtualization-host"
    messaging_package_name = "mgr-osad"

    tools_channel = SubscribedChannel(server_id, "rhn-tools")
    found_tools_channel = tools_channel.is_subscribed_to_channel()

    if not found_tools_channel:
        raise InvalidAction("System not subscribed to the Tools channel.")

    rhn_v12n_package = ChannelPackage(server_id, virt_host_package_name)

    if not rhn_v12n_package.exists():
        raise InvalidAction(
            "Could not find the mgr-virtualization-host package.")

    messaging_package = ChannelPackage(server_id, messaging_package_name)

    if not messaging_package.exists():
        raise InvalidAction("Could not find the mgr-osad package.")

    try:
        rhn_v12n_install_scheduler = PackageInstallScheduler(
            server_id, action_id, rhn_v12n_package)
        messaging_package = PackageInstallScheduler(server_id, action_id,
                                                    messaging_package)
        if (not dry_run):
            rhn_v12n_install_scheduler.schedule_package_install()
            messaging_package.schedule_package_install()
        else:
            log_debug(4, "dry run requested")
    except NoActionInfo:
        nai = sys.exc_info()[1]
        raise_with_tb(InvalidAction(str(nai)), sys.exc_info()[2])
    except PackageNotFound:
        pnf = sys.exc_info()[1]
        raise_with_tb(InvalidAction(str(pnf)), sys.exc_info()[2])
    except Exception:
        e = sys.exc_info()[1]
        raise_with_tb(InvalidAction(str(e)), sys.exc_info()[2])

    log_debug(
        3,
        "Completed scheduling install of mgr-virtualization-host and mgr-osad!"
    )
    raise ShadowAction(
        "Scheduled installation of Virtualization Host packages.")
Example #44
 def read_header(self):
     self._get_header_byte_range()
     try:
         self.header = get_package_header(file_obj=self.header_data)
     except InvalidPackageError:
         e = sys.exc_info()[1]
         raise_with_tb(InvalidPackageError(*e.args), sys.exc_info()[2])
     except error:
         e = sys.exc_info()[1]
         raise_with_tb(InvalidPackageError(e), sys.exc_info()[2])
     except:
         raise_with_tb(InvalidPackageError, sys.exc_info()[2])
     self.checksum_type = self.header.checksum_type()
Example #45
    def __call__(self, *args):
        """
        Wrap the __call__ method from the parent class to catch Oracle specific
        actions and convert them to something generic.
        """
        log_debug(2, self.name, args)
        retval = None
        try:
            retval = self._call_proc(args)
        except cx_Oracle.DatabaseError:
            e = sys.exc_info()[1]
            if not hasattr(e, "args"):
                raise_with_tb(sql_base.SQLError(self.name, args), sys.exc_info()[2])
            elif 20000 <= e[0].code <= 20999:  # error codes we know we raise as schema errors

                raise_with_tb(sql_base.SQLSchemaError(e[0].code, str(e[0])), sys.exc_info()[2])

            raise_with_tb(sql_base.SQLError(e[0].code, str(e[0])), sys.exc_info()[2])
        except cx_Oracle.NotSupportedError:
            error = sys.exc_info()[1]
            raise_with_tb(sql_base.SQLError(*error.args), sys.exc_info()[2])
        return retval
Example #46
def _call_domain_control_routine(uuid, routine_name, *args):
    """
    Call a function in a domain, optionally with a set of arguments.
    """

    # If libvirt is not available, this is a no-op.
    if not libvirt: return

    # Lookup the domain by its UUID.
    (conn, domain) = _get_domain(uuid)

    # Get a reference to the domain's control routine.
    ctrl_func = None
    try:
        ctrl_func = getattr(domain, routine_name)
    except AttributeError:
        raise_with_tb(VirtualizationException("Unknown function: %s" % routine_name), sys.exc_info()[2])

    result = 0
    try:
        if sys.version_info[0] == 3:
            result = ctrl_func(*args)
        else:
            result = apply(ctrl_func, args)
    except TypeError:
        te = sys.exc_info()[1]
        raise_with_tb(VirtualizationException("Invalid arguments (%s) to %s: %s" % (str(args), routine_name, str(te))),
                      sys.exc_info()[2])
    except libvirt.libvirtError:
        le = sys.exc_info()[1]
        raise_with_tb(VirtualizationException("LibVirt Error %s: %s" % (routine_name, str(le))), sys.exc_info()[2])

    # Handle the return code.  Anything non-zero is an error.
    if result != 0:
        raise_with_tb(VirtualizationException("Could not perform function '%s' on domain %s.  Error: %s" %
                      (routine_name, uuid, str(result))), sys.exc_info()[2])
Example #47
    def autoentitle(self):
        entitlement_hierarchy = ['enterprise_entitled']

        any_base_entitlements = 0

        for entitlement in entitlement_hierarchy:
            try:
                self._entitle(entitlement)
                any_base_entitlements = 1
            except rhnSQL.SQLSchemaError:
                e = sys.exc_info()[1]
                if e.errno == 20287:
                    # ORA-20287: (invalid_entitlement) - The server can not be
                    # entitled to the specified level
                    #
                    # ignore for now, since any_base_entitlements will throw
                    # an error at the end if not set
                    continue

                # Should not normally happen
                log_error("Failed to entitle", self.server["id"], entitlement,
                          e.errmsg)
                raise_with_tb(
                    server_lib.rhnSystemEntitlementException(
                        "Unable to entitle"),
                    sys.exc_info()[2])
            except rhnSQL.SQLError:
                e = sys.exc_info()[1]
                log_error("Failed to entitle", self.server["id"], entitlement,
                          str(e))
                raise_with_tb(
                    server_lib.rhnSystemEntitlementException(
                        "Unable to entitle"),
                    sys.exc_info()[2])
            else:
                if any_base_entitlements:
                    # All is fine
                    return
                else:
                    raise_with_tb(server_lib.rhnNoSystemEntitlementsException,
                                  sys.exc_info()[2])
Example #48
    def get_package_path_by_filename(self, fileName, channel):
        log_debug(3, fileName, channel)
        fileName = str(fileName)
        n, e, v, r, a = rhnLib.parseRPMFilename(fileName)

        h = rhnSQL.prepare(self._query_get_package_path_by_nvra)
        h.execute(name=n, version=v, release=r, epoch=e, arch=a, channel=channel)
        try:
            return _get_path_from_cursor(h)
        except InvalidPackageError:
            log_debug(4, "Error", "Non-existent package requested", fileName)
            raise_with_tb(rhnFault(17, _("Invalid RPM package %s requested") % fileName), sys.exc_info()[2])
        except NullPathPackageError:
            e = sys.exc_info()[1]
            package_id = e[0]
            log_error("Package path null for package id", package_id)
            raise_with_tb(rhnFault(17, _("Invalid RPM package %s requested") % fileName), sys.exc_info()[2])
        except MissingPackageError:
            e = sys.exc_info()[1]
            filePath = e[0]
            log_error("Package not found", filePath)
            raise_with_tb(rhnFault(17, _("Package not found")), sys.exc_info()[2])
Example #49
    def _channelPackageSubscription(self, authobj, info):
        # Authorize the org id passed
        authobj.authzOrg(info)

        packageList = info.get('packages') or []
        if not packageList:
            log_debug(1, "No packages found; done")
            return 0

        if 'channels' not in info or not info['channels']:
            log_debug(1, "No channels found; done")
            return 0

        channelList = info['channels']
        authobj.authzChannels(channelList)

        # Have to turn the channel list into a list of Channel objects
        channelList = [Channel().populate({'label': x}) for x in channelList]

        # Since we're dealing with superusers, we allow them to change the org
        # id
        # XXX check if we don't open ourselves too much (misa 20030422)
        org_id = info.get('orgId')
        if org_id == '':
            org_id = None

        batch = Collection()
        package_keys = ['name', 'version', 'release', 'epoch', 'arch']
        for package in packageList:
            for k in package_keys:
                if k not in package:
                    raise Exception("Missing key %s" % k)
                if k == 'epoch':
                    if package[k] is not None:
                        if package[k] == '':
                            package[k] = None
                        else:
                            package[k] = str(package[k])
                else:
                    package[k] = str(package[k])

            if package['arch'] == 'src' or package['arch'] == 'nosrc':
                # Source package - no reason to continue
                continue
            _checksum_sql_filter = ""
            if 'md5sum' in package:  # for old rhnpush compatibility
                package['checksum_type'] = 'md5'
                package['checksum'] = package['md5sum']

            exec_args = {
                'name': package['name'],
                'pkg_epoch': package['epoch'],
                'pkg_version': package['version'],
                'pkg_rel': package['release'],
                'pkg_arch': package['arch'],
                'orgid': org_id
            }

            if 'checksum' in package and CFG.ENABLE_NVREA:
                _checksum_sql_filter = """and c.checksum = :checksum
                                          and c.checksum_type = :checksum_type"""
                exec_args.update({
                    'checksum_type': package['checksum_type'],
                    'checksum': package['checksum']
                })

            h = rhnSQL.prepare(self._get_pkg_info_query % _checksum_sql_filter)
            h.execute(**exec_args)
            row = h.fetchone_dict()

            package['checksum_type'] = row['checksum_type']
            package['checksum'] = row['checksum']
            package['org_id'] = org_id
            package['channels'] = channelList
            batch.append(IncompletePackage().populate(package))

        caller = "server.app.channelPackageSubscription"

        backend = SQLBackend()
        importer = ChannelPackageSubscription(batch, backend, caller=caller)
        try:
            importer.run()
        except IncompatibleArchError:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(50, " ".join(e.args), explain=0),
                          sys.exc_info()[2])
        except InvalidChannelError:
            e = sys.exc_info()[1]
            raise_with_tb(rhnFault(50, str(e), explain=0), sys.exc_info()[2])

        affected_channels = importer.affected_channels

        log_debug(3, "Computing errata cache for systems affected by channels",
                  affected_channels)

        schedule_errata_cache_update(affected_channels)
        rhnSQL.commit()

        return 0
Example #50
0
    def _chown_chmod_chcon(self,
                           temp_file_path,
                           dest_path,
                           file_info,
                           strict_ownership=1):
        if file_info['filetype'] != 'symlink':
            uid = file_info.get('uid')
            if uid is None:
                if 'username' in file_info:
                    # determine uid

                    try:
                        user_record = pwd.getpwnam(file_info['username'])
                        uid = user_record[2]
                    except Exception:
                        e = sys.exc_info()[1]
                        #Check if username is an int
                        try:
                            uid = int(file_info['username'])
                        except ValueError:
                            raise_with_tb(
                                cfg_exceptions.UserNotFound(
                                    file_info['username']),
                                sys.exc_info()[2])
                else:
                    #default to root (3.2 sats)
                    uid = 0

            gid = file_info.get('gid')
            if gid is None:
                if 'groupname' in file_info:
                    # determine gid
                    try:
                        group_record = grp.getgrnam(file_info['groupname'])
                        gid = group_record[2]
                    except Exception:
                        e = sys.exc_info()[1]
                        try:
                            gid = int(file_info['groupname'])
                        except ValueError:
                            raise_with_tb(
                                cfg_exceptions.GroupNotFound(
                                    file_info['groupname']),
                                sys.exc_info()[2])

                else:
                    #default to root (3.2 sats)
                    gid = 0

        try:
            if file_info['filetype'] != 'symlink':
                os.chown(temp_file_path, uid, gid)

                mode = '600'
                if 'filemode' in file_info:
                    if file_info['filemode'] == "":
                        mode = '000'
                    else:
                        mode = file_info['filemode']

                mode = int(str(mode), 8)
                os.chmod(temp_file_path, mode)

            if 'selinux_ctx' in file_info:
                sectx = file_info.get('selinux_ctx')
                if sectx is not None and sectx != "":
                    log_debug(1, "selinux context: " + sectx)
                    try:
                        if lsetfilecon(temp_file_path, sectx) < 0:
                            raise Exception(
                                "failed to set selinux context on %s" %
                                dest_path)
                    except OSError:
                        e = sys.exc_info()[1]
                        raise_with_tb(
                            Exception(
                                "failed to set selinux context on %s" %
                                dest_path, e),
                            sys.exc_info()[2])

        except OSError:
            e = sys.exc_info()[1]
            if e.errno == errno.EPERM and not strict_ownership:
                sys.stderr.write(
                    "canonical file ownership and permissions lost on %s\n" %
                    dest_path)
            else:
                raise
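
The mode handling above keeps file modes as octal strings ('600', '644', ...) and converts them with int(value, 8) before calling os.chmod(). A small stand-alone illustration of that conversion on a throwaway temp file (assumes a POSIX system; the values are made up):

import os
import tempfile

mode_string = "640"              # as it would arrive in file_info['filemode']
mode = int(mode_string, 8)       # 0o640 == 416
print(oct(mode))                 # -> 0o640

fd, path = tempfile.mkstemp()
os.close(fd)
os.chmod(path, mode)             # same call the example makes on temp_file_path
print(oct(os.stat(path).st_mode & 0o777))
os.unlink(path)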
Example #51
0
    def dump_channel_packages_short(self,
                                    channel_label,
                                    last_modified,
                                    filepath=None,
                                    validate_channels=False,
                                    send_headers=False,
                                    open_stream=True):
        log_debug(2, channel_label)
        if validate_channels:
            channels = self._validate_channels(channel_labels=[channel_label])
            channel_obj = channels[channel_label]
        else:
            channels = channel_label
            channel_obj = channels
        db_last_modified = int(rhnLib.timestamp(channel_obj['last_modified']))
        last_modified = int(rhnLib.timestamp(last_modified))
        log_debug(3, "last modified", last_modified, "db last modified",
                  db_last_modified)
        if last_modified != db_last_modified:
            raise rhnFault(3013, "The requested channel version does not match"
                           " the upstream version",
                           explain=0)
        channel_id = channel_obj['channel_id']
        if filepath:
            key = filepath
        else:
            key = "xml-channel-packages/rhn-channel-%d.data" % channel_id
        # Try to get everything off of the cache
        val = rhnCache.get(key, compressed=0, raw=1, modified=last_modified)
        if val is None:
            # Not generated yet
            log_debug(4,
                      "Cache MISS for %s (%s)" % (channel_label, channel_id))
            stream = self._cache_channel_packages_short(
                channel_id, key, last_modified)
        else:
            log_debug(4, "Cache HIT for %s (%s)" % (channel_label, channel_id))
            temp_stream = tempfile.TemporaryFile()
            temp_stream.write(val)
            temp_stream.flush()
            stream = self._normalize_compressed_stream(temp_stream)

        # Copy the results to the output stream
        # They should already be compressed if they were requested to be
        # compressed
        buffer_size = 16384
        # Send the HTTP headers - but don't init the compressed stream since
        # we send the data ourselves
        if send_headers:
            self._send_headers(init_compressed_stream=0)
        if open_stream:
            self._raw_stream = open(key, "w")
        while 1:
            buff = stream.read(buffer_size)
            if not buff:
                break
            try:
                self._raw_stream.write(buff)
            except IOError:
                log_error("Client disconnected prematurely")
                self.close()
                raise_with_tb(ClosedConnectionError(), sys.exc_info()[2])
        # We're done
        if open_stream:
            self._raw_stream.close()
        return 0
Example #52
0
    def write_file(self, stream_in):
        """Writes the contents of stream_in to the filesystem
        Returns the file size(success) or raises FileCreationError"""
        dirname = os.path.dirname(self.full_path)
        createPath(dirname)
        stat = os.statvfs(dirname)

        f_bsize = stat[0]  # file system block size
        # misa: it's kind of icky whether to use f_bfree (free blocks) or
        # f_bavail (free blocks for non-root). f_bavail is more correct, since
        # you don't want to have the system out of disk space because of
        # satsync; but people would get confused when looking at the output of
        # df
        f_bavail = stat[4]  # free blocks
        freespace = f_bsize * float(f_bavail)
        if self.file_size is not None and self.file_size > freespace:
            msg = messages.not_enough_diskspace % (freespace / 1024)
            log(-1, msg, stream=sys.stderr)
            # pkilambi: As the metadata download doesn't check for unfetched rpms
            # abort the sync when it runs out of disk space
            sys.exit(-1)
            #raise FileCreationError(msg)
        if freespace < 5000 * 1024:  # arbitrary
            msg = messages.not_enough_diskspace % (freespace / 1024)
            log(-1, msg, stream=sys.stderr)
            # pkilambi: As the metadata download doesn't check for unfetched rpms
            # abort the sync when it runs out of disk space
            sys.exit(-1)
            #raise FileCreationError(msg)

        fout = open(self.full_path, 'wb')
        # setting file permissions; NOTE: rhnpush uses apache to write to disk,
        # hence the 6 setting.
        if rhnLib.isSUSE():
            setPermsPath(self.full_path, user='******', group='www', chmod=int('0644', 8))
        else:
            setPermsPath(self.full_path, user='******', group='apache', chmod=int('0644', 8))
        size = 0
        try:
            while 1:
                buf = stream_in.read(self.buffer_size)
                if not buf:
                    break
                buf_len = len(buf)
                fout.write(buf)
                size = size + buf_len
        except IOError:
            e = sys.exc_info()[1]
            msg = "IOError: %s" % e
            log(-1, msg, stream=sys.stderr)
            # Try not to leave garbage around
            try:
                os.unlink(self.full_path)
            except (OSError, IOError):
                pass
            raise_with_tb(FileCreationError(msg), sys.exc_info()[2])
        l_file_size = fout.tell()
        fout.close()

        if self.file_size is not None and self.file_size != l_file_size:
            # Something bad happened
            msg = "Error: file %s has wrong size. Expected %s bytes, got %s bytes" % (
                self.full_path, self.file_size, l_file_size)
            log(-1, msg, stream=sys.stderr)
            # Try not to leave garbage around
            try:
                os.unlink(self.full_path)
            except (OSError, IOError):
                pass
            raise FileCreationError(msg)

        os.utime(self.full_path, (self.timestamp, self.timestamp))
        return l_file_size
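
write_file() above estimates free space as block size times available blocks (statvfs indices 0 and 4, i.e. f_bsize and f_bavail) before writing anything. A short sketch of the same check using the named statvfs attributes; f_frsize is generally the safer multiplier, and the reserve threshold here is as arbitrary as the one above:

import os

def has_enough_space(path, needed_bytes, reserve=5000 * 1024):
    # Free space available to unprivileged processes, in bytes
    st = os.statvfs(path)
    available = st.f_frsize * st.f_bavail
    return available - needed_bytes > reserve

print(has_enough_space("/tmp", 10 * 1024 * 1024))  # assumes /tmp exists (POSIX only)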
Example #53
0
    def __db_update_domain(self, host_id, uuid, properties, existing_row):

        # First, update the rhnVirtualInstance table.  If a guest domain was
        # registered but its host was not, it is possible that the
        # rhnVirtualInstance table's host_system_id column is null.  We'll
        # update that now, if need be.

        # __db_get_domain is responsible for ensuring that the org for any
        # existing_row matches the org for host_id

        new_values_array = []
        bindings = {}

        if not existing_row.get('confirmed'):
            new_values_array.append('confirmed=1')

        if existing_row['host_system_id'] != host_id:
            new_values_array.append('host_system_id=:host_id')
            bindings['host_id'] = host_id

        # Only touch the database if something changed.
        if new_values_array:
            new_values = ', '.join(new_values_array)

            bindings['row_id'] = existing_row['rvi_id']

            update_sql = """
                UPDATE rhnVirtualInstance SET %s WHERE id=:row_id
            """ % (new_values)
            query = rhnSQL.prepare(update_sql)

            try:
                query.execute(**bindings)
            except rhnSQL.SQLError:
                e = sys.exc_info()[1]
                log_error(str(e))
                raise_with_tb(VirtualizationEventError(str(e)), sys.exc_info()[2])

        # Now update the rhnVirtualInstanceInfo table.

        new_values_array = []
        bindings = {}

        if PropertyType.NAME in properties and \
           existing_row['name'] != properties[PropertyType.NAME]:
            new_values_array.append('name=:name')
            bindings['name'] = properties[PropertyType.NAME]

        if PropertyType.VCPUS in properties and \
           existing_row['vcpus'] != properties[PropertyType.VCPUS]:
            new_values_array.append('vcpus=:vcpus')
            bindings['vcpus'] = properties[PropertyType.VCPUS]

        if PropertyType.MEMORY in properties and \
           existing_row['memory_size_k'] != properties[PropertyType.MEMORY]:
            new_values_array.append('memory_size_k=:memory')
            bindings['memory'] = properties[PropertyType.MEMORY]

        if PropertyType.TYPE in properties and \
           existing_row['instance_type'] != properties[PropertyType.TYPE]:
            new_values_array.append("""
                instance_type = (
                    select rvit.id
                    from rhnVirtualInstanceType rvit
                    where rvit.label = :virt_type)
            """)
            bindings['virt_type'] = properties[PropertyType.TYPE]

        if PropertyType.STATE in properties and \
           existing_row['state'] != properties[PropertyType.STATE]:
            new_values_array.append("""
                state = (
                    SELECT rvis.id
                    FROM rhnVirtualInstanceState rvis
                    WHERE rvis.label = :state)
            """)
            bindings['state'] = properties[PropertyType.STATE]

        # Only touch the database if something changed.
        if new_values_array:
            new_values = ', '.join(new_values_array)

            bindings['row_id'] = existing_row['instance_id']

            update_sql = """
                UPDATE rhnVirtualInstanceInfo SET %s WHERE instance_id=:row_id
            """ % (new_values)
            query = rhnSQL.prepare(update_sql)
            query.execute(**bindings)
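
__db_update_domain() builds its SET clause dynamically from only the columns that actually changed and passes the values as bind variables. A stripped-down sketch of that pattern outside of rhnSQL (the table and column names here are purely illustrative):

def build_update(table, changes, key_column, key_value):
    # changes maps column name -> new value; unchanged columns never appear
    assignments = []
    bindings = {}
    for column, value in changes.items():
        assignments.append("%s = :%s" % (column, column))
        bindings[column] = value
    if not assignments:
        return None, {}          # nothing changed, skip the database entirely
    sql = "UPDATE %s SET %s WHERE %s = :row_id" % (
        table, ", ".join(assignments), key_column)
    bindings["row_id"] = key_value
    return sql, bindings

print(build_update("rhnVirtualInstanceInfo",
                   {"vcpus": 4, "name": "guest01"},
                   "instance_id", 42))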
Example #54
0
def push_package(a_pkg,
                 org_id=None,
                 force=None,
                 channels=[],
                 relative_path=None):
    """Uploads a package"""

    # First write the package to the filesystem to final location
    try:
        importLib.move_package(a_pkg.payload_stream.name,
                               basedir=CFG.MOUNT_POINT,
                               relpath=relative_path,
                               checksum_type=a_pkg.checksum_type,
                               checksum=a_pkg.checksum,
                               force=1)
    except OSError:
        e = sys.exc_info()[1]
        raise_with_tb(rhnFault(50, "Package upload failed: %s" % e),
                      sys.exc_info()[2])
    except importLib.FileConflictError:
        raise_with_tb(rhnFault(50, "File already exists"), sys.exc_info()[2])
    except:
        raise_with_tb(rhnFault(50, "File error"), sys.exc_info()[2])

    pkg = mpmSource.create_package(a_pkg.header,
                                   size=a_pkg.payload_size,
                                   checksum_type=a_pkg.checksum_type,
                                   checksum=a_pkg.checksum,
                                   relpath=relative_path,
                                   org_id=org_id,
                                   header_start=a_pkg.header_start,
                                   header_end=a_pkg.header_end,
                                   channels=channels)

    batch = importLib.Collection()
    batch.append(pkg)

    backend = SQLBackend()

    if force:
        upload_force = 4
    else:
        upload_force = 0
    importer = packageImport.packageImporter(batch,
                                             backend,
                                             source=a_pkg.header.is_source,
                                             caller="server.app.uploadPackage")
    importer.setUploadForce(upload_force)
    importer.run()

    package = batch[0]
    log_debug(5, "Package diff", package.diff)

    if package.diff and not force and package.diff.level > 1:
        # Packages too different; bail out
        log_debug(1, "Packages too different", package.toDict(), "Level:",
                  package.diff.level)
        pdict = package.toDict()
        orig_path = package['path']
        orig_path = os.path.join(CFG.MOUNT_POINT, orig_path)
        log_debug(4, "Original package", orig_path)

        # MPMs do not store their headers on disk, so we must avoid performing
        # operations which rely on information only contained in the headers
        # (such as header signatures).
        if os.path.exists(orig_path) and a_pkg.header.packaging != 'mpm':
            oh = rhn_pkg.get_package_header(orig_path)
            _diff_header_sigs(a_pkg.header, oh, pdict['diff']['diff'])

        return pdict, package.diff.level

    # Remove any pending scheduled file deletion for this package
    h = rhnSQL.prepare("""
        delete from rhnPackageFileDeleteQueue where path = :path
    """)
    h.execute(path=relative_path)

    if package.diff and not force and package.diff.level:
        # No need to copy it - just the path is modified
        # pkilambi bug#180347
        # case 1: the path exists in the db and also on the file system,
        #         so there is no need to copy
        # case 2: the file exists on the file system but the path is not in
        #         the db, so add the relative path to the db based on the
        #         checksum of the pkg
        # case 3: no file on the file system but the path exists in the db,
        #         so write the file to the file system
        # case 4: no file on the FS and no path in the db, so write both
        orig_path = package['path']
        orig_path = os.path.join(CFG.MOUNT_POINT, orig_path)
        log_debug(3, "Original package", orig_path)

        # check included to query for source and binary rpms
        h_path_sql = """
            select ps.path path
                from %s ps,
                     rhnChecksumView c
            where
                c.checksum = :csum
            and c.checksum_type = :ctype
            and ps.checksum_id = c.id
            and (ps.org_id = :org_id or
                 (ps.org_id is null and :org_id is null)
                )
            """
        if a_pkg.header.is_source:
            h_package_table = 'rhnPackageSource'
        else:
            h_package_table = 'rhnPackage'
        h_path = rhnSQL.prepare(h_path_sql % h_package_table)
        h_path.execute(ctype=a_pkg.checksum_type,
                       csum=a_pkg.checksum,
                       org_id=org_id)

        rs_path = h_path.fetchall_dict()
        path_dict = {}
        if rs_path:
            path_dict = rs_path[0]

        if os.path.exists(orig_path) and path_dict.get('path'):
            return {}, 0
        elif not path_dict.get('path'):
            h_upd = rhnSQL.prepare("""
            update rhnpackage
               set path = :path
            where checksum_id = (
                        select id from rhnChecksumView c
                                 where c.checksum = :csum
                                   and c.checksum_type = :ctype)
            """)
            h_upd.execute(path=relative_path,
                          ctype=a_pkg.checksum_type,
                          csum=a_pkg.checksum)

    # commit the transactions
    rhnSQL.commit()
    if not a_pkg.header.is_source:
        # Process Package Key information
        server_packages.processPackageKeyAssociations(a_pkg.header,
                                                      a_pkg.checksum_type,
                                                      a_pkg.checksum)

    if not a_pkg.header.is_source:
        errataCache.schedule_errata_cache_update(importer.affected_channels)

    log_debug(2, "Returning")
    return {}, 0
Example #55
0
def poll_hypervisor():
    """
    This function polls the hypervisor for information about the currently
    running set of domains.  It returns a dictionary object that looks like the
    following:

    { uuid : { 'name'        : '...',
               'uuid'        : '...',
               'virt_type'   : '...',
               'memory_size' : '...',
               'vcpus'       : '...',
               'state'       : '...' }, ... }
    """
    if not libvirt:
        return {}

    try:
        conn = libvirt.openReadOnly(None)
    except libvirt.libvirtError:
        # virConnectOpen() failed
        sys.stderr.write(rhncli.utf8_encode(_("Warning: Could not retrieve virtualization information!\n\t" +
                                              "libvirtd service needs to be running.\n")))
        conn = None

    if not conn:
        # No connection to hypervisor made
        return {}

    domainIDs = conn.listDomainsID()

    state = {}

    for domainID in domainIDs:
        try:
            domain = conn.lookupByID(domainID)
        except libvirt.libvirtError:
            lve = sys.exc_info()[1]
            raise_with_tb(VirtualizationException("Failed to obtain handle to domain %d: %s" % (domainID, repr(lve))),
                          sys.exc_info()[2])

        uuid = binascii.hexlify(domain.UUID())
        # SEE: http://libvirt.org/html/libvirt-libvirt.html#virDomainInfo
        # for more info.
        domain_info = domain.info()

        # Set the virtualization type.  We can tell if the domain is fully virt
        # by checking the domain's OSType() attribute.
        virt_type = VirtualizationType.PARA
        if is_fully_virt(domain):
            virt_type = VirtualizationType.FULLY

        # we need to filter out the small per/minute KB changes
        # that occur inside a vm.  To do this we divide by 1024 to
        # drop our precision down to megabytes with an int then
        # back up to KB
        memory = int(domain_info[2] / 1024)
        memory = memory * 1024
        properties = {
            PropertyType.NAME   : domain.name(),
            PropertyType.UUID   : uuid,
            PropertyType.TYPE   : virt_type,
            PropertyType.MEMORY : str(memory), # current memory
            PropertyType.VCPUS  : domain_info[3],
            PropertyType.STATE  : VIRT_STATE_NAME_MAP[domain_info[0]] }

        state[uuid] = properties

    if state: _log_debug("Polled state: %s" % repr(state))

    return state
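
The dictionary returned by poll_hypervisor() is keyed by domain UUID, with per-domain properties as described in the docstring. A tiny, self-contained consumer of that shape (the values below are made up for illustration):

state = {
    "4dea22b31d52d8f32516782e98ab3fa0": {
        "name": "guest01",
        "uuid": "4dea22b31d52d8f32516782e98ab3fa0",
        "virt_type": "para_virtualized",
        "memory_size": "524288",
        "vcpus": 2,
        "state": "running",
    },
}

for uuid, props in state.items():
    print("%s: %s, %s kB, %s vcpu(s), %s" % (
        uuid, props["name"], props["memory_size"], props["vcpus"], props["state"]))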
Example #56
0
    def __request(self, methodname, params):
        # pylint: disable=R0915
        log_debug(6, methodname, params)
        # Init the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        try:
            sock.connect(self.serverAddr)
        except socket.error as e:
            sock.close()
            methodname = None
            log_error("Error connecting to the auth cache: %s" % str(e))
            Traceback("Shelf.__request",
                      extra="""
              Error connecting to the authentication cache daemon.
              Make sure it is started on %s""" % str(self.serverAddr))
            # FIXME: PROBLEM: this rhnFault will never reach the client
            raise_with_tb(
                rhnFault(
                    1000,
                    _("Spacewalk Proxy error (issues connecting to auth cache). "
                      "Please contact your system administrator")),
                sys.exc_info()[2])

        wfile = sock.makefile("w")

        try:
            send(wfile, methodname, None, *params)
        except CommunicationError:
            wfile.close()
            sock.close()
            Traceback("Shelf.__request",
                      extra="Encountered a CommunicationError")
            raise
        except socket.error as e:
            wfile.close()
            sock.close()
            log_error("Error communicating to the auth cache: %s" % str(e))
            Traceback("Shelf.__request",
                      extra="""\
                     Error sending to the authentication cache daemon.
                     Make sure the authentication cache daemon is started""")
            # FIXME: PROBLEM: this rhnFault will never reach the client
            raise_with_tb(
                rhnFault(
                    1000,
                    _("Spacewalk Proxy error (issues connecting to auth cache). "
                      "Please contact your system administrator")),
                sys.exc_info()[2])

        wfile.close()

        rfile = sock.makefile("r")
        try:
            params, methodname = recv(rfile)
        except CommunicationError as e:
            log_error(e.faultString)
            rfile.close()
            sock.close()
            log_error("Error communicating to the auth cache: %s" % str(e))
            Traceback("Shelf.__request",
                      extra="""\
                      Error receiving from the authentication cache daemon.
                      Make sure the authentication cache daemon is started""")
            # FIXME: PROBLEM: this rhnFault will never reach the client
            raise_with_tb(
                rhnFault(
                    1000,
                    _("Spacewalk Proxy error (issues communicating to auth cache). "
                      "Please contact your system administrator")),
                sys.exc_info()[2])
        except Fault as e:
            rfile.close()
            sock.close()
            # If e.faultCode is 0, it's another exception
            if e.faultCode != 0:
                # Treat it as a regular xmlrpc fault
                raise

            _dict = e.faultString
            if not isinstance(_dict, type({})):
                # Not the expected type
                raise

            if 'name' not in _dict:
                # Doesn't look like a marshalled exception
                raise

            name = _dict['name']
            args = _dict.get('args')
            # Look up the exception
            if not hasattr(__builtins__, name):
                # Unknown exception name
                raise

            # Instantiate the exception object
            import new
            _dict = {'args': args}
            # pylint: disable=bad-option-value,nonstandard-exception
            raise_with_tb(new.instance(getattr(__builtins__, name), _dict),
                          sys.exc_info()[2])

        return params[0]
Example #57
0
    def _prepare(self, force=None):
        try:
            return sql_base.Cursor._prepare(self, force)
        except self.OracleError:
            e = sys.exc_info()[1]
            raise_with_tb(self._build_exception(e), sys.exc_info()[2])
Example #58
0
    def _processFile(filename, relativeDir=None, source=None, nosig=None):
        """ Processes a file
            Returns a hash containing:
              header
              packageSize
              checksum
              relativePath
              nvrea
         """

        # Is this a file?
        if not os.access(filename, os.R_OK):
            raise UploadError("Could not stat the file %s" % filename)
        if not os.path.isfile(filename):
            raise UploadError("%s is not a file" % filename)

        # Size
        size = os.path.getsize(filename)

        try:
            a_pkg = package_from_filename(filename)
            a_pkg.read_header()
            a_pkg.payload_checksum()
            assert a_pkg.header
        except:
            raise_with_tb(UploadError("%s is not a valid package" % filename),
                          sys.exc_info()[2])

        if nosig is None and not a_pkg.header.is_signed():
            raise UploadError(
                "ERROR: %s: unsigned rpm (use --nosig to force)" % filename)

        # Get the name, version, release, epoch, arch
        lh = []
        for k in ['name', 'version', 'release', 'epoch']:
            if k == 'epoch' and not a_pkg.header[k]:
                # Fix the epoch
                lh.append(sstr(""))
            else:
                lh.append(sstr(a_pkg.header[k]))

        if source:
            lh.append('src')
        else:
            lh.append(sstr(a_pkg.header['arch']))

        # Build the header hash to be sent
        info = {
            'header': Binary(a_pkg.header.unload()),
            'checksum_type': a_pkg.checksum_type,
            'checksum': a_pkg.checksum,
            'packageSize': size,
            'header_start': a_pkg.header_start,
            'header_end': a_pkg.header_end
        }
        if relativeDir:
            # Append the relative dir too
            info["relativePath"] = "%s/%s" % (relativeDir,
                                              os.path.basename(filename))
        info['nvrea'] = tuple(lh)
        return info
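
The 'nvrea' entry built above is a (name, version, release, epoch, arch) tuple, with an empty string standing in for a missing epoch and 'src' replacing the arch for source packages. A small stand-alone rendering of the same loop on hypothetical header values:

# Hypothetical header values for a binary RPM with no epoch set
header = {'name': 'bash', 'version': '5.1.8', 'release': '6.el9',
          'epoch': None, 'arch': 'x86_64'}

lh = []
for k in ['name', 'version', 'release', 'epoch']:
    lh.append('' if (k == 'epoch' and not header[k]) else str(header[k]))
lh.append(header['arch'])        # would be 'src' for a source package

print(tuple(lh))                 # -> ('bash', '5.1.8', '6.el9', '', 'x86_64')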
Example #59
0
    def submit(self, system_id, action_id, result, message="", data={}):
        """ Submit the results of a queue run.
            Maps old and new rhn_check behavior to new database status codes

            The new API uses 4 slightly different status codes than the
            old client does.  This function will "hopefully" sensibly
            map them.  Old methodology:
               -rhn_check retrieves an action from the top of the action queue.
               -It attempts to execute the desired action and returns either
                   (a) 0   -- presumed successful.
                   (b) rhnFault object -- presumed failed
                   (c) some other non-fault object -- *assumed* successful.
               -Regardless of result code, action is marked as "executed"

            We try to make a smarter status selection (i.e. failed||completed).

            For reference:
            New DB status codes:      Old DB status codes:
                  0: Queued               0: queued
                  1: Picked Up            1: picked up
                  2: Completed            2: executed
                  3: Failed               3: completed
        """
        if type(action_id) is not IntType:
            # Convert it to int
            try:
                action_id = int(action_id)
            except ValueError:
                log_error("Invalid action_id", action_id)
                raise_with_tb(rhnFault(30, _("Invalid action value type %s (%s)") %
                               (action_id, type(action_id))), sys.exc_info()[2])
        # Authenticate the system certificate
        self.auth_system(system_id)
        log_debug(1, self.server_id, action_id, result)
        # check that the action is valid
        # We have a uniqueness constraint on (action_id, server_id)
        h = rhnSQL.prepare("""
            select at.label action_type,
                   at.trigger_snapshot,
                   at.name
              from rhnServerAction sa,
                   rhnAction a,
                   rhnActionType at
             where sa.server_id = :server_id
               and sa.action_id = :action_id
               and sa.status = 1
               and a.id = :action_id
               and a.action_type = at.id
        """)
        h.execute(server_id=self.server_id, action_id=action_id)
        row = h.fetchone_dict()
        if not row:
            log_error("Server %s does not own action %s" % (
                self.server_id, action_id))
            raise rhnFault(22, _("Action %s does not belong to server %s") % (
                action_id, self.server_id))

        action_type = row['action_type']
        trigger_snapshot = (row['trigger_snapshot'] == 'Y')

        if 'missing_packages' in data:
            missing_packages = "Missing-Packages: %s" % str(
                data['missing_packages'])
            rmsg = "%s %s" % (message, missing_packages)
        elif 'koan' in data:
            rmsg = "%s: %s" % (message, data['koan'])
        else:
            rmsg = message

        rcode = result
        # Careful with this one, result can be a very complex thing
        # and this processing is required for compatibility with old
        # rhn_check clients
        if type(rcode) == type({}):
            if "faultCode" in result:
                rcode = result["faultCode"]
            if "faultString" in result:
                rmsg = result["faultString"] + str(data)
        if type(rcode) in [type({}), type(()), type([])] \
                or type(rcode) is not IntType:
            rmsg = u"%s [%s]" % (UnicodeType(message), UnicodeType(rcode))
            rcode = -1
        # map to db codes.
        status = self.status_for_action_type_code(action_type, rcode)

        if status == 3:
            # Failed action - invalidate children
            self._invalidate_child_actions(action_id)
        elif action_type == 'reboot.reboot':
            # reboot action should stay as pickup
            rhnSQL.commit()
            return 0
        elif status == 2 and trigger_snapshot and self.__should_snapshot():
            # if action status is 'Completed', snapshot if allowed and if needed
            self.server.take_snapshot("Scheduled action completion:  %s" % row['name'])

        self.__update_action(action_id, status, rcode, rmsg)

        # Store the status in a flag - easier than to complicate the action
        # plugin API by adding a status
        rhnFlags.set('action_id', action_id)
        rhnFlags.set('action_status', status)

        self.process_extra_data(self.server_id, action_id, data=data,
                                action_type=action_type)

        # commit, because nobody else will
        rhnSQL.commit()
        return 0
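
The docstring of submit() boils the mapping down to: a zero result code means the action Completed (2), a non-zero fault code means it Failed (3), with Queued (0) and Picked Up (1) as the earlier states. The real decision lives in status_for_action_type_code(); the sketch below only captures that rule of thumb:

QUEUED, PICKED_UP, COMPLETED, FAILED = 0, 1, 2, 3

def simple_status_for_result(rcode):
    # Sketch only: zero means success, any non-zero fault code means failure
    return COMPLETED if rcode == 0 else FAILED

print(simple_status_for_result(0))    # 2 (Completed)
print(simple_status_for_result(38))   # 3 (Failed)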
Example #60
0
    def run(self):
        log_debug(2)
        r = self.repository

        channel = self.options.channel
        if not channel:
            die(6, "Config channel not specified")

        topdir = self.options.topdir
        if topdir:
            if not os.path.isdir(self.options.topdir):
                die(
                    8, "--topdir specified, but `%s' not a directory" %
                    self.options.topdir)

        if not self.args:
            die(7, "No files specified")

        revision = self.options.revision
        if revision:
            if len(self.args) > 1:
                die(9, "--revision specified with multiple files")

        dep_trans = None

        if topdir:
            dep_trans = DeployTransaction(transaction_root=topdir)
            dep_trans.deploy_callback(deploying_mesg_callback)

        for f in self.args:
            try:
                directory = topdir or tempfile.gettempdir()

                #5/11/05 wregglej - 157066 dirs_created is returned from get_file_info.
                (temp_file, info,
                 dirs_created) = r.get_file_info(channel,
                                                 f,
                                                 revision=revision,
                                                 auto_delete=0,
                                                 dest_directory=directory)

            except cfg_exceptions.RepositoryFileMissingError:
                if revision is not None:
                    die(
                        2, "Error: file %s (revision %s) not in config "
                        "channel %s" % (f, revision, channel))
                else:
                    die(
                        2, "Error: file %s not in config channel %s" %
                        (f, channel))

            if topdir:
                #5/11/05 wregglej - 157066 dirs_created now gets passed into add_preprocessed.
                dep_trans.add_preprocessed(f,
                                           temp_file,
                                           info,
                                           dirs_created,
                                           strict_ownership=0)
                continue
            elif info.get('filetype') == 'symlink':
                print("%s -> %s" % (info['path'], info['symlink']))
                continue
            elif info.get('filetype') == 'directory':
                print("%s is a directory entry, nothing to get" % info['path'])
                continue
            else:
                print(open(temp_file).read())
                os.unlink(temp_file)

        if topdir:
            try:
                dep_trans.deploy()
            except Exception:
                try:
                    dep_trans.rollback()
                except FailedRollback:
                    raise_with_tb(FailedRollback("FAILED ROLLBACK:  "),
                                  sys.exc_info()[2])
                #5/3/05 wregglej - 136415 Added exception stuff for missing user info.
                except cfg_exceptions.UserNotFound:
                    raise
                #5/5/05 wregglej - 136415 Added exception handling for unknown group.
                except cfg_exceptions.GroupNotFound:
                    raise
                else:
                    raise_with_tb(
                        Exception("Deploy failed, rollback successful:  "),
                        sys.exc_info()[2])