def import_channels(channels, orgid=None, master=None):
    """Import channel objects from the sync cache into the local database.

    :param channels: iterable of channel labels to import
    :param orgid: optional local org id to force imported channels into
    :param master: optional master label; when given, the backend's org map
                   is used to translate master org ids to local ones
    :return: the ChannelImport instance after run() (commit is deferred:
             will_commit is left at 0)
    :raises Exception: when a channel is missing from the cache
    """
    collection = ChannelCollection()
    batch = []
    org_map = None
    my_backend = diskImportLib.get_backend()
    if master:
        org_map = my_backend.lookupOrgMap(master)['master-id-to-local-id']
    for c in channels:
        try:
            timestamp = collection.get_channel_timestamp(c)
        except KeyError:
            raise_with_tb(Exception("Could not find channel %s" % c),
                          sys.exc_info()[2])
        c_obj = collection.get_channel(c, timestamp)
        if c_obj is None:
            raise Exception("Channel not found in cache: %s" % c)

        # Check to see if we're asked to sync to an orgid,
        # make sure the org from the export is not null org,
        # finally if the orgs differ so we might wanna use
        # requested org's channel-family.
        # TODO: Move these checks somewhere more appropriate
        if not orgid and c_obj['org_id'] is not None:
            # If the src org is not present default to org 1
            orgid = DEFAULT_ORG
        if orgid is not None and c_obj['org_id'] is not None and \
                c_obj['org_id'] != orgid:
            # If we know the master this is coming from and the master org
            # has been mapped to a local org, transform org_id to the local
            # org_id. Otherwise just put it in the default org.
            if (org_map and c_obj['org_id'] in list(org_map.keys())
                    and org_map[c_obj['org_id']]):
                c_obj['org_id'] = org_map[c_obj['org_id']]
            else:
                c_obj['org_id'] = orgid
                # Drop the trust list when we fall back to the default org;
                # the exported trust ids would not be meaningful here.
                if 'trust_list' in c_obj:
                    del (c_obj['trust_list'])
            # Re-label channel families to match the (possibly new) org.
            for family in c_obj['families']:
                family['label'] = 'private-channel-family-' + \
                    str(c_obj['org_id'])
        # If there's a trust list on the channel, transform the org ids to
        # the local ones
        if 'trust_list' in c_obj and c_obj['trust_list']:
            trusts = []
            for trust in c_obj['trust_list']:
                # Keep only trusts whose org id maps to a local org.
                if trust['org_trust_id'] in org_map:
                    trust['org_trust_id'] = org_map[trust['org_trust_id']]
                    trusts.append(trust)
            c_obj['trust_list'] = trusts
        syncLib.log(
            6, "Syncing Channel %s to Org %s " %
            (c_obj['label'], c_obj['org_id']))
        batch.append(c_obj)
    importer = channelImport.ChannelImport(batch, my_backend)
    # Don't commit just yet
    importer.will_commit = 0
    importer.run()
    return importer
def diff_file_revisions(self, path, config_channel_src, revision_src,
                        config_channel_dst, revision_dst):
    """Diff two config-file revisions via the config.management.diff call.

    :param path: path of the config file to diff
    :param config_channel_src: source channel label
    :param revision_src: source revision
    :param config_channel_dst: optional destination channel (omitted from
        the request when None)
    :param revision_dst: optional destination revision (omitted when None)
    :return: whatever the XML-RPC diff call returns
    :raises cfg_exceptions.RepositoryFileMissingError: fault -4011
    :raises cfg_exceptions.BinaryFileDiffError: fault -4004
        Any other xmlrpclib.Fault is re-raised unchanged.
    """
    log_debug(4)
    params = {
        'session': self.session,
        'path': path,
        'config_channel_src': config_channel_src,
        'revision_src': revision_src,
    }
    if config_channel_dst is not None:
        params['config_channel_dst'] = config_channel_dst
    if revision_dst is not None:
        params['revision_dst'] = revision_dst
    try:
        ret = self.rpc_call('config.management.diff', params)
    except xmlrpclib.Fault:
        e = sys.exc_info()[1]
        if e.faultCode == -4011:
            # File not present
            raise_with_tb(
                cfg_exceptions.RepositoryFileMissingError(e.faultString),
                sys.exc_info()[2])
        if e.faultCode == -4004:
            # Binary file requested
            raise_with_tb(
                cfg_exceptions.BinaryFileDiffError(e.faultString),
                sys.exc_info()[2])
        raise
    return ret
def _prepare_guest_kernel_and_ramdisk(config):
    """
    Use PyGrub to extract the kernel and ramdisk from the given disk image,
    then rename the extracted files to the paths named in the domain
    configuration (KERNEL_PATH / RAMDISK_PATH).

    :param config: DomainConfig providing DISK_IMAGE_PATH and the runtime
        kernel/ramdisk paths
    :raises VirtualizationException: when pygrub fails or the extracted
        images cannot be renamed
    """
    disk_image = config.getConfigItem(DomainConfig.DISK_IMAGE_PATH)

    # Use pygrub to extract the initrd and the kernel from the disk image.
    (status, output) = \
        commands.getstatusoutput("%s -q %s" % (PYGRUB, disk_image))
    if status != 0:
        raise VirtualizationException(
            "Error occurred while executing '%s' (status=%d). Output=%s" %
            (PYGRUB, status, output))

    # Now analyze the output and extract the names of the new kernel and
    # initrd images from it.
    (pygrub_kernel_path, pygrub_initrd_path) = \
        _extract_image_paths_from_pygrub_output(output)

    # Rename the extracted images to the names we are pointing to in the
    # configuration file.
    runtime_kernel_path = config.getConfigItem(DomainConfig.KERNEL_PATH)
    runtime_initrd_path = config.getConfigItem(DomainConfig.RAMDISK_PATH)
    try:
        os.rename(pygrub_kernel_path, runtime_kernel_path)
        os.rename(pygrub_initrd_path, runtime_initrd_path)
    except OSError:
        oe = sys.exc_info()[1]
        raise_with_tb(
            VirtualizationException(
                "Error occurred while renaming runtime image paths: %s" %
                str(oe)), sys.exc_info()[2])
def _call_function(self, function, arglist, kwargs={}): succeed = 0 while succeed == 0: try: ret = function(*arglist, **kwargs) except rpclib.InvalidRedirectionError: raise except xmlrpclib.Fault: e = sys.exc_info()[1] save_traceback = sys.exc_info()[2] try: self._failover() except NoMoreServers: f = sys.exc_info()[1] raise_with_tb( e, save_traceback ) #Don't raise the NoMoreServers error, raise the error that triggered the failover. continue except (error, sslerror, herror, gaierror, timeout): e = sys.exc_info()[1] save_traceback = sys.exc_info()[2] try: self._failover() except NoMoreServers: raise_with_tb(e, save_traceback) continue succeed = 1 #If we get here then the function call eventually succeeded and we don't need to try again. return ret
def token_server_groups(server_id, tokens_obj):
    """ Handle server group subscriptions for the registration token.

    Collects the server groups referenced by every token in *tokens_obj*
    (deduplicated by group id) and subscribes *server_id* to each.

    :return: list of human-readable "Subscribed to ..." status strings
    :raises rhnFault: code 80 when joining a group fails at the SQL level
    """
    assert (isinstance(tokens_obj, ActivationTokens))
    h = rhnSQL.prepare(_query_token_server_groups)
    server_groups = {}
    for token in tokens_obj.tokens:
        token_id = token['token_id']
        h.execute(token_id=token_id)
        while 1:
            row = h.fetchone_dict()
            if not row:
                break
            server_group_id = row['server_group_id']
            # dict keyed by group id -> duplicates across tokens collapse
            server_groups[server_group_id] = row

    # Now try to subscribe server to group
    ret = []
    for server_group_id, sg in list(server_groups.items()):
        log_debug(4, "token server group", sg)
        try:
            join_server_group(server_id, server_group_id)
        except rhnSQL.SQLError:
            e = sys.exc_info()[1]
            log_error("Failed to add server to group", server_id,
                      server_group_id, sg["name"])
            raise_with_tb(
                rhnFault(80,
                         _("Failed to add server to group %s") % sg["name"]),
                sys.exc_info()[2])
        else:
            ret.append("Subscribed to server group '%s'" % sg["name"])
    return ret
def __test_DB():
    """Return the module-global database connection object.

    Raises SystemError (with the original traceback) when initDB() has not
    been called yet, i.e. the global __DB name is still unbound.
    """
    global __DB
    try:
        return __DB
    except NameError:
        raise_with_tb(SystemError("Not connected to any database!"),
                      sys.exc_info()[2])
def store_rhnCryptoKey(description, cert, org_id, verbosity=0):
    """ stores cert in rhnCryptoKey

        uses:
            _checkCertMatch_rhnCryptoKey
            _delete_rhnCryptoKey - not currently used
            _insertPrep_rhnCryptoKey
            _lobUpdate_rhnCryptoKey

        A matching cert (id is None) is a no-op; id == -1 means no row
        exists yet, so one is inserted first.  Any SQLError is re-raised
        as CaCertInsertionError carrying the formatted traceback.
    """
    try:
        # look for a cert match in the database
        rhn_cryptokey_id = _checkCertMatch_rhnCryptoKey(cert, description,
                                                        org_id, deleteRowYN=1,
                                                        verbosity=verbosity)
        if rhn_cryptokey_id is None:
            # nothing to do - cert matches
            return
        # insert into the database
        if rhn_cryptokey_id == -1:
            rhn_cryptokey_id = _insertPrep_rhnCryptoKey(rhn_cryptokey_id,
                                                        description, org_id)
        # write/update
        _lobUpdate_rhnCryptoKey(rhn_cryptokey_id, cert)
        rhnSQL.commit()
    except rhnSQL.sql_base.SQLError:
        raise_with_tb(
            CaCertInsertionError("...the traceback: %s" % fetchTraceback()),
            sys.exc_info()[2])
def management_remove_channel(self, dict):
    """Remove the config channel named in dict['config_channel'].

    :raises rhnFault: 4009 when the channel does not exist in this org;
        4005 when the channel is non-empty (Oracle child-record error 2292)
    :return: empty string on success (channel removed and committed)
    """
    log_debug(1)
    self._get_and_validate_session(dict)

    config_channel = dict.get('config_channel')
    # XXX Validate the namespace

    row = rhnSQL.fetchone_dict(self._query_config_channel_by_label,
                               org_id=self.org_id, label=config_channel)
    if not row:
        raise rhnFault(4009, "Channel not found")

    delete_call = rhnSQL.Procedure('rhn_config.delete_channel')
    try:
        delete_call(row['id'])
    except rhnSQL.SQLError:
        e = sys.exc_info()[1]
        errno = e.args[0]
        # 2292: integrity constraint violated - child record found
        if errno == 2292:
            raise_with_tb(
                rhnFault(4005,
                         "Cannot remove non-empty channel %s" %
                         config_channel,
                         explain=0), sys.exc_info()[2])
        raise

    log_debug(5, "Removed:", config_channel)
    rhnSQL.commit()
    return ""
def _make_file_info(self, remote_path, local_path=None, delim_start=None,
                    delim_end=None, load_contents=1):
    """Build the params dict describing a local file for upload.

    :param remote_path: path of the file as known on the server
    :param local_path: local path; defaults to remote_path
    :param delim_start: optional start delimiter; if only one of the two
        delimiters is given, it is used for both
    :param delim_end: optional end delimiter
    :param load_contents: when true (and the path is a regular file), the
        file contents are read and attached via self._add_content
    :return: dict with path, delimiters, config_file_type_id (3=symlink,
        2=directory, 1=regular file) and stat info
    :raises cfg_exceptions.RepositoryLocalFileError: lstat or open failure
    """
    if not local_path:
        # Safe enough to assume local path is the same as the remote one
        local_path = remote_path

    try:
        file_stat = os.lstat(local_path)
    except OSError:
        e = sys.exc_info()[1]
        raise_with_tb(
            cfg_exceptions.RepositoryLocalFileError(
                "Error lstat()-ing local file: %s" % e), sys.exc_info()[2])

    # Delimiters
    if delim_start or delim_end:
        if not (delim_start and delim_end):
            # If only one delimiter is provided, assume the delimiters are
            # the same, whatever that is (or is nice)
            delim_start = delim_end = (delim_start or delim_end)
    else:
        # Use the default
        delim_start, delim_end = self.get_file_delimiters(remote_path)

    params = {
        'path': remote_path,
        'delim_start': delim_start,
        'delim_end': delim_end,
    }
    file_contents = None
    if os.path.islink(local_path):
        params['config_file_type_id'] = 3
        params['symlink'] = os.readlink(local_path)
        load_contents = 0
    elif os.path.isdir(local_path):
        params['config_file_type_id'] = 2
        load_contents = 0
    else:
        params['config_file_type_id'] = 1
    if load_contents:
        try:
            file_contents = open(local_path, "rb").read()
        except IOError:
            e = sys.exc_info()[1]
            raise_with_tb(
                cfg_exceptions.RepositoryLocalFileError(
                    "Error opening local file: %s" % e), sys.exc_info()[2])
        self._add_content(file_contents, params)

    params.update(self.make_stat_info(local_path, file_stat))
    return params
def _repodata_taskomatic(self, file_name):
    """Serve a taskomatic-generated repodata file for this channel.

    Picks a Content-Type from the file name, falls back to the old-style
    python generator for comps.xml/modules.yaml, and on a missing file
    (errno 2) queues a repodata regeneration and returns 404 (rhnFault 6).
    """
    log_debug(3, 'repodata', file_name)
    content_type = "application/x-gzip"
    if file_name.endswith(".xml"):
        content_type = "text/xml"
    elif file_name in ["repomd.xml.asc", "repomd.xml.key"]:
        content_type = "text/plain"
    elif file_name.endswith(".yaml"):
        content_type = "text/yaml"
    file_path = "%s/%s/%s" % (CFG.REPOMD_PATH_PREFIX, self.channelName,
                              file_name)
    if file_name in ["comps.xml", "modules.yaml"]:
        # without checksum in the filename, they are only available in the
        # old style
        return self._repodata_python(file_name)
    elif not os.path.exists(os.path.join(CFG.REPOMD_CACHE_MOUNT_POINT,
                                         file_path)):
        log_debug(2, "Unknown repomd file requested: %s" % file_name)
        raise rhnFault(6)

    rhnFlags.set('Content-Type', content_type)
    try:
        rhnFlags.set('Download-Accelerator-Path', file_path)
        return self._getFile(CFG.REPOMD_CACHE_MOUNT_POINT + "/" + file_path)
    except IOError:
        e = sys.exc_info()[1]
        # For file not found, queue up a regen, and return 404
        if e.errno == 2:
            if file_name not in ["repomd.xml.key", "repomd.xml.asc"]:
                taskomatic.add_to_repodata_queue(self.channelName,
                                                 "repodata request",
                                                 file_name,
                                                 bypass_filters=True)
                rhnSQL.commit()
            # This returns 404 to the client
            raise_with_tb(rhnFault(6), sys.exc_info()[2])
        raise
def __init__(self, dbh, sql=None, force=None, blob_map=None):
    """Prepare an Oracle cursor.

    Translates a prepare-time SQLSchemaError with an ORA-009xx code into
    SQLStatementPrepareError; anything else becomes a generic rhnException.
    """
    try:
        sql_base.Cursor.__init__(self, dbh=dbh, sql=sql, force=force)
        self._type_mapping = ORACLE_TYPE_MAPPING
        self.blob_map = blob_map
    except sql_base.SQLSchemaError:
        e = sys.exc_info()[1]
        (errno, errmsg) = e.errno, e.errmsg
        if 900 <= errno <= 999:
            # Per Oracle's documentation, SQL parsing error
            raise_with_tb(
                sql_base.SQLStatementPrepareError(self.dbh, errmsg, self.sql),
                sys.exc_info()[2])
        # XXX: we should be handling the lost connection cases
        # in here too, but we don't get that many of these and
        # besides, this is much harder to get right

        # XXX: Normally we expect the e.args to include a dump of
        # the SQL code we just passed in since we're dealing with
        # an OracleError. I hope this is always the case, of not,
        # we'll have to log the sql code here
        raise_with_tb(rhnException("Can not prepare statement", e.args),
                      sys.exc_info()[2])
def __getV2(self, action, dry_run=0):
    """ Fetches queued actions for the clients version 2+.

    Resolves the action's handler via getMethod, invokes it, and wraps
    the result in an XML-RPC blob keyed by the action id/version.

    :raises EmptyAction: when no valid handler exists for the method
    """
    log_debug(3, self.server_id)
    # Get the root dir of this install
    try:
        method = getMethod.getMethod(action['method'], 'server.action')
    except getMethod.GetMethodException:
        Traceback("queue.get V2")
        raise_with_tb(
            EmptyAction("Could not get a valid method for %s" %
                        (action['method'], )), sys.exc_info()[2])
    # Call the method
    result = method(self.server_id, action['id'], dry_run)
    if result is None:
        # None are mapped to the empty list
        result = ()
    elif not isinstance(result, TupleType):
        # Everything other than a tuple is wrapped in a tuple
        result = (result, )
    xmlblob = xmlrpclib.dumps(result, methodname=action['method'])
    log_debug(5, "returning xmlblob for action", xmlblob)
    return {
        'id': action['id'],
        'action': xmlblob,
        'version': action['version'],
    }
def start_domain(uuid): """ Boots the domain for the first time after installation is complete. """ # Load the configuration file for this UUID. domain = DomainDirectory() config = domain.load_config(uuid) # Connect to the hypervisor. connection = libvirt.open(None) # We will attempt to determine if the domain is configured to use a # bootloader. If not, we'll have to explicitly use the kernel and initrd # data provided in the config to start the domain. try: config.getConfigItem(DomainConfig.BOOTLOADER) except DomainConfigError: dce = sys.exc_info()[1] # No bootloader tag present. Use pygrub to extract the kernel from # the disk image if its Xen. For fully virt we dont have pygrub, it # directly emulates the BIOS loading the first sector of the boot disk. if connection.getType() == 'Xen': # This uses pygrub which comes only with xen _prepare_guest_kernel_and_ramdisk(config) # Now, we'll restart the instance, this time using the re-create XML. try: domain = connection.createLinux(config.toXML(), 0) except Exception: e = sys.exc_info()[1] raise_with_tb( VirtualizationException( "Error occurred while attempting to recreate domain %s: %s" % (uuid, str(e))), sys.exc_info()[2])
def load(filename=None, file_obj=None, fd=None):
    """ Loads an MPM and returns its header and its payload.

    Exactly one of filename / file_obj / fd should be supplied (checked
    in that order).  If MPM parsing fails, falls back to load_rpm(); when
    the fallback also fails, the original MPM error is re-raised.

    NOTE(review): the file is opened in text mode ('r'); package payloads
    are binary -- confirm this is intentional (py2-era behavior).
    """
    if (filename is None and file_obj is None and fd is None):
        raise ValueError("No parameters passed")
    if filename is not None:
        f = open(filename)
    elif file_obj is not None:
        f = file_obj
    else:  # fd is not None
        # Duplicate the descriptor so closing f does not close the caller's.
        f = os.fdopen(os.dup(fd), "r")
    f.seek(0, 0)
    p = MPM_Package()
    try:
        p.load(f)
    except InvalidPackageError:
        e = sys.exc_info()[1]
        try:
            return load_rpm(f)
        except InvalidPackageError:
            raise_with_tb(e, sys.exc_info()[2])
        except:
            raise_with_tb(e, sys.exc_info()[2])
    return p.header, p.payload_stream
def parse_byteranges(byterange_header, file_size=None):
    """Parse an HTTP Range header of the form 'bytes=start-end'.

    :param byterange_header: the raw header value, e.g. "bytes=0-99"
    :param file_size: optional total file size used to validate/resolve
        open-ended and suffix ranges
    :return: (start, end) tuple; end is exclusive and may be None for an
        open-ended range with unknown file size
    :raises InvalidByteRangeException: malformed or self-contradictory range
    :raises UnsatisfyableByteRangeException: multi-part ranges, or ranges
        beyond the end of the file
    """
    log_debug(4, "Parsing byte range", byterange_header)
    regexp = re.compile(r"^bytes\s*=\s*(.*)$")
    mo = regexp.match(byterange_header)
    if not mo:
        raise InvalidByteRangeException
    arr = mo.groups()[0].split(",")
    regexp = re.compile(r"^([^-]*)-([^-]*)$")

    if len(arr) > 1:
        # We don't support very fancy byte ranges yet
        raise UnsatisfyableByteRangeException

    mo = regexp.match(arr[0])
    if not mo:
        # Invalid byterange
        raise InvalidByteRangeException

    try:
        start, end = list(map(_str2int, mo.groups()))
    except ValueError:
        # Invalid.  Pass an exception *instance* to raise_with_tb: the
        # previous code passed the class itself, which has no
        # with_traceback() and would blow up with AttributeError on py3.
        raise_with_tb(InvalidByteRangeException(), sys.exc_info()[2])

    if start is not None:
        if start < 0:
            # Invalid
            raise InvalidByteRangeException
        if file_size is not None:
            if start >= file_size:
                raise UnsatisfyableByteRangeException
        if end is not None:
            if start > end:
                # Invalid
                raise InvalidByteRangeException
            # Make `end' exclusive
            end = end + 1
        else:
            if file_size:
                end = file_size
    else:
        # No start specified: this is a suffix range ("-N" = last N bytes)
        if end is None:
            # Invalid
            raise InvalidByteRangeException
        if end <= 0:
            # Invalid
            raise InvalidByteRangeException
        if file_size:
            if end > file_size:
                raise UnsatisfyableByteRangeException
            start = file_size - end
            end = file_size
        else:
            start = -end
            end = None

    byteranges = (start, end)
    log_debug(4, "Request byterange", byteranges)
    return byteranges
def initDB(backend=None, host=None, port=None, username=None,
           password=None, database=None, sslmode=None, sslrootcert=None):
    """ Initialize the database.

    Either we get backend and all parameter which means the caller
    knows what they are doing, or we populate everything from the
    config files.

    :return: 0 on success
    :raises rhnException: for an unsupported backend
    :raises SQLConnectError: re-raised (with traceback) after attempting
        to close any half-open connection
    """
    if backend is None:
        if CFG is None or not CFG.is_initialized():
            initCFG('server')
        backend = CFG.DB_BACKEND
        host = CFG.DB_HOST
        port = CFG.DB_PORT
        database = CFG.DB_NAME
        username = CFG.DB_USER
        password = CFG.DB_PASSWORD
        sslmode = None
        sslrootcert = None
        if CFG.DB_SSL_ENABLED:
            sslmode = 'verify-full'
            sslrootcert = CFG.DB_SSLROOTCERT

    if backend not in SUPPORTED_BACKENDS:
        raise rhnException("Unsupported database backend", backend)

    if port:
        port = int(port)

    # Hide the password
    add_to_seclist(password)
    try:
        __init__DB(backend, host, port, username, password, database,
                   sslmode, sslrootcert)
#    except (rhnException, SQLError):
#        raise  # pass on, we know those ones
#    except (KeyboardInterrupt, SystemExit):
#        raise
    except SQLConnectError:
        e = sys.exc_info()[1]
        # Close whatever partial state the failed connect left behind.
        try:
            closeDB()
        except NameError:
            pass
        raise_with_tb(e, sys.exc_info()[2])
    except:
        raise
        #e_type, e_value = sys.exc_info()[:2]
        # raise rhnException("Could not initialize Oracle database connection",
        #                    str(e_type), str(e_value))
    return 0
def read_header(self):
    """Copy the input stream into the header buffer and parse it.

    On success, self.header holds the parsed deb_Header and
    self.header_end the buffer offset where the header data ends.

    :raises InvalidPackageError: when the buffer cannot be parsed
    """
    self._stream_copy(self.input_stream, self.header_data)
    self.header_end = self.header_data.tell()
    try:
        self.header_data.seek(0, 0)
        self.header = deb_Header(self.header_data)
    except Exception:
        # Two fixes over the original:
        #  - pass an exception *instance*: raise_with_tb on a bare class
        #    fails (a class has no with_traceback()) on py3;
        #  - narrow the bare `except:` so KeyboardInterrupt/SystemExit are
        #    no longer swallowed and misreported as a bad package.
        raise_with_tb(InvalidPackageError(), sys.exc_info()[2])
def get(self, name, modified=None):
    """Fetch and unpickle the cache entry *name*.

    :param modified: passed through to the underlying cache lookup
    :raises KeyError: (carrying the original traceback) when the stored
        blob cannot be unpickled
    """
    raw = self.cache.get(name, modified)
    # On python 3 the cache layer may hand back str; cPickle needs bytes.
    if sys.version_info[0] >= 3 and isinstance(raw, str):
        raw = raw.encode('latin-1')
    try:
        return cPickle.loads(raw)
    except cPickle.UnpicklingError:
        raise_with_tb(KeyError(name), sys.exc_info()[2])
def _get_item_id(prefix, name, errnum, errmsg): prefix_len = len(prefix) if name[:prefix_len] != prefix: raise rhnFault(errnum, errmsg % name) try: uuid = int(name[prefix_len:]) except ValueError: raise_with_tb(rhnFault(errnum, errmsg % name), sys.exc_info()[2]) return uuid
def _execute_wrapper(self, function, *p, **kw):
    """Run a cursor operation, translating Oracle errors and handling
    blob binds and the ORA-01475 reparse-and-retry case.

    Error mapping: ORA-009xx -> SQLStatementPrepareError; ORA-01475 ->
    one transparent re-prepare + retry; ORA-20000..20999 -> SQLSchemaError;
    anything else -> SQLError.
    """
    params = ','.join([
        "%s: %s" % (repr(key), repr(value))
        for key, value in list(kw.items())
    ])
    log_debug(
        5, "Executing SQL: \"%s\" with bind params: {%s}" % (self.sql,
                                                             params))
    if self.sql is None:
        raise rhnException("Cannot execute empty cursor")
    if self.blob_map:
        # Swap each mapped bind for a freshly allocated BLOB variable;
        # the original values are written back after a successful execute.
        blob_content = {}
        for orig_blob_var in list(self.blob_map.keys()):
            new_blob_var = orig_blob_var + '_blob'
            blob_content[new_blob_var] = kw[orig_blob_var]
            kw[new_blob_var] = self.var(cx_Oracle.BLOB)
            del kw[orig_blob_var]
    modified_params = self._munge_args(kw)
    try:
        retval = function(*p, **kw)
    except self.OracleError:
        e = sys.exc_info()[1]
        ret = self._get_oracle_error_info(e)
        if isinstance(ret, usix.StringType):
            raise_with_tb(sql_base.SQLError(self.sql, p, kw, ret),
                          sys.exc_info()[2])
        (errno, errmsg) = ret[:2]
        if 900 <= errno <= 999:
            # Per Oracle's documentation, SQL parsing error
            raise_with_tb(
                sql_base.SQLStatementPrepareError(errno, errmsg, self.sql),
                sys.exc_info()[2])
        if errno == 1475:
            # statement needs to be reparsed; force a prepare again
            if self.reparsed:
                # useless, tried that already. give up
                log_error("Reparsing cursor did not fix it", self.sql)
                args = ("Reparsing tried and still got this", ) + tuple(ret)
                raise_with_tb(sql_base.SQLError(*args), sys.exc_info()[2])
            self._real_cursor = self.dbh.prepare(self.sql)
            self.reparsed = 1
            self._execute_wrapper(function, *p, **kw)
        elif 20000 <= errno <= 20999:
            # error codes we know we raise as schema errors
            raise_with_tb(sql_base.SQLSchemaError(*ret), sys.exc_info()[2])
        raise_with_tb(sql_base.SQLError(*ret), sys.exc_info()[2])
    except ValueError:
        # this is not good.Let the user know
        raise
    else:
        # reset the reparsed counter
        self.reparsed = 0
        if self.blob_map:
            for blob_var, content in list(blob_content.items()):
                kw[blob_var].getvalue().write(content)
        # Munge back the values
        self._unmunge_args(kw, modified_params)
    return retval
def read_file(filename):
    """ reads a text config file and returns its lines in a list

    :param filename: path of the file to read
    :return: list of lines (newline-terminated strings)
    :raises ConfigParserError: when the file cannot be read; carries the
        OS error string and the original traceback
    """
    try:
        # Use a context manager so the handle is closed deterministically
        # instead of relying on garbage collection.
        with open(filename, 'r') as f:
            return f.readlines()
    except (IOError, OSError):
        e = sys.exc_info()[1]
        raise_with_tb(ConfigParserError("Can not read config file",
                                        filename, e.args[1]),
                      sys.exc_info()[2])
def __processSystemid(self):
    """ update the systemid/serverid but only if they stat differently.
        returns 0=no updates made; or 1=updates were made

    The systemid file's mtime is used as the change detector; the parsed
    systemid/serverid are cached on the ProxyAuth class.
    """
    if not os.access(ProxyAuth.__systemid_filename, os.R_OK):
        log_error("unable to access %s" % ProxyAuth.__systemid_filename)
        raise rhnFault(
            1000,
            _("SUSE Manager Proxy error (SUSE Manager Proxy systemid has wrong permissions?). "
              "Please contact your system administrator."))

    mtime = None
    try:
        mtime = os.stat(ProxyAuth.__systemid_filename)[-2]
    except IOError as e:
        log_error("unable to stat %s: %s" % (ProxyAuth.__systemid_filename,
                                             repr(e)))
        raise_with_tb(
            rhnFault(
                1000,
                _("SUSE Manager Proxy error (SUSE Manager Proxy systemid has wrong permissions?). "
                  "Please contact your system administrator.")),
            sys.exc_info()[2])

    if not self.__systemid_mtime:
        ProxyAuth.__systemid_mtime = mtime

    if self.__systemid_mtime == mtime \
            and self.__systemid and self.__serverid:
        # nothing to do
        return 0

    # get systemid
    try:
        ProxyAuth.__systemid = open(ProxyAuth.__systemid_filename,
                                    'r').read()
    except IOError as e:
        log_error("unable to read %s" % ProxyAuth.__systemid_filename)
        raise_with_tb(
            rhnFault(
                1000,
                _("SUSE Manager Proxy error (SUSE Manager Proxy systemid has wrong permissions?). "
                  "Please contact your system administrator.")),
            sys.exc_info()[2])

    # get serverid
    sysid, _cruft = xmlrpclib.loads(ProxyAuth.__systemid)
    ProxyAuth.__serverid = sysid[0]['system_id'][3:]

    log_debug(
        7, 'SystemId: "%s[...snip snip...]%s"' %
        (ProxyAuth.__systemid[:20], ProxyAuth.__systemid[-20:]))
    log_debug(7, 'ServerId: %s' % ProxyAuth.__serverid)

    # ids were updated
    return 1
def _openSocketStream(self, method, params):
    """Wraps the gzipstream.GzipStream instantiation in a test block so we
    can open normally if stream is not gzipped.

    Retries up to cfg['networkRetries'] times on protocol/connection
    errors; xmlrpc Faults and unexpected exceptions abort the retry loop.
    When SYNC_TO_TEMP is set, the stream is spooled to a temp file first.

    :raises RhnSyncException: with the last error message when all
        attempts failed
    """
    stream = None
    retryYN = 0
    wait = 0.33
    lastErrorMsg = ''
    cfg = config.initUp2dateConfig()
    for i in range(cfg['networkRetries']):
        server = self.getServer(retryYN)
        if server is None:
            log2(-1, 2, 'ERROR: server unable to initialize, attempt %s' % i,
                 stream=sys.stderr)
            retryYN = 1
            time.sleep(wait)
            continue
        func = getattr(server, method)
        try:
            stream = func(*params)
            if CFG.SYNC_TO_TEMP:
                import tempfile
                cached = tempfile.NamedTemporaryFile()
                stream.read_to_file(cached)
                cached.seek(0)
                return cached
            else:
                return stream
        except rpclib.xmlrpclib.ProtocolError:
            e = sys.exc_info()[1]
            # Never log the real systemid.
            p = tuple(['<the systemid>'] + list(params[1:]))
            lastErrorMsg = 'ERROR: server.%s%s: %s' % (method, p, e)
            log2(-1, 2, lastErrorMsg, stream=sys.stderr)
            retryYN = 1
            time.sleep(wait)
            # do not reraise this exception!
        except (KeyboardInterrupt, SystemExit):
            raise
        except rpclib.xmlrpclib.Fault:
            e = sys.exc_info()[1]
            lastErrorMsg = e.faultString
            break
        except Exception:  # pylint: disable=E0012, W0703
            e = sys.exc_info()[1]
            p = tuple(['<the systemid>'] + list(params[1:]))
            lastErrorMsg = 'ERROR: server.%s%s: %s' % (method, p, e)
            log2(-1, 2, lastErrorMsg, stream=sys.stderr)
            break
            # do not reraise this exception!
    if lastErrorMsg:
        raise_with_tb(RhnSyncException(lastErrorMsg), sys.exc_info()[2])
    # Returns a stream
    # Should never be reached
    return stream
def _function(self, name, ret_type):
    """Return a Function wrapper for stored function *name*.

    :param ret_type: return type the wrapper will bind for the call
    :raises sql_base.SQLSchemaError: when a cursor cannot be allocated
    """
    try:
        cursor = self.dbh.cursor()
    except cx_Oracle.DatabaseError:
        exc = sys.exc_info()[1]
        err = exc[0]
        raise_with_tb(
            sql_base.SQLSchemaError(err.code, err.message, err.context),
            sys.exc_info()[2])
    return Function(name, cursor, ret_type)
def get_fd(self, name, user, group, mode):
    """Create the cache file with the given ownership/mode, take an
    exclusive lock on it and return a writable file object.

    NOTE(review): the file actually created is self.fname; the *name*
    argument only appears in the error message -- confirm intentional.

    :raises OSError: when the entry exists but is not accessible
    """
    try:
        fd = _safe_create(self.fname, user, group, mode)
    except UnreadableFileError:
        raise_with_tb(OSError("cache entry exists, but is not accessible: %s" %
                              name), sys.exc_info()[2])

    # now we have the fd open, lock it
    fcntl.lockf(fd, fcntl.LOCK_EX)
    return os.fdopen(fd, 'wb')
def put_files(self, action_id, files, upload_contents=1):
    """Inserts a set of files into the repo, as a result of a scheduled
    action.

    :param action_id: id of the scheduled action the upload belongs to
    :param files: iterable of file paths to push
    :param upload_contents: when true, file contents are uploaded and the
        size limit is enforced
    :return: dict with optional keys 'files_too_large',
        'failed_due_to_quota' and 'missing_files' listing skipped files
    :raises cfg_exceptions.RepositoryFilePushError: for unexpected faults
    """
    log_debug(4)
    missing_files = []
    files_too_large = []
    failed_due_to_quota = []

    max_file_size = self.get_maximum_file_size()

    for file in files:
        try:
            params = self._make_file_info(file, local_path=None,
                                          load_contents=upload_contents)
        except cfg_exceptions.RepositoryLocalFileError:
            missing_files.append(file)
            continue
        if upload_contents and (params['size'] > max_file_size):
            files_too_large.append(file)
            continue
        try:
            self.rpc_call('config.client.upload_file', self.system_id,
                          action_id, params)
        except xmlrpclib.Fault:
            e = sys.exc_info()[1]
            fault_code, fault_string = e.faultCode, e.faultString
            # deal with particular faults
            if fault_code == -4003:
                # File too large
                files_too_large.append(file)
            elif fault_code == -4014:
                # Ran out of org quota space
                failed_due_to_quota.append(file)
            else:
                raise_with_tb(
                    cfg_exceptions.RepositoryFilePushError(fault_code,
                                                           fault_string),
                    sys.exc_info()[2])
        except Exception:
            traceback.print_exc()
            raise

    result = {}
    # If there are files too large to be pushed, result will have a key
    # `file_too_large'
    if len(files_too_large) > 0:
        result['files_too_large'] = files_too_large
    if len(failed_due_to_quota) > 0:
        result['failed_due_to_quota'] = failed_due_to_quota
    if len(missing_files) > 0:
        result['missing_files'] = missing_files
    return result
def __init__(self, stream):
    """Parse a Debian package's control data into an rpm-like header dict.

    Any failure to open or parse the package is reported as
    InvalidPackageError carrying the original exception.
    """
    self.packaging = 'deb'
    self.signatures = []
    self.is_source = 0
    self.deb = None
    try:
        self.deb = debfile.DebFile(stream.name)
    except Exception:
        e = sys.exc_info()[1]
        raise_with_tb(InvalidPackageError(e), sys.exc_info()[2])

    try:
        # Fill info about package
        debcontrol = self.deb.debcontrol()
        self.hdr = {
            'name': debcontrol.get_as_string('Package'),
            'arch': debcontrol.get_as_string('Architecture') + '-deb',
            'summary':
                debcontrol.get_as_string('Description').splitlines()[0],
            'vendor': debcontrol.get_as_string('Maintainer'),
            'package_group': debcontrol.get_as_string('Section'),
            'epoch': '',
            'version': 0,
            'release': 0,
            'description': debcontrol.get_as_string('Description'),
        }
        # Map Debian relationship fields onto rpm-style header keys.
        for hdr_k, deb_k in [('requires', 'Depends'),
                             ('provides', 'Provides'),
                             ('conflicts', 'Conflicts'),
                             ('obsoletes', 'Replaces'),
                             ('recommends', 'Recommends'),
                             ('suggests', 'Suggests'),
                             ('breaks', 'Breaks'),
                             ('predepends', 'Pre-Depends'),
                             ('payload_size', 'Installed-Size')]:
            if deb_k in debcontrol:
                self.hdr[hdr_k] = debcontrol.get_as_string(deb_k)
        # Carry over any remaining control fields verbatim.
        for k in list(debcontrol.keys()):
            if k not in self.hdr:
                self.hdr[k] = debcontrol.get_as_string(k)

        # Split "epoch:upstream-revision" into epoch/version/release.
        # NOTE(review): a version with two ':' would make the split below
        # raise; handled by the enclosing except as InvalidPackageError.
        version = debcontrol.get_as_string('Version')
        if version.find(':') != -1:
            self.hdr['epoch'], version = version.split(':')
            self.hdr['version'] = version
        if version.find('-') != -1:
            version_tmpArr = version.split('-')
            self.hdr['version'] = '-'.join(version_tmpArr[:-1])
            self.hdr['release'] = version_tmpArr[-1]
        else:
            self.hdr['version'] = version
            self.hdr['release'] = 'X'
    except Exception:
        e = sys.exc_info()[1]
        raise_with_tb(InvalidPackageError(e), sys.exc_info()[2])
def __init__(self, fields, dict=None, mapping=None):
    """Initialize the device record.

    :param fields: known data keys; each starts out as None in self.data
    :param dict: optional incoming values (plain dict or UserDictCase)
    :param mapping: optional key-rename map; a None value in the map
        means "drop this incoming key"
    :raises TypeError: when dict/mapping are not dictionary-like
    """
    GenericDevice.__init__(self)
    x = {}
    for k in fields:
        x[k] = None
    self.data = UserDictCase(x)
    if not dict:
        return
    # make sure we get a UserDictCase to work with
    if type(dict) == type({}):
        dict = UserDictCase(dict)
    if mapping is None or type(mapping) == type({}):
        mapping = UserDictCase(mapping)
    if not isinstance(dict, UserDictCase) or \
            not isinstance(mapping, UserDictCase):
        log_error("Argument passed is not a dictionary", dict, mapping)
        raise TypeError("Argument passed is not a dictionary", dict, mapping)
    # make sure we have a platform
    for k in list(dict.keys()):
        if dict[k] == '':
            dict[k] = None
        if self.data.has_key(k):
            self.data[k] = dict[k]
            continue
        if mapping.has_key(k):
            # the mapping dict might tell us to lose some fields
            if mapping[k] is not None:
                self.data[mapping[k]] = dict[k]
        else:
            log_error("Unknown HW key =`%s'" % k,
                      dict.dict(), mapping.dict())
            # The try-except is added just so that we can send e-mails
            try:
                raise KeyError("Don't know how to parse key `%s''" % k,
                               dict.dict())
            except:
                Traceback(mail=1)
                # Ignore this key
                continue
    # clean up this data: strip strings and remove surrounding quotes
    try:
        for k in list(self.data.keys()):
            if type(self.data[k]) == type("") and len(self.data[k]):
                self.data[k] = self.data[k].strip()
                if not len(self.data[k]):
                    continue
                if self.data[k][0] == '"' and self.data[k][-1] == '"':
                    self.data[k] = self.data[k][1:-1]
    except IndexError:
        raise_with_tb(
            IndexError("Can not process data = %s, key = %s" %
                       (repr(self.data), k)), sys.exc_info()[2])
def procedure(self, name):
    """Return a callable wrapper for stored procedure *name*.

    :raises sql_base.SQLSchemaError: when a cursor cannot be allocated
    """
    try:
        cursor = self.dbh.cursor()
    except cx_Oracle.DatabaseError:
        exc = sys.exc_info()[1]
        err = exc[0]
        raise_with_tb(
            sql_base.SQLSchemaError(err.code, err.message, err.context),
            sys.exc_info()[2])
    # Pass the cursor in so we can close it after execute()
    return self._procedure_class(name, cursor)
def rpc_call(self, method_name, *params):
    """Forward *method_name* to the base repository.

    The 'system not subscribed' fault (code -9) is translated into
    cfg_exceptions.AuthenticationError; all other faults are re-raised.
    """
    try:
        return repository.RPC_Repository.rpc_call(self, method_name,
                                                  *params)
    except xmlrpclib.Fault:
        fault = sys.exc_info()[1]
        if fault.faultCode == -9:
            # System not subscribed
            raise_with_tb(
                cfg_exceptions.AuthenticationError(
                    "Invalid digital server certificate%s" %
                    fault.faultString),
                sys.exc_info()[2])
        raise