def setMetadata(self, folder, metadata):
    """
    Set metadata on a folder. A `ValidationException` is thrown in the cases
    where the metadata JSON object is badly formed, or if any of the metadata
    keys contains a period ('.').

    :param folder: The folder to set the metadata on.
    :type folder: dict
    :param metadata: A dictionary containing key-value pairs to add to
        the folder's meta field
    :type metadata: dict
    :returns: the folder document
    """
    if 'meta' not in folder:
        folder['meta'] = {}

    # Add new metadata to existing metadata
    folder['meta'].update(six.viewitems(metadata))

    # Remove metadata fields that were set to null (use items in py3)
    folder['meta'] = {k: v for k, v in six.viewitems(folder['meta'])
                      if v is not None}

    folder['updated'] = datetime.datetime.utcnow()

    # Validate and save the folder
    return self.save(folder)
def test_approval(sample_submitted_application):
    a = sample_submitted_application
    assert a.supplier is None
    sample_submitted_application.set_approval(True)
    assert a.supplier is not None

    product_query = Product.query.filter(Product.supplier_code == a.supplier.code)
    products = product_query.all()
    assert a.supplier.products == products

    product_from_submitted = a.data['products']['0']
    product_from_submitted['id'] = products[0].id
    product_from_submitted['supplier_code'] = a.supplier.code
    product_from_submitted['links'] = {
        'self': '/products/{}'.format(products[0].id)
    }
    assert a.supplier.serializable['products'] == [product_from_submitted]

    assert 'recruitment' in a.supplier.serializable['seller_types']
    assert a.supplier.is_recruiter

    r_info = a.supplier.domains[0].recruiter_info._fieldsdict
    r_info_from_application = a.data['recruiter_info']['Strategy and Policy']
    assert six.viewitems(r_info) >= six.viewitems(r_info_from_application)
    assert six.viewitems(a.supplier.serializable['recruiter_info']) <= \
        six.viewitems(a.data['recruiter_info'])
def validate_southbound_devices_usages(nets, ni):
    kernel_config = KernelConfig(ni)

    for requested_net, net_info in six.viewitems(nets):
        if 'remove' in net_info:
            kernel_config.removeNetwork(requested_net)

    for requested_net, net_info in six.viewitems(nets):
        if 'remove' in net_info:
            continue
        kernel_config.setNetwork(requested_net, net_info)

    underlying_devices = []
    for net_name, net_attrs in six.viewitems(kernel_config.networks):
        vlan = net_attrs.get('vlan')
        if 'bonding' in net_attrs:
            underlying_devices.append((net_attrs['bonding'], vlan))
        elif 'nic' in net_attrs:
            underlying_devices.append((net_attrs['nic'], vlan))
        else:
            if not net_attrs['bridged']:
                raise ne.ConfigNetworkError(
                    ne.ERR_BAD_PARAMS,
                    'southbound device not specified for non-bridged '
                    'network "{}"'.format(net_name))

    if len(set(underlying_devices)) < len(underlying_devices):
        raise ne.ConfigNetworkError(
            ne.ERR_BAD_PARAMS,
            'multiple networks/similar vlans cannot be'
            ' defined on a single underlying device. '
            'kernel networks: {}\nrequested networks: {}'.format(
                kernel_config.networks, nets))
def testUploadFileToFolder(self):
    filepath = os.path.join(self.libTestDir, 'sub0', 'f')
    stream_filename = 'uploaded_from_stream'
    disk_filename = 'uploaded_from_disk'

    # upload filepath as a stream and as a local file, and assert the end result is the same
    with open(filepath, 'rb') as infile:
        infile.seek(0, os.SEEK_END)
        size = infile.tell()
        infile.seek(0)

        self.client.uploadStreamToFolder(str(self.publicFolder['_id']), infile,
                                         stream_filename, size, mimeType='text/plain')

    self.client.uploadFileToFolder(str(self.publicFolder['_id']), filepath,
                                   filename=disk_filename)

    stream_item = six.next(self.client.listItem(str(self.publicFolder['_id']),
                                                name=stream_filename))
    disk_item = six.next(self.client.listItem(str(self.publicFolder['_id']),
                                              name=disk_filename))

    # assert names and sizes are correct
    self.assertEqual(stream_filename, stream_item['name'])
    self.assertEqual(size, stream_item['size'])
    self.assertEqual(disk_filename, disk_item['name'])
    self.assertEqual(size, disk_item['size'])

    # assert every other field (besides unique ones) are identical
    unique_attrs = ('_id', 'name', 'created', 'updated')
    self.assertEqual({k: v for (k, v) in six.viewitems(stream_item) if k not in unique_attrs},
                     {k: v for (k, v) in six.viewitems(disk_item) if k not in unique_attrs})
def setMetadata(self, item, metadata):
    """
    Set metadata on an item. A `ValidationException` is thrown in the cases
    where the metadata JSON object is badly formed, or if any of the metadata
    keys contains a period ('.').

    :param item: The item to set the metadata on.
    :type item: dict
    :param metadata: A dictionary containing key-value pairs to add to
        the item's meta field
    :type metadata: dict
    :returns: the item document
    """
    if 'meta' not in item:
        item['meta'] = {}

    # Add new metadata to existing metadata
    item['meta'].update(six.viewitems(metadata))

    # Remove metadata fields that were set to null (use items in py3)
    toDelete = [k for k, v in six.viewitems(item['meta']) if v is None]
    for key in toDelete:
        del item['meta'][key]

    item['updated'] = datetime.datetime.utcnow()

    # Validate and save the item
    return self.save(item)
def _filter_env(self, env):
    """Return True if env matches."""
    if isinstance(env, dict):
        return viewitems(env) <= viewitems(self.environ)
    if isinstance(env, list):
        return bool([key for key in env if key in self.environ])
    return str(env) in self.environ
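# A minimal, self-contained sketch of the dict-subset matching used by
# _filter_env above; the _EnvMatcher class and sample environ are illustrative
# only, and viewitems is assumed to come from six.
from six import viewitems


class _EnvMatcher(object):
    def __init__(self, environ):
        self.environ = environ

    def matches(self, env):
        # dict: every requested key/value pair must be present in environ
        if isinstance(env, dict):
            return viewitems(env) <= viewitems(self.environ)
        # list: at least one requested key must be present
        if isinstance(env, list):
            return bool([key for key in env if key in self.environ])
        # anything else: treat it as a single key
        return str(env) in self.environ


matcher = _EnvMatcher({'PATH': '/usr/bin', 'LANG': 'C', 'DEBUG': '1'})
assert matcher.matches({'DEBUG': '1'})       # subset of items -> True
assert not matcher.matches({'DEBUG': '0'})   # same key, different value -> False
assert matcher.matches(['LANG', 'MISSING'])  # any key present -> True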
def _set_networks_mtu(self, nets, sb_max_mtu_map):
    for net, netattrs in six.viewitems(nets):
        self._set_mtu(net, netattrs['mtu'])

    for sb, mtu in six.viewitems(sb_max_mtu_map):
        if mtu:
            self._set_mtu(sb, mtu)
def setMetadata(self, folder, metadata, allowNull=False):
    """
    Set metadata on a folder. A `ValidationException` is thrown in the cases
    where the metadata JSON object is badly formed, or if any of the metadata
    keys contains a period ('.').

    :param folder: The folder to set the metadata on.
    :type folder: dict
    :param metadata: A dictionary containing key-value pairs to add to
        the folder's meta field
    :type metadata: dict
    :param allowNull: Whether to allow `null` values to be set in the folder's
        metadata. If set to `False` or omitted, a `null` value will cause that
        metadata field to be deleted.
    :returns: the folder document
    """
    if 'meta' not in folder:
        folder['meta'] = {}

    # Add new metadata to existing metadata
    folder['meta'].update(six.viewitems(metadata))

    # Remove metadata fields that were set to null (use items in py3)
    if not allowNull:
        toDelete = [k for k, v in six.viewitems(metadata) if v is None]
        for key in toDelete:
            del folder['meta'][key]

    folder['updated'] = datetime.datetime.utcnow()

    self.validateKeys(folder['meta'])

    # Validate and save the folder
    return self.save(folder)
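# Sketch of how the allowNull flag above changes behaviour, using a plain dict
# in place of a real folder document (illustrative only; a real call would go
# through the model's save/validation path).
folder = {'meta': {'colour': 'blue', 'stale': 1}}

# Default (allowNull=False): a None value deletes the key instead of storing it.
metadata = {'colour': 'green', 'stale': None}
folder['meta'].update(metadata)
for key in [k for k, v in metadata.items() if v is None]:
    del folder['meta'][key]
assert folder['meta'] == {'colour': 'green'}

# allowNull=True would skip the deletion pass, leaving {'colour': 'green', 'stale': None}.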
def _add_speed_device_info(net_caps):
    """Collect and include device speed information in the report."""
    timeout = 2
    for devname, devattr in six.viewitems(net_caps['nics']):
        timeout -= _wait_for_link_up(devname, timeout)
        devattr['speed'] = nic.speed(devname)

    for devname, devattr in six.viewitems(net_caps['bondings']):
        timeout -= _wait_for_link_up(devname, timeout)
        devattr['speed'] = bond.speed(devname)
def order_networks(networks):
    vlanned_nets = ((net, attr) for net, attr in six.viewitems(networks)
                    if 'vlan' in attr)
    non_vlanned_nets = ((net, attr) for net, attr in six.viewitems(networks)
                        if 'vlan' not in attr)

    for net, attr in vlanned_nets:
        yield net, attr
    for net, attr in non_vlanned_nets:
        yield net, attr
def wsgi_handler(env, start_response):
    if headers:
        # Python 2.6 does not have dict views, so fall back to a
        # per-item membership check there.
        if six.PY2:
            assert all(k in env and env[k] == v for k, v in headers.items())
        else:
            assert six.viewitems(env) >= six.viewitems(headers)
    assert body == env['wsgi.input'].read()
    start_response('200 OK', [])
    return []
def _update_running_config(networks, bonds):
    """
    Recreate RunningConfig so that following setSafeNetworkConfig
    will persist a valid configuration.
    """
    running_config = RunningConfig()
    for net, net_attr in six.viewitems(networks):
        running_config.setNetwork(net, net_attr)
    for bonding, bonding_attr in six.viewitems(bonds):
        running_config.setBonding(bonding, bonding_attr)
    running_config.save()
def test_creates_sentry_app(self):
    self.login_as(user=self.user)
    response = self._post()
    expected = {
        'name': 'MyApp',
        'scopes': ['project:read', 'project:write'],
        'webhook_url': 'https://example.com',
    }

    assert response.status_code == 201, response.content
    assert six.viewitems(expected) <= six.viewitems(response.data)
def _normalize_net_ifcfg_keys(networks):
    """
    Ignore keys in persisted networks that might originate from vdsm-reg.
    These might be a result of calling setupNetworks with ifcfg values
    that come from the original interface that is serving the management
    network. For 3.5, VDSM still supports passing arbitrary values
    directly to the ifcfg files, e.g. 'IPV6_AUTOCONF=no'. We filter them
    out here since they are not supported anymore.
    """
    for netname, netattrs in six.viewitems(networks):
        networks[netname] = {k: v for k, v in six.viewitems(netattrs)
                             if not _is_unsupported_ifcfg_key(k)}
def stringify(node, indent=0, tab=' '):
    data = (k for k, v in six.viewitems(node) if not isinstance(v, trie))
    result = []
    for k in data:
        result.append("{:s}{!r} -> {!r}".format(tab * indent, k, node[k]))

    branches = [k for k, v in six.viewitems(node) if isinstance(v, trie)]
    for k in branches:
        result.append("{:s}{!r}".format(tab * indent, k))
        branch_data = stringify(node[k], indent + 1, tab=tab)
        result.extend(branch_data)
    return result
def globals(Globals, **tagmap): '''Apply the tags in `Globals` back into the database.''' global apply cls, tagmap_output = apply.__class__, u", {:s}".format(u', '.join(u"{:s}={:s}".format(internal.utils.string.escape(oldtag), internal.utils.string.escape(newtag)) for oldtag, newtag in six.iteritems(tagmap))) if tagmap else '' count = 0 for ea, res in Globals: ns = func if func.within(ea) else db # grab the current (old) tag state state = ns.tag(ea) # transform the new tag state using the tagmap new = { tagmap.get(name, name) : value for name, value in six.viewitems(res) } # check if the tag mapping resulted in the deletion of a tag if len(new) != len(res): for name in six.viewkeys(res) - six.viewkeys(new): logging.warn(u"{:s}.globals(...{:s}) : Refusing requested tag mapping as it results in the tag \"{:s}\" overwriting the tag \"{:s}\" in the global {:#x}. The value {!s} would be replaced with {!s}.".format('.'.join((__name__, cls.__name__)), tagmap_output, internal.utils.string.escape(name, '"'), internal.utils.string.escape(tagmap[name], '"'), ea, internal.utils.string.repr(res[name]), internal.utils.string.repr(res[tagmap[name]]))) pass # check what's going to be overwritten with different values prior to doing it for name in six.viewkeys(state) & six.viewkeys(new): if state[name] == new[name]: continue logging.warn(u"{:s}.globals(...{:s}) : Overwriting tag \"{:s}\" for global at {:#x} with new value {!s}. Old value was {!s}.".format('.'.join((__name__, cls.__name__)), tagmap_output, internal.utils.string.escape(name, '"'), ea, internal.utils.string.repr(new[name]), internal.utils.string.repr(state[name]))) # now we can apply the tags to the global address try: [ ns.tag(ea, name, value) for name, value in six.iteritems(new) if state.get(name, dummy) != value ] except: logging.warn(u"{:s}.globals(...{:s}) : Unable to apply tags ({!s}) to global {:#x}.".format('.'.join((__name__, cls.__name__)), tagmap_output, internal.utils.string.repr(new), ea), exc_info=True) # increase our counter count += 1 return count
def indexer_name_to_id(indexer_name):
    """Reverse translate the indexer identifier to its id.

    :param indexer_name: Identifier of the indexer. Example: will return 1 for 'tvdb'.
    :return: The indexer id.
    """
    return {v['identifier']: k for k, v in viewitems(indexerConfig)}.get(indexer_name)
def json(json_file):
    """
    Additional entry point to Sequence which parses the input of a json file
    handler or file from the given path.

    Json files are parsed in the following ways depending on if the root is a
    dictionary or array.
    1) If the json's root is a dictionary, these are parsed into a sequence
       of (Key, Value) pairs
    2) If the json's root is an array, these are parsed into a sequence of entries

    >>> seq.json('examples/users.json').first()
    [u'sarah', {u'date_created': u'08/08', u'news_email': True, u'email': u'*****@*****.**'}]

    :param json_file: path or file containing json content
    :return: Sequence wrapping jsonl file
    """
    if isinstance(json_file, str):
        input_file = builtins.open(json_file, mode='r')
        json_input = jsonapi.load(input_file)
        input_file.close()
    elif hasattr(json_file, 'read'):
        json_input = jsonapi.load(json_file)
    else:
        raise ValueError('json_file must be a file path or implement the iterator interface')

    if isinstance(json_input, list):
        return seq(json_input)
    else:
        return seq(six.viewitems(json_input))
def fill_from_dict (self, bybl):
    self.nsamps = len (bybl)

    seenants = set ()
    for a1, a2 in six.viewkeys (bybl):
        seenants.add (a1)
        seenants.add (a2)
    self.ants = np.array (sorted (seenants))
    self.nants = self.ants.size
    self.ant_to_antidx = dict ((num, idx) for idx, num in enumerate (self.ants))

    self.ncontrib = np.empty ((self.nsamps,), dtype=np.int)
    self.vis = np.empty ((self.nsamps,), dtype=np.complex)
    self.blidxs = np.empty ((self.nsamps, 2), dtype=np.int)
    self.nperant = np.zeros ((self.nants,), dtype=np.int)

    for i, (bl, (data, flags)) in enumerate (six.viewitems (bybl)):
        ok = ~flags
        self.ncontrib[i] = ok.sum ()
        self.vis[i] = data[ok].mean ()
        i1 = self.ant_to_antidx[bl[0]]
        i2 = self.ant_to_antidx[bl[1]]
        self.blidxs[i] = i1, i2
        self.nperant[i1] += 1
        self.nperant[i2] += 1
def _remove_zero_values_in_net_qos(net_qos):
    """
    net_qos = {'out': {
            'ul': {'m1': 0, 'd': 0, 'm2': 8000000},
            'ls': {'m1': 4000000, 'd': 100000, 'm2': 3000000}}}
    stripped_qos = {'out': {
            'ul': {'m2': 8000000},
            'ls': {'m1': 4000000, 'd': 100000, 'm2': 3000000}}}
    """
    stripped_qos = {}
    for part, part_config in six.viewitems(net_qos):
        stripped_qos[part] = dict(part_config)  # copy
        for curve, curve_config in six.viewitems(part_config):
            stripped_qos[part][curve] = dict(
                (k, v) for k, v in six.viewitems(curve_config) if v != 0)
    return stripped_qos
def listProviders(self, redirect, list):
    enabledNames = self.model('setting').get(constants.PluginSettings.PROVIDERS_ENABLED)

    enabledProviders = [
        provider
        for providerName, provider in six.viewitems(providers.idMap)
        if providerName in enabledNames
    ]
    if enabledProviders:
        state = self._createStateToken(redirect)
    else:
        state = None

    if list:
        return [
            {
                'id': provider.getProviderName(external=False),
                'name': provider.getProviderName(external=True),
                'url': provider.getUrl(state)
            }
            for provider in enabledProviders
        ]
    else:
        return {
            provider.getProviderName(external=True): provider.getUrl(state)
            for provider in enabledProviders
        }
def _computeHash(file, progress=noProgress):
    """
    Computes all supported checksums on a given file. Downloads the
    file data and stream-computes all required hashes on it, saving
    the results in the file document.

    In the case of assetstore impls that already compute the sha512,
    and when sha512 is the only supported algorithm, we will not download
    the file to the server.
    """
    toCompute = SUPPORTED_ALGORITHMS - set(file)
    toCompute = {alg: getattr(hashlib, alg)() for alg in toCompute}

    if not toCompute:
        return

    fileModel = FileModel()
    with fileModel.open(file) as fh:
        while True:
            chunk = fh.read(_CHUNK_LEN)
            if not chunk:
                break
            for digest in six.viewvalues(toCompute):
                digest.update(chunk)
            progress.update(increment=len(chunk))

    digests = {alg: digest.hexdigest() for alg, digest in six.viewitems(toCompute)}
    fileModel.update({'_id': file['_id']}, update={
        '$set': digests
    }, multi=False)

    return digests
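# Standalone sketch of the same chunked, multi-algorithm hashing idea, using
# only the standard library (the file path, algorithm list, and chunk size
# below are illustrative, not values from the snippet above).
import hashlib


def compute_hashes(path, algorithms=('md5', 'sha256'), chunk_len=65536):
    digests = {alg: hashlib.new(alg) for alg in algorithms}
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_len)
            if not chunk:
                break
            # every digest consumes the same chunk, so the file is read only once
            for digest in digests.values():
                digest.update(chunk)
    return {alg: digest.hexdigest() for alg, digest in digests.items()}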
def listProviders(self, params):
    self.requireParams(('redirect',), params)
    redirect = params['redirect']
    returnList = self.boolParam('list', params, default=False)

    enabledNames = self.model('setting').get(
        constants.PluginSettings.PROVIDERS_ENABLED)

    enabledProviders = [
        provider
        for providerName, provider in six.viewitems(providers.idMap)
        if providerName in enabledNames
    ]
    if enabledProviders:
        state = self._createStateToken(redirect)
    else:
        state = None

    if returnList:
        return [
            {
                'id': provider.getProviderName(external=False),
                'name': provider.getProviderName(external=True),
                'url': provider.getUrl(state)
            }
            for provider in enabledProviders
        ]
    else:
        return {
            provider.getProviderName(external=True): provider.getUrl(state)
            for provider in enabledProviders
        }
def _setup_input_pipes(input_pipes, stdin):
    """
    Given a mapping of input pipes, return a tuple with 2 elements. The first is
    a list of file descriptors to pass to ``select`` as writeable descriptors.
    The second is a dictionary mapping paths to existing named pipes to their
    adapters.
    """
    wds = []
    fifos = {}
    for pipe, adapter in six.viewitems(input_pipes):
        if isinstance(pipe, int):
            # This is assumed to be an open system-level file descriptor
            wds.append(pipe)
        elif pipe == '_stdin':
            # Special case for binding to standard input
            input_pipes[stdin] = input_pipes['_stdin']
            wds.append(stdin)
        else:
            if not os.path.exists(pipe):
                raise Exception('Input pipe does not exist: %s' % pipe)
            if not stat.S_ISFIFO(os.stat(pipe).st_mode):
                raise Exception('Input pipe must be a fifo object: %s' % pipe)
            fifos[pipe] = adapter

    return wds, fifos
def json(self, json_file):
    """
    Reads and parses the input of a json file handler or file.

    Json files are parsed differently depending on if the root is a dictionary
    or an array.
    1) If the json's root is a dictionary, these are parsed into a sequence
       of (Key, Value) pairs
    2) If the json's root is an array, these are parsed into a sequence of entries

    >>> seq.json('examples/users.json').first()
    [u'sarah', {u'date_created': u'08/08', u'news_email': True, u'email': u'*****@*****.**'}]

    :param json_file: path or file containing json content
    :return: Sequence wrapping jsonl file
    """
    if isinstance(json_file, str):
        file_open = get_read_function(json_file, self.disable_compression)
        input_file = file_open(json_file)
        json_input = jsonapi.load(input_file)
    elif hasattr(json_file, 'read'):
        json_input = jsonapi.load(json_file)
    else:
        raise ValueError('json_file must be a file path or implement the iterator interface')

    if isinstance(json_input, list):
        return self(json_input)
    else:
        return self(six.viewitems(json_input))
def join_impl(other, join_type, sequence):
    """
    Implementation for join_t
    :param other: other sequence to join with
    :param join_type: join type (inner, outer, left, right)
    :param sequence: first sequence to join with
    :return: joined sequence
    """
    if join_type == "inner":
        return inner_join_impl(other, sequence)

    seq_dict = {}
    for element in sequence:
        seq_dict[element[0]] = element[1]
    seq_kv = seq_dict
    other_kv = dict(other)

    if join_type == "left":
        keys = seq_kv.keys()
    elif join_type == "right":
        keys = other_kv.keys()
    elif join_type == "outer":
        keys = set(list(seq_kv.keys()) + list(other_kv.keys()))
    else:
        raise TypeError("Wrong type of join specified")

    result = {}
    for k in keys:
        result[k] = (seq_kv.get(k), other_kv.get(k))
    return six.viewitems(result)
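# Quick sketch of what the join above produces for small key/value sequences;
# the data is made up for illustration and inner_join_impl is not exercised.
left = [('a', 1), ('b', 2)]
right = [('b', 20), ('c', 30)]


def _join(join_type):
    seq_kv = dict(left)
    other_kv = dict(right)
    if join_type == 'left':
        keys = seq_kv.keys()
    elif join_type == 'right':
        keys = other_kv.keys()
    else:  # outer
        keys = set(seq_kv) | set(other_kv)
    return {k: (seq_kv.get(k), other_kv.get(k)) for k in keys}


assert _join('left') == {'a': (1, None), 'b': (2, 20)}
assert _join('right') == {'b': (2, 20), 'c': (None, 30)}
assert _join('outer') == {'a': (1, None), 'b': (2, 20), 'c': (None, 30)}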
def wrapped(*args, **kwargs):
    model = self.model(self.modelName, self.plugin)
    for raw, converted in six.viewitems(self.map):
        id = self._getIdValue(kwargs, raw)

        if self.force:
            kwargs[converted] = model.load(id, force=True, **self.kwargs)
        elif self.level is not None:
            kwargs[converted] = model.load(
                id=id, level=self.level, user=getCurrentUser(), **self.kwargs)
        else:
            kwargs[converted] = model.load(id, **self.kwargs)

        if kwargs[converted] is None and self.exc:
            raise RestException(
                'Invalid %s id (%s).' % (model.name, str(id)))

        if self.requiredFlags:
            model.requireAccessFlags(
                kwargs[converted], user=getCurrentUser(), flags=self.requiredFlags)

    return fun(*args, **kwargs)
def search(self, term=None, key=None):
    """Search episode data for term, if it matches, return the Episode (self).

    The key parameter can be used to limit the search to a specific element,
    for example, episodename.

    This is primarily for use by Show.search and Season.search. See
    Show.search for further information on search.

    Simple example:

    >>> e = Episode()
    >>> e['episodename'] = "An Example"
    >>> e.search("examp")
    <Episode 00x00 - An Example>
    >>>

    Limiting by key:

    >>> e.search("examp", key = "episodename")
    <Episode 00x00 - An Example>
    >>>
    """
    if term is None:
        raise TypeError('must supply string to search for (contents)')

    term = text_type(term).lower()
    for cur_key, cur_value in viewitems(self):
        cur_key, cur_value = text_type(cur_key).lower(), text_type(cur_value).lower()
        if key is not None and cur_key != key:
            # Do not search this key
            continue
        if cur_value.find(text_type(term).lower()) > -1:
            return self
def requireParams(self, required, provided=None):
    """
    This method has two modes. In the first mode, this takes two
    parameters, the first being a required parameter or list of
    them, and the second the dictionary of parameters that were passed.
    If the required parameter does not appear in the passed parameters,
    a ValidationException is raised.

    The second mode of operation takes only a single parameter, which is
    a dict mapping required parameter names to passed in values for those
    params. If the value is ``None``, a ValidationException is raised. This
    mode works well in conjunction with the ``autoDescribeRoute`` decorator,
    where the parameters are not all contained in a single dictionary.

    :param required: An iterable of required params, or if just one is
        required, you can simply pass it as a string.
    :type required: `list, tuple, or str`
    :param provided: The list of provided parameters.
    :type provided: dict
    """
    if provided is None and isinstance(required, dict):
        for name, val in six.viewitems(required):
            if val is None:
                raise RestException('Parameter "%s" is required.' % name)
    else:
        if isinstance(required, six.string_types):
            required = (required,)

        for param in required:
            if provided is None or param not in provided:
                raise RestException('Parameter "%s" is required.' % param)
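# Sketch of the two calling modes described in the docstring above, with a
# plain ValueError standing in for the RestException used there; the function
# name and parameter names below are illustrative only.
def require_params(required, provided=None):
    if provided is None and isinstance(required, dict):
        missing = [name for name, val in required.items() if val is None]
    else:
        if isinstance(required, str):
            required = (required,)
        missing = [p for p in required if provided is None or p not in provided]
    if missing:
        raise ValueError('Parameter "%s" is required.' % missing[0])


require_params('folderId', {'folderId': '123'})   # mode 1: name(s) + passed params
require_params({'folderId': '123', 'name': 'x'})  # mode 2: name -> value mapping
try:
    require_params({'folderId': None})            # mode 2: missing value raises
except ValueError as exc:
    assert 'folderId' in str(exc)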
def get_id_by_external(self, **kwargs):
    """Search tmdb for a show, using an external id.

    Accepts kwargs, so you'll need to add the externals as key/values.
    :param tvrage_id: The tvrage id.
    :param tvdb_id: The tvdb id.
    :param imdb_id: An imdb id (inc. tt).
    :returns: A dict with externals, including the tmdb id.
    """
    try:
        wanted_externals = ['tvdb_id', 'imdb_id', 'tvrage_id']
        for external_id in wanted_externals:
            if kwargs.get(external_id):
                result = self.tmdb.Find(kwargs.get(external_id)).info(
                    **{'external_source': external_id})
                if result.get('tv_results') and result['tv_results'][0]:
                    # Get the external ids for the passed show's id.
                    externals = self.tmdb.TV(result['tv_results'][0]['id']).external_ids()
                    externals = {tmdb_external_id: external_value
                                 for tmdb_external_id, external_value
                                 in viewitems(externals)
                                 if external_value and tmdb_external_id in wanted_externals}
                    externals['tmdb_id'] = result['tv_results'][0]['id']
                    return externals
        return {}
    except RequestException as error:
        raise IndexerException("Could not get external id's. Cause: {cause}".format(cause=error))
def _permanent_hwaddr_info(devs_report):
    paddr = bonding.permanent_address()
    nics_info = devs_report.get('nics', {})
    for nic, nicinfo in six.viewitems(nics_info):
        if nic in paddr:
            nicinfo['permhwaddr'] = paddr[nic]
def _varkwargs(**kwargs):
    """Encode var keyword arguments to utf-8."""
    return {k: _handle_input(arg) for k, arg in viewitems(kwargs)}
def _update_caps_info(nets_info, flat_devs_info, extra_info):
    for net_info in six.viewvalues(nets_info):
        net_info.update(extra_info[net_info['iface']])

    for devname, devinfo in six.viewitems(flat_devs_info):
        devinfo.update(extra_info[devname])
def _setup_dynamic_src_routing(networks):
    for net_name, net_attrs in six.viewitems(networks):
        is_remove = net_attrs.get('remove', False)
        is_dynamic = net_attrs.get('bootproto') == 'dhcp'

        if is_dynamic and not is_remove:
            ifacetracking.add(_get_network_iface(net_name, net_attrs))
def filtered_kwargs(kwargs):
    """Filter kwargs to only contain arguments accepted by `requests.Session.send`."""
    return {
        k: v for k, v in viewitems(kwargs)
        if k in ('stream', 'timeout', 'verify', 'cert', 'proxies', 'allow_redirects')
    }
SHOW_SLUG_PATTERN = re.compile(r'([a-z]+)([0-9]+)')

# For example: {1: 'tvdb_id', 3: 'tvmaze_id', 4: 'tmdb_id'}
mappings = {indexer: indexerConfig[indexer]['mapped_to'] for indexer in indexerConfig}
mappings.update(EXTERNAL_MAPPINGS)

# For example: {'tvdb_id': 1, 'tvmaze_id': 3, 'tmdb_id': 4}
reverse_mappings = {indexerConfig[indexer]['mapped_to']: indexer for indexer in indexerConfig}
reverse_mappings.update({v: k for k, v in viewitems(EXTERNAL_MAPPINGS)})

# For example: {'tvdb': 1, 'tvmaze': 3, 'tmdb': 4}
indexer_name_mapping = {indexerConfig[indexer]['identifier']: indexer for indexer in indexerConfig}


def indexer_name_to_id(indexer_name):
    """Reverse translate the indexer identifier to its id.

    :param indexer_name: Identifier of the indexer. Example: will return 1 for 'tvdb'.
    :return: The indexer id.
    """
    return {v['identifier']: k for k, v in viewitems(indexerConfig)}.get(indexer_name)
def updateJob(self, job, log=None, overwrite=False, status=None, progressTotal=None, progressCurrent=None, notify=True, progressMessage=None, otherFields=None): """ Update an existing job. Any of the updateable fields that are set to None in the kwargs of this method will not be modified. If you set progress information on the job for the first time and set notify=True, a new notification record for the job progress will be created. If notify=True, job status changes will also create a notification with type="job_status", and log changes will create a notification with type="job_log". :param job: The job document to update. :param log: Message to append to the job log. If you wish to overwrite instead of append, pass overwrite=True. :type log: str :param overwrite: Whether to overwrite the log (default is append). :type overwrite: bool :param status: New status for the job. :type status: JobStatus :param progressTotal: Max progress value for this job. :param otherFields: Any additional fields to set on the job. :type otherFields: dict """ event = events.trigger('jobs.job.update', { 'job': job, 'params': { 'log': log, 'overwrite': overwrite, 'status': status, 'progressTotal': progressTotal, 'progressMessage': progressMessage, 'otherFields': otherFields } }) if event.defaultPrevented: return job now = datetime.datetime.utcnow() user = None otherFields = otherFields or {} if job['userId']: user = User().load(job['userId'], force=True) query = { '_id': job['_id'] } updates = { '$push': {}, '$set': {} } statusChanged = False if log is not None: self._updateLog(job, log, overwrite, now, notify, user, updates) if status is not None: try: status = int(status) except ValueError: # Allow non int states pass statusChanged = status != job['status'] self._updateStatus(job, status, now, query, updates) if progressMessage is not None or progressCurrent is not None or progressTotal is not None: self._updateProgress( job, progressTotal, progressCurrent, progressMessage, notify, user, updates) for k, v in six.viewitems(otherFields): job[k] = v updates['$set'][k] = v if updates['$set'] or updates['$push']: if not updates['$push']: del updates['$push'] job['updated'] = now updates['$set']['updated'] = now updateResult = self.update(query, update=updates, multi=False) # If our query didn't match anything then our state transition # was not valid. So raise an exception if updateResult.matched_count != 1: job = self.load(job['_id'], force=True) msg = 'Invalid state transition to \'%s\', Current state is \'%s\'.' % ( status, job['status']) raise ValidationException(msg, field='status') events.trigger('jobs.job.update.after', { 'job': job }) # We don't want todo this until we know the update was successful if statusChanged and user is not None and notify: self._createUpdateStatusNotification(now, user, job) return job
def read_torrent_status(torrent_data): """Read torrent status from Deluge and Deluged client.""" found_torrents = False info_hash_to_remove = [] for torrent in viewitems(torrent_data): info_hash = str(torrent[0]) details = torrent[1] if not is_info_hash_in_history(info_hash): continue found_torrents = True to_remove = False for i in details['files']: # Need to check only the media file or the .rar file to avoid checking all .r0* files in history if not (is_media_file(i['path']) or get_extension(i['path']) == 'rar'): continue # Check if media was processed # OR check hash in case of RARed torrents if is_already_processed_media( i['path']) or is_info_hash_processed(info_hash): to_remove = True # Don't need to check status if we are not going to remove it. if not to_remove: log.info('Torrent not yet post-processed. Skipping: {torrent}', {'torrent': details['name']}) continue status = 'busy' if details['is_finished']: status = 'completed' elif details['is_seed']: status = 'seeding' elif details['paused']: status = 'paused' else: status = details['state'] if status == 'completed': log.info( 'Torrent completed and reached minimum' ' ratio: [{ratio:.3f}/{ratio_limit:.3f}] or' ' seed idle limit' ' Removing it: [{name}]', ratio=details['ratio'], ratio_limit=details['stop_ratio'], name=details['name']) info_hash_to_remove.append(info_hash) elif status == 'seeding': if float(details['ratio']) < float(details['stop_ratio']): log.info( 'Torrent did not reach minimum' ' ratio: [{ratio:.3f}/{ratio_limit:.3f}].' ' Keeping it: [{name}]', ratio=details['ratio'], ratio_limit=details['stop_ratio'], name=details['name']) else: log.info( 'Torrent completed and reached minimum ratio but it' ' was force started again. Current' ' ratio: [{ratio:.3f}/{ratio_limit:.3f}].' ' Keeping it: [{name}]', ratio=details['ratio'], ratio_limit=details['stop_ratio'], name=details['name']) else: log.info('Torrent is {status}. Keeping it: [{name}]', status=status, name=details['name']) if not found_torrents: log.info('No torrents found that were snatched by Medusa') return info_hash_to_remove
def di(row):
    """Returns a dict_items object for easier comparison"""
    return six.viewitems(row)
def bridge_opts_dict_to_sorted_str(opts_dict):
    opts_pairs = ['{}={}'.format(key, val)
                  for key, val in six.viewitems(opts_dict)]
    opts_pairs.sort()
    return ' '.join(opts_pairs)
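# Expected round-trip of the helper above, shown with made-up bridge options:
# {'multicast_snooping': 1, 'forward_delay': 0} -> 'forward_delay=0 multicast_snooping=1'
opts = {'multicast_snooping': 1, 'forward_delay': 0}
pairs = sorted('{}={}'.format(k, v) for k, v in opts.items())
assert ' '.join(pairs) == 'forward_delay=0 multicast_snooping=1'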
def _swap_dict_keyval(dictionary):
    return {val: key for key, val in six.viewitems(dictionary)}
def mirror(from_instance_id, to_instance_id, dry_run): """Create snapshots of all volumes of an instance except the root volume.""" ec2 = boto3.resource('ec2') ec2Client = boto3.client('ec2') fromInstance = ec2.Instance(id=from_instance_id) toInstance = ec2.Instance(id=to_instance_id) logging.info('Mirroring non-root volumes from %s to %s', fromInstance, toInstance) fromVolumesByMapping = { mapping['DeviceName']: ec2.Volume(mapping['Ebs']['VolumeId']) for mapping in fromInstance.block_device_mappings } oldToVolumesByMapping = { mapping['DeviceName']: ec2.Volume(mapping['Ebs']['VolumeId']) for mapping in toInstance.block_device_mappings } # Exclude the root volume del fromVolumesByMapping[fromInstance.root_device_name] logging.info('Stopping destination instance %s', toInstance) toInstance.stop() toInstance.wait_until_stopped() logging.info('Stopped destination instance %s', toInstance) for mapping, fromVolume in six.viewitems(fromVolumesByMapping): oldToVolume = oldToVolumesByMapping[mapping] logging.info('Mirroring volume %s, overwriting %s', fromVolume, oldToVolume) fromVolumeName = _getNameTagValue(fromVolume) now = datetime.datetime.utcnow() snapshotDescription = '%s:%s' % ( fromVolumeName, now.replace(microsecond=0, tzinfo=None).isoformat()) if dry_run: continue logging.info('Creating snapshot (named %s)', snapshotDescription) snapshot = fromVolume.create_snapshot( Description=snapshotDescription, ) logging.info('Started snapshot %s (%s)', snapshot.id, snapshotDescription) logging.info('Started snapshot %s', snapshot) snapshot.create_tags( Tags=[ { 'Key': 'Name', 'Value': snapshotDescription } ] ) snapshot.wait_until_completed() logging.info('Snapshot completed %s', snapshot) toVolumeName = _getNameTagValue(oldToVolume) logging.info('Creating new volume (named %s)', toVolumeName) newToVolume = ec2.create_volume( AvailabilityZone=oldToVolume.availability_zone, SnapshotId=snapshot.id, VolumeType='gp2', TagSpecifications=[ { 'ResourceType': 'volume', 'Tags': [ { 'Key': 'Name', 'Value': toVolumeName } ] } ] ) ec2Client.get_waiter('volume_available').wait( VolumeIds=[newToVolume.id] ) logging.info('Created new volume %s', newToVolume) logging.info('Deleting snapshot %s', snapshot) snapshot.delete() logging.info('Detaching old volume %s', oldToVolume) oldToVolume.detach_from_instance() ec2Client.get_waiter('volume_available').wait( VolumeIds=[oldToVolume.id] ) logging.info('Detached old volume %s', oldToVolume) logging.info('Attaching new volume %s to instance %s at %r', newToVolume, toInstance, mapping) newToVolume.attach_to_instance( Device=mapping, InstanceId=toInstance.id ) ec2Client.get_waiter('volume_in_use').wait( VolumeIds=[newToVolume.id] ) logging.info('Attached new volume %s', oldToVolume) logging.info('Deleting old volume %s', oldToVolume) oldToVolume.delete() logging.info('Starting destination instance %s', toInstance) toInstance.start() toInstance.wait_until_running() logging.info('Started destination instance %s', toInstance) logging.info('Done')
def _parse_episodes(self, tvdb_id, episode_data): """Parse retreived episodes.""" if 'episode' not in episode_data: return False episodes = episode_data['episode'] if not isinstance(episodes, list): episodes = [episodes] for cur_ep in episodes: flag_dvd_numbering = False dvd_seas_no = dvd_ep_no = None seas_no, ep_no = cur_ep.get('seasonnumber'), cur_ep.get( 'episodenumber') if self.config['dvdorder']: dvd_seas_no, dvd_ep_no = cur_ep.get('dvd_season'), cur_ep.get( 'dvd_episodenumber') log.debug( 'Using DVD ordering for dvd season: {0} and dvd episode: {1}, ' 'with regular season {2} and episode {3}', dvd_seas_no, dvd_ep_no, seas_no, ep_no) flag_dvd_numbering = dvd_seas_no is not None and dvd_ep_no is not None # We didn't get a season number but did get a dvd order episode number. Mark it as special. if dvd_ep_no is not None and dvd_seas_no is None: dvd_seas_no = 0 flag_dvd_numbering = True if self.config['dvdorder'] and not flag_dvd_numbering: log.warning( 'No DVD order available for episode (season: {0}, episode: {1}). Skipping this episode. ' 'If you want to have this episode visible, please change it on the TheTvdb site, ' 'or consider disabling DVD order for the show: {2}({3})', dvd_seas_no or seas_no, dvd_ep_no or ep_no, self.shows[tvdb_id]['seriesname'], tvdb_id) if not app.TVDB_DVD_ORDER_EP_IGNORE: dvd_seas_no = 0 # Add as special. # Use the epno (dvd order) and if not exist fall back to the regular episode number. dvd_ep_no = dvd_ep_no or ep_no flag_dvd_numbering = True else: # If TVDB_DVD_ORDER_EP_IGNORE is enabled, we wil not add any episode as a special, when there is not # a dvd ordered episode number. continue if flag_dvd_numbering: seas_no = dvd_seas_no ep_no = dvd_ep_no if seas_no is None or ep_no is None: log.warning( 'Invalid episode numbering (series: {0}({1}), season: {2!r}, episode: {3!r}) ' 'Contact TVDB forums to have it fixed', self.shows[tvdb_id]['seriesname'], tvdb_id, seas_no, ep_no) continue # Skip to next episode # float() is because https://github.com/dbr/tvnamer/issues/95 - should probably be fixed in TVDB data seas_no = int(float(seas_no)) ep_no = int(float(ep_no)) for k, v in viewitems(cur_ep): k = k.lower() if v and k == 'filename': v = urljoin(self.config['artwork_prefix'], v) self._set_item(tvdb_id, seas_no, ep_no, k, v)
def generate_coords_from_dict(coords_dict):
    # use a deque to allow fast insertion/removal at the
    # beginning and end of the sequence
    for chrom, coords_list in viewitems(coords_dict):
        starts, ends = map(deque, zip(*coords_list))
        yield chrom, starts, ends
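# Minimal usage sketch with a made-up coordinate dict of (start, end) pairs;
# each chromosome yields two deques that can be popped cheaply from either end.
from collections import deque

coords = {'chr1': [(10, 20), (35, 60)], 'chr2': [(5, 15)]}
for chrom, coords_list in coords.items():
    starts, ends = map(deque, zip(*coords_list))
    assert len(starts) == len(ends)
    if chrom == 'chr1':
        assert starts == deque([10, 35]) and ends == deque([20, 60])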
def _parse_images(self, sid): """Parse images XML. From http://thetvdb.com/api/[APIKEY]/series/[SERIES ID]/banners.xml images are retrieved using t['show name]['_banners'], for example: >>> indexer_api = Tvdb(images = True) >>> indexer_api['scrubs']['_banners'].keys() ['fanart', 'poster', 'series', 'season', 'seasonwide'] For a Poster >>> t['scrubs']['_banners']['poster']['680x1000']['35308']['_bannerpath'] u'http://thetvdb.com/banners/posters/76156-2.jpg' For a season poster or season banner (seasonwide) >>> t['scrubs']['_banners']['seasonwide'][4]['680x1000']['35308']['_bannerpath'] u'http://thetvdb.com/banners/posters/76156-4-2.jpg' >>> Any key starting with an underscore has been processed (not the raw data from the XML) This interface will be improved in future versions. """ key_mapping = { 'file_name': 'bannerpath', 'language_id': 'language', 'key_type': 'bannertype', 'resolution': 'bannertype2', 'ratings_info': { 'count': 'ratingcount', 'average': 'rating' }, 'thumbnail': 'thumbnailpath', 'sub_key': 'sub_key', 'id': 'id' } search_for_image_type = self.config['image_type'] log.debug('Getting show banners for {0}', sid) _images = {} # Let's get the different types of images available for this series try: series_images_count = self.config[ 'session'].series_api.series_id_images_get( sid, accept_language=self.config['language']) except (ApiException, RequestException) as error: log.info( 'Could not get image count for show id: {0} with reason: {1!r}', sid, error) return for image_type, image_count in viewitems( self._object_to_dict(series_images_count)): try: if search_for_image_type and search_for_image_type != image_type: # We want to use the 'poster' image also for the 'poster_thumb' type if image_type != 'poster' or image_type == 'poster' and search_for_image_type != 'poster_thumb': continue if not image_count: continue if image_type not in _images: _images[image_type] = {} images = self.config[ 'session'].series_api.series_id_images_query_get( sid, key_type=image_type, accept_language=self.config['language']) for image in images.data: # Store the images for each resolution available # Always provide a resolution or 'original'. resolution = image.resolution or 'original' if resolution not in _images[image_type]: _images[image_type][resolution] = {} # _images[image_type][resolution][image.id] = image_dict image_attributes = self._object_to_dict(image, key_mapping) bid = image_attributes.pop('id') if image_type in ['season', 'seasonwide']: if int(image.sub_key ) not in _images[image_type][resolution]: _images[image_type][resolution][int( image.sub_key)] = {} if bid not in _images[image_type][resolution][int( image.sub_key)]: _images[image_type][resolution][int( image.sub_key)][bid] = {} base_path = _images[image_type][resolution][int( image.sub_key)][bid] else: if bid not in _images[image_type][resolution]: _images[image_type][resolution][bid] = {} base_path = _images[image_type][resolution][bid] for k, v in viewitems(image_attributes): if k is None or v is None: continue if k.endswith('path'): k = '_{0}'.format(k) log.debug('Adding base url for image: {0}', v) v = self.config['artwork_prefix'].format(image=v) base_path[k] = v except (ApiException, RequestException) as error: log.warning( 'Could not parse Poster for show id: {0}, with exception: {1!r}', sid, error) return self._save_images(sid, _images) self._set_show_data(sid, '_banners', _images)
def get_flat_underscore_dict(flat_tuple_dict):
    return {'_'.join(k): v for k, v in six.viewitems(flat_tuple_dict)}
def _get_flat_devs_info(devices_info):
    return {
        devname: devinfo
        for sub_devs in six.viewvalues(devices_info)
        for devname, devinfo in six.viewitems(sub_devs)
    }
def getDbConnection(uri=None, replicaSet=None, quiet=False, **kwargs):
    """
    Get a MongoClient object that is connected to the configured database.
    We lazy-instantiate a module-level singleton, the MongoClient objects
    manage their own connection pools internally. Any extra kwargs you pass
    to this method will be passed through to the MongoClient.

    :param uri: if specified, connect to this mongo db rather than the one
        in the config.
    :param replicaSet: if uri is specified, use this replica set.
    :param quiet: if true, don't logprint warnings and success.
    :type quiet: bool
    """
    global _dbClients

    origKey = (uri, replicaSet)
    if origKey in _dbClients:
        return _dbClients[origKey]

    dbConf = getDbConfig()

    if uri is None or uri == '':
        uri = dbConf.get('uri')
        replicaSet = dbConf.get('replica_set')

    clientOptions = {
        # This is the maximum time between when we fetch data from a cursor.
        # If it times out, the cursor is lost and we can't reconnect. If it
        # isn't set, we have issues with replica sets when the primary goes
        # down. This value can be overridden in the mongodb uri connection
        # string with the socketTimeoutMS.
        'socketTimeoutMS': 60000,
        'connectTimeoutMS': 20000,
        'serverSelectionTimeoutMS': 20000,
        'readPreference': 'secondaryPreferred',
        'replicaSet': replicaSet,
        'w': 'majority'
    }

    # All other options in the [database] section will be passed directly as
    # options to the mongo client
    for opt, val in six.viewitems(dict(dbConf)):
        if opt not in {'uri', 'replica_set'}:
            clientOptions[opt] = val

    # Finally, kwargs take precedence
    clientOptions.update(kwargs)

    # if the connection URI overrides any option, honor it above our own
    # settings.
    uriParams = urllib.parse.parse_qs(urllib.parse.urlparse(uri).query)
    for key in uriParams:
        if key in clientOptions:
            del clientOptions[key]

    if uri is None:
        dbUriRedacted = 'mongodb://*****:*****'
        uri = dbUriRedacted
    else:
        parts = uri.split('@')
        if len(parts) == 2:
            dbUriRedacted = 'mongodb://' + parts[1]
        else:
            dbUriRedacted = uri

    client = pymongo.MongoClient(uri, **clientOptions)

    if not quiet:
        desc = ''
        if replicaSet:
            desc += ', replica set: %s' % replicaSet
        logprint.info('Connecting to MongoDB: %s%s' % (dbUriRedacted, desc))

    # Make sure we can connect to the mongo server at startup
    client.server_info()

    _dbClients[origKey] = _dbClients[(uri, replicaSet)] = client

    return client
def _paginate(self, data=None, data_generator=None, sort=None, headers={}): arg_page = self._get_page() arg_limit = self._get_limit() headers.update({ 'X-Pagination-Page': arg_page, 'X-Pagination-Limit': arg_limit }) first_page = arg_page if arg_page > 0 else 1 previous_page = None if arg_page <= 1 else arg_page - 1 if data_generator: results = list(data_generator())[:arg_limit] next_page = None if len(results) < arg_limit else arg_page + 1 last_page = None else: arg_sort = self._get_sort(default=sort) start = (arg_page - 1) * arg_limit end = start + arg_limit results = data if arg_sort: # Compare to earliest datetime instead of None def safe_compare(field, results): if field == 'airDate' and results[field] is None: return text_type(datetime.min) return results[field] try: for field, reverse in reversed(arg_sort): results = sorted(results, key=partial(safe_compare, field), reverse=reverse) except KeyError: return self._bad_request('Invalid sort query parameter') count = len(results) headers['X-Pagination-Count'] = count results = results[start:end] next_page = None if end > count else arg_page + 1 last_page = ((count - 1) // arg_limit) + 1 headers['X-Pagination-Total'] = last_page if last_page <= arg_page: last_page = None # Reconstruct the query parameters query_params = [] for arg, values in viewitems(self.request.query_arguments): if arg in ('page', 'limit'): continue if not isinstance(values, list): values = [values] query_params += [(arg, value) for value in values] bare_uri = url_concat(self.request.path, query_params) links = [] for rel, page in (('next', next_page), ('last', last_page), ('first', first_page), ('previous', previous_page)): if page is None: continue uri = url_concat(bare_uri, dict(page=page, limit=arg_limit)) link = '<{uri}>; rel="{rel}"'.format(uri=uri, rel=rel) links.append(link) self.set_header('Link', ', '.join(links)) return self._ok(data=results, headers=headers)
def _get_episodes(self, tvmaze_id, specials=False, aired_season=None): # pylint: disable=unused-argument """ Get all the episodes for a show by tvmaze id. :param tvmaze_id: Series tvmaze id. :return: An ordered dict with the show searched for. In the format of OrderedDict{"episode": [list of episodes]} """ # Parse episode data log.debug('Getting all episodes of {0}', tvmaze_id) try: results = self.tvmaze_api.episode_list(tvmaze_id, specials=specials) except IDNotFound: log.debug('Episode search did not return any results.') return False except BaseError as e: raise IndexerException('Show episodes search failed in getting a result with error: {0!r}'.format(e)) episodes = self._map_results(results, self.series_map) if not episodes: return False if not isinstance(episodes, list): episodes = [episodes] absolute_number_counter = 1 for cur_ep in episodes: if self.config['dvdorder']: log.debug('Using DVD ordering.') use_dvd = cur_ep.get('dvd_season') is not None and cur_ep.get('dvd_episodenumber') is not None else: use_dvd = False if use_dvd: seasnum, epno = cur_ep.get('dvd_season'), cur_ep.get('dvd_episodenumber') else: seasnum, epno = cur_ep.get('seasonnumber'), cur_ep.get('episodenumber') if self.config['dvdorder']: log.warning('No DVD order for episode (season: {0}, episode: {1}). ' 'Falling back to non-DVD order. ' 'Please consider disabling DVD order for the show with TVmaze ID: {2}', seasnum, epno, tvmaze_id) if seasnum is None or epno in (None, 0): log.warning('An episode has incomplete season/episode number (season: {0!r}, episode: {1!r})', seasnum, epno) continue # Skip to next episode seas_no = int(seasnum) ep_no = int(epno) if seas_no > 0: cur_ep['absolute_number'] = absolute_number_counter absolute_number_counter += 1 for k, v in viewitems(cur_ep): k = k.lower() if v is not None: if k == 'image_medium': self._set_item(tvmaze_id, seas_no, ep_no, 'filename', v) self._set_item(tvmaze_id, seas_no, ep_no, k, v)
def get_flat_path_dict(flat_tuple_dict):
    return {os.path.join(*k): v for k, v in six.viewitems(flat_tuple_dict)}
def post(self, series_slug=None, path_param=None): """Add a new series.""" if series_slug is not None: return self._bad_request('Series slug should not be specified') data = json_decode(self.request.body) if not data or 'id' not in data: return self._bad_request('Invalid series data') ids = {k: v for k, v in viewitems(data['id']) if k != 'imdb'} if len(ids) != 1: return self._bad_request( 'Only 1 indexer identifier should be specified') identifier = SeriesIdentifier.from_slug('{slug}{id}'.format( slug=list(ids)[0], id=list(itervalues(ids))[0])) if not identifier: return self._bad_request('Invalid series identifier') if Series.find_by_identifier(identifier): return self._conflict('Series already exist added') data_options = data.get('options', {}) try: options = { 'default_status': data_options.get('status'), 'quality': data_options.get('quality', { 'preferred': [], 'allowed': [] }), 'season_folders': data_options.get('seasonFolders'), 'lang': data_options.get('language'), 'subtitles': data_options.get('subtitles'), 'anime': data_options.get('anime'), 'scene': data_options.get('scene'), 'paused': data_options.get('paused'), 'blacklist': data_options['release'].get('blacklist', []) if data_options.get('release') else None, 'whitelist': data_options['release'].get('whitelist', []) if data_options.get('release') else None, 'default_status_after': data_options.get('statusAfter'), 'root_dir': data_options.get('rootDir'), 'show_lists': data_options.get('showLists') } queue_item_obj = app.show_queue_scheduler.action.addShow( identifier.indexer.id, identifier.id, data_options.get('showDir'), **options) except SaveSeriesException as error: return self._not_found(error) return self._created(data=queue_item_obj.to_json)
def inv_flat_tuple_dict(flat_tuple_dict):
    return {v: k for k, v in six.viewitems(flat_tuple_dict)}
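# Sketch of how the flat tuple-keyed dict helpers above relate, using a tiny
# made-up config tree; keys are tuples of path components.
import os

flat = {('server', 'host'): 'localhost', ('server', 'port'): 8080}

underscored = {'_'.join(k): v for k, v in flat.items()}
paths = {os.path.join(*k): v for k, v in flat.items()}
inverted = {v: k for k, v in flat.items()}

assert underscored == {'server_host': 'localhost', 'server_port': 8080}
assert paths == {os.path.join('server', 'host'): 'localhost',
                 os.path.join('server', 'port'): 8080}
assert inverted == {'localhost': ('server', 'host'), 8080: ('server', 'port')}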
def do_split(graph, split_seeds, agglo_id=None, supervoxels=None): agglo_ids, supervoxel_map = _make_supervoxel_map(graph, split_seeds, need_agglo_ids=agglo_id is None) if agglo_id is None: agglo_id_counts = { agglo_id: sum(z[1]['count'] for z in seeds) for agglo_id, seeds in six.viewitems(agglo_ids) } agglo_id = max(agglo_ids, key=lambda x: agglo_id_counts[x]) if len(agglo_ids) > 1: logging.info('Warning: more than one agglomerated component. ' + 'Choosing component %d with maximum number of seed points.', agglo_id) logging.info('agglo_id_counts = %r', agglo_id_counts) input_edges = graph.get_agglo_edges(agglo_id) if supervoxels is not None: input_edges = [x for x in input_edges if x.segment_ids[0] in supervoxels and x.segment_ids[1] in supervoxels] graph = build_graph(input_edges) if debug_graph: graph.check_consistency() cur_eqs = neuroglancer.EquivalenceMap() logging.info('Agglomerating') threshold = float('inf') while True: entry = graph.get_next_edge() if entry is None: if verbose_merging: logging.info('Stopping because entry is None') break if entry[0] > threshold: if verbose_merging: logging.info('Stopping because edge score %r is > threshold %r', entry[0], threshold) break segment_ids = entry[1] seeds_a = supervoxel_map.get(segment_ids[0]) seeds_b = supervoxel_map.get(segment_ids[1]) if ((seeds_a is not None and len(seeds_a) > 1) or (seeds_b is not None and len(seeds_b) > 1) or (seeds_a is not None and seeds_b is not None and seeds_a != seeds_b)): if verbose_merging: logging.info('Excluding edge %r because of seeds: %r %r', segment_ids, seeds_a, seeds_b) graph.remove_edge_from_heap(segment_ids) continue if verbose_merging: logging.info('Merging %r with score %r', segment_ids, entry[0]) graph.merge(segment_ids) if debug_graph: graph.check_consistency() new_id = cur_eqs.union(*segment_ids) new_seeds = seeds_a or seeds_b if new_seeds: supervoxel_map[new_id] = new_seeds return dict(agglo_id=agglo_id, cur_eqs=cur_eqs, supervoxel_map=supervoxel_map)
def func_with_serialized_params(*args, **kwargs):
    _args = tuple([deserialize(arg) for arg in args])
    _kwargs = {k: deserialize(v) for k, v in six.viewitems(kwargs)}
    return func(*_args, **_kwargs)
def updateProgress(self, record, save=True, **kwargs): """ Update an existing progress record. :param record: The existing progress record to update. :type record: dict :param total: Some numeric value representing the total task length. By convention, setting this <= 0 means progress on this task is indeterminate. Generally this shouldn't change except in cases where progress on a task switches between indeterminate and determinate state. :type total: int, long, or float :param state: Represents the state of the underlying task execution. :type state: ProgressState enum value. :param current: Some numeric value representing the current progress of the task (relative to total). :type current: int, long, or float :param increment: Amount to increment the progress by. Don't pass both current and increment together, as that behavior is undefined. :type increment: int, long, or float :param message: Message corresponding to the current state of the task. :type message: str :param expires: Set a custom (UTC) expiration time on the record. Default is one hour from the current time. :type expires: datetime :param save: Whether to save the record to the database. :type save: bool """ if 'increment' in kwargs: record['data']['current'] += kwargs['increment'] for field, value in six.viewitems(kwargs): if field in ('total', 'current', 'state', 'message'): record['data'][field] = value now = datetime.datetime.utcnow() if 'expires' in kwargs: expires = kwargs['expires'] else: expires = now + datetime.timedelta(hours=1) record['updated'] = now record['expires'] = expires record['updatedTime'] = time.time() if save: # Only update the time estimate if we are also saving if (record['updatedTime'] > record['startTime'] and record['data']['estimateTime']): if 'estimatedTotalTime' in record: del record['estimatedTotalTime'] try: total = float(record['data']['total']) current = float(record['data']['current']) if total >= current and total > 0 and current > 0: record['estimatedTotalTime'] = \ total * (record['updatedTime'] - record['startTime']) / current except ValueError: pass return self.save(record) else: return record
def patched_input(*args, **kwargs):
    return f(*[handle_arg(arg) for arg in args],
             **{k: handle_arg(arg) for k, arg in viewitems(kwargs)})
def _get_sriov_devices():
    devices = hostdev.list_by_caps()
    return [device_name
            for device_name, device_info in six.viewitems(devices)
            if 'totalvfs' in device_info['params']]
def _add_bridge_opts(net_caps):
    for bridgename, bridgeattr in six.viewitems(net_caps['bridges']):
        bridgeattr['opts'] = bridges.bridge_options(bridgename)
def _map_results(self, tvmaze_response, key_mappings=None, list_separator='|'): """ Map results to a a key_mapping dict. :param tvmaze_response: tvmaze response obect, or a list of response objects. :type tvmaze_response: list(object) :param key_mappings: Dict of tvmaze attributes, that are mapped to normalized keys. :type key_mappings: dictionary :param list_separator: A list separator used to transform lists to a character separator string. :type list_separator: string. """ parsed_response = [] if not isinstance(tvmaze_response, list): tvmaze_response = [tvmaze_response] # TVmaze does not number their special episodes. It does map it to a season. And that's something, medusa # Doesn't support. So for now, we increment based on the order, we process the specials. And map it to # season 0. We start with episode 1. index_special_episodes = 1 for item in tvmaze_response: return_dict = {} try: for key, value in viewitems(item.__dict__): if value is None or value == []: continue # These keys have more complex dictionaries, let's map these manually if key in ['schedule', 'network', 'image', 'externals', 'rating']: if key == 'schedule': return_dict['airs_time'] = value.get('time') or '0:00AM' return_dict['airs_dayofweek'] = value.get('days')[0] if value.get('days') else None if key == 'network': return_dict['network'] = value.name return_dict['code'] = value.code return_dict['timezone'] = value.timezone if key == 'image': if value.get('medium'): return_dict['image_medium'] = value.get('medium') return_dict['image_original'] = value.get('original') return_dict['poster'] = value.get('medium') if key == 'externals': return_dict['tvrage_id'] = value.get('tvrage') return_dict['tvdb_id'] = value.get('thetvdb') return_dict['imdb_id'] = value.get('imdb') if key == 'rating': return_dict['rating'] = value.get('average') \ if isinstance(value, dict) else value else: # Do some value sanitizing if isinstance(value, list): if all(isinstance(x, (string_types, integer_types)) for x in value): value = list_separator.join(text_type(v) for v in value) # Try to map the key if key in key_mappings: key = key_mappings[key] # Set value to key return_dict[key] = text_type(value) if isinstance(value, (float, integer_types)) else value # For episodes if hasattr(item, 'season_number') and getattr(item, 'episode_number') is None: return_dict['episodenumber'] = text_type(index_special_episodes) return_dict['seasonnumber'] = 0 index_special_episodes += 1 # If there is a web_channel available, let's use that in stead of the network field. network = getattr(item, 'web_channel', None) if network and getattr(network, 'name', None): return_dict['network'] = network.name except Exception as error: log.warning('Exception trying to parse attribute: {0}, with exception: {1!r}', key, error) parsed_response.append(return_dict) return parsed_response if len(parsed_response) != 1 else parsed_response[0]