def __call__(self):
    while True:
        try:
            header, payload, __ = recv(self.socket)
        except GreenletExit:
            break
        except zmq.ZMQError:
            exc = TaskClosed('Collector socket closed')
            for results in viewvalues(self.results):
                for result in viewvalues(results):
                    result.set_exception(exc)
            break
        except:  # TODO: warn MalformedMessage
            continue
        method, call_id, task_id = header
        method = ord(method)
        reply = Reply(method, call_id, task_id)
        if method & ACK:
            value = payload or None
        else:
            value = self.unpack(payload)
        self.trace and self.trace(method, (call_id, task_id, value))
        del header, payload, method, call_id, task_id
        try:
            self.dispatch_reply(reply, value)
        except KeyError:
            # TODO: warning
            continue
        finally:
            del reply, value
def _dpdk_devs_current(dpdk_devices):
    devs_exist = all(
        _dev_exists(devinfo) for devinfo in six.viewvalues(dpdk_devices))
    unlisted_devices = _unlisted_devices(
        [devinfo['pci_addr'] for devinfo in six.viewvalues(dpdk_devices)])
    return devs_exist and not unlisted_devices
def setup_params(self, data):
    params = self.params.copy()
    valid_scale = ('area', 'count', 'width')
    if params['scale'] not in valid_scale:
        msg = "Parameter scale should be one of {}"
        raise PlotnineError(msg.format(valid_scale))

    lookup = {
        'biweight': 'biw',
        'cosine': 'cos',
        'cosine2': 'cos2',
        'epanechnikov': 'epa',
        'gaussian': 'gau',
        'triangular': 'tri',
        'triweight': 'triw',
        'uniform': 'uni'}

    with suppress(KeyError):
        params['kernel'] = lookup[params['kernel'].lower()]

    if params['kernel'] not in six.viewvalues(lookup):
        msg = ("kernel should be one of {}. "
               "You may use the abbreviations {}")
        raise PlotnineError(msg.format(six.viewkeys(lookup),
                                       six.viewvalues(lookup)))

    missing_params = (six.viewkeys(stat_density.DEFAULT_PARAMS) -
                      six.viewkeys(params))
    for key in missing_params:
        params[key] = stat_density.DEFAULT_PARAMS[key]

    return params
def reinitializeAll():
    """
    Force all models to reconnect/rebuild indices (needed for testing).
    """
    for pluginModels in list(six.viewvalues(_modelInstances)):
        for model in list(six.viewvalues(pluginModels)):
            model.reconnect()
def validate_column_specs(events, next_value_columns, previous_value_columns):
    """
    Verify that the columns of ``events`` can be used by an EventsLoader to
    serve the BoundColumns described by ``next_value_columns`` and
    ``previous_value_columns``.
    """
    required = {
        TS_FIELD_NAME,
        SID_FIELD_NAME,
        EVENT_DATE_FIELD_NAME,
    }.union(
        # We also expect any of the field names that our loadable columns
        # are mapped to.
        viewvalues(next_value_columns),
        viewvalues(previous_value_columns),
    )
    received = set(events.columns)
    missing = required - received
    if missing:
        raise ValueError("EventsLoader missing required columns {missing}.\n"
                         "Got Columns: {received}\n"
                         "Expected Columns: {required}".format(
                             missing=sorted(missing),
                             received=sorted(received),
                             required=sorted(required),
                         ))
def validate_column_specs(events, next_value_columns, previous_value_columns):
    """
    Verify that the columns of ``events`` can be used by an EventsLoader to
    serve the BoundColumns described by ``next_value_columns`` and
    ``previous_value_columns``.
    """
    required = {
        TS_FIELD_NAME,
        SID_FIELD_NAME,
        EVENT_DATE_FIELD_NAME,
    }.union(
        # We also expect any of the field names that our loadable columns
        # are mapped to.
        viewvalues(next_value_columns),
        viewvalues(previous_value_columns),
    )
    received = set(events.columns)
    missing = required - received
    if missing:
        raise ValueError(
            "EventsLoader missing required columns {missing}.\n"
            "Got Columns: {received}\n"
            "Expected Columns: {required}".format(
                missing=sorted(missing),
                received=sorted(received),
                required=sorted(required),
            )
        )
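# Hedged sketch of the column check performed by validate_column_specs above,
# using plain sets and illustrative column names instead of the zipline
# constants (TS_FIELD_NAME etc.); not taken from the original source.
def _demo_missing_columns(frame_columns, mapped_columns):
    required = {'timestamp', 'sid', 'event_date'}.union(mapped_columns)
    return required - set(frame_columns)

# _demo_missing_columns(['timestamp', 'sid'], ['event_date', 'value'])
# -> {'event_date', 'value'}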
def _normalize_qos_config(qos):
    for value in six.viewvalues(qos):
        for attrs in six.viewvalues(value):
            if attrs.get('m1') == 0:
                del attrs['m1']
            if attrs.get('d') == 0:
                del attrs['d']
    return qos
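# Hedged usage sketch for _normalize_qos_config above; the nested layout
# {direction: {class: {'m1', 'd', 'm2'}}} is an assumption inferred from the
# deletions the function performs, not documented in the original source.
def _demo_normalize_qos_config():
    qos = {'out': {'ls': {'m1': 0, 'd': 0, 'm2': 8000000}}}
    normalized = _normalize_qos_config(qos)
    assert normalized == {'out': {'ls': {'m2': 8000000}}}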
def required_event_fields(next_value_columns, previous_value_columns):
    """
    Compute the set of resource columns required to serve
    ``next_value_columns`` and ``previous_value_columns``.
    """
    # These metadata columns are used to align event indexers.
    return {TS_FIELD_NAME, SID_FIELD_NAME, EVENT_DATE_FIELD_NAME}.union(
        # We also expect any of the field names that our loadable columns
        # are mapped to.
        viewvalues(next_value_columns),
        viewvalues(previous_value_columns),
    )
def _merge_state(interfaces_state, routes_state, dns_state):
    interfaces = [ifstate for ifstate in six.viewvalues(interfaces_state)]
    state = {
        Interface.KEY: sorted(interfaces, key=lambda d: d[Interface.NAME])
    }
    if routes_state:
        state.update(routes={Route.CONFIG: routes_state})
    if dns_state:
        nameservers = itertools.chain.from_iterable(
            ns for ns in six.viewvalues(dns_state))
        state[DNS.KEY] = {DNS.CONFIG: {DNS.SERVER: list(nameservers)}}
    return state
def mockGoogleToken(url, request):
    try:
        params = urllib.parse.parse_qs(request.body)
        self.assertEqual(params['client_id'],
                         [providerInfo['client_id']['value']])
    except (KeyError, AssertionError) as e:
        return {
            'status_code': 401,
            'content': json.dumps({'error': repr(e)})
        }
    try:
        self.assertEqual(params['grant_type'], ['authorization_code'])
        self.assertEqual(params['client_secret'],
                         [providerInfo['client_secret']['value']])
        self.assertRegexpMatches(params['redirect_uri'][0],
                                 providerInfo['allowed_callback_re'])
        for account in six.viewvalues(providerInfo['accounts']):
            if account['auth_code'] == params['code'][0]:
                break
        else:
            self.fail()
    except (KeyError, AssertionError) as e:
        return {
            'status_code': 400,
            'content': json.dumps({'error': repr(e)})
        }
    return json.dumps({
        'token_type': 'Bearer',
        'access_token': account['access_token'],
        'expires_in': 3546,
        'id_token': 'google_id_token'
    })
def mockGithubApiEmail(url, request):
    try:
        for account in six.viewvalues(providerInfo['accounts']):
            if 'token %s' % account['access_token'] == \
                    request.headers['Authorization']:
                break
        else:
            self.fail()
    except AssertionError as e:
        return {
            'status_code': 401,
            'content': json.dumps({
                'message': repr(e)
            })
        }
    return json.dumps([
        {
            'primary': False,
            'email': '*****@*****.**',
            'verified': True
        }, {
            'primary': True,
            'email': account['user']['email'],
            'verified': True
        }
    ])
def mockBitbucketApiEmail(url, request):
    try:
        for account in six.viewvalues(providerInfo['accounts']):
            if 'Bearer %s' % account['access_token'] == \
                    request.headers['Authorization']:
                break
        else:
            self.fail()
    except AssertionError as e:
        return {
            'status_code': 401,
            'content': json.dumps({
                'message': repr(e)
            })
        }
    return json.dumps({
        "page": 1,
        "pagelen": 10,
        "size": 1,
        "values": [{
            'is_primary': True,
            'is_confirmed': True,
            'email': account['user']['email'],
            'links': {},
            "type": "email"
        }]
    })
def _computeHash(file, progress=noProgress):
    """
    Computes all supported checksums on a given file. Downloads the file data
    and stream-computes all required hashes on it, saving the results in the
    file document.

    In the case of assetstore impls that already compute the sha512, and when
    sha512 is the only supported algorithm, we will not download the file to
    the server.
    """
    toCompute = SUPPORTED_ALGORITHMS - set(file)
    toCompute = {alg: getattr(hashlib, alg)() for alg in toCompute}
    if not toCompute:
        return

    fileModel = FileModel()
    with fileModel.open(file) as fh:
        while True:
            chunk = fh.read(_CHUNK_LEN)
            if not chunk:
                break
            for digest in six.viewvalues(toCompute):
                digest.update(chunk)
            progress.update(increment=len(chunk))

    digests = {
        alg: digest.hexdigest()
        for alg, digest in six.viewitems(toCompute)
    }
    fileModel.update({'_id': file['_id']}, update={'$set': digests},
                     multi=False)

    return digests
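# Minimal, self-contained sketch of the streaming-hash pattern used by
# _computeHash above, assuming only the standard library (no Girder file
# model): read a local file in chunks and update several hashlib digests per
# chunk so large files never need to be held in memory.
import hashlib


def _demo_stream_hashes(path, algorithms=('md5', 'sha512'), chunk_len=65536):
    digests = {alg: getattr(hashlib, alg)() for alg in algorithms}
    with open(path, 'rb') as fh:
        while True:
            chunk = fh.read(chunk_len)
            if not chunk:
                break
            for digest in digests.values():
                digest.update(chunk)
    return {alg: digest.hexdigest() for alg, digest in digests.items()}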
def mockGoogleApi(url, request):
    try:
        for account in six.viewvalues(providerInfo['accounts']):
            if 'Bearer %s' % account['access_token'] == \
                    request.headers['Authorization']:
                break
        else:
            self.fail()
        params = urllib.parse.parse_qs(url.query)
        self.assertSetEqual(set(params['fields'][0].split(',')),
                            {'id', 'emails', 'name'})
    except AssertionError as e:
        return {
            'status_code': 401,
            'content': json.dumps({'error': repr(e)})
        }
    return json.dumps({
        'id': account['user']['oauth']['id'],
        'name': {
            'givenName': account['user']['firstName'],
            'familyName': account['user']['lastName']
        },
        'emails': [{
            'type': 'other',
            'value': '*****@*****.**'
        }, {
            'type': 'account',
            'value': account['user']['email']
        }]
    })
def _update_dhcp_info(nets_info, devices_info):
    """Update DHCP info for both networks and devices"""
    net_ifaces = {net_info['iface'] for net_info in six.viewvalues(nets_info)}
    flat_devs_info = {
        devname: devinfo
        for sub_devs in six.viewvalues(devices_info)
        for devname, devinfo in six.viewitems(sub_devs)
    }
    dhcp_info = dhclient.dhcp_info(net_ifaces | frozenset(flat_devs_info))

    for net_info in six.viewvalues(nets_info):
        net_info.update(dhcp_info[net_info['iface']])
    for devname, devinfo in six.viewitems(flat_devs_info):
        devinfo.update(dhcp_info[devname])
def mockBitbucketApiUser(url, request):
    try:
        for account in six.viewvalues(providerInfo['accounts']):
            if 'Bearer %s' % account['access_token'] == \
                    request.headers['Authorization']:
                break
        else:
            self.fail()
    except AssertionError as e:
        return {
            'status_code': 401,
            'content': json.dumps({'message': repr(e)})
        }
    return json.dumps({
        "created_on": "2011-12-20T16:34:07.132459+00:00",
        "uuid": account['user']['oauth']['id'],
        "location": "Santa Monica, CA",
        "links": {},
        "website": "https://tutorials.bitbucket.org/",
        "username": account['user']['login'],
        "display_name": '%s %s' % (account['user']['firstName'],
                                   account['user']['lastName'])
    })
def restore_subscriptions(self):
    subs = [sub for sub in six.viewvalues(self._subscriptions)]
    self._subscriptions.clear()
    for sub in subs:
        self.subscribe(sub.destination, message_handler=sub.message_handler)
def calculate_cum_reward(policy):
    """Calculate cumulative reward with respect to time.

    Parameters
    ----------
    policy: bandit object
        The bandit algorithm you want to evaluate.

    Return
    ---------
    cum_reward: dict
        The dict stores {history_id: cumulative reward}.

    cum_n_actions: dict
        The dict stores {history_id: cumulative number of recommended
        actions}.
    """
    cum_reward = {-1: 0.0}
    cum_n_actions = {-1: 0}
    for i in range(policy.history_storage.n_histories):
        reward = policy.history_storage.get_history(i).rewards
        cum_n_actions[i] = cum_n_actions[i - 1] + len(reward)
        cum_reward[i] = cum_reward[i - 1] + sum(six.viewvalues(reward))
    return cum_reward, cum_n_actions
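# Standalone sketch of the cumulative-reward recurrence used above, with the
# policy/history objects replaced by a plain list of reward dicts
# ({action_id: reward}); the -1 key seeds the running totals so that
# history 0 has a predecessor to accumulate from.
def _demo_cum_reward(reward_dicts):
    cum_reward = {-1: 0.0}
    cum_n_actions = {-1: 0}
    for i, reward in enumerate(reward_dicts):
        cum_n_actions[i] = cum_n_actions[i - 1] + len(reward)
        cum_reward[i] = cum_reward[i - 1] + sum(reward.values())
    return cum_reward, cum_n_actions

# _demo_cum_reward([{'a': 1.0}, {'a': 0.0, 'b': 1.0}])
# -> ({-1: 0.0, 0: 1.0, 1: 2.0}, {-1: 0, 0: 1, 1: 3})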
class HumanAgent(object):

    ACTIONS = {
        'look_left': _action(-20, 0, 0, 0, 0, 0, 0),
        'look_right': _action(20, 0, 0, 0, 0, 0, 0),
        'strafe_left': _action(0, 0, -1, 0, 0, 0, 0),
        'strafe_right': _action(0, 0, 1, 0, 0, 0, 0),
        'forward': _action(0, 0, 0, 1, 0, 0, 0),
        'backward': _action(0, 0, 0, -1, 0, 0, 0),
    }

    ACTION_LIST = list(six.viewvalues(ACTIONS))

    rewards = 0

    def step(self, reward, unused_image):
        """Gets an image state and a reward, returns an action."""
        self.rewards += reward

        action_str = getch()
        if ord(action_str) in action_mappings.keys():
            action_key = action_mappings[ord(action_str)]
            if action_key == 'shutdown':
                # close the program
                print("Shutting down")
                sys.exit(0)
            else:
                return self.ACTIONS[action_key]
        else:
            # no-op action
            return _action(0, 0, 0, 0, 0, 0, 0)
def _index_dct_to_table(index_dct, column):
    keys = six.viewkeys(index_dct)
    keys_type = None
    if len(keys) > 0:
        probe = next(iter(keys))
        if isinstance(probe, np.datetime64):
            keys_type = pa.timestamp(
                "ns")  # workaround pyarrow type inference bug (ARROW-2554)
        elif isinstance(probe, pd.Timestamp):
            keys = [d.to_datetime64() for d in keys]
            keys_type = pa.timestamp(
                "ns")  # workaround pyarrow type inference bug (ARROW-2554)
        elif isinstance(probe, np.bool_):
            keys = [bool(d) for d in keys]

    # This is because of ARROW-1646:
    #   [Python] pyarrow.array cannot handle NumPy scalar types
    # Additional note: pyarrow.array is supposed to infer type automatically.
    # But the inferred type is not enough to hold np.uint64. Until this is
    # fixed in upstream Arrow, we have to retain the following line
    keys = np.array(list(keys))

    labeled_array = pa.array(keys, type=keys_type)
    partition_array = pa.array(list(six.viewvalues(index_dct)))

    return pa.Table.from_arrays(
        [labeled_array, partition_array],
        names=[column, _PARTITION_COLUMN_NAME])
def _exp4p_score(self, context):
    """The main part of Exp4.P.
    """
    advisor_ids = list(six.viewkeys(context))

    w = self._modelstorage.get_model()['w']
    if len(w) == 0:
        for i in advisor_ids:
            w[i] = 1
    w_sum = sum(six.viewvalues(w))

    action_probs_list = []
    for action_id in self.action_ids:
        weighted_exp = [w[advisor_id] * context[advisor_id][action_id]
                        for advisor_id in advisor_ids]
        prob_vector = np.sum(weighted_exp) / w_sum
        action_probs_list.append((1 - self.n_actions * self.p_min)
                                 * prob_vector
                                 + self.p_min)
    action_probs_list = np.asarray(action_probs_list)
    action_probs_list /= action_probs_list.sum()

    estimated_reward = {}
    uncertainty = {}
    score = {}
    for action_id, action_prob in zip(self.action_ids, action_probs_list):
        estimated_reward[action_id] = action_prob
        uncertainty[action_id] = 0
        score[action_id] = action_prob
    self._modelstorage.save_model(
        {'action_probs': estimated_reward, 'w': w})

    return estimated_reward, uncertainty, score
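# Standalone numeric sketch of the Exp4.P probability mixing done in
# _exp4p_score above: each action's probability is the advisor-weighted
# average of the experts' advice, shrunk toward a uniform floor p_min and
# renormalized. All names below are illustrative, not from the original
# striatum source.
import numpy as np


def _demo_exp4p_probs(advice, weights, p_min):
    # advice: {advisor_id: {action_id: prob}}; weights: {advisor_id: weight}
    action_ids = sorted(next(iter(advice.values())).keys())
    w_sum = sum(weights.values())
    n_actions = len(action_ids)
    probs = np.array([
        (1 - n_actions * p_min)
        * sum(weights[a] * advice[a][action] for a in advice) / w_sum
        + p_min
        for action in action_ids
    ])
    return probs / probs.sum()

# _demo_exp4p_probs({'e1': {'a': 0.7, 'b': 0.3},
#                    'e2': {'a': 0.5, 'b': 0.5}},
#                   {'e1': 1, 'e2': 1}, p_min=0.05)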
def mockBitbucketApiEmail(url, request):
    try:
        for account in six.viewvalues(providerInfo['accounts']):
            if 'Bearer %s' % account['access_token'] == \
                    request.headers['Authorization']:
                break
        else:
            self.fail()
    except AssertionError as e:
        return {
            'status_code': 401,
            'content': json.dumps({'message': repr(e)})
        }
    return json.dumps({
        "page": 1,
        "pagelen": 10,
        "size": 1,
        "values": [{
            'is_primary': True,
            'is_confirmed': True,
            'email': account['user']['email'],
            'links': {},
            "type": "email"
        }]
    })
def create_yield_list(Pw, frw, Pfrw, res_list, can_merge, requires_merge):
    """
    Generates (yields) a list of solutions, and creates a list of those
    created.

    Pw: The possible working solutions (so far)
    frw: The flow rule we are adding
    Pfrw: The combinations of placements for that rule
    res_list: Where the resulting list should be stored
    """
    for orig_sln in Pw:
        for new_sln in Pfrw:
            # We need to consider priority order XXX
            # Ensure all rules are included - put in a low priority drop
            merged_sln = orig_sln.copy_and_add(new_sln, frw, overwrite=True)
            generate_merged_solutions(merged_sln, frw, requires_merge,
                                      can_merge)
            merged_sln.number = len(res_list)
            res_list.append(merged_sln)
            blah = []
            for x in viewvalues(merged_sln.placements):
                blah.append(x.copy())
            try:
                reach = simulate_reachability(blah)
                for k, v in viewitems(reach):
                    if v is None:
                        debug()
                    if v == Match():
                        debug()
            except Exception:
                # debug()
                # print "ARRG"
                pass
            yield merged_sln
def CaptureFrameLocals(self, frame):
    """Captures local variables and arguments of the specified frame.

    Args:
      frame: frame to capture locals and arguments.

    Returns:
      (arguments, locals) tuple.
    """
    # Capture all local variables (including method arguments).
    variables = {
        n: self.CaptureNamedVariable(n, v, 1, self.default_capture_limits)
        for n, v in six.viewitems(frame.f_locals)
    }

    # Split between locals and arguments (keeping arguments in the right
    # order).
    nargs = frame.f_code.co_argcount
    if frame.f_code.co_flags & inspect.CO_VARARGS:
        nargs += 1
    if frame.f_code.co_flags & inspect.CO_VARKEYWORDS:
        nargs += 1
    frame_arguments = []
    for argname in frame.f_code.co_varnames[:nargs]:
        if argname in variables:
            frame_arguments.append(variables.pop(argname))

    return (frame_arguments, list(six.viewvalues(variables)))
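# Self-contained sketch of the locals-vs-arguments split used above: a
# frame's co_varnames lists the arguments first, so slicing by co_argcount
# (plus the *args/**kwargs slots indicated by the code flags) separates them
# from the remaining locals. The demo function below is illustrative only.
import inspect
import sys


def _demo_split_frame(a, b=2, **kw):
    extra = a + b  # a plain local, not an argument
    frame = sys._getframe()
    nargs = frame.f_code.co_argcount
    if frame.f_code.co_flags & inspect.CO_VARARGS:
        nargs += 1
    if frame.f_code.co_flags & inspect.CO_VARKEYWORDS:
        nargs += 1
    # Returns ('a', 'b', 'kw'); 'extra' and 'frame' are ordinary locals.
    return frame.f_code.co_varnames[:nargs]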
def _filterMaxBatchRevision(batchDirectoryPaths):
    batchDirectoryPathsByRevisionById = collections.defaultdict(dict)

    for batchDirectoryPath in batchDirectoryPaths:
        batchMatch = re.match(
            r'^%s_%s\.%s\.'
            r'(?P<dataLevel>\w+)\.'
            r'(?P<batchId>[0-9]+)\.'
            r'(?P<batchRevision>[0-9]+)\.'
            r'0$' % (
                batchDirectoryPath.dataProvider,
                batchDirectoryPath.diseaseStudyCode,
                batchDirectoryPath.dataType),
            batchDirectoryPath.tail()
        )
        if not batchMatch:
            raise IngestException(
                'Could not parse batch directory: "%s"' %
                str(batchDirectoryPath))
        batchMatchDict = batchMatch.groupdict()

        dataLevel = batchMatch.groupdict()['dataLevel']
        if dataLevel != 'Level_1':
            raise IngestException('Unknown data level: "%s"' %
                                  str(batchDirectoryPath))

        batchId = int(batchMatchDict['batchId'])
        batchRevision = int(batchMatchDict['batchRevision'])
        batchDirectoryPathsByRevisionById[batchId][batchRevision] = \
            batchDirectoryPath

    for batchDirectoryPathsByRevision in six.viewvalues(
            batchDirectoryPathsByRevisionById):
        batchRevision, batchDirectoryPath = max(
            six.viewitems(batchDirectoryPathsByRevision))
        yield batchDirectoryPath
def get_solution(ciphertext, keys):
    # English letter frequency population variance
    norm_vals = [val * 100 for val in six.viewvalues(language_freq_dict)]
    english_pop_var = population_variance(norm_vals)
    solutions = {}
    best_sol = {}
    if keys:
        for key in keys:
            # print("Key is possibly: {}".format(key))
            plaintext = decrypt(ciphertext, key)
            pop_var = get_letter_freq_population_variance(plaintext)
            # diff_pop_var = abs(english_pop_var - pop_var)
            num_words = count_words(plaintext, english_dict)
            if num_words > WORD_THRESHOLD:
                solutions[key] = {
                    "key": key,
                    "pop_var": pop_var,
                    "plaintext": plaintext,
                    "num_words": num_words
                }
    if solutions:
        best_sol_count = 0
        print("Found {} possible solutions.".format(len(solutions)))
        for key, sol_dict in six.iteritems(solutions):
            if sol_dict["num_words"] > best_sol_count:
                best_sol = sol_dict
                best_sol_count = sol_dict["num_words"]
    return best_sol
class DiscretizedDerivativeRandomAgent(object):
    """Simple agent for DeepMind Lab."""

    ACTIONS = {
        'look_left': _action(-20, 0, 0, 0, 0, 0, 0),
        'look_right': _action(20, 0, 0, 0, 0, 0, 0),
        'strafe_left': _action(0, 0, -1, 0, 0, 0, 0),
        'strafe_right': _action(0, 0, 1, 0, 0, 0, 0),
        'forward': _action(0, 0, 0, 1, 0, 0, 0),
        'backward': _action(0, 0, 0, -1, 0, 0, 0),
    }

    ACTION_LIST = list(six.viewvalues(ACTIONS))

    rewards = 0
    action = np.zeros((7, ), dtype=np.intc)
    dt = 1  # 0.01

    def step(self, reward, unused_image):
        """Gets an image state and a reward, returns an action."""
        self.rewards += reward
        action = random.choice(DiscretizedRandomAgent.ACTION_LIST)
        self.action += action  # * self.dt
        return self.action
def __init__(self, json_data=None, _readonly=False):
    if isinstance(json_data, MapBase):
        json_data = json_data.to_json()
    elif json_data is not None:
        for v in six.viewvalues(json_data):
            validator(v)
    super(Map, self).__init__(json_data, _readonly=_readonly)
def validate_southbound_devices_usages(nets, ni):
    kernel_config = KernelConfig(ni)

    for requested_net, net_info in six.viewitems(nets):
        if 'remove' in net_info:
            kernel_config.removeNetwork(requested_net)

    for requested_net, net_info in six.viewitems(nets):
        if 'remove' in net_info:
            continue
        kernel_config.setNetwork(requested_net, net_info)

    underlying_devices = []
    for net_attrs in six.viewvalues(kernel_config.networks):
        vlan = net_attrs.get('vlan')
        if 'bonding' in net_attrs:
            underlying_devices.append((net_attrs['bonding'], vlan))
        elif 'nic' in net_attrs:
            underlying_devices.append((net_attrs['nic'], vlan))

    if len(set(underlying_devices)) < len(underlying_devices):
        raise ne.ConfigNetworkError(
            ne.ERR_BAD_PARAMS,
            'multiple networks/similar vlans cannot be'
            ' defined on a single underlying device. '
            'kernel networks: {}\nrequested networks: {}'.format(
                kernel_config.networks, nets))
def required_event_fields(next_value_columns, previous_value_columns):
    """
    Compute the set of resource columns required to serve
    ``next_value_columns`` and ``previous_value_columns``.
    """
    # These metadata columns are used to align event indexers.
    return {
        TS_FIELD_NAME,
        SID_FIELD_NAME,
        EVENT_DATE_FIELD_NAME,
    }.union(
        # We also expect any of the field names that our loadable columns
        # are mapped to.
        viewvalues(next_value_columns),
        viewvalues(previous_value_columns),
    )
def validate_link_aggregation_state(desired_state, current_state):
    available_ifaces = {
        ifname
        for ifname, ifstate in six.viewitems(desired_state.interfaces)
        if ifstate.get('state') != 'absent'
    }
    available_ifaces |= set(current_state.interfaces)

    specified_slaves = set()
    for iface_state in six.viewvalues(desired_state.interfaces):
        if iface_state.get('state') != 'absent':
            link_aggregation = iface_state.get('link-aggregation')
            if link_aggregation:
                slaves = set(link_aggregation.get('slaves', []))
                if not (slaves <= available_ifaces):
                    raise NmstateValueError(
                        "Link aggregation has missing slave: {}".format(
                            iface_state
                        )
                    )
                if slaves & specified_slaves:
                    raise NmstateValueError(
                        "Link aggregation has reused slave: {}".format(
                            iface_state
                        )
                    )
                specified_slaves |= slaves
def mockBitbucketApiUser(url, request):
    try:
        for account in six.viewvalues(providerInfo['accounts']):
            if 'Bearer %s' % account['access_token'] == \
                    request.headers['Authorization']:
                break
        else:
            self.fail()
    except AssertionError as e:
        return {
            'status_code': 401,
            'content': json.dumps({
                'message': repr(e)
            })
        }
    return json.dumps({
        "created_on": "2011-12-20T16:34:07.132459+00:00",
        "uuid": account['user']['oauth']['id'],
        "location": "Santa Monica, CA",
        "links": {},
        "website": "https://tutorials.bitbucket.org/",
        "username": account['user']['login'],
        "display_name": '%s %s' % (account['user']['firstName'],
                                   account['user']['lastName'])
    })
def _computeHash(file, progress=noProgress):
    """
    Computes all supported checksums on a given file. Downloads the file data
    and stream-computes all required hashes on it, saving the results in the
    file document.

    In the case of assetstore impls that already compute the sha512, and when
    sha512 is the only supported algorithm, we will not download the file to
    the server.
    """
    toCompute = SUPPORTED_ALGORITHMS - set(file)
    toCompute = {alg: getattr(hashlib, alg)() for alg in toCompute}
    if not toCompute:
        return

    fileModel = FileModel()
    with fileModel.open(file) as fh:
        while True:
            chunk = fh.read(_CHUNK_LEN)
            if not chunk:
                break
            for digest in six.viewvalues(toCompute):
                digest.update(chunk)
            progress.update(increment=len(chunk))

    digests = {alg: digest.hexdigest()
               for alg, digest in six.viewitems(toCompute)}
    fileModel.update({'_id': file['_id']}, update={
        '$set': digests
    }, multi=False)

    return digests
def test_fetch_artifacts(self, deployable_entity, strs, flat_dicts):
    strs, flat_dicts = strs[:3], flat_dicts[:3]  # all 12 is excessive for a test
    for key, artifact in zip(strs, flat_dicts):
        deployable_entity.log_artifact(key, artifact)

    artifacts = deployable_entity.fetch_artifacts(strs)

    assert set(six.viewkeys(artifacts)) == set(strs)
    assert all(
        filepath.startswith(_CACHE_DIR)
        for filepath in six.viewvalues(artifacts))

    for key, filepath in six.viewitems(artifacts):
        artifact_contents = deployable_entity._get_artifact(key)
        if type(artifact_contents) is tuple:
            # ER returns (contents, path_only)
            # TODO: ER & RMV _get_artifact() should return the same thing
            artifact_contents, _ = artifact_contents

        with open(filepath, 'rb') as f:
            file_contents = f.read()

        assert file_contents == artifact_contents
class DiscretizedRandomAgent(object):
    """Simple agent for DeepMind Lab."""

    ACTIONS = {
        'look_left': _action(-20, 0, 0, 0, 0, 0, 0),
        'look_right': _action(20, 0, 0, 0, 0, 0, 0),
        'look_up': _action(0, 10, 0, 0, 0, 0, 0),
        'look_down': _action(0, -10, 0, 0, 0, 0, 0),
        'strafe_left': _action(0, 0, -1, 0, 0, 0, 0),
        'strafe_right': _action(0, 0, 1, 0, 0, 0, 0),
        'forward': _action(0, 0, 0, 1, 0, 0, 0),
        'backward': _action(0, 0, 0, -1, 0, 0, 0),
        'fire': _action(0, 0, 0, 0, 1, 0, 0),
        'jump': _action(0, 0, 0, 0, 0, 1, 0),
        'crouch': _action(0, 0, 0, 0, 0, 0, 1)
    }

    ACTION_LIST = list(six.viewvalues(ACTIONS))

    def __init__(self):
        self.rewards = 0

    def step(self, reward, unused_image):
        """Gets an image state and a reward, returns an action."""
        self.rewards += reward
        return random.choice(DiscretizedRandomAgent.ACTION_LIST)
def onJobSave(event):
    # Patch a bug with how girder_worker's Girder task spec's 'api_url' is set
    # with Vagrant port forwarding and Nginx proxies
    # This is fundamentally a problem with "rest.getApiUrl"
    job = event.info
    if job['handler'] == 'worker_handler':
        # All girder_worker jobs have 3 absolute external URLs, which need to
        # be patched; these are located at (excluding other job fields):
        #   job = {
        #       'kwargs': {
        #           'inputs': {
        #               '<input_name1>': {
        #                   'mode': 'girder',
        #                   'api_url': '<external_url>'
        #               }
        #           },
        #           'outputs': {
        #               '<output_name1>': {
        #                   'mode': 'girder',
        #                   'api_url': '<external_url>'
        #               }
        #           },
        #           'jobInfo': {
        #               'url': '<external_url>'
        #           }
        #       }
        #   }
        # We need to do this for all job statuses, since due to the way that
        # Job.save is overridden, the local model may be desynchronized from
        # the database after saving; this is fine, since girder_worker
        # (where it matters) will always load directly from the correct entry
        # in the database

        def replaceHost(url):
            # TODO: get this from the server config or the request
            return 'http://127.0.0.1:8080' + url[url.find('/api/v1'):]

        job['kwargs'] = json_util.loads(job['kwargs'])
        for specValue in itertools.chain(
                six.viewvalues(job['kwargs'].get('inputs', {})),
                six.viewvalues(job['kwargs'].get('outputs', {}))):
            if specValue['mode'] == 'girder':
                specValue['api_url'] = replaceHost(specValue['api_url'])
        if job['kwargs'].get('jobInfo'):
            job['kwargs']['jobInfo']['url'] = replaceHost(
                job['kwargs']['jobInfo']['url'])
        job['kwargs'] = json_util.dumps(job['kwargs'])
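# Self-contained sketch of the host replacement performed by the replaceHost
# helper above: everything before the '/api/v1' marker is discarded and a new
# base URL is prepended. The sample URL and base are illustrative.
def _demo_replace_host(url, new_base='http://127.0.0.1:8080'):
    return new_base + url[url.find('/api/v1'):]

# _demo_replace_host('http://10.0.2.2:9080/api/v1/item/abc')
# -> 'http://127.0.0.1:8080/api/v1/item/abc'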
def identify_packages_from_files(self, files):
    """Identifies "packages" for a given collection of files

    From an iterable collection of files, we identify the packages
    that contain the files and any files that are not related.

    Parameters
    ----------
    files : iterable
        Container (e.g. list or set) of file paths

    Return
    ------
    (found_packages, unknown_files)
        - found_packages is a list of dicts that holds information about
          the found packages. Package dicts need at least "name" and
          "files" (that contains an array of related files)
        - unknown_files is a list of files that were not found in a
          package
    """
    unknown_files = set()
    found_packages = {}
    nb_pkg_files = 0

    # TODO: probably that _get_packagefields should create packagespecs
    # internally and just return them. But we should make them hashable
    file_to_package_dict = self._get_packagefields_for_files(files)

    for f in files:
        # Stores the file
        if f not in file_to_package_dict:
            unknown_files.add(f)
        else:
            # TODO: pkgname should become pkgid
            # where for packages from distributions would be name,
            # for VCS -- their path
            pkgfields = file_to_package_dict[f]
            if pkgfields is None:
                unknown_files.add(f)
            else:
                pkgfields_hashable = tuple(x for x in pkgfields.items())
                if pkgfields_hashable in found_packages:
                    found_packages[pkgfields_hashable].files.append(f)
                    nb_pkg_files += 1
                else:
                    pkg = self._create_package(**pkgfields)
                    if pkg:
                        found_packages[pkgfields_hashable] = pkg
                        # we store only non-directories within 'files'
                        if not self._session.isdir(f):
                            pkg.files.append(f)
                            nb_pkg_files += 1
                    else:
                        unknown_files.add(f)

    lgr.debug("%s: %d packages with %d files, and %d other files",
              self.__class__.__name__,
              len(found_packages),
              nb_pkg_files,
              len(unknown_files))

    return list(viewvalues(found_packages)), unknown_files
def required_estimates_fields(columns):
    """
    Compute the set of resource columns required to serve `columns`.
    """
    # We also expect any of the field names that our loadable columns
    # are mapped to.
    return metadata_columns.union(viewvalues(columns))
def _get_timeout(self, map):
    timeout = 30.0
    for disp in list(six.viewvalues(self._map)):
        if hasattr(disp, "next_check_interval"):
            interval = disp.next_check_interval()
            if interval is not None and interval >= 0:
                timeout = min(interval, timeout)
    return timeout
def __init__(self, savePath):
    self.networksPath = os.path.join(savePath, 'nets', '')
    self.bondingsPath = os.path.join(savePath, 'bonds', '')

    nets = self._getConfigs(self.networksPath)
    for net_attrs in six.viewvalues(nets):
        _filter_out_volatile_net_attrs(net_attrs)

    bonds = self._getConfigs(self.bondingsPath)

    super(Config, self).__init__(nets, bonds)
def _normalize_ovs_gateway(config_copy):
    """
    OVS networks do not yet support sourceroute; consequently, requesting a
    gateway on such a network (which is not a default route) will be ignored.
    """
    for netattrs in six.viewvalues(config_copy.networks):
        if not netattrs.get('remove') and netattrs['switch'] == 'ovs':
            if 'gateway' in netattrs and not netattrs.get('defaultRoute'):
                netattrs.pop('gateway')
def __get_windowed_points(self, tstmp, value):
    self.agg_buffer[tstmp] = value
    # Iterate over a snapshot of the keys so entries can be deleted safely
    # while pruning points that have fallen out of the window; the buffer is
    # assumed ordered by timestamp, hence the early break.
    for tstmp_old in list(self.agg_buffer.keys()):
        if tstmp_old <= tstmp - self.window:
            del self.agg_buffer[tstmp_old]
            continue
        break
    return six.viewvalues(self.agg_buffer)
def generate_state(networks, bondings):
    """ Generate a new nmstate state given VDSM setup state format """
    ifstates = {}

    _generate_bonds_state(bondings, ifstates)
    _generate_networks_state(networks, ifstates)

    interfaces = [ifstate for ifstate in six.viewvalues(ifstates)]
    return {INTERFACES: sorted(interfaces, key=lambda d: d['name'])}
def this_is_okay():
    d = {}
    iterkeys(d)
    six.iterkeys(d)
    six.itervalues(d)
    six.iteritems(d)
    six.iterlists(d)
    six.viewkeys(d)
    six.viewvalues(d)
    six.viewlists(d)
    itervalues(d)
    future.utils.iterkeys(d)
    future.utils.itervalues(d)
    future.utils.iteritems(d)
    future.utils.iterlists(d)
    future.utils.viewkeys(d)
    future.utils.viewvalues(d)
    future.utils.viewlists(d)
    six.next(d)
    builtins.next(d)
def validate_bond_names(nets, bonds):
    bad_bond_names = {bond for bond in bonds
                      if not re.match(r'^bond\w+$', bond)}
    bad_bond_names |= {net_attrs['bonding']
                       for net_attrs in six.viewvalues(nets)
                       if 'bonding' in net_attrs and
                       not re.match(r'^bond\w+$', net_attrs['bonding'])}
    if bad_bond_names:
        raise ne.ConfigNetworkError(ne.ERR_BAD_BONDING,
                                    'bad bond name(s): {}'.format(
                                        ', '.join(bad_bond_names)))
def _inspectFunSignature(self, fun):
    self._funNamedArgs = set()
    self._funHasKwargs = False
    for funParam in six.viewvalues(signature(fun).parameters):
        if funParam.kind in {Parameter.POSITIONAL_OR_KEYWORD,
                             Parameter.KEYWORD_ONLY}:
            # POSITIONAL_OR_KEYWORD are basic positional parameters
            # KEYWORD_ONLY are named parameters that appear after a * in
            # Python 3
            self._funNamedArgs.add(funParam.name)
        elif funParam.kind == Parameter.VAR_KEYWORD:
            # VAR_KEYWORD is the **kwargs parameter
            self._funHasKwargs = True
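# Self-contained sketch of the signature inspection done above, using only
# the standard library's inspect module; the lambda in the usage comment is
# an illustrative sample function.
from inspect import Parameter, signature


def _demo_named_args(fun):
    named_args, has_kwargs = set(), False
    for param in signature(fun).parameters.values():
        if param.kind in {Parameter.POSITIONAL_OR_KEYWORD,
                          Parameter.KEYWORD_ONLY}:
            named_args.add(param.name)
        elif param.kind == Parameter.VAR_KEYWORD:
            has_kwargs = True
    return named_args, has_kwargs

# _demo_named_args(lambda a, b=1, **kw: None) -> ({'a', 'b'}, True)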
def __init__(self, savePath):
    self.netconf_path = savePath
    self.networksPath = os.path.join(savePath, NETCONF_NETS)
    self.bondingsPath = os.path.join(savePath, NETCONF_BONDS)
    self.devicesPath = os.path.join(savePath, NETCONF_DEVS)

    nets = self._getConfigs(self.networksPath)
    for net_attrs in six.viewvalues(nets):
        _filter_out_volatile_net_attrs(net_attrs)

    bonds = self._getConfigs(self.bondingsPath)
    devices = self._getConfigs(self.devicesPath)

    super(Config, self).__init__(nets, bonds, devices)
def is_default_route(gateway, routes):
    if not gateway:
        return False

    for route in itertools.chain.from_iterable(six.viewvalues(routes)):
        if (route.get('table') == RtKnownTables.RT_TABLE_MAIN and
                route['family'] == 'inet' and
                route['scope'] == 'global' and
                route['gateway'] == gateway and
                route['destination'] == 'none'):
            return True
    return False
def setup_params(self, data):
    params = self.params.copy()
    lookup = {
        'biweight': 'biw',
        'cosine': 'cos',
        'cosine2': 'cos2',
        'epanechnikov': 'epa',
        'gaussian': 'gau',
        'triangular': 'tri',
        'triweight': 'triw',
        'uniform': 'uni'}

    with suppress(KeyError):
        params['kernel'] = lookup[params['kernel'].lower()]

    if params['kernel'] not in six.viewvalues(lookup):
        msg = ("kernel should be one of {}. "
               "You may use the abbreviations {}")
        raise PlotnineError(msg.format(six.viewkeys(lookup),
                                       six.viewvalues(lookup)))

    return params
def _is_explicit_defined_default_class(dev):
    """
    A default class is defined explicitly when a non-vlan network has hostQos
    definitions.
    """
    netinfo = NetInfo(cache_get())
    for attrs in six.viewvalues(netinfo.networks):
        if 'vlan' not in attrs and 'hostQos' in attrs:
            ports = attrs['ports'] if attrs['bridged'] else [attrs['iface']]
            if dev in ports:
                return True
    return False