def extendtm(self, units, store=None, sort=True):
    """Extends the memory with extra unit(s).

    :param units: The units to add to the TM.
    :param store: Optional store from where some metadata can be retrieved
                  and associated with each unit.
    :param sort: Optional parameter that can be set to False to supress
                 sorting of the candidates list. This should probably only
                 be used in :meth:`matcher.inittm`.
    """
    if isinstance(units, base.TranslationUnit):
        units = [units]
    # Generator expression replaces the Python-2-only itertools.ifilter so
    # this also runs on Python 3.
    for candidate in (unit for unit in units if self.usable(unit)):
        simpleunit = base.TranslationUnit("")
        # We need to ensure that we don't pass multistrings further, since
        # some modules (like the native Levenshtein) can't use it.
        if isinstance(candidate.source, multistring):
            if len(candidate.source.strings) > 1:
                simpleunit.orig_source = candidate.source
                simpleunit.orig_target = candidate.target
            # six.text_type instead of the Python-2-only ``unicode``
            # builtin, consistent with the sibling implementation in this
            # file.
            simpleunit.source = six.text_type(candidate.source)
            simpleunit.target = six.text_type(candidate.target)
        else:
            simpleunit.source = candidate.source
            simpleunit.target = candidate.target
        # If we now only get translator comments, we don't get programmer
        # comments in TM suggestions (in Pootle, for example). If we get
        # all notes, pot2po adds all previous comments as translator
        # comments in the new po file.
        simpleunit.addnote(candidate.getnotes(origin="translator"))
        simpleunit.fuzzy = candidate.isfuzzy()
        self.candidates.units.append(simpleunit)
    if sort:
        self.candidates.units.sort(key=sourcelen, reverse=self.sort_reverse)
def extendtm(self, units, store=None, sort=True):
    """Extends the memory with extra unit(s).

    :param units: The units to add to the TM.
    :param store: Optional store from where some metadata can be retrieved
                  and associated with each unit.
    :param sort: Optional parameter that can be set to False to supress
                 sorting of the candidates list. This should probably only
                 be used in :meth:`matcher.inittm`.
    """
    if isinstance(units, base.TranslationUnit):
        units = [units]
    usable_units = (unit for unit in units if self.usable(unit))
    for unit in usable_units:
        simple = base.TranslationUnit("")
        # Multistrings must not be passed further: some modules (like the
        # native Levenshtein) cannot handle them.
        if isinstance(unit.source, multistring):
            if len(unit.source.strings) > 1:
                simple.orig_source = unit.source
                simple.orig_target = unit.target
            simple.source = six.text_type(unit.source)
            simple.target = six.text_type(unit.target)
        else:
            simple.source = unit.source
            simple.target = unit.target
        # Keep only translator comments; taking all notes would make
        # pot2po copy previous (programmer) comments into the new po file
        # as translator comments.
        simple.addnote(unit.getnotes(origin="translator"))
        simple.fuzzy = unit.isfuzzy()
        self.candidates.units.append(simple)
    if sort:
        self.candidates.units.sort(key=sourcelen, reverse=self.sort_reverse)
def __canonicalize(value):
    """Coalesce adjacent same-typed string-ish bits of *value*.

    ``jbos`` containers are flattened into their bits first and falsy
    (empty) bits are dropped.  Adjacent plain strings are concatenated;
    adjacent literals are rebuilt from their concatenated ``.string``
    payloads.  Yields the coalesced sequence.
    """
    def flatten_bits(value):
        # Expand jbos containers one level; pass string-ish items through;
        # anything else is a programming error.
        for i in value:
            if isinstance(i, jbos):
                for j in i.bits:
                    yield j
            elif isinstance(i, stringy_types):
                yield i
            else:
                raise TypeError(type(i))

    # ifilter(None, ...) drops empty bits before coalescing.
    bits = ifilter(None, flatten_bits(value))
    try:
        last = next(bits)
    except StopIteration:
        # No non-empty bits at all -> yield nothing.
        return
    for i in bits:
        same_type = type(i) == type(last)
        if same_type and isinstance(i, string_types):
            last += i
        elif same_type and isinstance(i, literal_types):
            last = type(i)(last.string + i.string)
        else:
            # Type boundary: emit the accumulated run, start a new one.
            yield last
            last = i
    yield last
def get_all(self):
    """
    Generate all the tiles in the store with their data.

    :rtype: iterator
    """
    # Generator expression replaces the Python-2-only itertools
    # imap/ifilter pair; falsy entries from list() are skipped, exactly
    # as ifilter(None, ...) did.
    return (self.get_one(tile) for tile in self.list() if tile)
def __len__(self):
    """
    Returns the total number of tiles in the store.

    :rtype: int
    """
    # Count truthy tiles.  Replaces the Python-2-only
    # reduce(..., ifilter(None, ...), 0) construction with an equivalent
    # generator-fed sum() that also works on Python 3.
    return sum(1 for tile in self.list() if tile)
def _extract_lib_name(self, library):
    """Return the bare library name parsed from *library*'s path basename.

    :raises ValueError: when the basename does not match ``self._lib_re``.
    """
    basename = library.path.basename()
    match = self._lib_re.match(basename)
    if not match:
        raise ValueError("'{}' is not a valid library name"
                         .format(basename))
    # The first non-None capture group holds the extracted name.
    return next(group for group in match.groups() if group)
def get(self, tiles):
    """
    Add data to each of ``tiles``.

    :param tiles: Tilestream
    :type tiles: iterator

    :rtype: iterator
    """
    # Generator expression replaces the Python-2-only imap/ifilter pair;
    # falsy entries in the stream are skipped, as ifilter(None, ...) did.
    return (self.get_one(tile) for tile in tiles if tile)
def delete(self, tiles):
    """
    Delete ``tiles`` from the store.

    :param tiles: Input tilestream
    :type tiles: iterable

    :rtype: iterator
    """
    # Generator expression replaces the Python-2-only imap/ifilter pair;
    # falsy entries in the stream are skipped, as ifilter(None, ...) did.
    return (self.delete_one(tile) for tile in tiles if tile)
def get_bounding_pyramid(self):
    """
    Returns the bounding pyramid that encloses all tiles in the store.

    :rtype: :class:`BoundingPyramid`
    """
    # Explicit accumulation loop replaces the Python-2-only
    # reduce/imap/ifilter construction (``reduce`` is not a builtin on
    # Python 3); behavior is unchanged: falsy tiles are skipped and each
    # tilecoord is folded into the pyramid.
    pyramid = BoundingPyramid()
    for tile in self.list():
        if tile:
            pyramid = BoundingPyramid.add(pyramid, tile.tilecoord)
    return pyramid
def put(self, tiles):
    """
    Store ``tiles`` in the store.

    :param tiles: Tilestream
    :type tiles: iterator

    :rtype: iterator
    """
    # Generator expression replaces the Python-2-only imap/ifilter pair;
    # falsy entries in the stream are skipped, as ifilter(None, ...) did.
    return (self.put_one(tile) for tile in tiles if tile)
def get_bounding_pyramid(self):
    """
    Returns the bounding pyramid that encloses all tiles in the store.

    :rtype: :class:`BoundingPyramid`
    """
    # Fold every truthy tile's tilecoord into a fresh BoundingPyramid.
    # An explicit loop replaces the Python-2-only reduce/imap/ifilter
    # construction (``reduce`` is not a builtin on Python 3).
    result = BoundingPyramid()
    for tile in self.list():
        if tile:
            result = BoundingPyramid.add(result, tile.tilecoord)
    return result
def _search_child(element, tag, instance, property_, create, terminal=False):
    """
    Search for a child element matching filter_func.
    """
    # Scanning with next() over a filtered iterator gives behavior
    # comparable to element.find(tag), but with greater flexibility.
    matches = _filter_func(property_, tag, terminal)
    try:
        return next(child for child in element if matches(child))
    except StopIteration:
        pass
    if not create:
        return None
    attributes = _attributes_func(property_, tag, terminal)(instance)
    return _create_child(tag, element, attributes)
def order_by(self):
    """Return the sort specification as ``[(field, direction), ...]``.

    An explicit ordering is resolved through the column configs; absent
    one, fall back to the first DB-backed top-level column, ascending.
    """
    if self._order_by:
        ordering = []
        for sort_column_id, direction in self._order_by:
            column = self._column_configs[sort_column_id]
            ordering.append((column.field, direction))
        return ordering
    if self.top_level_columns:
        # Can only sort by columns that come from the DB.
        db_columns = [c for c in self.top_level_columns
                      if hasattr(c, 'field')]
        if db_columns:
            return [(db_columns[0].field, ASCENDING)]
    return []
def _filter(self, condition, predicate, **concurrency_kwargs):
    """
    Does parallel filtering on given ``condition`` with given
    ``predicate``. Supports parallel execution.

    Internal method you do not want to use generally.
    """
    mapper = self.WORKERS.get(concurrency_kwargs)
    if not mapper:
        # Sequential fallback: plain predicate filtering.
        return self.__class__(item for item in self if predicate(item))
    # Parallel path: ship (predicate, item) pairs to the mapper and keep
    # only the results it marked as suitable.
    pairs = ((predicate, item) for item in self)
    outcomes = mapper(condition, pairs)
    kept = (result for suitable, result in outcomes if suitable)
    return self.__class__(kept)
def expand_word(cls, word, rule):
    """Expand *word* against *rule*, returning a list of words.

    A bare placeholder expands directly; a ``jbos`` word may carry at most
    one placeholder bit, which is expanded in place.  Anything else is
    returned unchanged, wrapped in a list.
    """
    if isinstance(word, cls):
        return word.expand(rule)
    if isinstance(word, jbos):
        placeholders = [(pos, bit) for pos, bit in enumerate(word.bits)
                        if isinstance(bit, cls)]
        if len(placeholders) > 1:
            raise ValueError('only one placeholder per word permitted')
        if placeholders:
            pos, bit = placeholders[0]
            head = word.bits[:pos]
            tail = word.bits[pos + 1:]
            # Rebuild one jbos per expansion, splicing the expanded bit in.
            return [jbos(*(head + (safe_str(expansion), ) + tail))
                    for expansion in bit.expand(rule)]
    return [word]
def order_by(self):
    """Return sort specification pairs ``(field_or_id, direction)``.

    Known column ids are resolved to their configured field name; unknown
    ids pass through untouched.  Without an explicit ordering, fall back
    to the first DB-backed top-level column, ascending.
    """
    ordering = []
    if self._order_by:
        for column_id, direction in self._order_by:
            if column_id in self._column_configs:
                ordering.append(
                    (self._column_configs[column_id].field, direction))
            else:
                ordering.append((column_id, direction))
    elif self.top_level_columns:
        # Can only sort by columns that come from the DB.
        db_columns = [c for c in self.top_level_columns
                      if hasattr(c, 'field')]
        if db_columns:
            ordering.append((db_columns[0].field, ASCENDING))
    return ordering
def _install_commands(install_outputs, doppel):
    # Build the list of install command lines for every output, followed
    # by any post-install commands the outputs define.
    def install_line(output):
        # Pick the doppel invocation matching this output's install kind.
        cmd = doppel(output.install_kind)
        if isinstance(output, Directory):
            if output.files is not None:
                src = [i.path.relpath(output.path) for i in output.files]
                dst = path.install_path(output.path, output.install_root,
                                        directory=True)
                return cmd('into', src, dst, directory=output.path)
            # NOTE: a directory with no matched files only warns, then
            # falls through to the plain 'onto' copy below.
            warnings.warn(
                ('installed directory {!r} has no matching files; did you ' +
                 'forget to set `include`?').format(output.path))
        src = output.path
        dst = path.install_path(src, output.install_root)
        return cmd('onto', src, dst)

    # Install lines first, then every non-empty post_install command.
    return list(
        chain((install_line(i) for i in install_outputs),
              ifilter(None, (i.post_install for i in install_outputs))))
def _install_commands(backend, build_inputs, buildfile, env):
    # Emit backend build-file rules installing every collected output,
    # plus any post-install lines; returns None when nothing to install.
    install_outputs = build_inputs['install']
    if not install_outputs:
        return None
    doppel = env.tool('doppel')

    def doppel_cmd(kind):
        # Build (and register as a buildfile variable) the doppel command
        # for this install kind; all non-program kinds share the 'data'
        # argument set.
        cmd = backend.cmd_var(doppel, buildfile)
        name = cmd.name
        if kind != 'program':
            kind = 'data'
            cmd = [cmd] + doppel.data_args
        cmdname = '{name}_{kind}'.format(name=name, kind=kind)
        return buildfile.variable(cmdname, cmd, backend.Section.command,
                                  True)

    def install_line(output):
        # Directories are copied 'into' their install location; plain
        # outputs are copied 'onto' it.
        cmd = doppel_cmd(output.install_kind)
        if isinstance(output, Directory):
            src = [i.path.relpath(output.path) for i in output.files]
            dst = path.install_path(output.path.parent(),
                                    output.install_root)
            return doppel.copy_into(cmd, src, dst, directory=output.path)
        else:
            src = output.path
            dst = path.install_path(src, output.install_root)
            return doppel.copy_onto(cmd, src, dst)

    def post_install(output):
        # Rewrite the command name of a post-install line to a buildfile
        # variable; returns None when the output has no post-install step.
        if output.post_install:
            line = output.post_install
            line[0] = backend.cmd_var(line[0], buildfile)
            return line

    # Install lines first, then every non-None post-install line.
    return list(
        chain((install_line(i) for i in install_outputs),
              ifilter(None, (post_install(i) for i in install_outputs))))
def generate_box_model(nfibers, start=1, missing_relids=None,
                       skip_fibids=None):
    """Generate a model of the expected peaks in a box.

    :param nfibers: number of fibers in the box.
    :param start: first fiber id to consider.
    :param missing_relids: relative (1-based) positions flagged as dead.
    :param skip_fibids: fiber ids excluded from the enumeration.
    :return: list of FiberModelElement, one per selected fiber.
    """
    if skip_fibids is None:
        skip_fibids = []
    if missing_relids is None:
        missing_relids = []

    # Enumerate candidate fiber ids from `start`, drop the skipped ones
    # and take the first `nfibers`.  The generator expression replaces
    # the Python-2-only itertools.ifilter so this also runs on Python 3.
    candidate_ids = itertools.count(start)
    valid_ids = (fibid for fibid in candidate_ids
                 if fibid not in skip_fibids)
    selected = itertools.islice(valid_ids, nfibers)

    result = []
    for idx, fibid in enumerate(selected, 1):
        # Fibers at "missing" relative positions are marked dead.
        mode = FIBER_DEAD if idx in missing_relids else FIBER_PEAK
        result.append(FiberModelElement(fibid=fibid, mode=mode))
    return result
def parse_workflow(cls, base_url, node, input_workflow=None, test_path=None,
                   global_gen=None):
    """Create or extend a WorkFlow from parsed configuration ``node``.

    :param base_url: base URL that relative ``url`` values are joined to.
    :param node: parsed configuration (dict / list-of-dicts) for the flow.
    :param input_workflow: existing WorkFlow to update; a new one is
                           created when None.
    :param test_path: accepted for interface compatibility; unused here.
    :param global_gen: accepted for interface compatibility; unused here.
    :return: the configured WorkFlow.

    BUG FIX: the original assigned the parsed extractor to an undefined
    name ``mytest`` (copy/paste from parse_test) inside the
    ``extract_binds`` branch, raising NameError; that line is removed.
    The unused ``params_flag`` local is removed too.
    """
    myworkflow = input_workflow
    if not myworkflow:
        myworkflow = WorkFlow()

    # Clean up for easy parsing
    node = lowercase_keys(flatten_dictionaries(node))

    # Simple table of variable name and coerce function.
    CONFIG_ELEMENTS = {
        u'name': [coerce_string_to_ascii],
        u'tests': [coerce_list_of_strings],
        u'body': [ContentHandler.parse_content],
        u'group': [coerce_to_string]  # Test group name
    }

    def use_config_parser(configobject, configelement, configvalue):
        """ Try to use parser bindings to find an option for parsing and storing config element
        :configobject: Object to store configuration
        :configelement: Configuration element name
        :configvalue: Value to use to set configuration
        :returns: True if found match for config element, False if didn't
        """
        myparsing = CONFIG_ELEMENTS.get(configelement)
        if myparsing:
            converted = myparsing[0](configvalue)
            setattr(configobject, configelement, converted)
            return True
        return False

    # Map of "setvalue_<key>" names to the WorkFlow setter to invoke.
    functions_case = {
        'setvalue_body': [WorkFlow.setvalue_body],
        'setvalue_headers': [WorkFlow.setvalue_headers],
        'setvalue_auth_password': [WorkFlow.setvalue_auth_password],
        'setvalue_auth_username': [WorkFlow.setvalue_auth_username],
        'setvalue_url': [WorkFlow.setvalue_url],
        'setvalue_method': [WorkFlow.setvalue_method],
        'setvalue_expected_status': [WorkFlow.setvalue_expected_status]
    }

    def _parse_extractor_map(extractor):
        # Validate a single {extractor_type: config} map and parse it.
        if not isinstance(extractor, dict) or len(extractor) == 0:
            raise TypeError(
                "Extractors must be defined as maps of extractorType:{configs} with 1 entry")
        if len(extractor) > 1:
            raise ValueError(
                "Cannot define multiple extractors for given variable name")
        # Safe because length can only be 1
        for extractor_type, extractor_config in extractor.items():
            return validators.parse_extractor(extractor_type,
                                              extractor_config)

    # Copy/convert input elements into appropriate form for a test object
    for configelement, configvalue in node.items():
        if use_config_parser(myworkflow, configelement, configvalue):
            continue
        elif configelement == u'name':
            myworkflow.name = str(configvalue)
        elif configelement == u'extract_binds':
            # Add a list of extractors, of format:
            # {variable_name: {extractor_type: extractor_config}, ... }
            binds = flatten_dictionaries(configvalue)
            if myworkflow.extract_binds is None:
                myworkflow.extract_binds = dict()
            for variable_name, extractor in binds.items():
                myworkflow.extract_binds[variable_name] = \
                    _parse_extractor_map(extractor)
        elif configelement == u'params':
            dict_params = dict()
            dict_templated_params = dict()
            for param_entry in configvalue:
                for key1, value1 in param_entry.items():
                    list_params = list()
                    list_templated_params = list()
                    for override in value1:
                        for key2, value2 in override.items():
                            if key2 == "name":
                                raise Exception(
                                    "Cannot overwrite name attribute of original test case")
                            if key2 == "group":
                                raise Exception(
                                    "Cannot overwrite group attribute of original test case")
                            if key2 == "body":
                                assert isinstance(value2, dict)
                                converted = CONFIG_ELEMENTS.get(
                                    key2)[0](value2)
                                myworkflow.setvalue_body(converted,
                                                         list_params)
                                continue
                            if key2 == "generator_binds":
                                assert isinstance(value2, dict)
                                list_params.append({key2: value2})
                                continue
                            if key2 in ("delay", "retries", "repeat"):
                                assert isinstance(value2, int)
                                list_params.append({key2: value2})
                                continue
                            if key2 == "extract_binds":
                                temp_dict = dict()
                                for bind_entry in value2:
                                    for variable_name, extractor in \
                                            bind_entry.items():
                                        temp_dict[variable_name] = \
                                            _parse_extractor_map(extractor)
                                list_params.append({key2: temp_dict})
                                continue

                            var_func = "setvalue_" + key2
                            if isinstance(value2, dict):
                                output = flatten_dictionaries(value2)
                            else:
                                output = value2
                            # A {'template': ...} wrapper marks the value
                            # as templated.
                            if isinstance(output, dict):
                                templates = [
                                    x for x in output.items()
                                    if str(x[0]).lower() == 'template'
                                ]
                            else:
                                templates = None
                            if templates:
                                list_templated_params.append(key2)
                                value = templates[0][1]
                                is_template = True
                            else:
                                value = output
                                is_template = False
                            # URLs are joined against the base before the
                            # setter runs; all other keys pass through.
                            if var_func == 'setvalue_url':
                                value = urlparse.urljoin(
                                    base_url, coerce_to_string(value))
                            setter = functions_case.get(var_func)
                            if setter:
                                if is_template:
                                    setter[0](myworkflow, value,
                                              list_params, isTemplate=True)
                                else:
                                    setter[0](myworkflow, value,
                                              list_params)
                    dict_params[str(key1)] = list_params
                    dict_templated_params[str(key1)] = list_templated_params
            myworkflow.params = dict_params
            myworkflow.params_templated = dict_templated_params
        elif configelement == 'variable_binds':
            myworkflow.variable_binds = flatten_dictionaries(configvalue)
    return myworkflow
def filter_available_cpus(cpus):
    """Return a lazy iterator over the cpu dicts whose "used" flag is False.

    :param cpus: iterable of cpu dicts carrying a "used" key.
    :rtype: iterator (lazy, like the original itertools.ifilter)
    """
    # Generator expression replaces the Python-2-only itertools.ifilter.
    # The explicit ``== False`` comparison is kept deliberately: unlike
    # ``not cpu["used"]`` it only matches False/0, not "" or None.
    return (cpu for cpu in cpus if cpu["used"] == False)
def get_one(self, tile):
    """Return the first truthy ``store.get_one(tile)`` result across
    ``self.tilestores``, or None if every store misses.

    Stores are queried lazily, in order, exactly like the original
    ``next(ifilter(None, ...), None)`` construction (Python-2-only).
    """
    for store in self.tilestores:
        result = store.get_one(tile)
        if result:
            return result
    return None
def iterkeys(self):
    """Iterate over the truthy keys derived from child elements.

    Each raw child of ``self._element`` is wrapped via the class's
    ``_of()`` factory (bound to this parent) and mapped through ``_key``;
    falsy keys are skipped.  Generator expressions replace the
    Python-2-only imap/ifilter chain.
    """
    wrap = partial(self.__class__._of(), parent=self)
    extract_key = self.__class__._key
    return (key
            for key in (extract_key(wrap(element))
                        for element in self._element)
            if key)
def configure_curl(self, timeout=DEFAULT_TIMEOUT, context=None,
                   curl_handle=None):
    """ Create and mostly configure a curl object for test, reusing
    existing if possible.

    :param timeout: request timeout in seconds passed to curl.
    :param context: templating context forwarded to get_headers().
    :param curl_handle: optional existing pycurl handle to reuse.
    :return: the configured pycurl.Curl object.
    """
    if curl_handle:
        curl = curl_handle
        try:
            # Check the curl handle isn't closed, and reuse it if possible
            curl.getinfo(curl.HTTP_CODE)
            # Below clears the cookies & curl options for clean run
            # But retains the DNS cache and connection pool
            curl.reset()
            curl.setopt(curl.COOKIELIST, "ALL")
        except pycurl.error:
            curl = pycurl.Curl()
    else:
        curl = pycurl.Curl()

    # curl.setopt(pycurl.VERBOSE, 1)  # Debugging convenience
    curl.setopt(curl.URL, str(self.url))
    curl.setopt(curl.TIMEOUT, timeout)

    is_unicoded = False
    bod = self.body
    if isinstance(bod, text_type):  # Encode unicode
        bod = bod.encode('UTF-8')
        is_unicoded = True

    # Set read function for post/put bodies
    if bod and len(bod) > 0:
        curl.setopt(curl.READFUNCTION, MyIO(bod).read)

    if self.auth_username and self.auth_password:
        curl.setopt(pycurl.USERPWD,
                    parsing.encode_unicode_bytes(self.auth_username) +
                    b':' +
                    parsing.encode_unicode_bytes(self.auth_password))
        if self.auth_type:
            curl.setopt(pycurl.HTTPAUTH, self.auth_type)

    # Per-method option setup; several size options are required for some
    # servers even when zero.
    if self.method == u'POST':
        curl.setopt(HTTP_METHODS[u'POST'], 1)
        # Required for some servers
        if bod is not None:
            curl.setopt(pycurl.POSTFIELDSIZE, len(bod))
        else:
            curl.setopt(pycurl.POSTFIELDSIZE, 0)
    elif self.method == u'PUT':
        curl.setopt(HTTP_METHODS[u'PUT'], 1)
        # Required for some servers
        if bod is not None:
            curl.setopt(pycurl.INFILESIZE, len(bod))
        else:
            curl.setopt(pycurl.INFILESIZE, 0)
    elif self.method == u'PATCH':
        curl.setopt(curl.POSTFIELDS, bod)
        curl.setopt(curl.CUSTOMREQUEST, 'PATCH')
        # Required for some servers
        # I wonder: how compatible will this be? It worked with Django
        # but feels iffy.
        if bod is not None:
            curl.setopt(pycurl.INFILESIZE, len(bod))
        else:
            curl.setopt(pycurl.INFILESIZE, 0)
    elif self.method == u'DELETE':
        curl.setopt(curl.CUSTOMREQUEST, 'DELETE')
        if bod is not None:
            curl.setopt(pycurl.POSTFIELDS, bod)
            curl.setopt(pycurl.POSTFIELDSIZE, len(bod))
    elif self.method == u'HEAD':
        curl.setopt(curl.NOBODY, 1)
        curl.setopt(curl.CUSTOMREQUEST, 'HEAD')
    elif self.method and self.method.upper() != 'GET':
        # Alternate HTTP methods
        curl.setopt(curl.CUSTOMREQUEST, self.method.upper())
        if bod is not None:
            curl.setopt(pycurl.POSTFIELDS, bod)
            curl.setopt(pycurl.POSTFIELDSIZE, len(bod))

    # Template headers as needed and convert headers dictionary to list
    # of header entries
    head = self.get_headers(context=context)
    head = copy.copy(head)  # We're going to mutate it, need to copy

    # Set charset if doing unicode conversion and not set explicitly
    # TESTME
    if is_unicoded and u'content-type' in head.keys():
        content = head[u'content-type']
        if u'charset' not in content:
            head[u'content-type'] = content + u' ; charset=UTF-8'

    if head:
        headers = [str(headername) + ':' + str(headervalue)
                   for headername, headervalue in head.items()]
    else:
        headers = list()
    # Fix for expecting 100-continue from server, which not all servers
    # will send!
    headers.append("Expect:")
    headers.append("Connection: close")
    curl.setopt(curl.HTTPHEADER, headers)

    # Set custom curl options, which are KEY:VALUE pairs matching the
    # pycurl option names, and the key/value pairs are set
    if self.curl_options:
        filterfunc = lambda x: x[0] is not None and x[1] is not None  # Must have key and value
        for (key, value) in ifilter(filterfunc, self.curl_options.items()):
            # getattr to look up constant for variable name
            curl.setopt(getattr(curl, key), value)
    return curl
def parse_test(cls, base_url, node, input_test=None, test_path=None):
    """ Create or modify a test, input_test, using configuration in node, and base_url
    If no input_test is given, creates a new one

    Test_path gives path to test file, used for setting working directory in setting up input bodies

    Uses explicitly specified elements from the test input structure
    to make life *extra* fun, we need to handle list <-- > dict transformations.

    This is to say: list(dict(),dict()) or dict(key,value) --> dict() for some elements

    Accepted structure must be a single dictionary of key-value pairs for test configuration """

    mytest = input_test
    if not mytest:
        mytest = Test()

    # Clean up for easy parsing
    node = lowercase_keys(flatten_dictionaries(node))

    # Simple table of variable name, coerce function, and optionally
    # special store function
    CONFIG_ELEMENTS = {
        # Simple variables
        # u'auth_username': [coerce_string_to_ascii],
        # u'auth_password': [coerce_string_to_ascii],
        # u'method': [coerce_http_method],  # HTTP METHOD
        u'delay': [lambda x: int(x)],  # Delay before running
        u'group': [coerce_to_string],  # Test group name
        u'name': [coerce_to_string],  # Test name
        # u'expected_status': [coerce_list_of_ints],
        # NOTE(review): u'delay' appears twice in this literal; the
        # second (identical) entry silently wins.
        u'delay': [lambda x: int(x)],
        u'stop_on_failure': [safe_to_bool],
        u'retries': [lambda x: int(x)],
        u'depends_on': [coerce_list_of_strings],

        # Templated / special handling
        # u'url': [coerce_templatable, set_templated],  # TODO: special handling for templated content, sigh
        u'body': [ContentHandler.parse_content]
        # u'headers': [],

        # COMPLEX PARSE OPTIONS
        # u'extract_binds':[],  # Context variable-to-extractor output binding
        # u'variable_binds': [],  # Context variable to value binding
        # u'generator_binds': [],  # Context variable to generator output binding
        # u'validators': [],  # Validation functions to run
    }

    def use_config_parser(configobject, configelement, configvalue):
        """ Try to use parser bindings to find an option for parsing and storing config element
        :configobject: Object to store configuration
        :configelement: Configuratione element name
        :configvalue: Value to use to set configuration
        :returns: True if found match for config element, False if didn't
        """
        myparsing = CONFIG_ELEMENTS.get(configelement)
        if myparsing:
            converted = myparsing[0](configvalue)
            setattr(configobject, configelement, converted)
            return True
        return False

    # Copy/convert input elements into appropriate form for a test object
    for configelement, configvalue in node.items():
        if use_config_parser(mytest, configelement, configvalue):
            continue

        # Configure test using configuration elements
        if configelement == u'url':
            temp = configvalue
            if isinstance(configvalue, dict):
                # Template is used for URL
                val = lowercase_keys(configvalue)[u'template']
                assert isinstance(val, basestring) or isinstance(val, int)
                url = urlparse.urljoin(base_url, coerce_to_string(val))
                mytest.set_url(url, isTemplate=True)
            else:
                assert isinstance(configvalue, basestring) or isinstance(
                    configvalue, int)
                mytest.url = urlparse.urljoin(base_url,
                                              coerce_to_string(configvalue))
        if configelement == u'display_name':
            temp = configvalue
            if isinstance(configvalue, dict):
                # Template is used for Disply Name
                val = lowercase_keys(configvalue)[u'template']
                assert isinstance(val, basestring) or isinstance(val, int)
                mytest.set_display_name(val, isTemplate=True)
            else:
                assert isinstance(configvalue, basestring) or isinstance(
                    configvalue, int)
                # NOTE(review): urljoin on a display name looks copied
                # from the url branch — confirm intent.
                mytest.display_name = urlparse.urljoin(
                    base_url, coerce_to_string(configvalue))
        if configelement == u'auth_password':
            temp = configvalue
            if isinstance(configvalue, basestring):
                # NOTE(review): lowercase_keys() called on a plain string
                # here — verify against its definition.
                val = lowercase_keys(configvalue)
                assert isinstance(val, basestring) or isinstance(val, int)
                mytest.set_auth_password(val)
        if configelement == u'auth_username':
            temp = configvalue
            if isinstance(configvalue, basestring):
                val = lowercase_keys(configvalue)
                assert isinstance(val, basestring) or isinstance(val, int)
                mytest.set_auth_username(val)
        if configelement == u'method':
            val = configvalue
            if isinstance(configvalue, basestring) or isinstance(val, list):
                assert isinstance(val, basestring) or isinstance(val, list)
                mytest.set_method(val)
        if configelement == u'expected_status':
            val = configvalue
            assert isinstance(val, basestring) or isinstance(val, list)
            mytest.set_expected_status(val)
        if configelement == u'extract_binds':
            # Add a list of extractors, of format:
            # {variable_name: {extractor_type: extractor_config}, ... }
            binds = flatten_dictionaries(configvalue)
            if mytest.extract_binds is None:
                mytest.extract_binds = dict()
            for variable_name, extractor in binds.items():
                if not isinstance(extractor, dict) or len(extractor) == 0:
                    raise TypeError(
                        "Extractors must be defined as maps of extractorType:{configs} with 1 entry")
                if len(extractor) > 1:
                    raise ValueError(
                        "Cannot define multiple extractors for given variable name")
                # Safe because length can only be 1
                for extractor_type, extractor_config in extractor.items():
                    mytest.extract_binds[
                        variable_name] = validators.parse_extractor(
                            extractor_type, extractor_config)
        if configelement == u'validators':
            # Add a list of validators
            if not isinstance(configvalue, list):
                raise Exception(
                    'Misconfigured validator section, must be a list of validators')
            if mytest.validators is None:
                mytest.validators = list()
            # create validator and add to list iof validators
            for var in configvalue:
                if not isinstance(var, dict):
                    raise TypeError(
                        "Validators must be defined as validatorType:{configs} ")
                for validator_type, validator_config in var.items():
                    validator = validators.parse_validator(
                        validator_type, validator_config)
                    mytest.validators.append(validator)
        if configelement == 'headers':
            # HTTP headers to use, flattened to a single string-string
            # dictionary
            # NOTE(review): bare attribute access below is a no-op in the
            # mangled source — confirm against upstream.
            mytest.headers
            configvalue = flatten_dictionaries(configvalue)
            if isinstance(configvalue, dict):
                filterfunc = lambda x: str(x[0]).lower() == 'template'  # Templated items
                templates = [x for x in ifilter(filterfunc,
                                                configvalue.items())]
            else:
                templates = None
            if templates:
                # Should have single entry in dictionary keys
                mytest.set_headers(templates[0][1], isTemplate=True)
            elif isinstance(configvalue, dict):
                mytest.headers = configvalue
            else:
                raise TypeError(
                    "Illegal header type: headers must be a dictionary or list of dictionary keys")
        if configelement == 'variable_binds':
            mytest.variable_binds = flatten_dictionaries(configvalue)
        if configelement == 'generator_binds':
            if(True):
                output = flatten_dictionaries(configvalue)
                output2 = dict()
                for key, value in output.items():
                    output2[str(key)] = str(value)
                mytest.generator_binds = output2
        if configelement.startswith('curl_option_'):
            # Trailing part of the element name selects the pycurl option.
            curlopt = configelement[12:].upper()
            if hasattr(BASECURL, curlopt):
                if not mytest.curl_options:
                    mytest.curl_options = dict()
                mytest.curl_options[curlopt] = configvalue
            else:
                raise ValueError(
                    "Illegal curl option: {0}".format(curlopt))

    # For non-GET requests, accept additional response codes indicating
    # success (but only if not expected statuses are not explicitly
    # specified) this is per HTTP spec:
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.5
    if 'expected_status' not in node.keys():
        if mytest.method == 'POST':
            mytest.expected_status = [200, 201, 204]
        elif mytest.method == 'PUT':
            mytest.expected_status = [200, 201, 204]
        elif mytest.method == 'DELETE':
            mytest.expected_status = [200, 202, 204]
        # Fallthrough default is simply [200]
    return mytest
def __iter__(self):
    """Iterate over the keys of ``self._dict`` that also appear in
    ``self._validatorDict``.

    Generator expression replaces the Python-2-only itertools.ifilter.
    """
    return (key for key in self._dict if key in self._validatorDict)
def parse_workflow(cls, base_url, node, input_workflow=None, test_path=None,
                   global_gen=None):
    """Create or extend a WorkFlow from parsed configuration ``node``.

    :param base_url: base URL that relative ``url`` values are joined to.
    :param node: parsed configuration (dict / list-of-dicts) for the flow.
    :param input_workflow: existing WorkFlow to update; a new one is
                           created when None.
    :param test_path: accepted for interface compatibility; unused here.
    :param global_gen: accepted for interface compatibility; unused here.
    :return: the configured WorkFlow.

    BUG FIX: the original assigned the parsed extractor to an undefined
    name ``mytest`` (copy/paste from parse_test) inside the
    ``extract_binds`` branch, raising NameError; that line is removed.
    The unused ``params_flag`` local is removed too.
    """
    myworkflow = input_workflow
    if not myworkflow:
        myworkflow = WorkFlow()

    # Clean up for easy parsing
    node = lowercase_keys(flatten_dictionaries(node))

    # Simple table of variable name and coerce function.
    CONFIG_ELEMENTS = {
        u'name': [coerce_string_to_ascii],
        u'tests': [coerce_list_of_strings],
        u'body': [ContentHandler.parse_content],
        u'group': [coerce_to_string]  # Test group name
    }

    def use_config_parser(configobject, configelement, configvalue):
        """ Try to use parser bindings to find an option for parsing and storing config element
        :configobject: Object to store configuration
        :configelement: Configuration element name
        :configvalue: Value to use to set configuration
        :returns: True if found match for config element, False if didn't
        """
        myparsing = CONFIG_ELEMENTS.get(configelement)
        if myparsing:
            converted = myparsing[0](configvalue)
            setattr(configobject, configelement, converted)
            return True
        return False

    # Map of "setvalue_<key>" names to the WorkFlow setter to invoke.
    functions_case = {
        'setvalue_body': [WorkFlow.setvalue_body],
        'setvalue_headers': [WorkFlow.setvalue_headers],
        'setvalue_auth_password': [WorkFlow.setvalue_auth_password],
        'setvalue_auth_username': [WorkFlow.setvalue_auth_username],
        'setvalue_url': [WorkFlow.setvalue_url],
        'setvalue_method': [WorkFlow.setvalue_method],
        'setvalue_expected_status': [WorkFlow.setvalue_expected_status]
    }

    def _parse_extractor_map(extractor):
        # Validate a single {extractor_type: config} map and parse it.
        if not isinstance(extractor, dict) or len(extractor) == 0:
            raise TypeError(
                "Extractors must be defined as maps of extractorType:{configs} with 1 entry")
        if len(extractor) > 1:
            raise ValueError(
                "Cannot define multiple extractors for given variable name")
        # Safe because length can only be 1
        for extractor_type, extractor_config in extractor.items():
            return validators.parse_extractor(extractor_type,
                                              extractor_config)

    # Copy/convert input elements into appropriate form for a test object
    for configelement, configvalue in node.items():
        if use_config_parser(myworkflow, configelement, configvalue):
            continue
        elif configelement == u'name':
            myworkflow.name = str(configvalue)
        elif configelement == u'extract_binds':
            # Add a list of extractors, of format:
            # {variable_name: {extractor_type: extractor_config}, ... }
            binds = flatten_dictionaries(configvalue)
            if myworkflow.extract_binds is None:
                myworkflow.extract_binds = dict()
            for variable_name, extractor in binds.items():
                myworkflow.extract_binds[variable_name] = \
                    _parse_extractor_map(extractor)
        elif configelement == u'params':
            dict_params = dict()
            dict_templated_params = dict()
            for param_entry in configvalue:
                for key1, value1 in param_entry.items():
                    list_params = list()
                    list_templated_params = list()
                    for override in value1:
                        for key2, value2 in override.items():
                            if key2 == "name":
                                raise Exception(
                                    "Cannot overwrite name attribute of original test case")
                            if key2 == "group":
                                raise Exception(
                                    "Cannot overwrite group attribute of original test case")
                            if key2 == "body":
                                assert isinstance(value2, dict)
                                converted = CONFIG_ELEMENTS.get(
                                    key2)[0](value2)
                                myworkflow.setvalue_body(converted,
                                                         list_params)
                                continue
                            if key2 == "generator_binds":
                                assert isinstance(value2, dict)
                                list_params.append({key2: value2})
                                continue
                            if key2 in ("delay", "retries", "repeat"):
                                assert isinstance(value2, int)
                                list_params.append({key2: value2})
                                continue
                            if key2 == "extract_binds":
                                temp_dict = dict()
                                for bind_entry in value2:
                                    for variable_name, extractor in \
                                            bind_entry.items():
                                        temp_dict[variable_name] = \
                                            _parse_extractor_map(extractor)
                                list_params.append({key2: temp_dict})
                                continue

                            var_func = "setvalue_" + key2
                            if isinstance(value2, dict):
                                output = flatten_dictionaries(value2)
                            else:
                                output = value2
                            # A {'template': ...} wrapper marks the value
                            # as templated.
                            if isinstance(output, dict):
                                templates = [
                                    x for x in output.items()
                                    if str(x[0]).lower() == 'template'
                                ]
                            else:
                                templates = None
                            if templates:
                                list_templated_params.append(key2)
                                value = templates[0][1]
                                is_template = True
                            else:
                                value = output
                                is_template = False
                            # URLs are joined against the base before the
                            # setter runs; all other keys pass through.
                            if var_func == 'setvalue_url':
                                value = urlparse.urljoin(
                                    base_url, coerce_to_string(value))
                            setter = functions_case.get(var_func)
                            if setter:
                                if is_template:
                                    setter[0](myworkflow, value,
                                              list_params, isTemplate=True)
                                else:
                                    setter[0](myworkflow, value,
                                              list_params)
                    dict_params[str(key1)] = list_params
                    dict_templated_params[str(key1)] = list_templated_params
            myworkflow.params = dict_params
            myworkflow.params_templated = dict_templated_params
        elif configelement == 'variable_binds':
            myworkflow.variable_binds = flatten_dictionaries(configvalue)
    return myworkflow
def configure_curl(self, timeout=DEFAULT_TIMEOUT, context=None, curl_handle=None):
    """Create and mostly configure a curl object for this test, reusing an
    existing handle if possible.

    :param timeout: request timeout in seconds, applied via curl.TIMEOUT
    :param context: optional templating context, forwarded to get_headers()
    :param curl_handle: optional existing pycurl handle; it is reset and
        reused if still open, otherwise a fresh handle is created
    :returns: a configured pycurl.Curl object (caller performs the request)
    """
    if curl_handle:
        curl = curl_handle
        try:
            # Check the curl handle isn't closed, and reuse it if possible
            curl.getinfo(curl.HTTP_CODE)
            # Below clears the cookies & curl options for clean run
            # But retains the DNS cache and connection pool
            curl.reset()
            curl.setopt(curl.COOKIELIST, "ALL")
        except pycurl.error:
            # Handle was already closed; fall back to a brand-new one
            curl = pycurl.Curl()
    else:
        curl = pycurl.Curl()

    # curl.setopt(pycurl.VERBOSE, 1)  # Debugging convenience
    curl.setopt(curl.URL, str(self.url))
    curl.setopt(curl.TIMEOUT, timeout)

    is_unicoded = False
    bod = self.body
    if isinstance(bod, text_type):  # Encode unicode body to UTF-8 bytes
        bod = bod.encode('UTF-8')
        is_unicoded = True

    # Set read function for post/put bodies
    # NOTE(review): the `len(bod) > 0` check is redundant with truthiness,
    # but kept byte-identical here.
    if bod and len(bod) > 0:
        curl.setopt(curl.READFUNCTION, MyIO(bod).read)

    # Basic auth: both username and password must be set; auth_type
    # (e.g. pycurl.HTTPAUTH_BASIC) is only applied when credentials exist.
    if self.auth_username and self.auth_password:
        curl.setopt(
            pycurl.USERPWD,
            parsing.encode_unicode_bytes(self.auth_username) + b':' +
            parsing.encode_unicode_bytes(self.auth_password))
        if self.auth_type:
            curl.setopt(pycurl.HTTPAUTH, self.auth_type)

    # Per-method body/size options. Size options are required for some
    # servers even when the body is empty.
    if self.method == u'POST':
        curl.setopt(HTTP_METHODS[u'POST'], 1)
        # Required for some servers
        if bod is not None:
            curl.setopt(pycurl.POSTFIELDSIZE, len(bod))
        else:
            curl.setopt(pycurl.POSTFIELDSIZE, 0)
    elif self.method == u'PUT':
        curl.setopt(HTTP_METHODS[u'PUT'], 1)
        # Required for some servers
        if bod is not None:
            curl.setopt(pycurl.INFILESIZE, len(bod))
        else:
            curl.setopt(pycurl.INFILESIZE, 0)
    elif self.method == u'PATCH':
        curl.setopt(curl.POSTFIELDS, bod)
        curl.setopt(curl.CUSTOMREQUEST, 'PATCH')
        # Required for some servers
        # I wonder: how compatible will this be? It worked with Django but feels iffy.
        if bod is not None:
            curl.setopt(pycurl.INFILESIZE, len(bod))
        else:
            curl.setopt(pycurl.INFILESIZE, 0)
    elif self.method == u'DELETE':
        curl.setopt(curl.CUSTOMREQUEST, 'DELETE')
        if bod is not None:
            curl.setopt(pycurl.POSTFIELDS, bod)
            curl.setopt(pycurl.POSTFIELDSIZE, len(bod))
    elif self.method == u'HEAD':
        curl.setopt(curl.NOBODY, 1)
        curl.setopt(curl.CUSTOMREQUEST, 'HEAD')
    elif self.method and self.method.upper(
    ) != 'GET':  # Alternate HTTP methods
        curl.setopt(curl.CUSTOMREQUEST, self.method.upper())
        if bod is not None:
            curl.setopt(pycurl.POSTFIELDS, bod)
            curl.setopt(pycurl.POSTFIELDSIZE, len(bod))

    # Template headers as needed and convert headers dictionary to list of header entries
    head = self.get_headers(context=context)
    head = copy.copy(head)  # We're going to mutate it, need to copy

    # Set charset if doing unicode conversion and not set explicitly
    # TESTME
    if is_unicoded and u'content-type' in head.keys():
        content = head[u'content-type']
        if u'charset' not in content:
            head[u'content-type'] = content + u' ; charset=UTF-8'

    if head:
        headers = [
            str(headername) + ':' + str(headervalue)
            for headername, headervalue in head.items()
        ]
    else:
        headers = list()
    # Fix for expecting 100-continue from server, which not all servers
    # will send!
    headers.append("Expect:")
    headers.append("Connection: close")
    curl.setopt(curl.HTTPHEADER, headers)

    # Set custom curl options, which are KEY:VALUE pairs matching the pycurl option names
    # And the key/value pairs are set
    if self.curl_options:
        filterfunc = lambda x: x[0] is not None and x[
            1] is not None  # Must have key and value
        for (key, value) in ifilter(filterfunc, self.curl_options.items()):
            # getattr to look up constant for variable name
            curl.setopt(getattr(curl, key), value)
    return curl
def filters(self):
    """Return the Elasticsearch filter clauses for every stored filter value,
    skipping any clause that is falsy (e.g. None or empty)."""
    clauses = (value.to_es_filter() for value in self._filter_values.values())
    return [clause for clause in clauses if clause]
def msbuild_link(rule, build_inputs, solution, env):
    """Emit an MSBuild VcxProject for a link rule and register it with the
    solution.

    Raises ValueError unless the rule is pure c/c++ compiled with the msvc
    linker flavor, since those are the only toolchains the msbuild backend
    handles.
    """
    if ( any(i not in ['c', 'c++'] for i in rule.langs) or
         rule.linker.flavor != 'msvc' ):
        raise ValueError('msbuild backend currently only supports c/c++ ' +
                         'with msvc')

    # The primary linked artifact (exe/dll/lib) for this rule.
    output = rule.output[0]

    # Parse compilation flags; if there's only one set of them (i.e. the
    # command_var is the same for every compiler), we can apply these to
    # all the files at once. Otherwise, we need to apply them to each file
    # individually so they all get the correct options.
    obj_creators = [i.creator for i in rule.files]
    compilers = uniques(i.compiler for i in obj_creators)

    per_compiler_cflags = {}
    for c in compilers:
        key = c.command_var
        if key not in per_compiler_cflags:
            per_compiler_cflags[key] = c.parse_flags(msbuild.textify_each(
                c.global_flags + build_inputs['compile_options'][c.lang]
            ))

    if len(per_compiler_cflags) == 1:
        # Single compiler config: hoist the flags to project level.
        common_cflags = per_compiler_cflags.popitem()[1]
    else:
        # Mixed configs: flags get attached per-file further below.
        common_cflags = None

    # Parse linking flags (global + per-mode/family + rule-specific).
    ldflags = rule.linker.parse_flags(msbuild.textify_each(
        (rule.linker.global_flags +
         build_inputs['link_options'][rule.base_mode][rule.linker.family] +
         rule.options)
    ))
    ldflags['libs'] = (
        getattr(rule.linker, 'global_libs', []) +
        getattr(rule, 'lib_options', [])
    )
    if hasattr(output, 'import_lib'):
        ldflags['import_lib'] = output.import_lib

    # All files the project depends on: sources, headers, extra deps,
    # any PCH sources, plus linked libraries and explicit extra deps.
    deps = chain(
        (i.creator.file for i in rule.files),
        chain.from_iterable(i.creator.header_files for i in rule.files),
        chain.from_iterable(i.creator.extra_deps for i in rule.files),
        ifilter(None, (getattr(i.creator, 'pch_source', None)
                       for i in rule.files)),
        rule.libs, rule.extra_deps
    )

    def get_source(file):
        # Get the source file for this compilation rule; it's either a
        # regular source file or a PCH source file.
        if isinstance(file.creator, CompileHeader):
            return file.creator.pch_source
        return file.creator.file

    # Create the project file.
    project = msbuild.VcxProject(
        env,
        name=rule.name,
        mode=rule.msbuild_mode,
        output_file=output,
        files=[{
            'name': get_source(i),
            'options': _parse_file_cflags(i, per_compiler_cflags),
        } for i in rule.files],
        compile_options=common_cflags,
        link_options=ldflags,
        dependencies=solution.dependencies(deps),
    )
    solution[output] = project
def parse_test(cls, base_url, node, input_test=None, test_path=None):
    """ Create or modify a test, input_test, using configuration in node, and base_url
    If no input_test is given, creates a new one

    Test_path gives path to test file, used for setting working directory in
    setting up input bodies

    Uses explicitly specified elements from the test input structure.
    To make life *extra* fun, we need to handle list <--> dict transformations.

    This is to say: list(dict(),dict()) or dict(key,value) --> dict() for some elements

    Accepted structure must be a single dictionary of key-value pairs for test
    configuration """
    mytest = input_test
    if not mytest:
        mytest = Test()

    # Clean up for easy parsing
    node = lowercase_keys(flatten_dictionaries(node))

    # Simple table of variable name -> [coerce function]; elements not in
    # this table (url, headers, validators, ...) need special handling in
    # the loop below.
    # FIX: the original table listed u'delay' twice; one entry removed.
    CONFIG_ELEMENTS = {
        # Simple variables
        u'group': [coerce_to_string],           # Test group name
        u'name': [coerce_to_string],            # Test name
        u'delay': [lambda x: int(x)],           # Delay before running
        u'stop_on_failure': [safe_to_bool],
        u'retries': [lambda x: int(x)],
        u'depends_on': [coerce_list_of_strings],

        # Templated / special handling
        u'body': [ContentHandler.parse_content]
    }

    def use_config_parser(configobject, configelement, configvalue):
        """ Try to use parser bindings to find an option for parsing and storing config element
        :configobject: Object to store configuration
        :configelement: Configuration element name
        :configvalue: Value to use to set configuration
        :returns: True if found match for config element, False if didn't
        """
        myparsing = CONFIG_ELEMENTS.get(configelement)
        if myparsing:
            converted = myparsing[0](configvalue)
            setattr(configobject, configelement, converted)
            return True
        return False

    # Copy/convert input elements into appropriate form for a test object
    for configelement, configvalue in node.items():
        if use_config_parser(mytest, configelement, configvalue):
            continue

        # Configure test using configuration elements
        if configelement == u'url':
            if isinstance(configvalue, dict):
                # Template is used for URL
                val = lowercase_keys(configvalue)[u'template']
                assert isinstance(val, basestring) or isinstance(val, int)
                url = urlparse.urljoin(base_url, coerce_to_string(val))
                mytest.set_url(url, isTemplate=True)
            else:
                assert isinstance(configvalue, basestring) or isinstance(
                    configvalue, int)
                mytest.url = urlparse.urljoin(
                    base_url, coerce_to_string(configvalue))
        if configelement == u'display_name':
            if isinstance(configvalue, dict):
                # Template is used for display name
                val = lowercase_keys(configvalue)[u'template']
                assert isinstance(val, basestring) or isinstance(val, int)
                mytest.set_display_name(val, isTemplate=True)
            else:
                assert isinstance(configvalue, basestring) or isinstance(
                    configvalue, int)
                # BUG FIX: a display name is plain text, not a URL. The
                # original code ran it through urlparse.urljoin(base_url, ...)
                # (copy-pasted from the 'url' branch), which mangled any
                # display name containing '/' or ':' characters.
                mytest.display_name = coerce_to_string(configvalue)
        if configelement == u'auth_password':
            # lowercase_keys() is a no-op on non-dict input, so the original
            # call to it here did nothing; the isinstance guard already
            # guarantees a string value.
            if isinstance(configvalue, basestring):
                mytest.set_auth_password(configvalue)
        if configelement == u'auth_username':
            if isinstance(configvalue, basestring):
                mytest.set_auth_username(configvalue)
        if configelement == u'method':
            # Method may be a plain string or a (templated) list form;
            # anything else is silently ignored, matching prior behavior.
            if isinstance(configvalue, basestring) or isinstance(
                    configvalue, list):
                mytest.set_method(configvalue)
        if configelement == u'expected_status':
            val = configvalue
            assert isinstance(val, basestring) or isinstance(val, list)
            mytest.set_expected_status(val)
        if configelement == u'extract_binds':
            # Add a list of extractors, of format:
            # {variable_name: {extractor_type: extractor_config}, ... }
            binds = flatten_dictionaries(configvalue)
            if mytest.extract_binds is None:
                mytest.extract_binds = dict()
            for variable_name, extractor in binds.items():
                if not isinstance(extractor, dict) or len(extractor) == 0:
                    raise TypeError(
                        "Extractors must be defined as maps of extractorType:{configs} with 1 entry"
                    )
                if len(extractor) > 1:
                    raise ValueError(
                        "Cannot define multiple extractors for given variable name"
                    )
                # Safe because length can only be 1
                for extractor_type, extractor_config in extractor.items():
                    mytest.extract_binds[
                        variable_name] = validators.parse_extractor(
                            extractor_type, extractor_config)
        if configelement == u'validators':
            # Add a list of validators
            if not isinstance(configvalue, list):
                raise Exception(
                    'Misconfigured validator section, must be a list of validators'
                )
            if mytest.validators is None:
                mytest.validators = list()
            # Create each validator and add to the list of validators
            for var in configvalue:
                if not isinstance(var, dict):
                    raise TypeError(
                        "Validators must be defined as validatorType:{configs} "
                    )
                for validator_type, validator_config in var.items():
                    validator = validators.parse_validator(
                        validator_type, validator_config)
                    mytest.validators.append(validator)
        if configelement == 'headers':
            # HTTP headers to use, flattened to a single string-string
            # dictionary mytest.headers
            configvalue = flatten_dictionaries(configvalue)
            if isinstance(configvalue, dict):
                # Pull out any templated header entries
                filterfunc = lambda x: str(x[0]).lower() == 'template'
                templates = [
                    x for x in ifilter(filterfunc, configvalue.items())
                ]
            else:
                templates = None

            if templates:  # Should have single entry in dictionary keys
                mytest.set_headers(templates[0][1], isTemplate=True)
            elif isinstance(configvalue, dict):
                mytest.headers = configvalue
            else:
                raise TypeError(
                    "Illegal header type: headers must be a dictionary or list of dictionary keys"
                )
        if configelement == 'variable_binds':
            mytest.variable_binds = flatten_dictionaries(configvalue)
        if configelement == 'generator_binds':
            # FIX: removed the dead `if (True):` wrapper around this block.
            output = flatten_dictionaries(configvalue)
            mytest.generator_binds = {
                str(key): str(value) for key, value in output.items()
            }
        if configelement.startswith('curl_option_'):
            # Map curl_option_<NAME> keys to raw pycurl option constants
            curlopt = configelement[12:].upper()
            if hasattr(BASECURL, curlopt):
                if not mytest.curl_options:
                    mytest.curl_options = dict()
                mytest.curl_options[curlopt] = configvalue
            else:
                raise ValueError(
                    "Illegal curl option: {0}".format(curlopt))

    # For non-GET requests, accept additional response codes indicating success
    # (but only if expected statuses are not explicitly specified)
    # this is per HTTP spec:
    # http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#sec9.5
    if 'expected_status' not in node.keys():
        if mytest.method == 'POST':
            mytest.expected_status = [200, 201, 204]
        elif mytest.method == 'PUT':
            mytest.expected_status = [200, 201, 204]
        elif mytest.method == 'DELETE':
            mytest.expected_status = [200, 202, 204]
        # Fallthrough default is simply [200]
    return mytest