def generate_implementation(self):
    """Render the gperf input for the property-name lookup and return the
    C++ source produced by running gperf over it."""
    # NOTE(review): assumes sys.argv[4] names the build type -- confirm caller.
    if "Debug" not in sys.argv[4]:
        # Materialize as a list: filter() is lazy on Python 3, and the result
        # is concatenated with self._aliases (a list) below.
        self._properties_for_build_type = [
            property for property in self._properties if not property["debug_only"]
        ]
    else:
        self._properties_for_build_type = self._properties

    # Offset of each serialized name inside the single NUL-joined string.
    property_offsets = []
    current_offset = 0
    for property in self._properties:
        property_offsets.append(current_offset)
        current_offset += len(property["serialized_name"]) + 1

    gperf_input = GPERF_TEMPLATE % {
        "license": license.license_for_generated_cpp(),
        "class_name": self.class_name,
        "property_name_strings": "\n".join(
            ' "%(serialized_name)s\\0"' % property for property in self._properties
        ),
        "property_name_offsets": "\n".join(" %d," % offset for offset in property_offsets),
        "property_to_enum_map": "\n".join(
            "%(name)s, %(enum_name)s" % property
            for property in self._properties_for_build_type + self._aliases
        ),
        "internal_properties": "\n".join(
            self._case_properties(property)
            for property in self._properties
            if property["is_internal"]
        ),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, "--key-positions=*", "-P", "-D", "-n", "-s", "2"]
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_implementation(self):
    """Fill IMPLEMENTATION_TEMPLATE with the include lines and the
    description-initialization lines for every known exception."""
    return IMPLEMENTATION_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'includes': '\n'.join(
            self._include(exception) for exception in self._exceptions()),
        'description_initalizations': '\n'.join(
            self._description_initalization(exception)
            for exception in self._exceptions()),
    }
def generate_implementation(self):
    """Build the gperf source for the property-name lookup and return the
    output of running gperf on it."""
    # Offset of each property name inside the flattened, NUL-joined string.
    name_offsets = []
    running_total = 0
    for prop in self._properties_list:
        name_offsets.append(running_total)
        running_total += len(prop["name"]) + 1

    # (css name, enum) pairs for the real properties, then the aliases.
    name_enum_pairs = [(prop['name'], prop_id)
                       for prop_id, prop in self._properties.items()]
    name_enum_pairs.extend(
        (alias, css_properties.css_name_to_enum(target))
        for alias, target in self._aliases.items())

    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_name_strings': '\n'.join(
            ' "%(name)s\\0"' % prop for prop in self._properties_list),
        'property_name_offsets': '\n'.join(' %d,' % offset for offset in name_offsets),
        'property_to_enum_map': '\n'.join('%s, %s' % pair for pair in name_enum_pairs),
        'internal_properties': '\n'.join(
            "case %s:" % prop_id
            for prop_id, prop in self._properties.items() if prop['is_internal']),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n']
    gperf_args.extend(['-m', '50'])  # Pick best of 50 attempts.
    gperf_args.append('-D')  # Allow duplicate hashes -> More compact code.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_implementation(self):
    """Build and run the gperf input for the property lookup, keyed by a
    dense range of enum values (gaps still receive an offset entry)."""
    enum_value_to_name = {prop['enum_value']: prop['name']
                          for prop in self._properties_including_aliases}
    property_offsets = []
    property_names = []
    offset = 0
    # Walk every enum value so the offsets table can be indexed directly.
    for enum_value in range(self._first_enum_value, max(enum_value_to_name) + 1):
        property_offsets.append(offset)
        name = enum_value_to_name.get(enum_value)
        if name is not None:
            property_names.append(name)
            offset += len(name) + 1
    css_name_and_enum_pairs = [(prop['name'], prop['property_id'])
                               for prop in self._properties_including_aliases]
    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_name_strings': '\n'.join(' "%s\\0"' % name for name in property_names),
        'property_name_offsets': '\n'.join(' %d,' % value for value in property_offsets),
        'property_to_enum_map': '\n'.join('%s, %s' % pair for pair in css_name_and_enum_pairs),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n']
    gperf_args.extend(['-m', '50'])  # Pick best of 50 attempts.
    gperf_args.append('-D')  # Allow duplicate hashes -> More compact code.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_implementation(self):
    """Build the gperf source for the value-keyword lookup and return the
    generated C++ that gperf emits."""
    # Offset of each keyword inside the single NUL-joined keyword string.
    keyword_offsets = []
    total = 0
    for keyword in self._value_keywords:
        keyword_offsets.append(total)
        total += len(keyword["name"]) + 1
    gperf_input = GPERF_TEMPLATE % {
        "license": license.license_for_generated_cpp(),
        "class_name": self.class_name,
        "value_keyword_strings": "\n".join(
            map(lambda keyword: ' "%(name)s\\0"' % keyword, self._value_keywords)),
        "value_keyword_offsets": "\n".join(
            map(lambda offset: " %d," % offset, keyword_offsets)),
        "value_keyword_to_enum_map": "\n".join(
            map(lambda keyword: "%(lower_name)s, %(enum_name)s" % keyword,
                self._value_keywords)),
        "ua_sheet_mode_values_keywords": "\n ".join(
            self._case_value_keyword(keyword)
            for keyword in self._value_keywords_with_mode("UASheet")),
        "quirks_mode_or_ua_sheet_mode_values_keywords": "\n ".join(
            self._case_value_keyword(keyword)
            for keyword in self._value_keywords_with_mode("QuirksOrUASheet")),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, "--key-positions=*", "-P", "-n"]
    gperf_args.extend(["-m", "50"])  # Pick best of 50 attempts.
    gperf_args.append("-D")  # Allow duplicate hashes -> More compact code.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_header(self):
    """Fill HEADER_TEMPLATE with declarations for every runtime feature."""
    method_declarations = "\n".join(
        self._method_declaration(feature) for feature in self._all_features)
    storage_declarations = "\n".join(
        self._storage_declarations(feature) for feature in self._non_custom_features)
    return HEADER_TEMPLATE % {
        'class_name': self.class_name,
        'license': license.license_for_generated_cpp(),
        'method_declarations': method_declarations,
        'storage_declarations': storage_declarations,
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with the CSS value keyword enum.

    A CSSValueInvalid sentinel (value 0) is prepended to the keyword list.
    """
    # Materialize as a list: map() is lazy on Python 3 and len() is needed
    # below.  (Also fixes the 'enties' typo.)
    enum_entries = list(map(
        self._enum_declaration,
        [{'enum_name': 'CSSValueInvalid', 'enum_value': 0}] + self._value_keywords))
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'value_keyword_enums': "\n".join(enum_entries),
        'value_keywords_count': len(enum_entries),
        # max() over a generator replaces reduce(max, ...): reduce is not a
        # builtin on Python 3.
        'max_value_keyword_length': max(len(keyword['name']) for keyword in self._value_keywords),
    }
def generate_interfaces_header(self):
    """Fill INTERFACES_HEADER_TEMPLATE with per-interface macro lines."""
    unconditional = sorted(set(
        self._unconditional_macro(entry) for entry in self._unconditional_entries))
    conditional = [self._conditional_macros(condition)
                   for condition in self._entries_by_conditional.keys()]
    return INTERFACES_HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'macro_style_name': _to_macro_style(self.class_name),
        'declare_conditional_macros': self._declare_conditional_macros(),
        'unconditional_macros': '\n'.join(unconditional),
        'conditional_macros': '\n'.join(conditional),
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with the value-keyword enum declarations,
    prepending the CSSValueInvalid sentinel (value 0)."""
    # A list comprehension (not map()) so len() works on Python 3, where
    # map() returns an iterator; also fixes the 'enties' typo.
    enum_entries = [
        self._enum_declaration(entry)
        for entry in [{'enum_name': 'CSSValueInvalid', 'enum_value': 0}] + self._value_keywords]
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'value_keyword_enums': "\n".join(enum_entries),
        'value_keywords_count': len(enum_entries),
        'max_value_keyword_length': max(len(keyword['name']) for keyword in self._value_keywords),
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with the CSS property enum and its id bounds."""
    first_id = self._first_enum_value
    property_count = len(self._properties)
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_enums': "\n".join(
            self._enum_declaration(prop) for prop in self._properties_list),
        'first_property_id': first_id,
        'properties_count': property_count,
        'last_property_id': first_id + property_count - 1,
        # NOTE(review): iterating self._properties yields its keys, so this is
        # the longest key -- confirm that is the intended maximum.
        'max_name_length': max(len(key) for key in self._properties),
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with the property enum and its id bounds."""
    return HEADER_TEMPLATE % {
        "license": license.license_for_generated_cpp(),
        "class_name": self.class_name,
        "property_enums": "\n".join(map(self._enum_declaration, self._properties)),
        "first_property_id": self._first_property_id,
        "properties_count": len(self._properties),
        "last_property_id": self._first_property_id + len(self._properties) - 1,
        # max() over a generator replaces reduce(max, map(len, ...)): reduce
        # is not a builtin on Python 3, and this is clearer anyway.
        "max_name_length": max(len(property["name"]) for property in self._properties),
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with the property enum and its id bounds."""
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_enums': "\n".join(map(self._enum_declaration, self._properties)),
        'first_property_id': self._first_property_id,
        'properties_count': len(self._properties),
        'last_property_id': self._first_property_id + len(self._properties) - 1,
        # reduce() moved to functools in Python 3; a plain max() over a
        # generator expression is equivalent and works everywhere.
        'max_name_length': max(len(property['name']) for property in self._properties),
    }
def generate_implementation(self):
    """Feed the property-name template to gperf and return its output."""
    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_name_strings': '\n'.join(
            ' "%(name)s",' % property for property in self._properties),
        'property_to_enum_map': '\n'.join(
            '%(name)s, %(enum_name)s' % property
            for property in self._properties + self._aliases),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = ['gperf', '--key-positions=*', '-D', '-n', '-s', '2']
    # universal_newlines=True keeps stdin/stdout in text mode so the str
    # input also works on Python 3, matching the sibling generators.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_headers_header(self):
    """Fill HEADER_TEMPLATE for the aggregate <Namespace>Headers.h file.

    When a suffix is configured, the base (un-suffixed) headers file is
    included first.
    """
    if self.suffix:
        base_header_for_suffix = ('\n#include "core/%(namespace)sHeaders.h"\n'
                                  % {'namespace': self.namespace})
    else:
        base_header_for_suffix = ''
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'namespace': self.namespace,
        'suffix': self.suffix,
        'base_header_for_suffix': base_header_for_suffix,
        'includes': '\n'.join(
            self._headers_header_includes(self.in_file.name_dictionaries)),
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with the value-keyword enum declarations,
    prepending the CSSValueInvalid sentinel (value 0)."""
    # list() so len() works on Python 3, where map() returns an iterator;
    # also fixes the 'enties' typo.
    enum_entries = list(map(
        self._enum_declaration,
        [{"enum_name": "CSSValueInvalid", "enum_value": 0}] + self._value_keywords
    ))
    return HEADER_TEMPLATE % {
        "license": license.license_for_generated_cpp(),
        "class_name": self.class_name,
        "value_keyword_enums": "\n".join(enum_entries),
        "value_keywords_count": len(enum_entries),
        "max_value_keyword_length": max(len(keyword["name"]) for keyword in self._value_keywords),
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with the value-keyword enum declarations,
    prepending the CSSValueInvalid sentinel (value 0)."""
    # Materialize as a list: map() is lazy on Python 3 and len() is needed
    # below.  (Also fixes the 'enties' typo.)
    enum_entries = [
        self._enum_declaration(entry)
        for entry in [{"enum_name": "CSSValueInvalid", "enum_value": 0}] + self._value_keywords
    ]
    return HEADER_TEMPLATE % {
        "license": license.license_for_generated_cpp(),
        "class_name": self.class_name,
        "value_keyword_enums": "\n".join(enum_entries),
        "value_keywords_count": len(enum_entries),
        # max() over a generator replaces reduce(max, ...), which raises
        # NameError on Python 3 (reduce moved to functools).
        "max_value_keyword_length": max(
            len(keyword["name"]) for keyword in self._value_keywords
        ),
    }
def generate_interfaces_header(self):
    """Fill INTERFACES_HEADER_TEMPLATE for this namespace (plus optional
    suffix), including the base interfaces header when a suffix is set."""
    if self.suffix:
        base_header_for_suffix = ('\n#include "core/%(namespace)sInterfaces.h"\n'
                                  % {'namespace': self.namespace})
    else:
        base_header_for_suffix = ''
    unconditional = sorted(set(
        self._unconditional_macro(entry) for entry in self._unconditional_entries))
    return INTERFACES_HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'namespace': self.namespace,
        'suffix': self.suffix,
        'base_header_for_suffix': base_header_for_suffix,
        'macro_style_name': name_utilities.to_macro_style(self.namespace + self.suffix),
        'declare_conditional_macros': self._declare_conditional_macros(),
        'unconditional_macros': '\n'.join(unconditional),
        'conditional_macros': '\n'.join(
            self._conditional_macros(condition)
            for condition in self._entries_by_conditional.keys()),
    }
def generate_implementation(self):
    """Feed the value-keyword template to gperf and return its output."""
    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'value_keyword_strings': '\n'.join(
            ' "%(name)s",' % keyword for keyword in self._value_keywords),
        'value_keyword_to_enum_map': '\n'.join(
            '%(name)s, %(enum_name)s' % keyword for keyword in self._value_keywords),
        'ua_sheet_mode_values_keywords': '\n'.join(
            map(self._case_value_keyword, self._value_keywords_with_mode('UASheet'))),
        'quirks_mode_values_keywords': '\n'.join(
            map(self._case_value_keyword, self._value_keywords_with_mode('Quirks'))),
        'quirks_mode_or_ua_sheet_mode_values_keywords': '\n'.join(
            map(self._case_value_keyword, self._value_keywords_with_mode('QuirksOrUASheet'))),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = ['gperf', '--key-positions=*', '-D', '-n', '-s', '2']
    # Text mode (universal_newlines) so the str input/output also works on
    # Python 3, matching the sibling generators.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_header(self):
    """Fill HEADER_TEMPLATE with the value-keyword enum declarations,
    prepending the CSSValueInvalid sentinel (value 0)."""
    # Built as a list (not a lazy map object) because len() is taken below,
    # which fails on Python 3; also fixes the 'enties' typo.
    enum_entries = list(map(self._enum_declaration, [{
        'enum_name': 'CSSValueInvalid',
        'enum_value': 0
    }] + self._value_keywords))
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'value_keyword_enums': "\n".join(enum_entries),
        'value_keywords_count': len(enum_entries),
        'max_value_keyword_length': max(
            len(keyword['name']) for keyword in self._value_keywords),
    }
def generate_implementation(self):
    """Produce the gperf-generated property-name lookup source."""
    # Offsets into the concatenated, NUL-terminated property-name string.
    property_offsets = []
    cursor = 0
    for entry in self._properties_list:
        property_offsets.append(cursor)
        cursor += len(entry["name"]) + 1

    # Real properties first, then the aliases resolved to their enums.
    pairs = [(entry['name'], property_id)
             for property_id, entry in self._properties.items()]
    for alias, aliased_name in self._aliases.items():
        pairs.append((alias, css_properties.css_name_to_enum(aliased_name)))

    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_name_strings': '\n'.join(
            ' "%(name)s\\0"' % entry for entry in self._properties_list),
        'property_name_offsets': '\n'.join(' %d,' % offset for offset in property_offsets),
        'property_to_enum_map': '\n'.join('%s, %s' % pair for pair in pairs),
        'internal_properties': '\n'.join(
            "case %s:" % property_id
            for property_id, entry in self._properties.items()
            if entry['is_internal']),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n']
    gperf_args.extend(['-m', '50'])  # Pick best of 50 attempts.
    gperf_args.append('-D')  # Allow duplicate hashes -> More compact code.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_implementation(self):
    """Produce the gperf-generated value-keyword lookup source."""
    # First slot is 0 and real offsets start at 1; NOTE(review): presumably
    # the name table begins with a leading NUL -- confirm against the template.
    keyword_offsets = [0]
    cursor = 1
    for keyword in self._value_keywords:
        keyword_offsets.append(cursor)
        cursor += len(keyword["name"]) + 1
    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'value_keyword_strings': '\n'.join(
            ' "%(name)s\\0"' % keyword for keyword in self._value_keywords),
        'value_keyword_offsets': '\n'.join(' %d,' % offset for offset in keyword_offsets),
        'value_keyword_to_enum_map': '\n'.join(
            '%(name)s, %(enum_name)s' % keyword for keyword in self._value_keywords),
        'ua_sheet_mode_values_keywords': '\n '.join(
            self._case_value_keyword(keyword)
            for keyword in self._value_keywords_with_mode('UASheet')),
        'quirks_mode_values_keywords': '\n '.join(
            self._case_value_keyword(keyword)
            for keyword in self._value_keywords_with_mode('Quirks')),
        'quirks_mode_or_ua_sheet_mode_values_keywords': '\n '.join(
            self._case_value_keyword(keyword)
            for keyword in self._value_keywords_with_mode('QuirksOrUASheet')),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, '--key-positions=*', '-D', '-n', '-s', '2']
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_implementation(self):
    """Feed the property-name template to gperf and return its output."""
    # Offset of each name inside the single NUL-joined name string.
    property_offsets = []
    current_offset = 0
    for property in self._properties:
        property_offsets.append(current_offset)
        current_offset += len(property["name"]) + 1
    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_name_strings': '\n'.join(
            ' "%(name)s\\0"' % property for property in self._properties),
        'property_name_offsets': '\n'.join(' %d,' % offset for offset in property_offsets),
        'property_to_enum_map': '\n'.join(
            '%(name)s, %(enum_name)s' % property
            for property in self._properties + self._aliases),
        'internal_properties': '\n'.join(
            self._case_properties(property)
            for property in self._properties if property['is_internal']),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = ['gperf', '--key-positions=*', '-P', '-D', '-n', '-s', '2']
    # universal_newlines=True keeps the pipe in text mode so the str input
    # also works on Python 3, matching the sibling generators.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_implementation(self):
    """Produce the gperf lookup source, indexing names by enum value.

    Every enum value in the dense range gets an offset entry, even the
    unassigned gaps, so the offsets table can be indexed directly.
    """
    enum_value_to_name = {entry['enum_value']: entry['name']
                          for entry in self._properties_including_aliases}
    highest_value = max(enum_value_to_name)
    property_offsets = []
    property_names = []
    current_offset = 0
    for enum_value in range(self._first_enum_value, highest_value + 1):
        property_offsets.append(current_offset)
        if enum_value in enum_value_to_name:
            name = enum_value_to_name[enum_value]
            property_names.append(name)
            current_offset += len(name) + 1
    css_name_and_enum_pairs = [
        (entry['name'], entry['property_id'])
        for entry in self._properties_including_aliases]
    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_name_strings': '\n'.join(' "%s\\0"' % name for name in property_names),
        'property_name_offsets': '\n'.join(' %d,' % offset for offset in property_offsets),
        'property_to_enum_map': '\n'.join('%s, %s' % pair for pair in css_name_and_enum_pairs),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n',
                  '-m', '50',  # Pick best of 50 attempts.
                  '-D']        # Allow duplicate hashes -> More compact code.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_headers_header(self):
    """Fill HEADER_TEMPLATE for the aggregate Headers.h of this namespace,
    including the un-suffixed base headers file when a suffix is set."""
    base_header_for_suffix = (
        '\n#include "core/%(namespace)sHeaders.h"\n' % {'namespace': self.namespace}
        if self.suffix else '')
    includes = '\n'.join(
        self._headers_header_includes(self.in_file.name_dictionaries))
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'namespace': self.namespace,
        'suffix': self.suffix,
        'base_header_for_suffix': base_header_for_suffix,
        'includes': includes,
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with the property enum and its id bounds."""
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_enums': "\n".join(
            map(self._enum_declaration, self._properties)),
        'first_property_id': self._first_property_id,
        'properties_count': len(self._properties),
        'last_property_id': self._first_property_id + len(self._properties) - 1,
        # reduce() is not a builtin on Python 3; a max() over a generator
        # expression is equivalent and portable.
        'max_name_length': max(
            len(property['name']) for property in self._properties),
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with the property enum (aliases included) and
    the resolved/unresolved property id bounds."""
    count = len(self._properties)
    enum_lines = [self._enum_declaration(entry)
                  for entry in self._properties_including_aliases]
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_enums': "\n".join(enum_lines),
        'first_property_id': self._first_enum_value,
        'properties_count': count,
        'last_property_id': self._first_enum_value + count - 1,
        'last_unresolved_property_id': self.last_unresolved_property_id,
        # NOTE(review): iterating self._properties yields its keys, so this
        # is the longest key -- confirm that is the intended maximum.
        'max_name_length': max(len(key) for key in self._properties),
    }
def generate_implementation(self):
    """Feed the property-name template to gperf and return its output."""
    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'property_name_strings': '\n'.join(
            ' "%(name)s",' % property
            for property in self._properties),
        'property_to_enum_map': '\n'.join(
            '%(name)s, %(enum_name)s' % property
            for property in self._properties + self._aliases),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = ['gperf', '--key-positions=*', '-D', '-n', '-s', '2']
    # Text mode (universal_newlines) so the str input/output also works on
    # Python 3, matching the sibling generators.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_interfaces_header(self):
    """Fill INTERFACES_HEADER_TEMPLATE with the per-interface macros."""
    unique_unconditional = set(
        map(self._unconditional_macro, self._unconditional_entries))
    return INTERFACES_HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'macro_style_name': _to_macro_style(self.class_name),
        'declare_conditional_macros': self._declare_conditional_macros(),
        'unconditional_macros': '\n'.join(sorted(unique_unconditional)),
        'conditional_macros': '\n'.join(
            self._conditional_macros(condition)
            for condition in self._entries_by_conditional.keys()),
    }
def generate_implementation(self):
    """Produce the gperf-generated value-keyword lookup source."""
    # Slot zero is a sentinel 0; real offsets begin at 1.  NOTE(review):
    # presumably the name table starts with a leading NUL -- confirm template.
    keyword_offsets = [0]
    position = 1
    for keyword in self._value_keywords:
        keyword_offsets.append(position)
        position += len(keyword["name"]) + 1

    keyword_strings = '\n'.join(
        ' "%(name)s\\0"' % keyword for keyword in self._value_keywords)
    offset_lines = '\n'.join(' %d,' % offset for offset in keyword_offsets)
    enum_map = '\n'.join(
        '%(name)s, %(enum_name)s' % keyword for keyword in self._value_keywords)

    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'value_keyword_strings': keyword_strings,
        'value_keyword_offsets': offset_lines,
        'value_keyword_to_enum_map': enum_map,
        'ua_sheet_mode_values_keywords': '\n '.join(
            map(self._case_value_keyword, self._value_keywords_with_mode('UASheet'))),
        'quirks_mode_values_keywords': '\n '.join(
            map(self._case_value_keyword, self._value_keywords_with_mode('Quirks'))),
        'quirks_mode_or_ua_sheet_mode_values_keywords': '\n '.join(
            map(self._case_value_keyword, self._value_keywords_with_mode('QuirksOrUASheet'))),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, '--key-positions=*', '-D', '-n', '-s', '2']
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_implementation(self):
    """Produce the gperf-generated value-keyword lookup source."""
    # Offset of each keyword inside the single NUL-joined keyword string.
    keyword_offsets = []
    position = 0
    for keyword in self._value_keywords:
        keyword_offsets.append(position)
        position += len(keyword["name"]) + 1
    gperf_input = GPERF_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'value_keyword_strings': '\n'.join(
            map(lambda keyword: ' "%(name)s\\0"' % keyword, self._value_keywords)),
        'value_keyword_offsets': '\n'.join(
            map(lambda offset: ' %d,' % offset, keyword_offsets)),
        'value_keyword_to_enum_map': '\n'.join(
            map(lambda keyword: '%(lower_name)s, %(enum_name)s' % keyword,
                self._value_keywords)),
        'ua_sheet_mode_values_keywords': '\n '.join(
            self._case_value_keyword(keyword)
            for keyword in self._value_keywords_with_mode('UASheet')),
        'quirks_mode_or_ua_sheet_mode_values_keywords': '\n '.join(
            self._case_value_keyword(keyword)
            for keyword in self._value_keywords_with_mode('QuirksOrUASheet')),
    }
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n',
                  '-m', '50',  # Pick best of 50 attempts.
                  '-D']        # Allow duplicate hashes -> More compact code.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_header(self):
    """Fill HEADER_TEMPLATE with the value-keyword enum declarations,
    prepending the CSSValueInvalid sentinel (value 0)."""
    # list() so len() works on Python 3, where map() returns an iterator;
    # also fixes the 'enties' typo.
    enum_entries = list(map(self._enum_declaration, [{
        'enum_name': 'CSSValueInvalid',
        'enum_value': 0
    }] + self._value_keywords))
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'value_keyword_enums': "\n".join(enum_entries),
        'value_keywords_count': len(enum_entries),
        # max() over a generator replaces reduce(max, ...), which raises
        # NameError on Python 3 (reduce moved to functools).
        'max_value_keyword_length': max(
            len(keyword['name']) for keyword in self._value_keywords),
    }
def generate_implementation(self):
    """Produce the gperf-generated value-keyword lookup source."""
    # Offsets into the concatenated, NUL-terminated keyword string.
    keyword_offsets = []
    cursor = 0
    for entry in self._value_keywords:
        keyword_offsets.append(cursor)
        cursor += len(entry["name"]) + 1

    template_values = {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'value_keyword_strings': '\n'.join(
            ' "%(name)s\\0"' % entry for entry in self._value_keywords),
        'value_keyword_offsets': '\n'.join(
            ' %d,' % offset for offset in keyword_offsets),
        'value_keyword_to_enum_map': '\n'.join(
            '%(lower_name)s, %(enum_name)s' % entry for entry in self._value_keywords),
        'ua_sheet_mode_values_keywords': '\n '.join(
            map(self._case_value_keyword, self._value_keywords_with_mode('UASheet'))),
        'quirks_mode_or_ua_sheet_mode_values_keywords': '\n '.join(
            map(self._case_value_keyword, self._value_keywords_with_mode('QuirksOrUASheet'))),
    }
    gperf_input = GPERF_TEMPLATE % template_values
    # FIXME: If we could depend on Python 2.7, we would use subprocess.check_output
    gperf_args = [self.gperf_path, '--key-positions=*', '-P', '-n']
    gperf_args.extend(['-m', '50'])  # Pick best of 50 attempts.
    gperf_args.append('-D')  # Allow duplicate hashes -> More compact code.
    gperf = subprocess.Popen(gperf_args, stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE, universal_newlines=True)
    return gperf.communicate(gperf_input)[0]
def generate_implementation(self):
    """Fill IMPLEMENTATION_TEMPLATE with storage definitions for every
    non-custom runtime feature."""
    storage_definitions = "\n".join(
        self._storage_definition(feature) for feature in self._non_custom_features)
    return IMPLEMENTATION_TEMPLATE % {
        'class_name': self.class_name,
        'license': license.license_for_generated_cpp(),
        'storage_definitions': storage_definitions,
    }
def generate_implementation(self):
    """Fill IMPLEMENTATION_TEMPLATE with the event factory entries."""
    factory_lines = [self._factory_implementation(event) for event in self._events()]
    return IMPLEMENTATION_TEMPLATE % {
        'class_name': self.class_name,
        'license': license.license_for_generated_cpp(),
        'factory_implementation': "\n".join(factory_lines),
    }
def generate_headers_header(self):
    """Fill HEADER_TEMPLATE with the include lines for this namespace."""
    include_lines = self._headers_header_includes(self.in_file.name_dictionaries)
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'namespace': self.namespace,
        'includes': '\n'.join(include_lines),
    }
def generate_idl(self):
    """Fill IDL_TEMPLATE with attribute declarations for every non-custom
    runtime feature."""
    attributes = "\n".join(
        self._attribute_declaration(feature)
        for feature in self._non_custom_features)
    return IDL_TEMPLATE % {
        'class_name': self.class_name,
        'license': license.license_for_generated_cpp(),
        'attribute_declarations': attributes,
    }
def generate_implementation(self):
    """Fill IMPLEMENTATION_TEMPLATE with one factory entry per event."""
    return IMPLEMENTATION_TEMPLATE % {
        "class_name": self.class_name,
        "license": license.license_for_generated_cpp(),
        "factory_implementation": "\n".join(
            self._factory_implementation(event) for event in self._events()),
    }
def generate_header(self):
    """Fill HEADER_TEMPLATE with one entry per known exception type."""
    exception_types = '\n'.join(
        self._exception_type(exception) for exception in self._exceptions())
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'exception_types': exception_types,
    }
def generate_headers_header(self):
    """Fill HEADER_TEMPLATE with the include lines for this class."""
    includes = self._headers_header_includes(self.in_file.name_dictionaries)
    return HEADER_TEMPLATE % {
        'license': license.license_for_generated_cpp(),
        'class_name': self.class_name,
        'includes': '\n'.join(includes),
    }
def generate_implementation(self):
    """Fill IMPLEMENTATION_TEMPLATE with one factory entry per event.

    The namespace string is unquoted (surrounding double quotes stripped)
    before substitution.
    """
    factory_lines = [self._factory_implementation(event) for event in self._events()]
    return IMPLEMENTATION_TEMPLATE % {
        'namespace': self._namespace().strip('"'),
        'license': license.license_for_generated_cpp(),
        'factory_implementation': "\n".join(factory_lines),
    }