Example 1
    def setUp(self):
        with open_text('tests.data', 'example01_camera.json') as f:
            camera_params = json.load(f)
            self.camera = CameraIntrinsics(
                torch.tensor(camera_params['intrinsic'])[:3])

        with open_text('tests.data', 'example01_univ_annot3.txt') as f:
            self.points = torch.as_tensor(np.loadtxt(f))

        self.z_ref = 3992.29
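
All of the snippets below use the same resource-reading API from `importlib.resources` (or its `importlib_resources` backport for older Pythons). For orientation, a minimal sketch of the call these examples assume, reusing the package and file names from Example 1; note that Example 3 passes the `encoding` and `errors` parameters positionally:

import json
from importlib_resources import open_text  # stdlib: from importlib.resources import open_text

# open_text(package, resource, encoding='utf-8', errors='strict') returns a text-mode stream
with open_text('tests.data', 'example01_camera.json') as f:
    camera_params = json.load(f)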
Example 2
    def setUp(self):
        with open_binary('tests.data', 'example02_image.jpg') as f:
            self.image: PIL.Image.Image = PIL.Image.open(f).copy()

        with open_text('tests.data', 'example02_camera.json') as f:
            camera_params = json.load(f)
            self.camera = CameraIntrinsics(
                torch.tensor(camera_params['intrinsic'])[:3])

        with open_text('tests.data', 'example02_univ_annot3.txt') as f:
            self.points = torch.as_tensor(np.loadtxt(f))
Example 3
    def test_open_text_with_errors(self):
        # Raises UnicodeError without the 'errors' argument.
        with resources.open_text(self.data, 'utf-16.file', 'utf-8', 'strict') as fp:
            self.assertRaises(UnicodeError, fp.read)
        with resources.open_text(self.data, 'utf-16.file', 'utf-8', 'ignore') as fp:
            result = fp.read()
        self.assertEqual(
            result,
            'H\x00e\x00l\x00l\x00o\x00,\x00 '
            '\x00U\x00T\x00F\x00-\x001\x006\x00 '
            '\x00w\x00o\x00r\x00l\x00d\x00!\x00\n\x00',
        )
Example 4
def load_ir(ir_args):
    """
    Input: path to puzzle folder structured as given in the module description.  
    Output: Pair of lists of dictionaries representing 'raw' models, followed by None
    Refer to the package description to understand more about the way the inputs and outputs and structured.  
    """
    raw_training_models, raw_candidate_models = filtered_irloader.load_ir(
        ir_args)  #basic_irloader.load_ir(ir_args)
    # Temporarily disabling explicit label domain file input option in favour of default file
    # filter_file_path = ir_args.label_domain_filepath
    labeldomain_file = importlib_resources.open_text('knowledge',
                                                     'labeldomain.json')
    with labeldomain_file:
        labeldomain_dict = json.loads(labeldomain_file.read())
        annotated_training_models = [
            _annotate_raw_model(model, labeldomain_dict)
            for model in raw_training_models
        ]
        annotated_candidate_models = [
            _annotate_raw_model(model, labeldomain_dict)
            for model in raw_candidate_models
        ]
        return annotated_training_models, annotated_candidate_models
Example 5
    def test_complex_message(self):
        with open_text('mailman.utilities.tests.data', 'scrub_test.eml') as fp:
            msg = mfs(fp.read())
        self.assertEqual(scrubber.scrub(msg), """\
This is the first text/plain part
-------------- next part --------------
A message part incompatible with plain text digests has been removed ...
Name: not available
Type: text/html
Size: 27 bytes
Desc: not available
-------------- next part --------------
Plain text with \\u201cfancy quotes\\u201d from embedded message.
-------------- next part --------------
A message part incompatible with plain text digests has been removed ...
Name: not available
Type: text/html
Size: 58 bytes
Desc: not available
-------------- next part --------------
A message part incompatible with plain text digests has been removed ...
Name: Image
Type: image/jpeg
Size: 16 bytes
Desc: A JPEG image
""")
Example 6
    def __init__(self, name, **kwargs: Dict):
        super().__init__()

        self.name = name
        """ Name of the variable """

        self.metadata: Dict = {}
        """ Dictionary for metadata of the variable """

        # Initialize class attributes once at first instantiation -------------
        if not self._variable_descriptions:
            # Class attribute, but it's safer to initialize it at first instantiation
            with open_text(resources, DESCRIPTION_FILENAME) as desc_io:
                vars_descs = np.genfromtxt(desc_io, delimiter="\t", dtype=str)
            self.__class__._variable_descriptions.update(vars_descs)

        if not self._base_metadata:
            # Get variable base metadata from an ExplicitComponent
            comp = om.ExplicitComponent()
            # get attributes
            metadata = comp.add_output(name="a")

            self.__class__._base_metadata = metadata
            self.__class__._base_metadata["value"] = 1.0
            self.__class__._base_metadata["tags"] = set()
            self.__class__._base_metadata["shape"] = None
        # Done with class attributes ------------------------------------------

        self.metadata = self.__class__._base_metadata.copy()
        self.metadata.update(kwargs)
        self._set_default_shape()

        # If no description, add one from DESCRIPTION_FILE_PATH, if available
        if not self.description and self.name in self._variable_descriptions:
            self.description = self._variable_descriptions[self.name]
Example 7
    def test_config(self):
        """Test that the namespace option in the example config is valid."""
        package = "mongo_connector.service"
        stream = importlib_resources.open_text(package, "config.json")
        with stream:
            namespaces = json.load(stream)["__namespaces"]
        NamespaceConfig(namespace_options=namespaces)
Example 8
def get_authorized_page(key, remote):
    global authorizedHtml
    if not authorizedHtml:
        with pkg_resources.open_text(templates,
                                     "authorized.html") as template_file:
            authorizedHtml = template_file.read()
    return (authorizedHtml.replace("<key>", key).replace(
        "<remote>", "true" if remote else "false").encode())
Example 9
def test_configuration_schema():
    """Validate the schema itself against its specification."""
    with open_text("memote.experimental.schemata",
                   "configuration.json",
                   encoding="utf-8") as file_handle:
        schema = json.load(file_handle)
    Draft4Validator.check_schema(schema)  # Will raise an exception if invalid.
Example 10
def get_profile(file_name: str = "BACJ.txt",
                thickness_ratio=None,
                chord_length=None) -> pd.DataFrame:
    """
    Reads the profile from the indicated resource file and returns it after resizing

    :param file_name: name of resource (only "BACJ.txt" for now)
    :param thickness_ratio:
    :param chord_length:
    :return: Nx2 pandas.DataFrame with x in 1st column and z in 2nd column
    """

    with open_text(resources, file_name) as source:
        x_z = np.genfromtxt(source,
                            skip_header=1,
                            delimiter="\t",
                            names="x, z")
    profile = Profile()
    profile.set_points(x_z["x"], x_z["z"])

    if thickness_ratio:
        profile.thickness_ratio = thickness_ratio

    if chord_length:
        profile.chord_length = chord_length

    return profile.get_sides()
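
A usage sketch for `get_profile`; the numeric arguments are hypothetical illustrations, not values from the source:

# Hypothetical resize parameters; omit them to keep the profile as stored in BACJ.txt
sides = get_profile("BACJ.txt", thickness_ratio=0.12, chord_length=5.0)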
Example 11
def get_msg_fixture(msg_file):
    msg_module = 'mrs.messages'

    with open_text(msg_module, msg_file) as json_msg:
        msg = json.load(json_msg)

    return msg
Example 12
    def __init__(self):
        translator = VarXpathTranslator()
        with open_text(resources, CONVERSION_FILENAME_1) as translation_table:
            translator.read_translation_table(translation_table)
        super().__init__(translator)

        self.xml_unit_attribute = "unit"
Example 13
def main():

    if len(sys.argv) > 1 and sys.argv[1].endswith('svg'):
        svg_file = sys.argv[1]
        if not os.path.exists(svg_file):
            svg_file = open_text('ursim.environments', svg_file)
    else:
        # Open the bundled default environment only when no SVG was given
        svg_file = open_text('ursim.environments', 'first_environment.svg')

    kbctrl = KeyboardController()

    app = RoboSimApp(kbctrl)
    kbctrl.app = app

    app.sim.load_svg(svg_file)

    app.run()
Example 14
def test_hmmer_reader_invalid_file():
    buffer = pkg_resources.open_text(hmmer_reader.data, "A0ALD9.fasta")
    hmmfile = open_hmmer(buffer)

    with pytest.raises(ParsingError):
        hmmfile.read_model()

    buffer.close()
Example 15
def _load_resource_as_json(resource_name: str) -> dict:
    """ loads the internal json resource with the given resource_name """
    try:
        with pkg_resources.open_text(schema, resource_name) as schema_resource:
            return json.load(schema_resource)
    except FileNotFoundError as err:
        raise ResourceError(
            f'No resource with name {resource_name} was found') from err
Example 16
def test_hmmer_reader_corrupted_file():
    buffer = pkg_resources.open_text(hmmer_reader.data,
                                     "PF02545.hmm.br.corrupted")
    hmmfile = open_hmmer(buffer)

    with pytest.raises(UnicodeDecodeError):
        hmmfile.read_model()

    buffer.close()
Example 17
def read_device_table():
    """
    Reads the device part table file.
    """
    # since yaml.safe_load is expecting a file stream...
    # inspection always gets this one wrong...
    # noinspection PyTypeChecker
    with resources.open_text(resource, "ht32_part_table.yaml") as ifile:
        return yaml.safe_load(ifile)
Example 18
    def load_evaluations_metrics(cls, exp_id: str = None) -> pd.DataFrame:
        """
        Load metric evaluations results. This data can be used to do asserts against
        after running evaluation on [pre-aggregated][epstats.toolkit.testing.test_data.TestData.load_goals_agg]
        or [by-unit][epstats.toolkit.testing.test_data.TestData.load_goals_by_unit] test data.

        Arguments:
            exp_id: experiment id
        """
        df = pd.read_csv(pkg_resources.open_text(resources, "evaluations_metrics.csv"), sep="\t")
        return df[df.exp_id == exp_id] if exp_id is not None else df
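
A hedged usage sketch; the experiment id is a hypothetical placeholder:

# Hypothetical experiment id; omitting exp_id returns evaluations for all experiments
df = TestData.load_evaluations_metrics(exp_id="some-experiment")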
Example 19
def dicer(flavor, length):
    with open_text('resources.lists', flavor + '.txt') as f:
        words = f.read().splitlines()
    seq = [choice(words) for _ in range(length)]
    phrases = {
        'hy': '-'.join(seq),
        'sp': ' '.join(seq),
        'so': ''.join(seq)
    }
    response = jsonify(phrases)
    response.headers['Cache-Control'] = 'no-store, no-cache'
    return response
Example 20
def split_file() -> int:
	"""
	Entry point for `se split-file`
	"""

	parser = argparse.ArgumentParser(description="Split an XHTML file into many files at all instances of <!--se:split-->, and include a header template for each file.")
	parser.add_argument("-f", "--filename-format", metavar="STRING", type=str, default="chapter-%n.xhtml", help="a format string for the output files; `%%n` is replaced with the current chapter number; defaults to `chapter-%%n.xhtml`")
	parser.add_argument("-s", "--start-at", metavar="INTEGER", type=se.is_positive_integer, default="1", help="start numbering chapters at this number, instead of at 1")
	parser.add_argument("-t", "--template-file", metavar="FILE", type=str, default="", help="a file containing an XHTML template to use for each chapter; the string `NUMBER` is replaced by the chapter number, and the string `TEXT` is replaced by the chapter body")
	parser.add_argument("filename", metavar="FILE", help="an HTML/XHTML file")
	args = parser.parse_args()

	try:
		filename = Path(args.filename).resolve()
		with open(filename, "r", encoding="utf-8") as file:
			xhtml = se.strip_bom(file.read())
	except FileNotFoundError:
		se.print_error(f"Couldn’t open file: [path][link=file://{filename}]{filename}[/][/].")
		return se.InvalidFileException.code

	if args.template_file:
		try:
			filename = Path(args.template_file).resolve()
			with open(filename, "r", encoding="utf-8") as file:
				template_xhtml = file.read()
		except FileNotFoundError:
			se.print_error(f"Couldn’t open file: [path][link=file://{filename}]{filename}[/][/].")
			return se.InvalidFileException.code
	else:
		with importlib_resources.open_text("se.data.templates", "chapter-template.xhtml", encoding="utf-8") as file:
			template_xhtml = file.read()

	chapter_xhtml = ""

	# Remove leading split tags
	xhtml = regex.sub(r"^\s*<\!--se:split-->", "", xhtml)

	for line in xhtml.splitlines():
		if "<!--se:split-->" in line:
			prefix, suffix = line.split("<!--se:split-->")
			chapter_xhtml = chapter_xhtml + prefix
			_split_file_output_file(args.filename_format, args.start_at, template_xhtml, chapter_xhtml)

			args.start_at = args.start_at + 1
			chapter_xhtml = suffix

		else:
			chapter_xhtml = f"{chapter_xhtml}\n{line}"

	if chapter_xhtml and not chapter_xhtml.isspace():
		_split_file_output_file(args.filename_format, args.start_at, template_xhtml, chapter_xhtml)

	return 0
Example 21
    def load_goals_simple_agg(cls) -> pd.DataFrame:
        """
        Load sample of aggregated test data in simple wide format. File `goals_simple_agg.csv` contains only one
        experiment, so it is sufficient to just open it.

        We use this dataset in unit testing and we are making it available here for other possible use-cases too.

        See `load_evaluations` set of functions to load corresponding evaluation results.
        """
        df = pd.read_csv(pkg_resources.open_text(resources,
                                                 "goals_simple_agg.csv"),
                         sep="\t")
        return df
Example 22
    def _compute_alpha_flap(flap_angle: float, chord_ratio: float) -> np.ndarray:
        """
        Roskam data to calculate the effectiveness of a simple slotted flap.

        :param flap_angle: flap angle (in Degree)
        :param chord_ratio: position of flap on wing chord
        :return: effectiveness ratio
        """

        temp_array = []
        with open_text(resources, LIFT_EFFECTIVENESS_FILENAME) as file:
            for line in file:
                temp_array.append([float(x) for x in line.split(",")])
        x1 = []
        y1 = []
        x2 = []
        y2 = []
        x3 = []
        y3 = []
        x4 = []
        y4 = []
        x5 = []
        y5 = []
        for arr in temp_array:
            x1.append(arr[0])
            y1.append(arr[1])
            x2.append(arr[2])
            y2.append(arr[3])
            x3.append(arr[4])
            y3.append(arr[5])
            x4.append(arr[6])
            y4.append(arr[7])
            x5.append(arr[8])
            y5.append(arr[9])
        tck1 = interpolate.splrep(x1, y1, s=0)
        tck2 = interpolate.splrep(x2, y2, s=0)
        tck3 = interpolate.splrep(x3, y3, s=0)
        tck4 = interpolate.splrep(x4, y4, s=0)
        tck5 = interpolate.splrep(x5, y5, s=0)
        ynew1 = interpolate.splev(min(max(flap_angle, min(x1)), max(x1)), tck1, der=0)
        ynew2 = interpolate.splev(min(max(flap_angle, min(x2)), max(x2)), tck2, der=0)
        ynew3 = interpolate.splev(min(max(flap_angle, min(x3)), max(x3)), tck3, der=0)
        ynew4 = interpolate.splev(min(max(flap_angle, min(x4)), max(x4)), tck4, der=0)
        ynew5 = interpolate.splev(min(max(flap_angle, min(x5)), max(x5)), tck5, der=0)
        zs = [0.15, 0.20, 0.25, 0.30, 0.40]
        y_final = [float(ynew1), float(ynew2), float(ynew3), float(ynew4), float(ynew5)]
        tck6 = interpolate.splrep(zs, y_final, s=0)
        effectiveness = interpolate.splev(min(max(chord_ratio, min(zs)), max(zs)), tck6, der=0)

        return effectiveness
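
The clamp-then-evaluate pattern repeated five times above could be factored into a small helper; a minimal sketch under the same `scipy.interpolate` import:

def _clamped_splev(value, xs, tck):
    # Evaluate the spline at `value`, clamped to the fitted data range to avoid extrapolation
    return interpolate.splev(min(max(value, min(xs)), max(xs)), tck, der=0)

# e.g. ynew1 = _clamped_splev(flap_angle, x1, tck1)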
Example 23
def _resolve_resource(
    package: str,
    *,
    name: t.Optional[str],
    support_extensions=(".yaml", ".yml")) -> dict:
    logger.info("resolve resource, find resource from %s", package)
    for fname in importlib_resources.contents(package):
        if not os.path.splitext(fname)[1].endswith(support_extensions):
            continue
        name = fname
        break
    logger.info("resolve resource, load data from %s", name)
    with importlib_resources.open_text(package, name) as rf:
        return loading.load(rf)
Example 24
    def load_goals_by_unit(cls, exp_id: str = None) -> pd.DataFrame:
        """
        Load sample of test data by unit to evaluate metrics. We use this dataset
        in unit testing and we are making it available here for other possible use-cases too.

        See `load_evaluations` set of functions to load corresponding evaluation results.

        Arguments:
            exp_id: experiment id
        """
        df = pd.read_csv(pkg_resources.open_text(resources, "goals_by_unit.csv"), sep="\t").fillna(
            {"dimension": "", "dimension_value": ""}
        )
        return df[df.exp_id == exp_id] if exp_id is not None else df
Example 25
def _load_dangerous():
    """
    Load dangerous commands from csv file.
    """
    first_line = True
    dangerous_command = {}
    with open_text(project_data, "dangerous_commands.csv") as dangerous_file:
        csvreader = csv.reader(dangerous_file)
        for line in csvreader:
            if first_line:
                first_line = False
                continue
            command, reason = line
            dangerous_command[command] = reason
    return dangerous_command
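
The manual header-skip flag can also be avoided with `csv.DictReader`; a sketch that assumes (since the file itself is not shown) a header row naming the columns `command` and `reason`:

def _load_dangerous_alt():
    # Assumption: the CSV header row is "command,reason"
    with open_text(project_data, "dangerous_commands.csv") as dangerous_file:
        return {row["command"]: row["reason"] for row in csv.DictReader(dangerous_file)}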
Example 26
def open_resource_text(*path, **kwargs):  # Note: can't use encoding=None in python 2.7
    """Return a file-like object opened for text reading of the resource.

    If the resource does not already exist on its own on the file system,
    a temporary file will be created. If the file was created, it
    will be deleted upon exiting the context manager (no exception is
raised if the file was deleted prior to the context manager
    exiting).
    """
    if len(path) == 0:
        raise TypeError("must provide a path")
    file_name = path[-1]
    package = ".".join([RESOURCE_MODULE] + list(path[:-1]))
    encoding = kwargs.pop("encoding", "utf-8")
    return importlib_resources.open_text(package, file_name, encoding)
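
A hypothetical call; the `schemas` subpackage and file name are illustrative, not taken from the source:

import json

# Opens RESOURCE_MODULE.schemas/config.json as UTF-8 text (hypothetical resource path)
with open_resource_text("schemas", "config.json") as fh:
    config = json.load(fh)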
Example 27
def redirect_tween_factory(handler, registry, redirects=None):
    if redirects is None:
        # N.B. If we fail to load or parse the redirects file, the application
        # will fail to boot. This is deliberate: a missing/corrupt redirects
        # file should result in a healthcheck failure.

        with importlib_resources.open_text("h", "redirects") as handle:
            redirects = parse_redirects(handle)

    def redirect_tween(request):
        url = lookup_redirects(redirects, request)
        if url is not None:
            return httpexceptions.HTTPMovedPermanently(location=url)
        return handler(request)

    return redirect_tween
Example 28
def get_mnx_mapping(session: Session):
    """Return a mapping from MetaNetX prefixes to MIRIAM registries."""
    with open_text(equilibrator_assets.data, "prefix_mapping.tsv") as handle:
        mapping = {
            row.mnx_prefix: session.query(Registry).filter_by(
                namespace=row.identifiers_prefix).one_or_none()
            for row in pd.read_csv(handle, sep="\t", header=0).itertuples(
                index=False)
        }
    mapping["envipath"] = (session.query(Registry).filter_by(
        namespace="envipath").one_or_none())
    mapping["synonyms"] = (session.query(Registry).filter_by(
        namespace="synonyms").one_or_none())
    mapping["deprecated"] = (session.query(Registry).filter_by(
        namespace="metanetx.chemical").one_or_none())
    return mapping
Example 29
    def global_defaults() -> dict:
        """Set the global default configuration, before loading any other config."""
        defaults = Config.global_defaults()

        # Load default configuration
        with open_text('roberto', 'default_config.yaml') as f:
            defaults = merge_dicts(defaults, yaml.safe_load(f))

        # Git version and branch information
        try:
            git_describe = subprocess.run(['git', 'describe', '--tags'],
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.DEVNULL,
                                          check=True).stdout.decode('utf-8')
        except subprocess.CalledProcessError:
            # May fail, e.g. when there are no tags.
            git_describe = '0.0.0-0-notag'
        defaults['git'].update(parse_git_describe(git_describe))

        # First try to get a decent branch name
        branch = subprocess.run(["git", "rev-parse", "--abbrev-ref", "HEAD"],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.DEVNULL,
                                check=True).stdout.decode('utf-8').strip()
        # If HEAD is detached, rev-parse prints 'HEAD'; fall back to an exact-match tag
        if branch == 'HEAD':
            try:
                branch = subprocess.run(
                    ["git", "describe", "--tags", "--exact-match"],
                    stdout=subprocess.PIPE,
                    stderr=subprocess.DEVNULL,
                    check=True).stdout.decode('utf-8').strip()
            except subprocess.CalledProcessError:
                # Final attempt, just the sha.
                try:
                    branch = subprocess.run(
                        ["git", "rev-parse", "HEAD"],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.DEVNULL,
                        check=True).stdout.decode('utf-8').strip()
                except subprocess.CalledProcessError:
                    branch = '__nogit__'
        defaults['git']['branch'] = branch

        return defaults
Example 30
    def load(self, dtype_conversion=None):
        """
        Load the data table and corresponding validation schema.

        Parameters
        ----------
        dtype_conversion : dict
            Column names as keys and corresponding type for loading the data.
            Please take a look at the `pandas documentation
            <https://pandas.pydata.org/pandas-docs/stable/io.html#specifying-column-data-types>`__
            for detailed explanations.

        """
        self.data = read_tabular(self.filename, dtype_conversion)
        with open_text(memote.experimental.schemata,
                       self.SCHEMA,
                       encoding="utf-8") as file_handle:
            self.schema = json.load(file_handle)
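
A usage sketch; `table` stands for an instance of this class and the column name is hypothetical:

# Hypothetical: coerce a "growth_rate" column to float while loading the table
table.load(dtype_conversion={"growth_rate": float})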
Example 31
def modernize_hyphenation(xhtml: str) -> str:
    """
	Convert old-timey hyphenated compounds into single words based on the passed DICTIONARY.

	INPUTS
	xhtml: A string of XHTML to modernize

	OUTPUTS
	A string representing the XHTML with its hyphenation modernized
	"""

    # First, initialize our dictionary if we haven't already
    if not se.spelling.DICTIONARY:
        with importlib_resources.open_text("se.data", "words") as dictionary:
            se.spelling.DICTIONARY = {
                line.strip().lower()
                for line in dictionary
            }

    # Easy fix for a common case
    xhtml = regex.sub(r"\b([Nn])ow-a-days\b", r"\1owadays",
                      xhtml)  # now-a-days -> nowadays

    # The negative lookbehind at the beginning tries to prevent
    # bad matches like stag's-horn -> stag'shorn or dog's-eared -> dog'seared
    result = regex.findall(r"(?<![’\'])\b[^\W\d_]+\-[^\W\d_]+\b", xhtml)

    for word in set(result):  # set() removes duplicates
        new_word = word.replace("-", "").lower()
        if new_word in se.spelling.DICTIONARY:
            # To preserve capitalization of the first word, we get the individual parts
            # then replace the original match with them joined together and titlecased.
            lhs = regex.sub(r"\-.+$", r"", word)
            rhs = regex.sub(r"^.+?\-", r"", word)
            xhtml = regex.sub(fr"{lhs}-{rhs}", lhs + rhs.lower(), xhtml)

    # Quick fixes for common error cases
    xhtml = xhtml.replace("z3998:nonfiction", "z3998:non-fiction")
    xhtml = regex.sub(r"\b([Mm])anat-arms", r"\1an-at-arms", xhtml)
    xhtml = regex.sub(r"\b([Tt])abled’hôte", r"\1able-d’hôte", xhtml)
    xhtml = regex.sub(r"\b([Pp])ita-pat", r"\1it-a-pat", xhtml)

    return xhtml
Example 32
    def load(cls, filename=None):
        """Load a test report configuration."""
        if filename is None:
            LOGGER.debug("Loading default configuration.")
            with open_text(templates, "test_config.yml",
                           encoding="utf-8") as file_handle:
                content = yaml.safe_load(file_handle)
        else:
            LOGGER.debug("Loading custom configuration '%s'.", filename)
            try:
                with open(filename, encoding="utf-8") as file_handle:
                    content = yaml.safe_load(file_handle)
            except IOError as err:
                LOGGER.error(
                    "Failed to load the custom configuration '%s'. Skipping.",
                    filename)
                LOGGER.debug(str(err))
                content = dict()
        return cls(content)
Example 33
def test_configuration_schema():
    """Validate the schema itself against its specification."""
    with open_text("memote.experimental.schemata", "configuration.json",
                   encoding="utf-8") as file_handle:
        schema = json.load(file_handle)
    Draft4Validator.check_schema(schema)  # Will raise an exception if invalid.
Example 34
from cobra.medium.boundary_types import find_boundary_types
from pylru import lrudecorator

import memote.utils as utils
import memote.support.data

LOGGER = logging.getLogger(__name__)

TRANSPORT_RXN_SBO_TERMS = ['SBO:0000185', 'SBO:0000588', 'SBO:0000587',
                           'SBO:0000655', 'SBO:0000654', 'SBO:0000660',
                           'SBO:0000659', 'SBO:0000657', 'SBO:0000658']


# Read the MetaNetX shortlist to identify specific metabolite IDs across
# different namespaces.
with open_text(memote.support.data, "met_id_shortlist.json",
               encoding="utf-8") as file_handle:
    METANETX_SHORTLIST = pd.read_json(file_handle)


# Provide a compartment shortlist to identify specific compartments whenever
# necessary.
COMPARTMENT_SHORTLIST = {
    'ce': ['cell envelope'],
    'c': ['cytoplasm', 'cytosol', 'default', 'in', 'intra cellular',
          'intracellular', 'intracellular region', 'intracellular space'],
    'er': ['endoplasmic reticulum'],
    'erm': ['endoplasmic reticulum membrane'],
    'e': ['extracellular', 'extraorganism', 'out', 'extracellular space',
          'extra organism', 'extra cellular', 'extra-organism'],
    'f': ['flagellum', 'bacterial-type flagellum'],
    'g': ['golgi', 'golgi apparatus'],