def dump(references: List[Dict], file: TextIO, mapping: Optional[Dict] = None):
    """Write an RIS file to file or file-like object.

    Entries are codified as dictionaries whose keys are the different tags.
    For single line and singly occurring tags, the content is codified as a
    string. In the case of multiline or multiple key occurrences, the content
    is returned as a list of strings.

    Args:
        references (List[Dict]): List of references.
        file (TextIO): File handle to store ris formatted data.
        mapping (Dict, optional): Custom RIS tags mapping.
    """
    text = dumps(references, mapping)
    # BUG FIX: `dumps` returns a single string; writelines() would iterate it
    # and issue one write per character. write() emits it in one call.
    file.write(text)
async def cli_main(
    config: confuse.Configuration,
    *,
    input: TextIO,
    output: TextIO,
    include_unparsed: bool,
) -> None:
    """Bridge `main` to CLI handles: JSON lines in, JSON lines out."""

    async def _read_items() -> AsyncIterable[git.Commit]:
        # Each input line is one JSON-encoded item.
        for raw_line in input:
            yield json.loads(raw_line)

    results = main(config, input=_read_items(), include_unparsed=include_unparsed)
    async for result in results:
        encoded = json.dumps(result, default=json_defaults)
        output.writelines([encoded, "\n"])
def _generate_template_to_writer(self, base: pathlib.Path, source: pathlib.Path,
                                 writer: TextIO, **extra_variables):
    """Render the Jinja template at `source` (relative to `base`) and stream
    the output into `writer`, injecting generator metadata variables."""
    try:
        template = self.env.get_template(str(source))
    except jinja2.TemplateNotFound as e:
        raise GeneratorError(
            "Template {} not found (search path {})".format(
                source, self._format_search_path())) from e

    timestamp = datetime.datetime.now(datetime.timezone.utc).astimezone()
    comment = "Generated on {} from {} by {}".format(
        timestamp.strftime(self.DATETIME_FORMAT), source, self._current_user)
    rel_source = source.relative_to(base)

    rendered = template.stream(**self.config, **extra_variables,
                               comment=comment,
                               source_base=base,
                               source=rel_source,
                               source_dir=rel_source.parent)
    writer.writelines(rendered)
def print_config(verbose: bool = False, stream: TextIO = None) -> None:
    """Print ezdxf version and configuration info to `stream`.

    Defaults to sys.stdout when no stream is given; `verbose` additionally
    dumps the options, environment variables, and loaded config files.
    """
    from pathlib import Path

    out = sys.stdout if stream is None else stream
    out.writelines([
        f"ezdxf {__version__} from {Path(__file__).parent}\n",
        f"Python version: {sys.version}\n",
        f"using C-extensions: {YES_NO[options.use_c_ext]}\n",
        f"using Matplotlib: {YES_NO[options.use_matplotlib]}\n",
    ])
    if not verbose:
        return
    out.write("\nConfiguration:\n")
    options.write(out)
    out.write("\nEnvironment Variables:\n")
    for var_name in options.CONFIG_VARS:
        out.write(f"{var_name}={os.environ.get(var_name, '')}\n")
    out.write("\nLoaded Config Files:\n")
    for cfg_path in options.loaded_config_files:
        out.write(str(cfg_path.absolute()) + "\n")
def try_to_write_header_to_output_file(
    self,
    project_or_project_and_group: str,
    output_file: TextIO,
    empty_config: bool = False,
):
    """Write the shared YAML top-level key under which "_write_to_file"
    later dumps the configurations.

    Args:
        project_or_project_and_group: Name used as the YAML key.
        output_file: Open file handle; nothing is written when falsy.
        empty_config: When True, write an empty mapping ("key: {}") instead
            of a bare key expecting content to follow.

    Raises:
        Exception: Re-raised after logging when the write fails.
    """
    if not output_file:
        return
    try:
        if empty_config:
            # BUG FIX: writelines() was called with a single string (one
            # write per character); write() is the correct call.
            # The doubled braces render as a literal "{}".
            output_file.write(
                f"{project_or_project_and_group}: {{}}\n")
        else:
            output_file.write(
                f"{project_or_project_and_group}:\n")
    except Exception as e:
        logging.error(
            f"Error when trying to write to {output_file.name}: {e}")
        raise e
def _generate_template_to_writer(self, base: pathlib.Path, source: pathlib.Path,
                                 writer: TextIO, **extra_variables):
    """Look up the template for `source` and stream its rendered output into
    `writer`, passing the generator config plus provenance variables."""
    try:
        template = self.env.get_template(str(source))
    except jinja2.TemplateNotFound as e:
        message = ("Template {} not found (search path {})"
                   .format(source, self._format_search_path()))
        raise GeneratorError(message) from e

    # Provenance comment embedded into the generated file.
    generated_at = datetime.datetime.now(datetime.timezone.utc).astimezone()
    comment = ("Generated on {} from {} by {}"
               .format(generated_at.strftime(self.DATETIME_FORMAT),
                       source, self._current_user))

    relative = source.relative_to(base)
    writer.writelines(
        template.stream(**self.config, **extra_variables,
                        comment=comment,
                        source_base=base,
                        source=relative,
                        source_dir=relative.parent))
def _write_table( module_name: str, module_path: str, supported_status: Dict[str, SupportedStatus], w_fd: TextIO, ) -> None: """ Write table by using Sphinx list-table directive. """ lines = [] lines.append("Supported ") if module_name: lines.append(module_name) else: lines.append("General Function") lines.append(" APIs\n") lines.append("-" * 100) lines.append("\n") lines.append(".. currentmodule:: %s" % module_path) if module_name: lines.append(".%s\n" % module_name) else: lines.append("\n") lines.append("\n") lines.append(".. list-table::\n") lines.append(" :header-rows: 1\n") lines.append("\n") lines.append(" * - API\n") lines.append(" - Implemented\n") lines.append(" - Missing parameters\n") for func_str, status in supported_status.items(): func_str = _escape_func_str(func_str) if status.implemented == Implemented.NOT_IMPLEMENTED.value: lines.append(" * - %s\n" % func_str) else: lines.append(" * - :func:`%s`\n" % func_str) lines.append(" - %s\n" % status.implemented) lines.append(" - \n") if not status.missing else lines.append( " - %s\n" % status.missing) w_fd.writelines(lines)
def _write_to_file(
    self,
    configuration_to_process,
    output_file: TextIO,
):
    """Write the indented YAML dump of a configuration dict under the key
    written by "try_to_write_header_to_output_file".

    Args:
        configuration_to_process: Configuration data to serialize as YAML.
        output_file: Open, writable file handle.

    Raises:
        Exception: Re-raised after logging when the write fails.
    """
    try:
        # BUG FIX: the original called writelines() with a single string
        # (and a stray trailing comma); write() is the correct call for
        # one string.
        output_file.write(f" {self.__configuration_name}:\n")
        indented_configuration = textwrap.indent(
            yaml.dump(
                configuration_to_process,
                default_flow_style=False,
            ),
            " ",
        )
        output_file.write(indented_configuration)
    except Exception as e:
        logging.error(
            f"Error when trying to write to {output_file.name}: {e}")
        raise e
def write_default_config(self, cfg_file: TextIO) -> None: """Write default configuration for all known tasks into file. Arguments: :cfg_file: Openned file-like object. """ # Write list of tasks cfg_file.write("""; --------------------------- ; Task executor configuration ; --------------------------- """) for option in self.config.options.values(): cfg_file.writelines(option.get_config()) cfg_file.write("\n") # seen_options = {} multi_used = {} # collect all options from tasks for task in self.tasks: for name in task.config.options: seen_options.setdefault(name, list()).append(task.name) # Options used by multiple tasks for name, tasks in seen_options.items(): if len(tasks) > 1: multi_used[name] = tasks cfg_file.write("; ------------------------------\n") cfg_file.write("; Options used by multiple tasks\n") cfg_file.write("; ------------------------------\n\n") for name, tasks in multi_used.items(): task = self.get_task(tasks[0]) option = task.config.get_option(name) lines = option.get_config() lines.insert( len(lines) - 1, "; Used by tasks: %s\n" % ', '.join(tasks)) lines.insert(len(lines) - 1, ";\n") cfg_file.writelines(lines) cfg_file.write("\n") # Options used by only one task for task in self.tasks: title = "Task '%s'" % task.name cfg_file.write("; %s\n" % ('-' * len(title))) cfg_file.write("; %s\n" % title) cfg_file.write("; %s\n\n" % ('-' * len(title))) has_options = False for name, option in task.config.options.items(): if name not in multi_used: has_options = True cfg_file.writelines(option.get_config()) cfg_file.write("\n") if not has_options: if task.config.options: cfg_file.write( "; This task has no private options (not shared with other tasks).\n" ) else: cfg_file.write( "; This task has no configuration options.\n") cfg_file.write("\n")
def dump(
    references: List[Dict],
    file: TextIO,
    *,
    implementation: Optional[BaseWriter] = None,
    **kw,
):
    """Write an RIS file to file or file-like object.

    Entries are codified as dictionaries whose keys are the different tags.
    For single line and singly occurring tags, the content is codified as a
    string. In the case of multiline or multiple key occurrences, the content
    is returned as a list of strings.

    Args:
        references (List[Dict]): List of references.
        file (TextIO): File handle to store ris formatted data.
        implementation (RisImplementation): RIS implementation; base by default.
    """
    text = dumps(references, implementation=implementation, **kw)
    # BUG FIX: `dumps` returns one string; writelines() would iterate it
    # character by character. write() emits it in a single call.
    file.write(text)
def write(file: TextIO, fields: List[str]) -> Generator:
    """The writer coroutine for tab format.

    Writes a tab-separated header for `fields`, then accepts records via
    ``send()`` and writes one row per record until the generator is closed.

    Raises:
        ValueError: If 'seqid' is not among `fields`.
    """
    if 'seqid' not in fields:
        raise ValueError("Tab format expects a 'seqid'")

    # write the heading
    file.write('\t'.join(fields) + '\n')

    # BUG FIX: the original constructed a fresh Unicifier for every record
    # inside the loop, so seqid uniqueness was never enforced across records.
    # Create one lazily (on first record) and reuse it for the whole stream.
    unicifier = None

    while True:
        # receive a record
        try:
            record = yield
        except GeneratorExit:
            break

        # enforce name uniqueness across all records seen so far
        if unicifier is None:
            unicifier = Unicifier()
        record['seqid'] = unicifier.unique(record['seqid'])

        # FIX: the joined row is a single string, so write() is correct;
        # writelines() would iterate it character by character.
        file.write('\t'.join([record[field] for field in fields]) + '\n')
def dump(
    file: TextIO,
    scenario_paths: Iterable[LaxPath],
    plugins: Sequence[PluginName] = (),
    with_default_plugins: bool = True,
) -> None:
    """
    Transforms the provided *scenario_paths* using the provided *plugins*,
    and writes the resulting locustfile code in the provided *file*.

    See also: :func:`dumps`

    :param file: an object with a `writelines` method (as specified by
        io.TextIOBase), e.g. `sys.stdout` or the result of `open`.
    :param scenario_paths: paths to scenario files (HAR) or directories.
    :param plugins: names of plugins to use.
    :param with_default_plugins: whether the default plugins should be used
        in addition to those provided (recommended: True).
    """
    code_lines = _dump_as_lines(scenario_paths, plugins, with_default_plugins)
    # Blank-line separator between each generated chunk.
    file.writelines(intersperse("\n", code_lines))
def dump(obj: List[ButtonBatchLine], fp: TextIO):
    """Write the raw `line` of every batch entry to `fp`, in order."""
    fp.writelines(entry.line for entry in obj)
def write_to(self, output: TextIO):
    """Copy this object's contents into `output`: from the cached file when
    one exists, otherwise by delegating to the underlying writer."""
    cached_path = self._as_file_path
    if cached_path is None:
        self._writer.write(self._tmp_file_space, output)
    else:
        with cached_path.open() as cached:
            output.writelines(cached)
def _write_lines(file: TextIO, lines): file.writelines((str(i) + "\n" for i in lines))
def sample_export(results, file_handle: TextIO):
    """
    Renders Biosamples as a clinical_sample text file suitable for
    importing by cBioPortal.

    Writes nothing when no sample yields a usable subject id.

    cBioPortal Sample fields specs:
    ---------------------------------
    Required:
    - PATIENT_ID
    - SAMPLE_ID

    Special columns:
    - For pan-cancer summary statistics tab:
        - CANCER_TYPE as an Oncotree code
        - CANCER_TYPE_DETAILED
    - SAMPLE_DISPLAY_NAME
    - SAMPLE_CLASS
    - METASTATIC_SITE / PRIMARY_SITE overrides the patients level attribute
      TUMOR_SITE
    - SAMPLE_TYPE, TUMOR_TISSUE_SITE, TUMOR_TYPE can have the following
      values (are displayed with a distinct color in the timelines):
        - "recurrence", "recurred", "progression"
        - "metastatic", "metastasis"
        - "primary" or any other value
    - KNOWN_MOLECULAR_CLASSIFIER
    - GLEASON_SCORE (prostate cancer)
    - HISTOLOGY
    - TUMOR_STAGE_2009
    - TUMOR_GRADE
    - ETS_RAF_SPINK1_STATUS
    - TMPRSS2_ERG_FUSION_STATUS
    - ERG_FUSION_ACGH
    - SERUM_PSA
    - DRIVER_MUTATIONS
    """
    samples = []
    for sample in results:
        # sample.inidividual may be null: use Phenopacket model Subject field
        # instead if available or skip.
        if sample.individual is not None:
            subject_id = sample.individual
        elif sample.phenopacket_subject_id is not None:
            subject_id = sample.phenopacket_subject_id
        else:
            continue

        sample_obj = {
            'individual_id': subject_id,
            'id': sample.id
        }
        if sample.sampled_tissue:
            sample_obj['tissue_label'] = sample.sampled_tissue.get('label', '')
        samples.append(sample_obj)

    # BUG FIX: the original indexed samples[0] unconditionally and crashed
    # with IndexError when `results` was empty or every sample was skipped.
    if not samples:
        return

    # NOTE(review): fieldnames come from the first sample only; a later
    # sample with a 'tissue_label' the first one lacks would make DictWriter
    # raise — confirm whether mixed shapes can occur in practice.
    columns = samples[0].keys()
    headers = biosample_to_sample_header(columns)
    file_handle.writelines([line + '\n' for line in headers])
    dict_writer = csv.DictWriter(file_handle, fieldnames=columns, delimiter='\t')
    dict_writer.writerows(samples)
def dump_to_file(stream: Generator[str, None, None], out: TextIO):
    """Drain `stream`, writing every yielded chunk to `out` verbatim."""
    for chunk in stream:
        out.write(chunk)
def _write_atoms(self, file_out: TextIO) -> None:
    """Emit the CIF atom-site header template followed by all atom records."""
    header_text = get_resource("cif_templates/atom_header.txt").read_text()
    file_out.write(header_text)
    for atom_record in self.atoms:
        file_out.write(atom_record)
def _writelines_to(f: TextIO, data: Sequence[str]): f.writelines(data)
def write(self, source: StringSourceContents, output: TextIO) -> None:
    """Apply the transformer function to the lines of `source` and write
    the transformed lines to `output`."""
    with source.as_lines as source_lines:
        transformed = self._transformer_function(source_lines)
        output.writelines(transformed)
def write_to(self, output: TextIO):
    """Stream this object's lines into `output` via its `as_lines`
    context manager."""
    with self.as_lines as line_iter:
        output.writelines(line_iter)
def write_hash_file(fp: TextIO, records: Iterable[HashFileRecord]) -> None:
    """Write one "<digest> <path>" line per record to `fp`."""
    lines = [f"{entry.digest} {entry.path}\n" for entry in records]
    fp.writelines(lines)