Example #1
0
    # Merge the citation lists of both papers; np.union1d also removes
    # duplicates and returns them sorted in alphabetic order.
    publications = np.union1d(publications[1], publications[0]).tolist()

    html = html % (len(publications), gen_date)

    # Sort by year of publication, newest first; entries without a
    # recognizable "YYYY -" marker get year -1 and therefore sort last.
    years = list()
    for pub in publications:
        # BUG FIX: use a raw string for the regex — '\d' in a plain string
        # is an invalid escape sequence (DeprecationWarning/SyntaxWarning
        # in modern Python).
        m = re.search(r'\d{4} -', pub)
        if m is None:
            years.append(-1)
        else:
            # Strip the trailing ' -' to keep only the 4-digit year.
            years.append(int(m.group(0)[:-2]))
    order = np.argsort(years)[::-1]
    publications = [publications[idx] for idx in order]

    # Keep only publications that contain a link (http://, https://, ftp://).
    publications = [
        p for p in publications
        if any(sub in p for sub in ('http://', 'https://', 'ftp://'))
    ]

    # Render the citation template and normalize the Unicode ellipsis.
    this_html = cite_template.substitute(publications=publications)
    this_html = this_html.replace('…', '...')
    html += this_html

    # Write the rst output. BUG FIX: the file was opened in text mode ('w')
    # while bytes were written, which raises TypeError on Python 3; open in
    # binary mode so the UTF-8 encoded bytes can be written directly.
    with open(op.join('..', 'cited.rst'), 'wb') as f:
        f.write(html.encode('utf8'))
Example #2
0
    # get a union of the citations for the two papers, sorted in
    # alphabetic order (np.union1d also removes duplicates)
    publications = np.union1d(publications[1], publications[0]).tolist()

    html = html % (len(publications), gen_date)

    # sort by year of publication, newest first; entries without a
    # recognizable "YYYY -" marker get year -1 and therefore sort last
    years = list()
    for pub in publications:
        m = re.search(r'\d{4} -', pub)
        if m is None:
            years.append(-1)
        else:
            # strip the trailing ' -' to keep only the 4-digit year
            years.append(int(m.group(0)[:-2]))
    order = np.argsort(years)[::-1]
    publications = [publications[idx] for idx in order]

    # filter out publications not containing (http://, https://, ftp://)
    publications = [p for p in publications if
                    any(sub in p for sub in ('http://', 'https://', 'ftp://'))]

    # create rst & cleanup (normalize the Unicode ellipsis)
    this_html = cite_template.substitute(publications=publications)
    this_html = this_html.replace('…', '...')
    html += this_html

    # output an rst file. BUG FIX: the file was opened in text mode ('w')
    # while bytes were written, which raises TypeError on Python 3; open in
    # binary mode so the UTF-8 encoded bytes can be written directly.
    with open(op.join('..', 'cited.rst'), 'wb') as f:
        f.write(html.encode('utf8'))
Example #3
0
def make_report(root, session=None, verbose=True):
    """Generate a methods paragraph describing a BIDS dataset.

    The paragraph summarizes the REQUIRED ``dataset_description.json``
    file plus, when available, the RECOMMENDED ``participants.tsv`` file
    and the datatype-agnostic information for (M/I)EEG data read from the
    ``*_scans.tsv`` file.

    Parameters
    ----------
    root : str | pathlib.Path
        The path of the root of the BIDS compatible folder.
    session : str | None
            The (optional) session for a item. Corresponds to "ses".
    verbose : bool | None
        Set verbose output to true or false.

    Returns
    -------
    paragraph : str
        The paragraph wrapped with 80 characters per line
        describing the summary of the subjects.
    """
    # High-level dataset inventory.
    subjects = get_entity_vals(root, entity_key='subject')
    sessions = get_entity_vals(root, entity_key='session')
    modalities = get_datatypes(root)

    # Restrict to the supported modalities (MEG/EEG/iEEG) and map each to
    # its pretty display name.
    datatype_map = {'meg': 'MEG', 'eeg': 'EEG', 'ieeg': 'iEEG'}
    modalities = [datatype_map[dt] for dt in modalities if dt in datatype_map]

    # REQUIRED: dataset_description.json summary.
    dataset_summary = _summarize_dataset(root)

    # RECOMMENDED: participants and scans summaries.
    participant_summary = _summarize_participants_tsv(root)
    scans_summary = _summarize_scans(root, session=session, verbose=verbose)

    # Disable the 'recommended' participants section when there is no
    # participants file to summarize.
    if participant_summary:
        tmpl = Template(content=f'{FUNCTION_TEMPLATE}{PARTICIPANTS_TEMPLATE}')
        participant_template = tmpl.substitute(**participant_summary)
        if verbose:
            print(f'The participant template found: {participant_template}')
    else:
        participant_template = ''

    dataset_summary['PARTICIPANTS_TEMPLATE'] = str(participant_template)

    # Likewise for the scans section.
    if scans_summary:
        datatype_agnostic_template = DATATYPE_AGNOSTIC_TEMPLATE
    else:
        datatype_agnostic_template = ''

    dataset_summary.update({
        'system': modalities,
        'n_subjects': len(subjects),
        'n_sessions': len(sessions),
        'sessions': sessions,
    })

    # XXX: add channel summary for modalities (ieeg, meg, eeg)
    # Build the content and the mne Template; lower-case templates are
    # "Recommended" while upper-case templates are "Required".
    content = (f'{FUNCTION_TEMPLATE}{BIDS_DATASET_TEMPLATE}'
               f'{datatype_agnostic_template}')
    paragraph = Template(content=content).substitute(
        **dataset_summary, **scans_summary)

    # Collapse newlines and doubled spaces before re-wrapping at 80 chars.
    paragraph = paragraph.replace('\n', ' ').replace('  ', ' ')

    return '\n'.join(textwrap.wrap(paragraph, width=80))
Example #4
0
"""
===========================
Plotting the analysis chain
===========================

Run the analysis.
"""
import codecs
import os.path as op

from mne.externals.tempita import Template

# Load the per-subject analysis template once ...
with codecs.open(op.join(op.dirname(__file__), 'template_analysis.py'), 'r',
                 'utf-8') as fin:
    template = Template(u''.join(fin.readlines()))
# ... then render one plot_analysis_XX.py script per subject (01-19).
for subject in range(1, 20):
    py_str = template.substitute(subject_id='%02d' % subject)
    out_fname = op.join(op.dirname(__file__), '..', '..', 'results',
                        'single_subject_analysis',
                        'plot_analysis_%02d.py' % subject)
    with codecs.open(out_fname, 'w', 'utf-8') as fout:
        fout.write(py_str)