Example #1
    def run(self):
        # Get configurations and settings
        config = self._config
        subject_list = self._sub_dict
        cloudify = self._cloudify
        ns_at_once = config.get('num_subjects_at_once', 1)
        write_report = config.get('write_report', False)

        # Create output directory
        try:
            os.makedirs(config["output_directory"])
        except OSError:
            if not op.isdir(config["output_directory"]):
                err = "[!] Output directory could not be created.\n" \
                      "Path: %s\n\n" % config["output_directory"]
                raise Exception(err)

        # Create working directory
        try:
            os.makedirs(config["working_directory"])
        except OSError:
            if not op.isdir(config["working_directory"]):
                err = "[!] Working directory could not be created.\n" \
                      "Path: %s\n\n" % config["working_directory"]
                raise Exception(err)

        run_name = config['pipeline_config_yaml'].split("/")[-1].split(".")[0]

        results = None
        if not cloudify:
            results = self._run_here(run_name)
        else:
            results = self._run_cloud(run_name)

        # PDF reporting
        if write_report:
            from qap.viz.reports import workflow_report
            logger.info('Writing PDF reports')
            qap_type = 'qap_' + config['qap_type']
            in_csv = op.join(config['output_directory'], '%s.csv' % qap_type)

            reports = workflow_report(in_csv,
                                      qap_type,
                                      run_name,
                                      results,
                                      out_dir=config['output_directory'])

            for k, v in reports.items():
                if v['success']:
                    logger.info('Written report (%s) in %s' % (k, v['path']))
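
The try/except guards around os.makedirs() above predate os.makedirs(..., exist_ok=True); on Python 3.2+ the same check collapses to a single call. A minimal sketch, using hypothetical output and working paths:

import os

# Hypothetical configuration values, for illustration only.
config = {
    "output_directory": "/tmp/qap_run/output",
    "working_directory": "/tmp/qap_run/work",
}

for key in ("output_directory", "working_directory"):
    # exist_ok=True turns the call into a no-op when the directory already
    # exists, so no try/except or isdir() re-check is needed.
    os.makedirs(config[key], exist_ok=True)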
def main():

    import os
    import argparse
    from qap.script_utils import gather_json_info, json_to_csv
    from qap.qap_utils import raise_smart_exception

    parser = argparse.ArgumentParser()
    parser.add_argument("output_dir", type=str,
                        help="the main output directory of the QAP run "
                        "which contains the participant directories")
    parser.add_argument("--with_group_reports", action='store_true',
                        default=False, help="Write a summary report in PDF "
                        "format.")
    parser.add_argument("--with_full_reports", action='store_true',
                        default=False, help="Write the summary report and "
                        "the individual participant reports as well.")

    args = parser.parse_args()

    json_dict = gather_json_info(args.output_dir)
    json_to_csv(json_dict)

    if args.with_group_reports or args.with_full_reports:
        from qap.viz.reports import workflow_report
        # TODO: read in combined results dictionary from log JSONs
        #logs_dict = gather_json_info("_".join([args.output_dir, "logs"]))

        qap_types = ["anatomical_spatial",
                     "functional_spatial",
                     "functional_temporal"]
        for qap_type in qap_types:
            qap_type = "_".join(["qap", qap_type])
            run_name = os.path.basename(os.path.normpath(args.output_dir))
            in_csv = os.path.join(os.getcwd(), '%s.csv' % qap_type)
            if not os.path.isfile(in_csv):
                continue
            reports = workflow_report(in_csv, qap_type, run_name,
                                      out_dir=args.output_dir,
                                      full_reports=args.with_full_reports)
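
Because main() is only a thin argparse wrapper, it can also be exercised directly from Python by pointing sys.argv at an existing QAP output directory before calling it. A minimal sketch, with a hypothetical path:

import sys

# Hypothetical QAP output directory; replace with the path of a real run.
sys.argv = ["qap_reports", "/data/qap_output", "--with_group_reports"]

# argparse inside main() reads sys.argv, so this gathers the per-participant
# JSON files under /data/qap_output, writes the combined CSVs, and then
# builds the group PDF report.
main()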
Example #4

if __name__ == "__main__":
    import argparse
    import logging
    import os.path as op
    from qap.viz.reports import workflow_report

    # Basic console logger for the report status messages below.
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__name__)

    parser = argparse.ArgumentParser()
    req = parser.add_argument_group("Required Inputs")
    req.add_argument('-i', '--input_csv', type=str, required=True,
                     help='filepath to csv file generated by qap')

    req.add_argument(
        '-m', '--qap_mode', type=str, help='report type',
        choices=['qap_anatomical_spatial', 'qap_functional_temporal',
                 'qap_functional_spatial'], required=True)

    args = parser.parse_args()

    in_csv = args.input_csv
    out_dir = op.dirname(in_csv)
    qap_type = args.qap_mode
    # Name the report run after the directory that contains the input CSV.
    run_name = op.basename(op.abspath(out_dir))

    reports = workflow_report(in_csv, qap_type, run_name,
                              out_dir=out_dir)

    for k, v in reports.items():
        if v['success']:
            logger.info('Written report (%s) in %s' % (k, v['path']))
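
The loops above show that workflow_report() returns a dict keyed by report name, with each value carrying at least 'success' and 'path'. A small helper, sketched under that assumption, that separates written reports from failed ones:

# Assumes the {name: {"success": bool, "path": str}} shape used above.
def summarize_reports(reports):
    written = {k: v["path"] for k, v in reports.items() if v.get("success")}
    failed = [k for k, v in reports.items() if not v.get("success")]
    return written, failed

# Hypothetical return value, only to show the expected shape.
example = {
    "qap_anatomical_spatial": {"success": True,
                               "path": "/tmp/qap_anatomical_spatial.pdf"},
    "group_report": {"success": False, "path": None},
}
written, failed = summarize_reports(example)
print(written)   # {'qap_anatomical_spatial': '/tmp/qap_anatomical_spatial.pdf'}
print(failed)    # ['group_report']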