Example 1
parser.add_argument('--stderr', dest='stderr', help='Job stderr')
parser.add_argument('--tool_id', dest='tool_id', help='Tool that was executed to produce the input dataset')
parser.add_argument('--tool_parameters', dest='tool_parameters', help='Tool parameters that were set when producing the input dataset')
parser.add_argument('--workflow_step_id', dest='workflow_step_id', default=None, help='Workflow step id')
parser.add_argument('--user_email', dest='user_email', help='Current user email')
args = parser.parse_args()

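# Note: the parser declarations above are truncated.  Judging from the
# five-way unpacking in the loop below, --inputs was presumably declared
# as a repeatable five-value option, along these lines (an inference, not
# the original source):
#
#     parser.add_argument('--inputs', dest='inputs', action='append', nargs=5,
#                         help='file_path hid input_id input_datatype dbkey')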
payload = None
statistics = []
datasets = []
# Generate the statistics and datasets.
if not args.inputs:
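    # No inputs are available to supply a dbkey, so fall back to 'unknown'
    # and submit a payload with empty statistics and datasets lists.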
    payload = stats_util.get_base_json_dict(args.config_file, 'unknown', args.history_id, args.history_name, args.stats_tool_id, args.stderr, args.tool_id, args.tool_parameters, args.user_email, args.workflow_step_id)
else:
    for inp in args.inputs:
        file_path, hid, input_id, input_datatype, dbkey = inp
        if payload is None:
            # Initialize the payload.
            payload = stats_util.get_base_json_dict(args.config_file, dbkey, args.history_id, args.history_name, args.stats_tool_id, args.stderr, args.tool_id, args.tool_parameters, args.user_email, args.workflow_step_id)
        statistics.append(stats_util.get_statistics(file_path, STATS))
        datasets.append(stats_util.get_datasets(args.config_file, input_id, input_datatype))
payload['statistics'] = statistics
payload['datasets'] = datasets
# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
Example 2
args = parser.parse_args()

payload = None
statistics = []
datasets = []
# Generate the statistics and datasets.
if not args.input_gffs:
    payload = stats_util.get_base_json_dict(args.config_file, 'unknown', args.history_id, args.history_name, args.stats_tool_id, args.stderr, args.tool_id, args.tool_parameters, args.user_email, args.workflow_step_id)
else:
    for inp in args.input_gffs:
        file_path, hid, input_id, input_datatype, dbkey = inp
        if payload is None:
            # Initialize the payload.
            payload = stats_util.get_base_json_dict(args.config_file, dbkey, args.history_id, args.history_name, args.stats_tool_id, args.stderr, args.tool_id, args.tool_parameters, args.user_email, args.workflow_step_id)
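        # GFF datasets get an empty statistics dict; this keeps the
        # statistics list aligned one-to-one with the datasets list.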
        statistics.append({})
        datasets.append(stats_util.get_datasets(args.config_file, input_id, input_datatype))

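# XML inputs likewise contribute dataset entries but no statistics; only
# input_id and input_datatype from each 5-tuple are used in this loop.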
input_xmls = args.input_xmls or []
for inp in input_xmls:
    file_path, hid, input_id, input_datatype, dbkey = inp
    statistics.append({})
    datasets.append(stats_util.get_datasets(args.config_file, input_id, input_datatype))

payload['statistics'] = statistics
payload['datasets'] = datasets
# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
Example 3
parser.add_argument('--workflow_step_id',
                    dest='workflow_step_id',
                    default=None,
                    help='Workflow step id')
parser.add_argument('--user_email',
                    dest='user_email',
                    help='Current user email')
args = parser.parse_args()

# Initialize the payload.
payload = stats_util.get_base_json_dict(args.config_file, args.dbkey,
                                        args.history_id, args.history_name,
                                        args.stats_tool_id, args.stderr,
                                        args.tool_id, args.tool_parameters,
                                        args.user_email, args.workflow_step_id)
# Generate the statistics and datasets.
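# Note: only this fragment forwards dbkey and a chromosome-lengths file
# through to get_statistics; the other fragments on this page omit them.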
payload['statistics'] = [
    stats_util.get_statistics(args.input,
                              STATS,
                              dbkey=args.dbkey,
                              chrom_lengths_file=args.chrom_len_file)
]
payload['datasets'] = [
    stats_util.get_datasets(args.config_file, args.input_id,
                            args.input_datatype)
]
# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
Example 4
parser.add_argument('--user_email',
                    dest='user_email',
                    help='Current user email')
args = parser.parse_args()

# Initialize the payload.
payload = stats_util.get_base_json_dict(args.config_file, args.dbkey,
                                        args.history_id, args.history_name,
                                        args.stats_tool_id, args.stderr,
                                        args.tool_id, args.tool_parameters,
                                        args.user_email, args.workflow_step_id)
# Each statistics dictionary maps to a dataset in the corresponding list.
statistics = []
# The png dataset has no statistics.
statistics.append({})
# Generate the statistics for the tabular dataset.
statistics.append(stats_util.get_statistics(args.input_tabular, STATS))
payload['statistics'] = statistics
d1 = stats_util.get_datasets(args.config_file, args.input_png_id,
                             args.input_png_datatype)
d2 = stats_util.get_datasets(args.config_file, args.input_tabular_id,
                             args.input_tabular_datatype)
payload['datasets'] = [d1, d2]
# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
Example 5
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', dest='config_file', help='stats_config.ini')
parser.add_argument('--dbkey', dest='dbkey', help='Input dbkey')
parser.add_argument('--history_id', dest='history_id', help='History id')
parser.add_argument('--history_name', dest='history_name', help='History name')
parser.add_argument('--input', dest='input', help='Input dataset')
parser.add_argument('--input_datatype', dest='input_datatype', help='Input dataset datatype')
parser.add_argument('--input_id', dest='input_id', help='Encoded input dataset id')
parser.add_argument('--output', dest='output', help='Output dataset')
parser.add_argument('--stats_tool_id', dest='stats_tool_id', help='The caller of this script')
parser.add_argument('--stderr', dest='stderr', help='Job stderr')
parser.add_argument('--tool_id', dest='tool_id', help='Tool that was executed to produce the input dataset')
parser.add_argument('--tool_parameters', dest='tool_parameters', help='Tool parameters that were set when producing the input dataset')
parser.add_argument('--workflow_step_id', dest='workflow_step_id', default=None, help='Workflow step id')
parser.add_argument('--user_email', dest='user_email', help='Current user email')
args = parser.parse_args()

# Initialize the payload.
payload = stats_util.get_base_json_dict(args.config_file, args.dbkey, args.history_id, args.history_name, args.stats_tool_id, args.stderr, args.tool_id, args.tool_parameters, args.user_email, args.workflow_step_id)
# Generate the statistics and datasets.
payload['statistics'] = [stats_util.get_statistics(args.input, STATS)]
payload['datasets'] = [stats_util.get_datasets(args.config_file, args.input_id, args.input_datatype)]
# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
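Example 5 is the only fragment on this page whose parser survives in full, so a
representative invocation can be sketched.  The script name and every argument
value below are hypothetical:

python pegr_stats.py --config_file stats_config.ini --dbkey sacCer3 \
    --history_id f2db41e1fa331b3e --history_name 'ChIP-exo run 12' \
    --input input.tabular --input_datatype tabular \
    --input_id d413a19dec13d11e --output results.txt \
    --stats_tool_id my_stats_tool --stderr stderr.log --tool_id bowtie2 \
    --tool_parameters 'preset=very-sensitive' --user_email user@example.org \
    --workflow_step_id 3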
Example 6
# Initialize the payload.
payload = stats_util.get_base_json_dict(args.config_file, args.dbkey,
                                        args.history_id, args.history_name,
                                        args.stats_tool_id, args.stderr,
                                        args.tool_id, args.tool_parameters,
                                        args.user_email, args.workflow_step_id)
statistics = []
datasets = []

# Generate statistics for heatmap dataset collection.
input_heatmaps = args.input_heatmaps or []
for inp in input_heatmaps:
    file_path, hid, input_id, input_datatype, dbkey = inp
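    # Heatmap images carry no computed statistics; the empty dict keeps
    # statistics[i] paired with datasets[i].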
    statistics.append({})
    datasets.append(
        stats_util.get_datasets(args.config_file, input_id, input_datatype))
# Generate statistics for tabular dataset.
statistics.append({})
datasets.append(
    stats_util.get_datasets(args.config_file, args.input_tabular_id,
                            args.input_tabular_datatype))
payload['statistics'] = statistics
payload['datasets'] = datasets

# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
Example 7
parser = argparse.ArgumentParser()
parser.add_argument('--config_file', dest='config_file', help='stats_config.ini')
parser.add_argument('--dbkey', dest='dbkey', help='Input dbkey')
parser.add_argument('--history_id', dest='history_id', help='History id')
parser.add_argument('--history_name', dest='history_name', help='History name')
parser.add_argument('--input', dest='input', help='Input dataset')
parser.add_argument('--input_datatype', dest='input_datatype', help='Input dataset datatype')
parser.add_argument('--input_id', dest='input_id', help='Encoded input dataset id')
parser.add_argument('--output', dest='output', help='Output dataset')
parser.add_argument('--stats_tool_id', dest='stats_tool_id', help='The caller of this script')
parser.add_argument('--stderr', dest='stderr', help='Job stderr')
parser.add_argument('--tool_id', dest='tool_id', help='Tool that was executed to produce the input dataset')
parser.add_argument('--tool_parameters', dest='tool_parameters', help='Tool parameters that were set when producing the input dataset')
parser.add_argument('--workflow_step_id', dest='workflow_step_id', default=None, help='Workflow step id')
parser.add_argument('--user_email', dest='user_email', help='Current user email')
args = parser.parse_args()

# Initialize the payload.
payload = stats_util.get_base_json_dict(args.config_file, args.dbkey, args.history_id, args.history_name, args.stats_tool_id, args.stderr, args.tool_id, args.tool_parameters, args.user_email, args.workflow_step_id)
# This tool's output has no computed statistics; send a single empty
# placeholder dict so the statistics list pairs with the datasets list.
payload['statistics'] = [{}]
payload['datasets'] = [stats_util.get_datasets(args.config_file, args.input_id, args.input_datatype)]
# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
Example 8
parser.add_argument('--input_tabular_datatype', dest='input_tabular_datatype', help='Input dataset datatype')
parser.add_argument('--input_tabular_id', dest='input_tabular_id', help='Encoded input_tabular dataset id')
parser.add_argument('--output', dest='output', help='Output dataset')
parser.add_argument('--stats_tool_id', dest='stats_tool_id', help='The caller of this script')
parser.add_argument('--stderr', dest='stderr', help='Job stderr')
parser.add_argument('--tool_id', dest='tool_id', help='Tool that was executed to produce the input dataset')
parser.add_argument('--tool_parameters', dest='tool_parameters', help='Tool parameters that were set when producing the input dataset')
parser.add_argument('--workflow_step_id', dest='workflow_step_id', default=None, help='Workflow step id')
parser.add_argument('--user_email', dest='user_email', help='Current user email')
args = parser.parse_args()

# Initialize the payload.
payload = stats_util.get_base_json_dict(args.config_file, args.dbkey, args.history_id, args.history_name, args.stats_tool_id, args.stderr, args.tool_id, args.tool_parameters, args.user_email, args.workflow_step_id)
# Each statistics dictionary maps to a dataset in the corresponding list.
statistics = []
# The png dataset has no statistics.
statistics.append({})
# Generate the statistics for the tabular dataset.
statistics.append(stats_util.get_statistics(args.input_tabular, STATS))
payload['statistics'] = statistics
d1 = stats_util.get_datasets(args.config_file, args.input_png_id, args.input_png_datatype)
d2 = stats_util.get_datasets(args.config_file, args.input_tabular_id, args.input_tabular_datatype)
payload['datasets'] = [d1, d2]
# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
Example 9
parser.add_argument('--user_email',
                    dest='user_email',
                    help='Current user email')
args = parser.parse_args()

# Initialize the payload.
payload = stats_util.get_base_json_dict(args.config_file, args.dbkey,
                                        args.history_id, args.history_name,
                                        args.stats_tool_id, args.stderr,
                                        args.tool_id, args.tool_parameters,
                                        args.user_email, args.workflow_step_id)
# Each statistics dictionary maps to a dataset in the corresponding list.
statistics = []
# The HTML dataset has no statistics.
statistics.append({})
# Generate the statistics for the txt dataset.
statistics.append(stats_util.get_statistics(args.input_txt, STATS))
payload['statistics'] = statistics
# Generate the list of datasets.
d1 = stats_util.get_datasets(args.config_file, args.input_html_id,
                             args.input_html_datatype)
d2 = stats_util.get_datasets(args.config_file, args.input_txt_id,
                             args.input_txt_datatype)
payload['datasets'] = [d1, d2]
# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
Example 10
parser.add_argument('--input_txt_id', dest='input_txt_id', help='Encoded input_txt dataset id')
parser.add_argument('--output', dest='output', help='Output dataset')
parser.add_argument('--stats_tool_id', dest='stats_tool_id', help='The caller of this script')
parser.add_argument('--stderr', dest='stderr', help='Job stderr')
parser.add_argument('--tool_id', dest='tool_id', help='Tool that was executed to produce the input dataset')
parser.add_argument('--tool_parameters', dest='tool_parameters', help='Tool parameters that were set when producing the input dataset')
parser.add_argument('--workflow_step_id', dest='workflow_step_id', default=None, help='Workflow step id')
parser.add_argument('--user_email', dest='user_email', help='Current user email')
args = parser.parse_args()

# Initialize the payload.
payload = stats_util.get_base_json_dict(args.config_file, args.dbkey, args.history_id, args.history_name, args.stats_tool_id, args.stderr, args.tool_id, args.tool_parameters, args.user_email, args.workflow_step_id)
# Each statistics dictionary maps to a dataset in the corresponding list.
statistics = []
# The HTML dataset has no statistics.
statistics.append({})
# Generate the statistics for the txt dataset.
statistics.append(stats_util.get_statistics(args.input_txt, STATS))
payload['statistics'] = statistics
# Generate the list of datasets.
d1 = stats_util.get_datasets(args.config_file, args.input_html_id, args.input_html_datatype)
d2 = stats_util.get_datasets(args.config_file, args.input_txt_id, args.input_txt_datatype)
payload['datasets'] = [d1, d2]
# Send the payload to PEGR.
pegr_url = stats_util.get_pegr_url(args.config_file)
response = stats_util.submit(args.config_file, payload)
# Make sure all is well.
stats_util.check_response(pegr_url, payload, response)
# If all is well, store the results in the output.
stats_util.store_results(args.output, pegr_url, payload, response)
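All ten fragments drive the same handful of stats_util entry points.  For
dry-running any of them without a live PEGR server, a throwaway stand-in such
as the following can be dropped in; every function body here is an assumption
inferred from the call sites above, not the real stats_util implementation:

# mock_stats_util.py -- offline stand-in for stats_util; assumed behavior only.
import json


def get_pegr_url(config_file):
    # The real function presumably reads the URL from stats_config.ini;
    # hard-coded here for offline testing.
    return 'https://pegr.example.org/api/stats'


def get_base_json_dict(config_file, dbkey, history_id, history_name,
                       stats_tool_id, stderr, tool_id, tool_parameters,
                       user_email, workflow_step_id):
    # Only the positional signature is taken from the call sites above;
    # the key names are invented for this mock.
    return {'dbkey': dbkey, 'historyId': history_id,
            'historyName': history_name, 'statsToolId': stats_tool_id,
            'stderr': stderr, 'toolId': tool_id,
            'toolParameters': tool_parameters, 'userEmail': user_email,
            'workflowStepId': workflow_step_id}


def get_statistics(file_path, stats, **kwds):
    # Stub: report a line count instead of the real metrics named in STATS.
    with open(file_path) as fh:
        return {'lineCount': sum(1 for _ in fh)}


def get_datasets(config_file, dataset_id, datatype):
    return {'id': dataset_id, 'type': datatype}


def submit(config_file, payload):
    # No network call; echo the payload back as a fake 200 response.
    return {'statusCode': 200, 'data': payload}


def check_response(pegr_url, payload, response):
    if response.get('statusCode') != 200:
        raise Exception('Error sending payload to %s: %s'
                        % (pegr_url, json.dumps(payload)))


def store_results(output, pegr_url, payload, response):
    with open(output, 'w') as fh:
        json.dump({'pegr_url': pegr_url, 'payload': payload,
                   'response': response}, fh, indent=2)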