Example #1
File: main.py  Project: pkulev/Fract
def parse_args():
    """Parse incoming args.

    :return argparse.Namespace: args"""

    parser = common.get_default_parser("PyGame fractal viewer")
    parser.add_argument(
        "-t", "--terminate", action="store_true",
        help="terminate the window immediately after rendering.")

    return parser.parse_args()
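
Every example on this page builds on a project-local `common.get_default_parser` helper instead of constructing an `argparse.ArgumentParser` from scratch. The actual helper differs per repository; a minimal sketch of the pattern (hypothetical, not taken from any of these projects: the `--verbose` flag is an assumption, while the `njobs` switch is suggested by Example #7's `njobs=False` call) might look like:

import argparse


def get_default_parser(description="", njobs=True):
    """Hypothetical sketch of a shared parser factory.

    Centralizes the options common to every script, so each entry
    point only adds its own arguments (as the examples below do).
    """
    parser = argparse.ArgumentParser(
        description=description,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument("--verbose", "-v", action="store_true",
                        help="enable verbose output")  # assumed flag
    if njobs:
        parser.add_argument("--njobs", "-j", type=int, default=1,
                            help="number of parallel jobs")  # assumed flag
    return parser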
Example #2
                           tag=tag,
                           show=show,
                           host=host)
        downloaded.append(obj)
    if not preserve_ccdb_structure:
        print("Printing")
        for i in downloaded:
            j = i.split("/")[-2]
            j = os.path.join(out_path, f"{j}.root")
            print(i, "->", j)
            os.rename(i, j)


if __name__ == "__main__":
    parser = get_default_parser(
        "Fetch data from CCDB. "
        "Basic example: `./fetch_output.py qc/TOF/MO/TaskRaw/hDiagnostic`")
    parser.add_argument(
        'ccdb_path',
        metavar='path_to_object',
        type=str,
        help=
        'Path of the object in the CCDB repository. If a `.txt` file is passed, every file listed in it is downloaded'
    )
    parser.add_argument(
        '--timestamp',
        "-t",
        metavar='object_timestamp',
        type=str,
        default=["-1"],
        nargs="+",
Example #3
    nnet.cuda()

    criterion = nn.CrossEntropyLoss()
    optimizer = th.optim.Adam(nnet.parameters(), lr=args.learning_rate)

    train_dataset = THCHS30(root=args.data_dir, data_type='train', left_context=left_context,
            right_context=right_context, model_type='cnn')
    train_loader  = data.DataLoader(dataset=train_dataset, batch_size=args.min_batch,
                                    shuffle=True, num_workers=6)

    test_dataset = THCHS30(root=args.data_dir, data_type='test', left_context=left_context,
            right_context=right_context, model_type='cnn')
    test_loader  = data.DataLoader(dataset=test_dataset, batch_size=args.min_batch,
                                    shuffle=True, num_workers=6)

    cross_validate(-1, nnet, test_dataset, test_loader) 
    for epoch in range(args.num_epochs):
        common.train_one_epoch(nnet, criterion, optimizer, train_loader)
        cross_validate(epoch, nnet, test_dataset, test_loader) 
        th.save(nnet, common.join_path(args.checkout_dir, 'cnn.{}.pkl'.format(epoch + 1)))

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="""Trains a simple CNN acoustic model using CE loss function""",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        conflict_handler='resolve',
        parents=[common.get_default_parser()])
    args = parser.parse_args()
    print(args)
    train(args)
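
The loop above delegates to helpers in the project's `common` module whose bodies are not shown. For reference, a generic PyTorch epoch of the kind `train_one_epoch` implies (a hypothetical sketch, assuming the loader yields (input, target) batches and the model already lives on the GPU, as `nnet.cuda()` above suggests) would be:

import torch as th


def train_one_epoch(nnet, criterion, optimizer, train_loader):
    """Hypothetical sketch of one optimization pass over the loader."""
    nnet.train()
    running_loss = 0.0
    for inputs, targets in train_loader:
        inputs, targets = inputs.cuda(), targets.cuda()  # match the model's device
        optimizer.zero_grad()                 # clear gradients from the previous step
        loss = criterion(nnet(inputs), targets)
        loss.backward()                       # backpropagate
        optimizer.step()                      # update parameters
        running_loss += loss.item()
    return running_loss / max(len(train_loader), 1)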
Example #4
            msg("Merging", len(files_per_type[i]), "files to", merged_file)
            run_cmd(
                f"hadd -j {njobs} -f {merged_file} `cat {merge_file_list}`",
                log_file=merge_file_list.replace(".txt", ".log"),
                time_it=True,
                comment=f"Merging to {merged_file}")
        if len(merged_files) == 0:
            warning_msg("Merged no files")
        else:
            msg("Merging completed, merged:",
                *merged_files,
                color=bcolors.BOKGREEN)


if __name__ == "__main__":
    parser = get_default_parser(description="Runner for O2 analyses")
    parser.add_argument(
        "modes",
        type=str,
        nargs="+",
        help="Running modes, as defined in the input configuration file")
    parser.add_argument(
        "--input",
        "-i",
        type=str,
        nargs="+",
        default=["listfiles.txt"],
        help=
        "Input files, either a list of AODs or a list of text files each containing a list of AODs"
    )
    parser.add_argument("--out_path",
Example #5
            bunched_aod_names[fn] = {
                "out_aod": out_aod,
                "file_index": i,
                "total_files": len(bunched_files),
                "input_size": bunched_sizes[i]
            }

    run_in_parallel(jobs,
                    run_merge,
                    list(bunched_aod_names.keys()),
                    job_message="Running AOD merging",
                    linearize_single_core=True)


if __name__ == "__main__":
    parser = get_default_parser(__doc__)
    parser.add_argument("input_files",
                        type=str,
                        nargs="+",
                        help="Input files to merge")
    parser.add_argument(
        "--max_bunch_size",
        "--max",
        "-m",
        default=1000,
        type=float,
        help="Approximate maximum size of the bunch to merge in MB")
    parser.add_argument("--output_path",
                        "-o",
                        default="./",
                        type=str,
Example #6
            run_cmd(f"mv {summaryfile} {output_path}")
            run_cmd(f"ln -s {os.path.join(output_path, summaryfile)} ./")

    if qa:
        msg(" --- running test analysis", color=bcolors.HEADER)
        run_cmd(
            f"./diagnostic_tools/doanalysis.py TrackQA RICH TOF -i {output_list_file} -M 25 -B 25"
        )
    if tof_mismatch == 1:  # TOF mismatch in create mode
        run_cmd(
            f"hadd -j {njobs} -f tofMM.root tof_mismatch_template_DF_*.root && rm tof_mismatch_template_DF_*.root"
        )


if __name__ == "__main__":
    parser = get_default_parser(description=__doc__)
    parser.add_argument(
        "configuration_file",
        type=str,
        help=
        "Input configuration file, e.g. the provided default_configfile.ini or a variation of it."
    )
    parser.add_argument(
        "--entry",
        "-e",
        type=str,
        default="DEFAULT",
        help=
        "Entry to use in the configuration file, e.g. the INEL or CCBAR entry."
    )
    parser.add_argument(
Example #7
            copyfile(i)
    elif args.command == "copylist":
        for i in input_files:
            copylist(i, jobs=args.jobs)
    elif args.command == "copied":
        for i in input_files:
            print(copied(i))
    elif args.command == "merge_aod":
        for i in input_files:
            merge_aod(i, input_file=args.what)
    else:
        warning_msg("Did not do anything")


if __name__ == "__main__":
    parser = get_default_parser(description=__doc__, njobs=False)
    parser.add_argument(
        "input_files",
        type=str,  # nargs="+",
        help="List of files in .txt file or files to download")
    # parser.add_argument("--input_files", "--input", "-i", type=str,# nargs="+",
    #                     default=[],
    #                     help="List of files in .txt file or files to download")
    subparsers = parser.add_subparsers(dest='command', help='sub-commands')

    def add_subp(fn, g=None):
        if g is None:
            g = subparsers.add_parser(fn.__name__, help=fn.__doc__)
        a = inspect.getfullargspec(fn)
        for i, j in enumerate(a.args):
            d = a.defaults[i]
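
Example #7 is cut off mid-helper, but the technique it sketches, deriving argparse subcommands from function signatures via `inspect.getfullargspec`, is worth spelling out. Note that `getfullargspec` aligns `defaults` with the trailing arguments only, so indexing `a.defaults[i]` over all args (as the truncated snippet does) works only when every argument has a default. A self-contained sketch handling the general case (all names here are hypothetical, not the snippet's actual helpers):

import argparse
import inspect


def add_subparser_for(subparsers, fn):
    """Hypothetical sketch: build a subparser mirroring fn's signature.

    Arguments without defaults become positionals; the rest become
    --options, zipped against the trailing defaults.
    """
    sub = subparsers.add_parser(fn.__name__, help=fn.__doc__)
    spec = inspect.getfullargspec(fn)
    defaults = spec.defaults or ()
    n_positional = len(spec.args) - len(defaults)
    for name in spec.args[:n_positional]:
        sub.add_argument(name)
    for name, default in zip(spec.args[n_positional:], defaults):
        sub.add_argument(f"--{name}", default=default)
    return sub


def copyfile(path, destination="./"):
    """Copy one file to a destination (stub for illustration)."""
    print("would copy", path, "to", destination)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers(dest="command")
    add_subparser_for(subparsers, copyfile)
    # -> Namespace(command='copyfile', path='a.root', destination='out/')
    print(parser.parse_args(["copyfile", "a.root", "--destination", "out/"]))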