help=
        "If provided with a /path/to/save/file.npz, the script saves the results of the analysis to file."
    )

    args = parser.parse_args()

    # load spreadsheet / data frame describing the data sets to process
    datasets = load_dataframe(args.spreadsheet_file_path)

    # check contents of spreadsheet: the listed columns must exist and
    # hold string-typed file paths
    check_dataframe(datasets,
                    columns=[
                        'file_path_raw_signals',
                        'file_path_preprocessed_signals',
                        'file_path_manual_state_annotation',
                    ],
                    column_to_dtype={
                        'file_path_raw_signals': str,
                        'file_path_preprocessed_signals': str,
                        'file_path_manual_state_annotation': str,
                    })

    # restrict processing to user-selected rows, if any were given;
    # np.isin replaces np.in1d, which is deprecated in modern NumPy
    if args.only:
        datasets = datasets.loc[np.isin(range(len(datasets)), args.only)]

    # --------------------------------------------------------------------------------
    print("Loading data sets...")

    # accumulators filled by the per-dataset loop that follows
    signal_arrays = []
    state_vectors = []
    for ii, dataset in datasets.iterrows():
        "--model",
        help=
        "Use pre-trained model saved at /path/to/trained_model.pickle. If none is provided, the test is run in a hold-one-out fashion instead."
    )

    args = parser.parse_args()

    # load spreadsheet / data frame describing the data sets to process
    datasets = load_dataframe(args.spreadsheet_file_path)

    # check contents of spreadsheet: the listed columns must exist and
    # hold string-typed file paths
    check_dataframe(datasets,
                    columns=[
                        'file_path_preprocessed_signals',
                        'file_path_manual_state_annotation',
                    ],
                    column_to_dtype={
                        'file_path_preprocessed_signals': str,
                        'file_path_manual_state_annotation': str,
                    })

    # restrict processing to user-selected rows, if any were given;
    # np.isin replaces np.in1d, which is deprecated in modern NumPy
    if args.only:
        datasets = datasets.loc[np.isin(range(len(datasets)), args.only)]

    # --------------------------------------------------------------------------------
    print("Loading data sets...")

    # accumulators filled by the per-dataset loop that follows
    signal_arrays = []
    state_vectors = []
    for ii, dataset in datasets.iterrows():
        print("{} ({}/{})".format(dataset['file_path_preprocessed_signals'],
    )
    args = parser.parse_args()

    # load spreadsheet / data frame describing the data sets to process
    datasets = load_dataframe(args.spreadsheet_file_path)

    # check contents of spreadsheet.
    # NOTE: np.int / np.float were removed in NumPy 1.24 (they were mere
    # aliases of the builtins), so only the builtins and the concrete
    # NumPy scalar types are listed for the sampling frequency column.
    check_dataframe(
        datasets,
        columns=[
            'file_path_raw_signals',
            'sampling_frequency_in_hz',
            'file_path_{}_state_annotation'.format(args.annotation_type_a),
            'file_path_{}_state_annotation'.format(args.annotation_type_b),
        ] + state_annotation_signals,
        column_to_dtype={
            'file_path_raw_signals':
            str,
            'sampling_frequency_in_hz':
            (int, float, np.int64, np.float64),
            'file_path_{}_state_annotation'.format(args.annotation_type_a):
            str,
            'file_path_{}_state_annotation'.format(args.annotation_type_b):
            str,
        })

    # restrict processing to user-selected rows, if any were given;
    # np.isin replaces np.in1d, which is deprecated in modern NumPy
    if args.only:
        datasets = datasets.loc[np.isin(range(len(datasets)), args.only)]

    # turn off interactive mode if on
    plt.ioff()
                        type  = int,
                        help  = 'Indices corresponding to the rows to use (default: all). Indexing starts at zero.'
    )

    args = parser.parse_args()

    # load spreadsheet / data frame describing the data sets to process
    datasets = load_dataframe(args.spreadsheet_file_path)

    # check contents of spreadsheet: the listed columns must exist and
    # hold string-typed file paths
    check_dataframe(datasets,
                    columns = [
                        'file_path_preprocessed_signals',
                        'file_path_automated_state_annotation',
                        'file_path_review_intervals',
                    ],
                    column_to_dtype = {
                        'file_path_preprocessed_signals' : str,
                        'file_path_automated_state_annotation' : str,
                        'file_path_review_intervals' : str,
                    }
    )

    # restrict processing to user-selected rows, if any were given;
    # np.isin replaces np.in1d, which is deprecated in modern NumPy
    if args.only:
        datasets = datasets.loc[np.isin(range(len(datasets)), args.only)]

    # load the pre-trained annotator used by the loop that follows
    annotator = StateAnnotator()
    annotator.load(args.trained_model_file_path)

    for ii, dataset in datasets.iterrows():

        print("{} ({}/{})".format(dataset['file_path_preprocessed_signals'], ii+1, len(datasets)))
        nargs='+',
        type=int,
        help=
        'Indices corresponding to the rows to use (default: all). Indexing starts at zero.'
    )
    args = parser.parse_args()

    # load spreadsheet / data frame describing the data sets to process
    datasets = load_dataframe(args.spreadsheet_file_path)

    # check contents of spreadsheet: the listed columns must exist and
    # hold string-typed file paths
    check_dataframe(datasets,
                    columns=[
                        'file_path_sleepsign_state_annotation',
                        'file_path_manual_state_annotation',
                    ],
                    column_to_dtype={
                        'file_path_sleepsign_state_annotation': str,
                        'file_path_manual_state_annotation': str,
                    })

    # restrict processing to user-selected rows, if any were given;
    # np.isin replaces np.in1d, which is deprecated in modern NumPy
    if args.only:
        datasets = datasets.loc[np.isin(range(len(datasets)), args.only)]

    # convert each SleepSign hypnogram to the manual-annotation format
    for ii, dataset in datasets.iterrows():
        print("{} ({}/{})".format(
            dataset['file_path_sleepsign_state_annotation'], ii + 1,
            len(datasets)))
        old_file_path = dataset['file_path_sleepsign_state_annotation']
        new_file_path = dataset['file_path_manual_state_annotation']
        convert_sleepsign_hypnogram(old_file_path, new_file_path)
    parser.add_argument('--only',
                        nargs = '+',
                        type  = int,
                        help  = 'Indices corresponding to the rows to use (default: all). Indexing starts at zero.'
    )
    args = parser.parse_args()

    # load spreadsheet / data frame describing the data sets to process
    datasets = load_dataframe(args.spreadsheet_file_path)

    # check contents of spreadsheet: the annotation columns (selected via
    # the --annotation_type argument) must exist and hold string file paths
    check_dataframe(datasets,
                    columns = [
                        'file_path_{}_state_annotation'.format(args.annotation_type),
                        'file_path_{}_state_annotation_mat'.format(args.annotation_type)
                    ],
                    column_to_dtype = {
                        'file_path_{}_state_annotation'.format(args.annotation_type) : str,
                        'file_path_{}_state_annotation_mat'.format(args.annotation_type) : str,
                    }
    )

    # restrict processing to user-selected rows, if any were given;
    # np.isin replaces np.in1d, which is deprecated in modern NumPy
    if args.only:
        datasets = datasets.loc[np.isin(range(len(datasets)), args.only)]

    # convert each hypnogram to a .mat file at 1 s time resolution
    for ii, dataset in datasets.iterrows():
        print("{} ({}/{})".format(dataset['file_path_{}_state_annotation'.format(args.annotation_type)], ii+1, len(datasets)))
        old_file_path = dataset['file_path_{}_state_annotation'.format(args.annotation_type)]
        new_file_path = dataset['file_path_{}_state_annotation_mat'.format(args.annotation_type)]
        convert_hypnogram_to_mat(old_file_path, new_file_path, mapping=state_to_int, time_resolution=1)