write_hadoop_input(input_filename, timing_run_parameters,  n_steps, SEED=gen_seed)

    # Total number of Hadoop map tasks: the full Cartesian product of the four
    # parameter grids, times 5.  NOTE(review): the literal 5 presumably matches
    # a per-combination replication count used by write_hadoop_input -- confirm
    # and consider naming it (e.g. N_REPLICATIONS) so the two stay in sync.
    n_tasks = len(num_rows_list)*len(num_cols_list)*len(num_clusters_list)*len(num_splits_list)*5
    # Create a dummy (empty) table data file -- the timing runs generate their
    # own data, but the engine protocol still expects this pickle to exist.
    table_data=dict(T=[],M_c=[],X_L=[],X_D=[])
    fu.pickle(table_data, table_data_filename)

    # NOTE(review): everything from the fu.pickle call above through this
    # do_local / do_remote / else dispatch appears AGAIN verbatim (reformatted)
    # later in this function -- almost certainly a merge/paste artifact.
    # Left as-is pending confirmation, but deduplicating matters: running both
    # copies pickles twice and submits the remote Hadoop job twice.
    if do_local:
        # Local path: run the engine script in-process against the input file.
        xu.run_script_local(input_filename, script_filename, output_filename, table_data_filename)
        print('Local Engine for automated timing runs has not been completely implemented/tested')
    elif do_remote:
        # Remote path: configure a Hadoop engine, ship the support files,
        # submit the job, and (on success) pull results back for parsing.
        hadoop_engine = HE.HadoopEngine(which_engine_binary=which_engine_binary,
                output_path=output_path,
                input_filename=input_filename,
                table_data_filename=table_data_filename)
        # Support files = pickled table data + the command dict telling the
        # workers which operation to run ('time_analyze').
        xu.write_support_files(table_data, hadoop_engine.table_data_filename,
                              dict(command='time_analyze'), hadoop_engine.command_dict_filename)
        hadoop_engine.send_hadoop_command(n_tasks=n_tasks)
        # Blocks until the job finishes; truthy only on success.
        was_successful = hadoop_engine.get_hadoop_results()
        if was_successful:
            # Copy raw output locally, flatten it to CSV, then fit regression
            # coefficients over the timing parameters.
            hu.copy_hadoop_output(hadoop_engine.output_path, output_filename)
            parse_timing.parse_timing_to_csv(output_filename, outfile=parsed_out_file)
            # NOTE(review): coeff_list is computed but not used in this span --
            # verify it is consumed (or returned) later in the function.
            coeff_list = find_regression_coeff(parsed_out_file, parameter_list)

        else:
            print('remote hadoop job NOT successful')
    else:
        # Dry run: build the engine object only, to surface what the command
        # would be, without submitting anything.
        hadoop_engine = HE.HadoopEngine(which_engine_binary=which_engine_binary,
                output_path=output_path,
                input_filename=input_filename,
                table_data_filename=table_data_filename)
    # NOTE(review): this fu.pickle call and the entire do_local / do_remote /
    # else dispatch below duplicate (with only formatting differences) the
    # logic that appears earlier in this function -- almost certainly a
    # merge/paste artifact.  Left byte-identical pending confirmation, but the
    # duplication is a live bug candidate: it re-pickles the table data and
    # re-submits the remote Hadoop job a second time.
    fu.pickle(table_data, table_data_filename)

    if do_local:
        # Local path: run the engine script in-process against the input file.
        xu.run_script_local(input_filename, script_filename, output_filename,
                            table_data_filename)
        print(
            'Local Engine for automated timing runs has not been completely implemented/tested'
        )
    elif do_remote:
        # Remote path: configure a Hadoop engine, ship the support files,
        # submit the job, and (on success) pull results back for parsing.
        hadoop_engine = HE.HadoopEngine(
            which_engine_binary=which_engine_binary,
            output_path=output_path,
            input_filename=input_filename,
            table_data_filename=table_data_filename)
        # Support files = pickled table data + the command dict telling the
        # workers which operation to run ('time_analyze').
        xu.write_support_files(table_data, hadoop_engine.table_data_filename,
                               dict(command='time_analyze'),
                               hadoop_engine.command_dict_filename)
        hadoop_engine.send_hadoop_command(n_tasks=n_tasks)
        # Blocks until the job finishes; truthy only on success.
        was_successful = hadoop_engine.get_hadoop_results()
        if was_successful:
            # Copy raw output locally, flatten it to CSV, then fit regression
            # coefficients over the timing parameters.
            hu.copy_hadoop_output(hadoop_engine.output_path, output_filename)
            parse_timing.parse_timing_to_csv(output_filename,
                                             outfile=parsed_out_file)
            # NOTE(review): coeff_list is computed but not used in this span --
            # verify it is consumed (or returned) later in the function.
            coeff_list = find_regression_coeff(parsed_out_file, parameter_list)

        else:
            print('remote hadoop job NOT successful')
    else:
        # Dry run: build the engine object only, to surface what the command
        # would be, without submitting anything (body continues below).
        # print what the command would be
        hadoop_engine = HE.HadoopEngine(
            which_engine_binary=which_engine_binary,