Slengths)
    # NOTE(review): fragment of a plotting/saving routine — the enclosing
    # `def` (and the statement that `Slengths)` above closes) is missing
    # from this chunk; the file appears to be a broken concatenation here.
    num_plots = len(spec_templates)
    num_rows = 2
    # Integer division under Python 2 (this file uses `xrange`); under
    # Python 3 this would yield a float and break plt.subplot — TODO confirm
    # the intended interpreter version.
    num_cols = num_plots/num_rows+1
    for i in xrange(len(spec_templates)):
        plt.subplot(num_cols,num_rows,i+1)
        # Transposed and row-reversed — presumably so frequency increases
        # upward in the rendered image; verify against the data layout.
        plt.imshow(spec_templates[i].T[::-1],interpolation='nearest')
    plt.savefig('aar1_spec_templates_%d.png' % num_mix)
    # `templates` is stored under the default key 'arr_0' (positional savez arg).
    np.savez('aar1_templates_%d.npz' % num_mix, templates)
    np.save('aar1_affinities_%d.npy' % num_mix, bem.affinities)


# Enumerate the test utterances once and cache both the file-index list and
# the per-utterance detection lengths so later passes can size their score
# arrays without re-scanning the corpus.
test_path = '/home/mark/Template-Speech-Recognition/Data/Test/'
test_file_indices = gtrd.get_data_files_indices(test_path)
test_example_lengths = gtrd.get_detect_lengths(test_file_indices, test_path)

for out_name, payload in (("data/test_example_lengths.npy", test_example_lengths),
                          ("data/test_file_indices.npy", test_file_indices)):
    np.save(out_name, payload)

import collections

# Figure-of-merit scores accumulated per mixture size in the loops below.
FOMS = collections.defaultdict(list)

# Reload the cached background model and test metadata.  The background
# probabilities are clipped into [.1, .4], matching the clipping used
# elsewhere in this pipeline.
test_path = "/home/mark/Template-Speech-Recognition/Data/Test/"
bgd = np.clip(np.load("data/aar_bgd_mel.npy"), .1, .4)
test_example_lengths = np.load("data/test_example_lengths.npy")
test_file_indices = np.load("data/test_file_indices.npy")

for num_mix in num_mix_params:
    # Clip template probabilities into (.01, .99) — presumably to keep
    # log-likelihood scores finite; confirm against the scoring code.
    templates = tuple(np.clip(T,.01,.99) for T in (np.load('aar1_templates_%d.npz' % num_mix))['arr_0'])
    # One row per test utterance, padded 2 frames past the longest one.
    detection_array = np.zeros((test_example_lengths.shape[0],
                            test_example_lengths.max() + 2),dtype=np.float32)
    # NOTE(review): the loop body appears truncated here — the detection
    # pass that should follow is missing (compare the fuller copy of this
    # loop near the end of the chunk).
def get_params(
    args,
    sample_rate=16000,
    num_window_samples=320,
    num_window_step_samples=80,
    fft_length=512,
    kernel_length=7,
    freq_cutoff=3000,
    use_mel=False,
):
    """Load (or compute and cache) the experiment configuration.

    Builds the spectrogram and edge-map parameter objects and loads the
    train/test file indices and per-utterance detection lengths from the
    ``data_parts/`` cache, regenerating any missing cache file via ``gtrd``.

    Parameters
    ----------
    args : argparse.Namespace-like
        Must provide ``use_mel``, ``edgeMapBlockLength``,
        ``edgeMapSpreadLength`` and ``edgeMapThreshold``.
    sample_rate, num_window_samples, num_window_step_samples, fft_length,
    kernel_length, freq_cutoff : int
        Spectrogram settings, forwarded to ``gtrd.SpectrogramParameters``.
    use_mel : bool
        Unused; ``args.use_mel`` is what actually controls the mel option
        (kept for backward compatibility with existing callers).

    Returns
    -------
    tuple
        (spectrogram_params, edgemap_params, root_path, utterances_path,
         file_indices, num_mix_params, test_path, train_path,
         train_example_lengths, train_file_indices,
         test_example_lengths, test_file_indices)
    """
    # we get the basic file paths right here
    # TODO: make this system adaptive
    root_path = "/home/mark/Template-Speech-Recognition/"
    utterances_path = "/home/mark/Template-Speech-Recognition/Data/Train/"
    try:
        file_indices = np.load("data_parts/train_file_indices.npy")
    except IOError:
        # Cache miss only — a bare `except:` here previously swallowed
        # every error (including KeyboardInterrupt) and silently recomputed.
        file_indices = gtrd.get_data_files_indices(utterances_path)
        np.save("data_parts/train_file_indices.npy", file_indices)

    num_mix_params = [1, 2, 3, 5, 7, 9]

    test_path = "/home/mark/Template-Speech-Recognition/Data/Test/"
    train_path = "/home/mark/Template-Speech-Recognition/Data/Train/"

    try:
        test_example_lengths = np.load("data_parts/test_example_lengths.npy")
        test_file_indices = np.load("data_parts/test_file_indices.npy")
    except IOError:
        test_file_indices = gtrd.get_data_files_indices(test_path)
        test_example_lengths = gtrd.get_detect_lengths(test_file_indices, test_path)
        np.save("data_parts/test_example_lengths.npy", test_example_lengths)
        np.save("data_parts/test_file_indices.npy", test_file_indices)

    try:
        train_example_lengths = np.load("data_parts/train_example_lengths.npy")
        train_file_indices = np.load("data_parts/train_file_indices.npy")
    except IOError:
        train_file_indices = gtrd.get_data_files_indices(train_path)
        train_example_lengths = gtrd.get_detect_lengths(train_file_indices, train_path)
        np.save("data_parts/train_example_lengths.npy", train_example_lengths)
        np.save("data_parts/train_file_indices.npy", train_file_indices)

    return (
        gtrd.SpectrogramParameters(
            # BUG FIX: the keyword parameters above were previously ignored —
            # the literals were re-hardcoded here, so passing e.g.
            # sample_rate=8000 had no effect.  Forward the parameters
            # (defaults are identical, so existing callers are unaffected).
            sample_rate=sample_rate,
            num_window_samples=num_window_samples,
            num_window_step_samples=num_window_step_samples,
            fft_length=fft_length,
            kernel_length=kernel_length,
            freq_cutoff=freq_cutoff,
            use_mel=args.use_mel,
        ),
        gtrd.makeEdgemapParameters(
            block_length=args.edgeMapBlockLength,
            spread_length=args.edgeMapSpreadLength,
            threshold=args.edgeMapThreshold,
        ),
        root_path,
        utterances_path,
        file_indices,
        num_mix_params,
        test_path,
        train_path,
        train_example_lengths,
        train_file_indices,
        test_example_lengths,
        test_file_indices,
    )
                                                                                       file_indices,
                                                                                       syllable,
                                                                                       
                                                                                       num_examples=-1,
                                                                                       verbose=True)


# Background model: clip probabilities away from 0 and 1 for numerical
# stability, then persist it for later runs.
clipped_bgd = np.clip(avg_bgd.E, .1, .4)
np.save(tmp_data_path + 'clipped_bgd_102012.npy', clipped_bgd)

padded_examples, lengths = et.extend_examples_to_max(
    clipped_bgd, syllable_examples, return_lengths=True)

aar_template, aar_registered = et.register_templates_time_zero(
    syllable_examples, min_prob=.01)

test_example_lengths = gtrd.get_detect_lengths(data_path + 'Test/')
np.save(tmp_data_path + 'test_example_lengths_102012.npy', test_example_lengths)

# One row per test utterance, with two columns of padding past the longest.
detection_array = np.zeros(
    (test_example_lengths.shape[0], test_example_lengths.max() + 2),
    dtype=np.float32)

linear_filter, c = et.construct_linear_filter(aar_template, clipped_bgd)

# need to state the syllable we are working with
syllable = np.array(['aa', 'r'])


# NOTE(review): this call is garbled — `Slengths)` below looks like the tail
# of a longer identifier (e.g. `detection_lengths)`) from a broken file
# concatenation, so the argument list here is incomplete and will not run.
detection_array,example_start_end_times, detection_lengths = gtrd.get_detection_scores(data_path+'Test/',
                                                                                         detection_array,
                                                           Slengths)
    # NOTE(review): duplicate of the plotting/saving fragment from the top of
    # this chunk; its enclosing `def` is not visible here (broken paste), so
    # this indented block is a syntax error at file scope as it stands.
    num_plots = len(spec_templates)
    num_rows = 2
    # Integer division under Python 2 (`xrange` is used below); a float here
    # would break plt.subplot under Python 3 — TODO confirm interpreter.
    num_cols = num_plots/num_rows+1
    for i in xrange(len(spec_templates)):
        plt.subplot(num_cols,num_rows,i+1)
        plt.imshow(spec_templates[i].T[::-1],interpolation='nearest')
    plt.savefig('aar1_spec_templates_%d.png' % num_mix)
    # Stored under the default savez key 'arr_0' (positional argument).
    np.savez('aar1_templates_%d.npz' % num_mix, templates)
    np.save('aar1_affinities_%d.npy' % num_mix, bem.affinities)


# Enumerate the training utterances once and cache the file-index list and
# per-utterance detection lengths for subsequent runs.
train_path = '/home/mark/Template-Speech-Recognition/Data/Train/'
train_file_indices = gtrd.get_data_files_indices(train_path)
train_example_lengths = gtrd.get_detect_lengths(train_file_indices, train_path)

for out_name, payload in (("data/train_example_lengths.npy", train_example_lengths),
                          ("data/train_file_indices.npy", train_file_indices)):
    np.save(out_name, payload)

import collections

# Figure-of-merit scores keyed by mixture-model size.
FOMS = collections.defaultdict(list)
for num_mix in num_mix_params:
    # Load the num_mix-component templates saved earlier (default savez key).
    templates = (np.load('aar1_templates_%d.npz' % num_mix))['arr_0']
    # One row per training utterance, 2 frames of padding past the longest.
    detection_array = np.zeros((train_example_lengths.shape[0],
                            train_example_lengths.max() + 2),dtype=np.float32)
    linear_filters_cs = et.construct_linear_filters(templates,
                                                    bgd)
    # NOTE(review): `linear_filters_cs[:][0]` is just `linear_filters_cs[0]`
    # (the `[:]` slice is a no-op copy), so only the FIRST element is saved —
    # if this is a list of (filter, c) pairs, [f for f, c in ...] was likely
    # intended; confirm against construct_linear_filters.  Also np.savez
    # appends '.npz', so these land at e.g. 'data/linear_filter_aar_1.npy.npz'.
    np.savez('data/linear_filter_aar_%d.npy'% num_mix,linear_filters_cs[:][0])
    np.savez('data/c_aar_%d.npy'%num_mix,np.array(linear_filters_cs[:][1]))
    syllable = np.array(['aa','r'])
    # NOTE(review): this call continues past the end of the visible chunk.
    detection_array,example_start_end_times, detection_lengths = gtrd.get_detection_scores_mixture(train_path,