import numpy as np

import template_speech_rec.get_train_data as gtrd
import template_speech_rec.estimate_template as et

# the syllable we are working with
syllable = np.array(['aa','r'])
avg_bgd, syllable_examples, backgrounds = gtrd.get_syllable_examples_backgrounds_files(train_data_path,
                                                                                       file_indices,
                                                                                       syllable,
                                                                                       num_examples=-1,
                                                                                       verbose=True)


# clip the average background edge probabilities away from 0 and 1
clipped_bgd = np.clip(avg_bgd.E, .1, .4)
np.save(tmp_data_path+'clipped_bgd_102012.npy', clipped_bgd)

# pad every example out to the maximum length using the clipped background
padded_examples, lengths = et.extend_examples_to_max(clipped_bgd, syllable_examples,
                                                     return_lengths=True)

# register the padded examples at time zero and estimate the template;
# pass the lengths as well, matching the call signature used below
aar_template, aar_registered = et.register_templates_time_zero(padded_examples, lengths, min_prob=.01)
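
# For intuition, a minimal sketch of what time-zero registration amounts to,
# assuming (an assumption, not this codebase's implementation) that the
# start-aligned examples are averaged pointwise and floored/capped by min_prob:
_sketch_template = np.clip(padded_examples.mean(axis=0), .01, 1. - .01)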

# lengths of the test utterances, used to size the detection array
test_example_lengths = gtrd.get_detect_lengths(data_path+'Test/')

np.save(tmp_data_path+'test_example_lengths_102012.npy',test_example_lengths)


# one row of detection scores per test utterance, padded past the longest one
detection_array = np.zeros((test_example_lengths.shape[0],
                            test_example_lengths.max() + 2),dtype=np.float32)

# construct the linear filter (template/background log-odds) and its bias
# constant from the registered template and the clipped background
linear_filter,c = et.construct_linear_filter(aar_template,
                                             clipped_bgd)
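
# Hedged sketch of how such a filter fills one row of the detection array; the
# actual detection routine is not shown in this file, so everything here is
# synthetic: w_ and c_ stand in for linear_filter and c, and E_ for one test
# utterance's binary feature map.
w_ = np.random.randn(48, 30)        # hypothetical filter: 48 features x 30 frames
c_ = -1.0                           # hypothetical bias constant
E_ = np.random.rand(48, 400) > .5   # hypothetical utterance: 48 features x 400 frames
scores = np.array([np.sum(w_ * E_[:, t:t+30]) + c_
                   for t in range(E_.shape[1] - 30 + 1)])
# scores[t] plays the role of detection_array[i, t]: the log-odds score for a
# detection window starting at frame t.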
# restate the syllable we are working with
syllable = np.array(['aa','r'])



# `parts` is assumed to have been loaded earlier in the session; reorganize the
# part probabilities into log-probability blocks for fast filtering
log_part_blocks, log_invpart_blocks = gtrd.reorg_parts_for_fast_filtering(parts)
log_part_blocks = log_part_blocks.astype(np.float32)
log_invpart_blocks = log_invpart_blocks.astype(np.float32)
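
# Why both log-probability blocks are kept, as a small self-contained sketch
# (P_ and X_ are hypothetical, not from this codebase): the Bernoulli
# log-likelihood of binary features X under part probabilities P splits as
# sum(X*log P + (1-X)*log(1-P)), so precomputing log P and log(1-P) turns part
# filtering into two correlations over the feature map.
P_ = np.random.uniform(.1, .9, size=(5, 5))  # hypothetical part probabilities
X_ = np.random.rand(5, 5) > .5               # hypothetical binary feature patch
loglik = np.sum(X_ * np.log(P_) + (1 - X_) * np.log1p(-P_))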

# get the basic examples

aar_examples = np.load(tmp_data_path+'aar_examples.npy')
aar_lengths = np.load(tmp_data_path + 'aar_lengths.npy')
clipped_bgd = np.load(tmp_data_path+'clipped_train_bgd.npy')


# register the loaded examples at time zero and estimate the template
aar_template, aar_registered = et.register_templates_time_zero(aar_examples,aar_lengths,min_prob=.01)


#test_example_lengths = gtrd.get_detect_lengths(data_path+'Test/')


import template_speech_rec.bernoulli_mixture as bm


#
# we now run the clustering procedure on these examples
#

aar_examples = np.load(tmp_data_path+'aar_examples.npy')
aar_lengths = np.load(tmp_data_path + 'aar_lengths.npy')
clipped_bgd = np.load(tmp_data_path+'clipped_train_bgd.npy')
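
# Hedged sketch of the clustering step. The interface of
# template_speech_rec.bernoulli_mixture is not shown in this file, so rather
# than guess its API, this is a minimal numpy EM for a Bernoulli mixture over
# the flattened examples; num_mix, the iteration count, and the clipping
# bounds are illustrative choices, not values from the experiment.
num_mix = 2
X = aar_examples.reshape(aar_examples.shape[0], -1).astype(np.float64)
n, d = X.shape
rng = np.random.RandomState(0)
weights = np.ones(num_mix) / num_mix
probs = np.clip(rng.uniform(.25, .75, size=(num_mix, d)), .01, .99)
for _ in range(100):
    # E-step: responsibilities from Bernoulli log-likelihoods
    log_lik = (X.dot(np.log(probs).T)
               + (1. - X).dot(np.log(1. - probs).T)
               + np.log(weights))
    log_lik -= log_lik.max(axis=1, keepdims=True)
    resp = np.exp(log_lik)
    resp /= resp.sum(axis=1, keepdims=True)
    # M-step: reweight components and re-estimate their probabilities
    weights = resp.sum(axis=0) / n
    probs = np.clip(resp.T.dot(X) / resp.sum(axis=0)[:, None], .01, .99)
# each row of probs, reshaped to an example's shape, is one cluster template
# analogous to aar_template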