import numpy as np
import cPickle

# et (template estimation) and gtrd (train/test data utilities) are the
# experiment modules imported earlier in the full script; old_data_path,
# tmp_data_path, data_path, num_mix, file_indices, and aar_template are
# likewise assumed to be defined above this section.

# the syllable we are working with
syllable = np.array(['aa','r'])

clipped_bgd = np.load(old_data_path+'clipped_bgd_102012.npy')

padded_examples = np.load(old_data_path+'aar_padded_examples_bgd.npy')

lengths = np.load(old_data_path+'aar_lengths.npy')


test_example_lengths = np.load(old_data_path+'test_example_lengths_102012.npy')


detection_array = np.zeros((test_example_lengths.shape[0],
                            test_example_lengths.max() + 2),dtype=np.float32)

linear_filter,c = et.construct_linear_filter(aar_template,
                                             clipped_bgd)
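
# note (an assumption about et.construct_linear_filter, not confirmed from
# its source here): a standard construction for such a filter under a
# Bernoulli template/background model is
#   W = log(T/(1-T)) - log(B/(1-B)),   c = sum(log((1-T)/(1-B))),
# so that sum(W*X) + c is the log-likelihood ratio of template vs. background
# for a binary feature map X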


detection_array,example_start_end_times, detection_lengths = gtrd.get_detection_scores(data_path+'Test/',
                                                                                         detection_array,
                                                                                         syllable,
                                                                                         linear_filter,c,verbose=True)

np.save(tmp_data_path+'detection_array_aar_1.npy',detection_array)
out = open(tmp_data_path+'example_start_end_times_aar_1.pkl','wb')
cPickle.dump(example_start_end_times,out)
out.close()

out = open(tmp_data_path+'detection_lengths_aar_1.pkl','wb')
cPickle.dump(detection_lengths,out)
out.close()
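
# minimal sketch of reloading the artifacts saved above in a later session
# (mirrors the save calls; same paths and filenames)
detection_array = np.load(tmp_data_path+'detection_array_aar_1.npy')
example_start_end_times = cPickle.load(open(tmp_data_path+'example_start_end_times_aar_1.pkl','rb'))
detection_lengths = cPickle.load(open(tmp_data_path+'detection_lengths_aar_1.pkl','rb'))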

detection_arrays = ()
for i in xrange(num_mix):
    detection_arrays += (np.load(tmp_data_path+'detection_array_aar_new%d_%d.npy' % (num_mix,i)),)


clipped_bgd = np.load(tmp_data_path+'clipped_bgd_102012.npy')


aar_ts = ()
cs = ()
LFs = ()
for i in xrange(num_mix):
    aar_ts += (np.load(tmp_data_path+'aar_templates_%d_%d.npy' % (num_mix,i)),)
    LF,c = et.construct_linear_filter(aar_ts[i],
                                      clipped_bgd)
    cs += (c,)
    LFs += (LF,)
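
# quick diagnostic sketch (assumes each LF is an ndarray and each c a scalar):
# report the shape of every mixture component's template and linear filter
for i in xrange(num_mix):
    print i, aar_ts[i].shape, LFs[i].shape, cs[i]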


# general plan
# example_start_end_times has a non-zero entry in the first utterance
# going to confirm that we have the right utterance
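
# hedged sanity check (assumes example_start_end_times holds one list of
# (start, end) pairs per test utterance, which is not confirmed here): find
# the first utterance that actually contains a labeled example
first_idx = next(i for i,times in enumerate(example_start_end_times) if len(times) > 0)
print first_idx, example_start_end_times[first_idx]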

# load the first test utterance's data: the 's' array, phone labels, and
# feature-label transitions
s = np.load(data_path + 'Test/'+file_indices[0]+'s.npy')
phns = np.load(data_path + 'Test/'+file_indices[0]+'phns.npy')
flts = np.load(data_path + 'Test/'+file_indices[0]+'feature_label_transitions.npy')

# don't seem to have the data on my local machine:
# something to check later
# now we want to look at the curves simultaneously
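
# hedged plotting sketch for viewing the curves simultaneously (assumes row 0
# of each per-component detection array corresponds to the first test
# utterance and that test_example_lengths[0] gives its valid length)
import matplotlib.pyplot as plt
for i in xrange(num_mix):
    plt.plot(detection_arrays[i][0,:test_example_lengths[0]],
             label='component %d' % i)
plt.legend()
plt.show()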