import template_speech_rec.bernoulli_mixture as bm

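# Fit a two-component Bernoulli mixture to the registered /aa r/ examples and
# run EM to a tolerance of 1e-6.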
bem = bm.BernoulliMixture(2,aar_registered)
bem.run_EM(.000001)



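# One row of detection scores per test utterance, with two extra columns of
# padding beyond the longest test example.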
detection_array = np.zeros((test_example_lengths.shape[0],
                            test_example_lengths.max() + 2),dtype=np.float32)

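# Build a (linear filter, constant) pair for each mixture template against the
# clipped background model.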
linear_filters_cs = et.construct_linear_filters(bem.templates,
                                                clipped_bgd)


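# Scan every test utterance for the target syllable and record the
# frame-by-frame filter responses.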
detection_array, example_start_end_times, detection_lengths = gtrd.get_detection_scores_mixture(
    data_path + 'Test/',
    detection_array,
    syllable,
    linear_filters_cs,
    verbose=True)


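# Persist the detection scores and per-example metadata for the ROC analysis
# further below.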
np.save(tmp_data_path+'detection_array_aar_2.npy',detection_array)
out = open(tmp_data_path+'example_start_end_times_aar_2.pkl','wb')
cPickle.dump(example_start_end_times,out)
out.close()

out = open(tmp_data_path+'detection_lengths_aar_2.pkl','wb')
cPickle.dump(detection_lengths,out)
out.close()

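# ROC-curve utilities used to evaluate the detection scores.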
import template_speech_rec.roc_functions as rf

# The call that produced parts_templates is cut off above; only its trailing
# arguments survive. Hypothetical sketch (recover_mixture_templates and
# parts_affinities are assumptions; num_mix, used below, is set in the missing code):
parts_templates = recover_mixture_templates(parts_affinities, parts_padded_examples, parts_lengths)
# Save each recovered parts template.
for i in xrange(num_mix):
    np.save(tmp_data_path+'parts_aar_template_%d_%d.npy' % (num_mix,i), parts_templates[i])
detection_array = np.zeros((parts_test_example_lengths.shape[0],
                            parts_test_example_lengths.max() + 2), dtype=np.float32)
linear_filters_cs = et.construct_linear_filters(parts_templates,
                                                waliji_bgd)
# Save the filter and the constant term of each (filter, c) pair.
for i in xrange(num_mix):
    np.save(cur_tmp_data_path+'linear_filter_aar_%d_%d.npy' % (num_mix,i),
            linear_filters_cs[i][0])
    np.save(cur_tmp_data_path+'c_aar_%d_%d.npy' % (num_mix,i),
            np.array(linear_filters_cs[i][1]))
syllable = np.array(['aa','r'])
detection_array, parts_example_start_end_times, parts_detection_lengths = gtrd.get_detection_scores_mixture(
    test_data_path,
    detection_array,
    syllable,
    linear_filters_cs,
    log_part_blocks=log_part_blocks,
    log_invpart_blocks=log_invpart_blocks,
    verbose=True)
np.save(cur_tmp_data_path+'parts_detection_array_aar_%d.npy' % num_mix,detection_array)
if num_mix == 2:
    out = open(cur_tmp_data_path+'parts_example_start_end_times_aar.pkl','wb')
    cPickle.dump(parts_example_start_end_times,out)
    out.close()
    out = open(cur_tmp_data_path+'parts_detection_lengths_aar.pkl','wb')
    cPickle.dump(parts_detection_lengths,out)
    out.close()
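# Take the maximum detection score inside a small window around each true
# syllable occurrence.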
window_start = -2
window_end = 2
max_detect_vals = rf.get_max_detection_in_syllable_windows(detection_array,
                                                           parts_example_start_end_times,