def get_detections(neg_E,mean_template,bg_len,mean_bgd,
                   edge_feature_row_breaks,edge_orientations,
                   spread_length=3,abst_threshold=.0001*np.ones(8)):
    """Slide mean_template across neg_E and score every window.

    Returns a list of (score, start_frame) pairs where the score is the
    template log-likelihood P plus the background correction C."""
    num_detections = neg_E.shape[1] - mean_template.shape[1]
    detections = []
    for d in xrange(num_detections):
        E_segment = neg_E[:,d:d+mean_template.shape[1]].copy()
        esp.threshold_edgemap(E_segment,.30,edge_feature_row_breaks,abst_threshold=abst_threshold)
        esp.spread_edgemap(E_segment,edge_feature_row_breaks,edge_orientations,spread_length=spread_length)
        if d > bg_len:
            # estimate the background from the bg_len frames preceding the
            # window, clipped to [.01,.4]
            bg = np.clip(np.mean(neg_E[:,d-bg_len:d],axis=1),.01,.4)
        else:
            bg = mean_bgd.copy()
        P,C = tt.score_template_background_section(mean_template,
                                                   bg,E_segment)
        detections.append((P+C,d))
    return detections
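
# minimal usage sketch: assuming neg_E, mean_template, bg_len, mean_bgd and
# the edge metadata are set up as elsewhere in this file, the best window is
# just the highest-scoring (score, start_frame) pair
detections = get_detections(neg_E, mean_template, bg_len, mean_bgd,
                            edge_feature_row_breaks, edge_orientations)
best_score, best_start = max(detections)
print "best detection at frame", best_start, "with score", best_score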
 # cluster means clipped to [.05,.95] and truncated to the rounded mean length
 p_components = []
 b_components = []
 for component_id in xrange(p_num_components):
     p_sel = p_cluster_set[train_mask_id][component_id]
     p_component_length = int(p_lengths[p_train_masks[train_mask_id]][p_sel].mean() + .5)
     p_components.append(
         np.clip(p_examples5[p_train_masks[train_mask_id]][p_sel].mean(0)[:,:p_component_length],
                 .05,.95))
 for component_id in xrange(b_num_components):
     b_sel = b_cluster_set[train_mask_id][component_id]
     b_component_length = int(b_lengths[b_train_masks[train_mask_id]][b_sel].mean() + .5)
     b_components.append(
         np.clip(b_examples5[b_train_masks[train_mask_id]][b_sel].mean(0)[:,:b_component_length],
                 .05,.95))
 p_components = tuple(p_components)
 b_components = tuple(b_components)
 # dev-set examples are those held out by the training mask
 p_dev_exs = p_examples3[np.logical_not(p_train_masks[train_mask_id])]
 p_dev_bgs = p_bgs[np.logical_not(p_train_masks[train_mask_id])]
 p_classify_p = np.array([
     max(sum(tt.score_template_background_section(p_comp,p_dev_bg,p_dev_ex[:,:p_comp.shape[1]]))
         for p_comp in p_components)
     for p_dev_bg, p_dev_ex in zip(p_dev_bgs, p_dev_exs)])
 b_classify_p = np.array([
     max(sum(tt.score_template_background_section(b_comp,p_dev_bg,p_dev_ex[:,:b_comp.shape[1]]))
         for b_comp in b_components)
     for p_dev_bg, p_dev_ex in zip(p_dev_bgs, p_dev_exs)])
 # note: component_id below is whatever value the cluster loop above left it at
 b_dev_exs = b_examples3[b_train_masks[train_mask_id]][np.logical_not(b_cluster_set[train_mask_id][component_id])]
 b_dev_bgs = b_bgs[b_train_masks[train_mask_id]][np.logical_not(b_cluster_set[train_mask_id][component_id])]
 p_classify_b = np.array([
     max(sum(tt.score_template_background_section(p_comp,b_dev_bg,b_dev_ex[:,:p_comp.shape[1]]))
         for p_comp in p_components)
     for b_dev_bg, b_dev_ex in zip(b_dev_bgs, b_dev_exs)])
 b_classify_b = np.array([
     max(sum(tt.score_template_background_section(b_comp,b_dev_bg,b_dev_ex[:,:b_comp.shape[1]]))
         for b_comp in b_components)
     for b_dev_bg, b_dev_ex in zip(b_dev_bgs, b_dev_exs)])
 p_errors = np.sum(p_classify_p < b_classify_p)/float(p_classify_p.shape[0])
 b_errors = np.sum(p_classify_b > b_classify_b)/float(b_classify_b.shape[0])
 p_error_rates[p_cluster_id,b_cluster_id,
               train_mask_id] = p_errors
 b_error_rates[p_cluster_id,b_cluster_id,
               train_mask_id] = b_errors
 print "p_errors:",p_errors
# setup statement for timeit (the tt import path below is an assumption about
# this project's layout)
timeit_import_statement = """
import numpy as np
import template_speech_rec.test_template as tt
num_features = 384
length = 20
E_test = (np.random.rand(length*num_features).reshape(num_features,length) > .6).astype(np.uint8)
bg_test = (np.random.rand(num_features)*.3 + .1).astype(np.float32)
template_test = (np.random.rand(num_features*length).reshape(num_features,length)*.9 + .05).astype(np.float32)
"""

timeit_run_statement = """
length = min(E_test.shape[1],template_test.shape[1])
sum(tt.score_template_background_section(template_test[:,:length],bg_test,E_test[:,:length]))"""

num_features = 384
length = 20
E_test = (np.random.rand(length*num_features).reshape(num_features,length) > .6).astype(np.uint8)
bg_test = (np.random.rand(num_features)*.3 + .1).astype(np.float32)
template_test = (np.random.rand(num_features*length).reshape(num_features,length)*.9 + .05).astype(np.float32)
out1 = sum(tt.score_template_background_section(template_test,bg_test,E_test))

log_template = np.log(template_test.T)
log_invtemplate = np.log(1-template_test.T)
E_test_transpose = E_test.T
out2 = tt.score_template_background_section_quantizer(log_template,
                                                      log_invtemplate,
                                                      bg_test,
                                                      E_test_transpose)
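
# sanity check (an assumption on my part: the quantizer variant should agree
# with the unquantized scorer up to quantization error, hence a loose
# tolerance rather than exact equality)
print "out1 =", out1, " out2 =", out2
print "close:", np.allclose(out1, out2, atol=1e-2)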



import timeit
t = timeit.Timer(timeit_run_statement,timeit_import_statement)
# repeat returns the total seconds for each of 5 runs of 10000 executions
print t.repeat(5,10000)
all_def_templates = np.empty((def_range.shape[0],
                            tpm.def_template.shape[0],
                            tpm.def_template.shape[1]))
for d in xrange(def_range.shape[0]):
    tpm.get_def_template(def_range[d])
    all_def_templates[d] = tpm.def_template.copy()
    

optimal_detection_scores = -np.inf * np.ones((len(tuning_patterns_context),def_range.shape[0]))
optimal_detection_idx = np.zeros((len(tuning_patterns_context),def_range.shape[0]))
for c_id in xrange(len(tuning_patterns_context)):
    print c_id
    cur_context = tuning_patterns_context[c_id]
    num_detections = cur_context.shape[1] - tpm.length_range[1]
    win_length = tpm.length_range[1]
    for d in xrange(num_detections):
        E_window = cur_context[:,d:d+win_length].copy()
        esp.threshold_edgemap(E_window,.30,edge_feature_row_breaks,report_level=False,abst_threshold=abst_threshold)
        esp.spread_edgemap(E_window,edge_feature_row_breaks,edge_orientations,spread_length=3)
        # base detection: score every deformation, indexing the score arrays
        # by position within def_range rather than by the (possibly negative)
        # deformation value itself
        for def_idx, deformation in enumerate(def_range):
            def_template = all_def_templates[def_idx]
            P,C = tt.score_template_background_section(def_template,tpm.bg,E_window)
            score = P+C
            if score > optimal_detection_scores[c_id,def_idx]:
                optimal_detection_scores[c_id,def_idx] = score
                optimal_detection_idx[c_id,def_idx] = d

plosive_lengths = np.load(data_path+plosive+'class_examples_lengths.npy')
plosive_bgs = np.load(data_path+plosive+'class_examples_bgs.npy')
num_validate = plosive_train_mask[0].shape[0]-np.sum(plosive_train_mask[0])
num_folds = plosive_train_mask.shape[0]
classifier_output = np.empty((num_folds,num_validate,
                              len(classifiers),num_classifier_models),
                             dtype=np.float32)
for classifier_id, classifier in enumerate(classifiers):
    print classifier
    # 1 template classification
    template0 = np.load(exp_path+classifier+'0template1_1_0.npy')
    t_len = template0.shape[1]
    valid_mask = np.logical_not(plosive_train_mask[0])
    classifier_output[0][:,classifier_id,0] = np.array([
        sum(tt.score_template_background_section(
            template0[:,:min(t_len,l)],
            bg,
            E[:,:min(t_len,l)]))/np.float32(min(t_len,l))
        for E,l,bg in zip(plosive_examples[valid_mask],
                          plosive_lengths[valid_mask],
                          plosive_bgs[valid_mask])]).astype(np.float32)[:]
    # 2 template classification
    template0 = np.load(exp_path+classifier+'0template2_1_0.npy')
    template1 = np.load(exp_path+classifier+'0template2_1_1.npy')
    templates = (template0,template1)
    t_lens = (template0.shape[1], template1.shape[1])
    classifier_output[0][:,classifier_id,1] = np.array([
        max([sum(tt.score_template_background_section(
            template[:,:min(t_len,l)],
            bg,
            E[:,:min(t_len,l)]))/np.float32(min(t_len,l))
             for template,t_len in zip(templates,t_lens)])
        for E,l,bg in zip(plosive_examples[valid_mask],
                          plosive_lengths[valid_mask],
                          plosive_bgs[valid_mask])]).astype(np.float32)[:]
"""


# get backgrounds in order to compute tests
template_height = 366
num_patterns = registered_templates.shape[0]
bgds_mat = np.zeros((template_height,num_patterns))

template_length = 32
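
# bgds_mat above is only allocated; as a placeholder fill each column with the
# flat .4 edge-frequency background used elsewhere in this file (an assumption
# -- the real backgrounds would be estimated from the data)
bgds_mat[:] = .4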


P,C = tt.score_template_background_section(mean_template,bgds_mat[:,0],
                                           registered_templates[0])


# get negative examples for testing
neg_bgd_mat = np.zeros((template_height,exp.num_data))
neg_patterns = np.zeros((exp.num_data,template_height,template_length))
cur_bgd = 0
for path_idx in range(exp.num_data):
    if path_idx % 10 == 0:
        print "on path", path_idx
    phns = exp.get_phns(path_idx)
    # check if this datum has what we need (placeholder condition)
    if True:
        s = exp.get_s(path_idx)
        E,edge_feature_row_breaks,\
            edge_orientations = exp.get_edgemap_no_threshold(s)

# random binary edge-map window for the fast_like test below (the left-hand
# side of this line was lost; the E_window name and the > .5 threshold are
# assumptions, and T, T_num_rows, T_num_cols, E_num_cols are assumed to be
# defined above)
E_window = (np.random.rand(T_num_rows*
                           E_num_cols) > .5).reshape(T_num_rows,E_num_cols).astype(np.uint8)
num_detections = 1
pad_front = 0
pad_back = 0


W_bg = np.empty(T_num_cols,dtype=np.float64)
detect_sums = np.zeros(num_detections, dtype=np.float64)
bg = np.maximum(.05,
                 np.random.rand(T_num_rows).astype(np.float64)*.4)

C_inv_long = np.empty((T_num_rows,T_num_cols),dtype=np.float64)
W = np.empty((T_num_rows,T_num_cols),dtype=np.float64)


P,C = tt.score_template_background_section(T,bg,E_window,front_bgd_pad=0,back_bgd_pad=0)

C2 = fast_like.fast_like(T,
                         bg,
                         C_inv_long,
                         W,
                         T_num_rows,
                         T_num_cols,
                         E_window,
                         E_num_cols,
                         num_detections,
                         pad_front,
                         W_bg,
                         detect_sums)

detect_sums[:] = 0.
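
# rough consistency check (an assumption: fast_like returns the same
# background-corrected term C as the reference scorer; its exact return
# semantics are not documented in this excerpt)
print "reference C:", C, " fast_like C2:", C2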
 def __init__(self,base_object, coarse_factor=2,
              coarse_template_threshold = .7,
              bg = None):
     self.coarse_factor = coarse_factor
     if isinstance(base_object, np.ndarray):
         # this means that we just have a template which will be a
         # 2-d ndarray, our function assumes this
         template_height, template_length = base_object.shape
         self.type = "template"
         self.window = np.array((template_height,template_length))
         self.template = base_object
         self.score = lambda E_window,bg:\
             sum(tt.score_template_background_section(self.template,
                                                      bg,E_window))
         # use the supplied bg, else a uniform background with .4 as the edge
         # frequency (self.bg was otherwise never set on this branch)
         if bg is not None:
             self.bg = bg
         else:
             self.bg = .4 * np.ones(template_height)
         self.score_no_bg = lambda E_window:\
             sum(tt.score_template_background_section(self.template,
                                                      self.bg,
                                                      E_window))
         self.coarse_template = get_coarse_segment(self.template,
                                                   coarse_type="avg",
                                                   coarse_factor = self.coarse_factor)
         self.coarse_length = self.coarse_template.shape[1]
         self.coarse_template_mask = self.coarse_template > .7
         self.coarse_score_like = lambda E_window,bg:\
             sum(tt.score_template_background_section(self.coarse_template,
                         bg,
                         get_coarse_segment(E_window,
                                  coarse_type='max',
                                  coarse_factor=self.coarse_factor)))
         self.coarse_score_like_no_bg = lambda E_window:\
             sum(tt.score_template_background_section(self.coarse_template,
                         self.bg,
                         get_coarse_segment(E_window,
                                  coarse_type='max',
                                  coarse_factor=self.coarse_factor)))
         self.coarse_score_count = lambda E_window:\
             np.sum(get_coarse_segment(E_window,
                            coarse_type='max',
                            coarse_factor=self.coarse_factor)[self.coarse_template_mask])
     elif isinstance(base_object, TwoPartModel):
         template_height, template_length = base_object.bg.shape[0],base_object.length_range[1]
         if bg is not None:
             self.bg = bg
         else:
             self.bg = .4 * np.ones(template_height)
         self.type = "TwoPartModel"
         self.window = np.array((template_height,template_length))
         self.template = base_object
         self._score_sub_no_bg = lambda E_window,t_id:\
             sum(tt.score_template_background_section(self.template.def_templates[t_id],
                                                      self.template.bg,E_window))
         self.score_no_bg = lambda E_window:\
             max([ self._score_sub_no_bg(E_window,t_id) for t_id in xrange(self.template.def_templates.shape[0])])
         # just create a uniform background with .4 as the edge frequency
         self.coarse_template = get_coarse_segment(self.template.base_template,
                                                   coarse_type="avg",
                                                   coarse_factor = self.coarse_factor)
         self.coarse_length = self.coarse_template.shape[1]
         self.coarse_template_mask = self.coarse_template > .7
         self.coarse_score_like_no_bg = lambda E_window:\
             sum(tt.score_template_background_section(self.coarse_template,
                         self.bg,
                         get_coarse_segment(E_window,
                                  coarse_type='max',
                                  coarse_factor=self.coarse_factor)[:,:self.coarse_length]))
         self.coarse_score_count = lambda E_window:\
             np.sum(get_coarse_segment(E_window,
                            coarse_type='max',
                            coarse_factor=self.coarse_factor)[:,:self.coarse_length][self.coarse_template_mask])
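
# a minimal sketch of get_coarse_segment, consistent with how it is called
# above (the project's real implementation is not shown here, so the pooling
# behavior is an assumption): pool the time axis by coarse_factor, averaging
# for probability templates ("avg") and taking the max for binary edge maps
def get_coarse_segment(segment, coarse_type="avg", coarse_factor=2):
    height, length = segment.shape
    coarse_length = length // coarse_factor
    coarse = np.empty((height, coarse_length), dtype=np.float64)
    for i in xrange(coarse_length):
        block = segment[:, i*coarse_factor:(i+1)*coarse_factor]
        if coarse_type == "avg":
            coarse[:, i] = block.mean(axis=1)
        else:
            coarse[:, i] = block.max(axis=1)
    return coarse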
    data_iter.reset_exp()
    for datum_id in xrange(data_iter.num_data):
        if datum_id % 10 == 0:
            print "working on example", datum_id
        if data_iter.next(wait_for_positive_example=True,
                          compute_pattern_times=True,
                          max_template_length=classifier.window[1]):
            pattern_times = data_iter.pattern_times
            num_detections = data_iter.E.shape[1] - liy_template.shape[1]
            num_frames += data_iter.E.shape[1]
            scores = -np.inf * np.ones(num_detections)
            for d in xrange(num_detections):
                E_segment = data_iter.E[:,d:d+liy_template.shape[1]].copy()                
                esp.threshold_edgemap(E_segment,.30,edge_feature_row_breaks,report_level=False,abst_threshold=abst_threshold)
                esp.spread_edgemap(E_segment,edge_feature_row_breaks,edge_orientations,spread_length=3)
                scores[d] = sum(tt.score_template_background_section(liy_template,
                                                                     mean_background,E_segment))
            # now we get the indices sorted
            indices = remove_overlapping_examples(np.argsort(scores),
                                                  liy_template.shape[1],
                                                  int(allowed_overlap*liy_template.shape[1]))
            positives, negatives = get_pos_neg_scores(indices,pattern_times,
                                                      scores,classifier.window[1])
            all_positives.extend(positives)
            all_negatives.extend(negatives)
        else:
            break
    liy_roc_full,liy_roc_vals_full = get_roc(np.sort(all_positives)[::-1],
                                             np.sort(all_negatives)[::-1],
                                             num_frames)
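
# remove_overlapping_examples and get_pos_neg_scores are used above but not
# defined in this excerpt; here is a minimal sketch of the former, under the
# assumption that it keeps the best-scoring detections greedily and drops any
# index that comes too close to one already kept:
def remove_overlapping_examples(sorted_idx, window_length, allowed_overlap):
    kept = []
    min_separation = window_length - allowed_overlap
    for idx in sorted_idx[::-1]:  # argsort is ascending, so reverse for best-first
        if all(abs(idx - k) >= min_separation for k in kept):
            kept.append(idx)
    return kept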

     else:
         cur_bgd = mean_background
     if frame_idx >= 2:
         if j0_scores[frame_idx-1] >= np.maximum(j0_threshold,
                                                 np.maximum(j0_scores[frame_idx-2],
                                                            j0_scores[frame_idx])):
             j0_maxima.append((frame_idx,E_segment,cur_bgd))
             j0_detections[frame_idx-1] = 1
 j_detections = -np.inf * np.ones(num_detections)
 for frame_idx,E_segment,cur_bgd in j0_maxima:
     cur_max_score = -np.inf
     for fd in xrange(-parts_model.front_def_radius,parts_model.front_def_radius):
         for bd in xrange(-parts_model.back_def_radius,parts_model.back_def_radius):
             dt = deformed_templates[parts_model.front_def_radius+fd,
                        parts_model.back_def_radius+bd].copy()
             P,C =  tt.score_template_background_section(dt,cur_bgd,E_segment)
             cur_max_score = np.maximum(cur_max_score,P+C)
     j_detections[frame_idx] = cur_max_score
 print "Computed Scores"
 detection_list = [ (j_detections[d],d) for d in xrange(j_detections.shape[0])]
 # process the strongest detections first so they suppress weaker overlapping ones
 detection_list = sorted(detection_list, reverse=True)
 detect_bool = np.empty(num_detections,dtype=bool)
 detect_bool[:] = False
 detect_bool[j_detections > -np.inf] = True
 # removing the overlapping detections
 for val,loc in detection_list:
     if detect_bool[loc]:
         detect_bool[loc+1:loc+parts_model.deformed_max_length] = False
 pattern_times = exp.get_pattern_times(phns,phn_times,s)
 maxima_idx = np.arange(num_detections)[j_detections>-np.inf]
 # see if we pick up the patterns 
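 # a minimal check along the lines of the comment above, assuming each entry
 # of pattern_times is a (start_frame, end_frame) pair (its format is not
 # shown in this excerpt)
 for pattern_start, pattern_end in pattern_times:
     hits = maxima_idx[(maxima_idx >= pattern_start) & (maxima_idx <= pattern_end)]
     print "pattern", (pattern_start, pattern_end), "picked up:", hits.shape[0] > 0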
# notes for the data provenance from the simulation:
#
# 1. /p/ itself
# 2. need to redo the feature extraction with the features being less spread;
#    it would make sense to do this with the whole training data this time --
#    simply let it run overnight
# 3. want to make sure that everything is in order


p_bgs = np.load(root_path+'Data/p_bgs.npy')

p_p_scores = np.zeros((phn_examples.shape[0],len(template_versions)))
for t_id, t in enumerate(template_versions):
    for p_id,p_ex in enumerate(phn_examples):
        p_p_scores[p_id,t_id] = sum(tt.score_template_background_section(t,p_bgs[p_id],p_ex[:,:t.shape[1]]))

# going to do a regression test based on length and look at
# the residuals from that
p_lengths_regress = np.vstack((np.ones(phn_lengths.shape[0]),
                               phn_lengths,
                               phn_lengths**2)).T

# index of the best-scoring template version for each example
p_p_max_idx = np.argmax(p_p_scores, axis=1)

p_p_least_squares = np.linalg.solve(np.dot(p_lengths_regress.T ,
                                           p_lengths_regress),
                                    np.dot(p_lengths_regress.T,
                                           p_p_max_idx))
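
# the note above calls for looking at the residuals, so compute them from the
# fitted coefficients
p_p_fitted = np.dot(p_lengths_regress, p_p_least_squares)
p_p_residuals = p_p_max_idx - p_p_fitted
print "residual mean:", p_p_residuals.mean(), " residual std:", p_p_residuals.std()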

# try to build up the best abst threshold vector
cur_abst = abst_threshold.copy()
spread_length=5
for edge in xrange(num_edges):
    for thresh in xrange(abst_threshold_check_range.shape[0]):
        print "Considering edge",edge," and threshold",abst_threshold_check_range[thresh]
        cur_abst[edge] = abst_threshold_check_range[thresh]
        for pattern_id in xrange(all_raw_patterns_context_array.shape[0]):
            cur_section = all_raw_patterns_context_array[pattern_id].copy()
            scores = np.empty(cur_section.shape[1] - template_length)
            for t in xrange(cur_section.shape[1] - template_length):
                E_segment = cur_section[:,t:t+template_length].copy()
                esp.threshold_edgemap(E_segment,.30,edge_feature_row_breaks,abst_threshold=cur_abst)
                esp.spread_edgemap(E_segment,edge_feature_row_breaks,edge_orientations,spread_length=spread_length)
                P,C = tt.score_template_background_section(mean_template,bgd,E_segment)
                scores[t] = P+C
            thresholds_score[pattern_id,edge,thresh] = np.max(scores)
        # best performance for the choice of edge type
        mean_scores_edge = np.mean(thresholds_score,axis=0)[edge]
        best_thresh = np.argmax(mean_scores_edge)
        cur_abst[edge] = abst_threshold_check_range[best_thresh]
                
"""
>>> print cur_abst
[ 0.001  0.001  0.001  0.001  0.001  0.001  0.001  0.001]
"""
#
# very interesting: it appears that we might be better off with much lower
# edge thresholds
bgd = np.load('/home/mark/projects/Template-Speech-Recognition/Experiments/042212/mean_background042212.npy')


template_length = template_shape[1]
max_detections = -np.inf * np.ones(len(all_patterns_context))


for p_id in xrange(len(all_patterns_context)):
    print p_id
    p = all_patterns_context[p_id]
    num_detections = p.shape[1] - template_length
    for d in xrange(num_detections):
        E_segment = p[:,d:d+template_length].copy()
        esp.threshold_edgemap(E_segment,.30,edge_feature_row_breaks,report_level=False,abst_threshold=abst_threshold)
        esp.spread_edgemap(E_segment,edge_feature_row_breaks,edge_orientations,spread_length=3)
        P,C = tt.score_template_background_section(mean_template,bgd,E_segment)
        cur_score = P+C
        if cur_score > max_detections[p_id]:
            max_detections[p_id] = cur_score


#
# want to see where the bad fits of the model are; mainly want to see if we
# can improve these with the parts model
#

max_detections_pairs = sorted([(max_detections[s],s) for s in xrange(max_detections.shape[0])])
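
# per the notes above about bad fits: the list is sorted ascending, so the
# lowest-scoring (worst-fitting) patterns sit at the front
print "ten worst-fitting patterns (score, index):"
print max_detections_pairs[:10]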

del all_patterns_context