def make_average_datamask_from_mean(donor_dict, fmask = True):
    '''Compute the mean (f)mask vector over all spikes belonging to the
    chosen donor cluster.

    This is the data for the donor_id.amsk.1 file, used when computing the
    spike similarity measure to assess the quality of spike detection.

    Parameters
    ----------
    donor_dict : dict
        Donor description (donor_dict or hybdatadict). Keys read here:
        'donor_path', 'donor', 'donorcluid', 'donorcluster', 'numchannels'.
    fmask : bool, optional
        If True (default), read the float mask file (.fmask.1);
        otherwise read the binary mask file (.mask.1).

    Returns
    -------
    ndarray
        Per-feature mean of the masks of the selected spikes
        (one row of fmasks_full averaged over the selected spike rows).
    '''
    clufilename = donor_dict['donor_path'] + donor_dict['donor']+'_'+ donor_dict['donorcluid']+'.clu.1'
    clusters = klustersloader.read_clusters(clufilename)

    # Choose float-mask vs binary-mask file by extension.
    ext = '.fmask.1' if fmask else '.mask.1'
    fmaskfilename = donor_dict['donor_path'] + donor_dict['donor'] + ext

    [fmasks, fmasks_full] = klustersloader.read_masks(fmaskfilename, donor_dict['numchannels'])

    # Boolean vector, True at indices whose cluster label is in the
    # chosen donor cluster(s).
    selected_cluster_indices = np.in1d(clusters, donor_dict['donorcluster'])

    # Mean mask over the selected spikes (equivalent to
    # sum(selected rows) / count, but expressed directly).
    donor_id_amsk_data = np.mean(fmasks_full[selected_cluster_indices, :], axis=0)
    return donor_id_amsk_data
def create_average_hybrid_wave_spike(donor_dict, num_samples=20, num_channels=32):
    '''Compute the mean unfiltered spike waveform of the donor cluster.

    Averages the raw (.uspk.1) waveforms of all spikes whose cluster label
    belongs to the donor cluster. This is the data for the
    donor_id.msua.1 mean-spike file.

    Parameters
    ----------
    donor_dict : dict
        Donor description (donor_dict or hybdatadict). Keys read here:
        'donor_path', 'donor', 'donorcluid', 'donorcluster'.
    num_samples : int, optional
        Samples per spike waveform (default 20, the original hard-coded
        value).
    num_channels : int, optional
        Recording channels per spike (default 32, the original hard-coded
        value). NOTE(review): presumably this should agree with
        donor_dict['numchannels'] — confirm against callers.

    Returns
    -------
    ndarray
        (num_samples, num_channels) mean waveform of the selected spikes.
    '''
    clufilename = donor_dict['donor_path'] + donor_dict['donor']+'_'+ donor_dict['donorcluid']+'.clu.1'
    uspkfilename = donor_dict['donor_path'] + donor_dict['donor'] + '.uspk.1'

    clusters = klustersloader.read_clusters(clufilename)

    # Raw binary waveform data, interpreted as int16 samples and reshaped
    # to (num_spikes, samples, channels).
    uspk = np.array(load_binary(uspkfilename), dtype=np.int16)
    uspk = uspk.reshape((-1, num_samples, num_channels))

    # Boolean vector, True at indices whose cluster label is in the
    # chosen donor cluster(s).
    selected_cluster_indices = np.in1d(clusters, donor_dict['donorcluster'])
    num_selected_spikes = np.sum(selected_cluster_indices)
    selected_spikes = uspk[selected_cluster_indices, :, :]

    # Mean waveform; kept as sum/count (rather than np.mean) to preserve
    # the original expression's dtype/division semantics exactly.
    donor_id_msua_data = np.sum(selected_spikes, axis=0) / num_selected_spikes
    return donor_id_msua_data