def precreation_hybridict(donor_dict, acceptor_dict, time_size_dict):
    ''' Creates one dictionary, hybdatadict, out of the three input dictionaries.
    
    The inputs are:
        
        donor_dict = {'donor': 'n6mab031109', 'donorshanknum': 1, 'donorcluster': 54, 
             'donor_path':'/chandelierhome/skadir/hybrid_analysis/mariano/donors/',
                 'experiment_path': '/chandelierhome/skadir/hybrid_analysis/mariano/', 'donorcluid': 'MKKdistfloat'}
        
        time_size_dict = {'amplitude_generating_function_args':[0.5, 1.5],'amplitude_generating_function':make_uniform_amplitudes,
                  'donorspike_timeseries_generating_function':create_time_series_constant,
                  'sampling_rate':20000, 'firing_rate':3, 'start_time':10,'end_time':None,
                  'donorspike_timeseries_arguments': 'arg'}
                  
        acceptor_dict = {'acceptor_path':'/chandelierhome/skadir/hybrid_analysis/mariano/acceptors/',
                 'acceptor': 'n6mab041109_60sec.dat','numchannels':32,
                 'output_path':'/chandelierhome/skadir/hybrid_analysis/mariano/',
                 }    
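    It returns hybdatadict, the union of the three input dictionaries,
    with an additional key 'hashD' holding their concatenated hash.
    
    Example (a sketch; assumes the three dictionaries above are defined):
    
        hybdatadict = precreation_hybridict(donor_dict, acceptor_dict, time_size_dict)
        hybdatadict['hashD']  # concatenated hash identifying this hybrid dataset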
    
    '''
    
    
    hashDlist = hash_utils.get_product_hashlist([donor_dict,acceptor_dict,time_size_dict])
    hashD = hash_utils.make_concatenated_filename(hashDlist)
    hybdatadict = merge_input_dicts(donor_dict,merge_input_dicts(acceptor_dict,time_size_dict))
    hybdatadict['hashD']= hashD
    return hybdatadict


def create_hybrid_kwdfile_old(donor_dict, acceptor_dict, time_size_dict):
    ''' This function outputs a file called:
        Hash(hybdatadict = [donor_dict, acceptor_dict, time_size_dict]).kwd
        
        The inputs are:
        
        donor_dict = {'donor': 'n6mab031109', 'donorshanknum': 1, 'donorcluster': 54, 
             'donor_path':'/chandelierhome/skadir/hybrid_analysis/mariano/donors/',
                 'experiment_path': '/chandelierhome/skadir/hybrid_analysis/mariano/', 'donorcluid': 'MKKdistfloat'}
        
        time_size_dict = {'amplitude_generating_function_args':[0.5, 1.5],'amplitude_generating_function':make_uniform_amplitudes,
                  'donorspike_timeseries_generating_function':create_time_series_constant,
                  'sampling_rate':20000, 'firing_rate':3, 'start_time':10,'end_time':None,
                  'donorspike_timeseries_arguments': 'arg'}
                  
        acceptor_dict = {'acceptor_path':'/chandelierhome/skadir/hybrid_analysis/mariano/acceptors/',
                 'acceptor': 'n6mab041109_60sec.dat','numchannels':32,
                 'output_path':'/chandelierhome/skadir/hybrid_analysis/mariano/',
                 }    
                        
        
        It adds a mean donor waveform to an acceptor .dat file at specified
        groundtruth times.
        It returns the creation groundtruth, which is equivalent to the old
        regular3.res.1 file of the times of added spikes
        (i.e. the times and the cluster labels of the added hybrid spikes).
        It also returns hybdatadict = donor_dict U acceptor_dict U time_size_dict,
        with an additional key 'hashD'.
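        
        Example (a sketch; assumes the three dictionaries above are defined):
        
            hybdatadict, kwdoutputname, creation_groundtruth, amplitude = \
                create_hybrid_kwdfile_old(donor_dict, acceptor_dict, time_size_dict)
            # creation_groundtruth holds the spike-centre times of the added spikes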
        '''
    hashDlist = hash_utils.get_product_hashlist([donor_dict,acceptor_dict,time_size_dict])
    hashD = hash_utils.make_concatenated_filename(hashDlist)
    hybdatadict = merge_input_dicts(donor_dict,merge_input_dicts(acceptor_dict,time_size_dict))
    hybdatadict['hashD']= hashD
    
    avespike = create_average_hybrid_wave_spike(donor_dict)
    # avespike.shape = (SamplesPerSpike, numchannels)
    SamplesPerSpike = avespike.shape[0]
    # Taper the average spike with a Hamming window embedded in a zero-padded
    # array, so that the taper is exactly zero at the first and last sample and
    # the added spike fades smoothly into the acceptor trace.
    ham = scipy.signal.hamming(SamplesPerSpike-2)
    hammy = np.expand_dims(ham, axis = 1)
    hamnut = np.zeros((SamplesPerSpike,1))
    hamnut[1:SamplesPerSpike-1] = hammy
    prehamspike = avespike*hamnut # broadcasts the window across all channels
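    # Illustration with hypothetical numbers: for SamplesPerSpike = 6,
    # hamnut.ravel() would be approximately [0., 0.08, 0.77, 0.77, 0.08, 0.],
    # i.e. a 4-point Hamming window padded with a zero at each end.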
        
    
        
    # Typical generating function is:
    # create_time_series_constant(rate, samplerate, num_channels, start = 0, end = None, acceptor = None)
    # (invoked below with an additional buffersamples keyword argument).
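    # A hypothetical sketch of such a constant-rate generator (the real
    # create_time_series_constant is defined elsewhere); it would return a 1D
    # array of spike-centre sample times that stay buffersamples clear of both
    # ends of the acceptor file, e.g. (ignoring the end = None case):
    #   period = samplerate // rate      # samples between successive spikes
    #   return np.arange(start*samplerate + buffersamples,
    #                    end*samplerate - buffersamples, period)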
    print 'acceptor path is ', acceptor_dict['acceptor_path']
    acceptordat = acceptor_dict['acceptor_path']+acceptor_dict['acceptor']
    donorspike_timeseries = time_size_dict['donorspike_timeseries_generating_function'](
                                                        time_size_dict['firing_rate'],
                                                        time_size_dict['sampling_rate'],
                                                        acceptor_dict['numchannels'],
                                                        time_size_dict['start_time'],
                                                        time_size_dict['end_time'],
                                                        acceptor = acceptordat,
                                                        buffersamples = np.ceil(SamplesPerSpike/2.))
    # NOTE: The buffer ensures that the spikes can fit inside the acceptor dat file.
    #donorspike_timeseries.shape = (NumSpikes2Add, )
    NumSpikes2Add = donorspike_timeseries.shape[0] # number of spikes to be added
    # Convert each spike-centre time to a start time by subtracting half the
    # number of samples per spike.
    times_to_start_adding_spikes = donorspike_timeseries - SamplesPerSpike//2
    # fractional_times[i] is the sub-sample shift in [0, 1) needed to place
    # spike i at its exact (non-integer) start time.
    fractional_times = np.ceil(times_to_start_adding_spikes) - times_to_start_adding_spikes
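    # Worked example with hypothetical numbers: a spike centre at sample 1000.3
    # with SamplesPerSpike = 32 gives a start time of 984.3, so the waveform is
    # shifted by fractional_times = ceil(984.3) - 984.3 = 0.7 of a sample and
    # written starting at sample 985.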
    
    
    
    #data_readin = DatRawDataReader([acceptordat],dtype = np.int16, dtype_to = np.int16, shape = (-1,nchannels))
    kwdoutputname = acceptor_dict['output_path']+hashD+'.kwd'
    print kwdoutputname

    prm = {'nchannels': acceptor_dict['numchannels']}
    create_kwd_from_dat(kwdoutputname,acceptordat,prm)
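    # create_kwd_from_dat converts the flat binary .dat file into an HDF5 .kwd
    # file; the raw samples live in the array /recordings/0/data, which is
    # opened and modified in place below.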
    with tb.openFile(kwdoutputname, mode = 'a') as kwdfile:
        rawdata = kwdfile.root.recordings._f_getChild('0').data
        #rawdata.shape = (Number of samples, nChannels)
        
    
        
        amp_gen_args = [NumSpikes2Add]
        amp_gen_args.extend(time_size_dict['amplitude_generating_function_args'])
        amplitude = time_size_dict['amplitude_generating_function'](*amp_gen_args)
        # amplitude.shape = (NumSpikes2Add,)
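        # A sketch of what make_uniform_amplitudes presumably does, given the
        # example args [0.5, 1.5] in the docstring (hypothetical; it is defined
        # elsewhere):
        #   def make_uniform_amplitudes(num_spikes, lower, upper):
        #       return np.random.uniform(lower, upper, size = num_spikes)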
        
        print 'Adding ', NumSpikes2Add, ' spikes'
        for i in range(NumSpikes2Add): 
            if np.all(fractional_times[i]==0):
                hamspike = prehamspike
            else:
                knownsample_times = np.arange(SamplesPerSpike+1)
                lastrowprehamspike = prehamspike[SamplesPerSpike-1,:].reshape((1,acceptor_dict['numchannels']))
                
                #print prehamspike.shape
                #print lastrowprehamspike.shape
                appendedprehamspike = np.concatenate((prehamspike,lastrowprehamspike), axis = 0)
                #print appendedprehamspike.shape
                #Add one to prevent interpolation error: (ValueError: A value in x_new is above the interpolation range.)
                hamspike = interpolated_waveshift(acceptor_dict['numchannels'],appendedprehamspike,fractional_times[i],knownsample_times) 
            for j in range(SamplesPerSpike):        
                #print '(i,j)',i,j
                #rawdata[np.ceil(times_to_start_adding_spikes[i])+j,:] = rawdata[np.ceil(times_to_start_adding_spikes[i])+j,:]+ amplitude[i]*hamspike[j,:]
                rawdata[np.ceil(times_to_start_adding_spikes[i])+j,:] += amplitude[i]*hamspike[j,:]
                
        rawdata.flush()
    creation_groundtruth = donorspike_timeseries
    return hybdatadict, kwdoutputname, creation_groundtruth, amplitude