Example #1
def make_KKfiles_Script_supercomp(hybdatadict, SDparams,prb, detectioncrit, KKparams,supercomparams):
    '''Creates the files required to run KlustaKwik'''
    argSD = [hybdatadict,SDparams,prb]
    if ju.is_cached(rsd.run_spikedetekt,*argSD):
        print 'Yes, SD has been run \n'
        hash_hyb_SD = rsd.run_spikedetekt(hybdatadict,SDparams,prb)
    else:
        print 'You need to run Spikedetekt before attempting to analyse results ' 
    
    
    argTD = [hybdatadict, SDparams,prb, detectioncrit]      
    if ju.is_cached(ds.test_detection_algorithm,*argTD):
        print 'Yes, you have run detection_statistics.test_detection_algorithm() \n'
        detcrit_groundtruth = ds.test_detection_algorithm(hybdatadict, SDparams,prb, detectioncrit)
    else:
        print 'You need to run detection_statistics.test_detection_algorithm() \n in order to obtain a groundtruth' 
    
    KKhash = hash_utils.hash_dictionary_md5(KKparams)
    baselist = [hash_hyb_SD, detcrit_groundtruth['detection_hashname'], KKhash]
    basefilename =  hash_utils.make_concatenated_filename(baselist)
    
    mainbasefilelist = [hash_hyb_SD, detcrit_groundtruth['detection_hashname']]
    mainbasefilename = hash_utils.make_concatenated_filename(mainbasefilelist)
    
    DIRPATH = hybdatadict['output_path']
    os.chdir(DIRPATH)
    
    KKscriptname = basefilename
    make_KKscript_supercomp(KKparams,basefilename,KKscriptname,supercomparams)
    
    return basefilename
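The base file name is just a concatenation of content hashes. Below is a minimal sketch of how helpers like hash_utils.hash_dictionary_md5 and hash_utils.make_concatenated_filename could behave (hypothetical re-implementations; the real hash_utils module may canonicalize differently):

import hashlib

def hash_dictionary_md5_sketch(d):
    # Hash a canonical (key-sorted) text representation of the dictionary.
    # The real helper may serialize values differently (e.g. for function objects).
    canonical = repr(sorted(d.items(), key=lambda kv: kv[0]))
    return hashlib.md5(canonical.encode('utf-8')).hexdigest()

def make_concatenated_filename_sketch(hashlist):
    # Join the individual hashes into a single flat file stem.
    return '_'.join(hashlist)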
def run_spikedetekt_debug(hybdatadict,sdparams,prb):
    ''' Uncached version for debugging.
    This function will call hash_hyb_SD(sdparams, hybdatadict)
    and will run SpikeDetekt on the hybrid dataset specified by
    hybdatadict with the parameters sdparams.'''
    filename = hybdatadict['hashD']+'.kwd'
    DIRPATH = hybdatadict['output_path']
    
    
    # Make the product hash output name
    hashSDparams = hash_utils.hash_dictionary_md5(sdparams)
    # choose whether to include the probe; if so, uncomment the two lines below
    #hashprobe = hash_utils.hash_dictionary_md5(prb)
    #hashdictlist = [ hybdatadict['hashD'],hashSDparams, hashprobe]
    hashdictlist = [hybdatadict['hashD'],hashSDparams]
    hash_hyb_SD_prb = hash_utils.make_concatenated_filename(hashdictlist)
    outputfilename = hash_hyb_SD_prb +'.kwd'
    
    # Need to create a symlink from hashD.kwd to hash_hyb_SD_prb.kwd 
    datasource = os.path.join(DIRPATH, filename )
    dest = os.path.join(DIRPATH, outputfilename )
    if not os.path.isfile(dest):
        os.symlink(datasource, dest)
    else: 
        print 'Warning: Symbolic link ', dest, ' already exists'
    
    #Read in the raw data 
    raw_data = read_raw(dest,sdparams['nchannels'])
    
    create_files_Experiment(outputfilename, DIRPATH,  sdparams, prb)
    
    # Run SpikeDetekt2
    with Experiment(hash_hyb_SD_prb, dir= DIRPATH, mode='a') as exp:
        run(raw_data,experiment=exp,prm=sdparams,probe=Probe(prb))
    return hash_hyb_SD_prb
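run_spikedetekt_debug only creates the symlink when the destination file is missing. A small helper with the same intent that also tolerates stale links is sketched below (an assumption about how one might factor it out; not part of the original module):

import os

def ensure_symlink(source, dest):
    # Create dest -> source, warning instead of failing when dest already exists.
    if os.path.islink(dest) or os.path.isfile(dest):
        print 'Warning: ', dest, ' already exists, leaving it in place'
    else:
        os.symlink(source, dest)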
def precreation_hybridict(donor_dict,acceptor_dict,time_size_dict):
    ''' Creates one dictionary hybdatadict out of three
    
    The inputs are:
        
        donor_dict = {'donor': 'n6mab031109', 'donorshanknum': 1, 'donorcluster': 54, 
             'donor_path':'/chandelierhome/skadir/hybrid_analysis/mariano/donors/',
                 'experiment_path': '/chandelierhome/skadir/hybrid_analysis/mariano/', 'donorcluid': 'MKKdistfloat'}
        
        time_size_dict = {'amplitude_generating_function_args':[0.5, 1.5],'amplitude_generating_function':make_uniform_amplitudes,
                  'donorspike_timeseries_generating_function':create_time_series_constant,
                  'sampling_rate':20000, 'firing_rate':3, 'start_time':10,'end_time':None,
                  'donorspike_timeseries_arguments': 'arg'}
                  
        acceptor_dict = {'acceptor_path':'/chandelierhome/skadir/hybrid_analysis/mariano/acceptors/',
                 'acceptor': 'n6mab041109_60sec.dat','numchannels':32,
                 'output_path':'/chandelierhome/skadir/hybrid_analysis/mariano/',
                 }    
    
    '''
    
    
    hashDlist = hash_utils.get_product_hashlist([donor_dict,acceptor_dict,time_size_dict])
    hashD = hash_utils.make_concatenated_filename(hashDlist)
    hybdatadict = merge_input_dicts(donor_dict,merge_input_dicts(acceptor_dict,time_size_dict))
    hybdatadict['hashD']= hashD
    print 'Printing during execution changed'
    return hybdatadict
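A usage sketch, assuming donor_dict, acceptor_dict and time_size_dict are defined as in the docstring above:

hybdatadict = precreation_hybridict(donor_dict, acceptor_dict, time_size_dict)
print 'Hybrid dataset hash: ', hybdatadict['hashD']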
Example #4
def make_KKfiles_Script_detindep_supercomp(hybdatadict, SDparams,prb, KKparams,supercomparams):
    '''Creates the files required to run KlustaKwik'''
    argSD = [hybdatadict,SDparams,prb]
    if ju.is_cached(rsd.run_spikedetekt,*argSD):
        print 'Yes, SD has been run \n'
        hash_hyb_SD = rsd.run_spikedetekt(hybdatadict,SDparams,prb)
    else:
        print 'You need to run Spikedetekt before attempting to analyse results ' 
    

    KKhash = hash_utils.hash_dictionary_md5(KKparams)
    baselist = [hash_hyb_SD, KKhash]
    KKbasefilename =  hash_utils.make_concatenated_filename(baselist)
    
    mainbasefilename = hash_hyb_SD
    
    DIRPATH = hybdatadict['output_path']
    os.chdir(DIRPATH)
    
    KKscriptname = KKbasefilename
    make_KKscript_supercomp(KKparams,KKbasefilename,KKscriptname,supercomparams)
    
    return KKbasefilename
def create_hybrid_kwdfile_old(donor_dict,acceptor_dict,time_size_dict):
    ''' This function outputs a file called:
        Hash(hybdatadict = [donor_dict,acceptor_dict,time_size_dict]).kwd,
        
        The inputs are:
        
        donor_dict = {'donor': 'n6mab031109', 'donorshanknum': 1, 'donorcluster': 54, 
             'donor_path':'/chandelierhome/skadir/hybrid_analysis/mariano/donors/',
                 'experiment_path': '/chandelierhome/skadir/hybrid_analysis/mariano/', 'donorcluid': 'MKKdistfloat'}
        
        time_size_dict = {'amplitude_generating_function_args':[0.5, 1.5],'amplitude_generating_function':make_uniform_amplitudes,
                  'donorspike_timeseries_generating_function':create_time_series_constant,
                  'sampling_rate':20000, 'firing_rate':3, 'start_time':10,'end_time':None,
                  'donorspike_timeseries_arguments': 'arg'}
                  
        acceptor_dict = {'acceptor_path':'/chandelierhome/skadir/hybrid_analysis/mariano/acceptors/',
                 'acceptor': 'n6mab041109_60sec.dat','numchannels':32,
                 'output_path':'/chandelierhome/skadir/hybrid_analysis/mariano/',
                 }    
                        
        
        It adds a mean waveform to an acceptor .dat file at specified groundtruth times.
        It returns the creation groundtruth, which is equivalent to the old
        regular3.res.1 file of the times of added spikes
        (i.e. the times and the cluster labels of the added hybrid spikes).
        It also returns hybdatadict = donor_dict U time_size_dict U acceptor_dict.
        '''
    hashDlist = hash_utils.get_product_hashlist([donor_dict,acceptor_dict,time_size_dict])
    hashD = hash_utils.make_concatenated_filename(hashDlist)
    hybdatadict = merge_input_dicts(donor_dict,merge_input_dicts(acceptor_dict,time_size_dict))
    hybdatadict['hashD']= hashD
    
    avespike = create_average_hybrid_wave_spike(donor_dict)
    prehamspike = np.zeros_like(avespike)
    hamspike = np.zeros_like(avespike)
    #hamspike.shape = (number of samples per spike,nChannels) Check this!
    #avespike.shape[0] = number of samples per spike
    SamplesPerSpike = avespike.shape[0]
    ham = scipy.signal.hamming(SamplesPerSpike-2)
    hammy = np.expand_dims(ham,axis = 1)
    hamnut = np.zeros((SamplesPerSpike,1))
    hamnut[1:SamplesPerSpike-1] = hammy
    for i in np.arange(SamplesPerSpike):
        prehamspike[i,:] = avespike[i,:]*hamnut[i]
        
    
        
    # Typical generating function is: 
    #create_time_series_constant(rate, samplerate, num_channels, start = 0, end = None, acceptor = None)
    print 'acceptor path is ', acceptor_dict['acceptor_path']
    acceptordat = acceptor_dict['acceptor_path']+acceptor_dict['acceptor']
    donorspike_timeseries = time_size_dict['donorspike_timeseries_generating_function'](time_size_dict['firing_rate'], 
                                                                                        time_size_dict['sampling_rate'],
                                                        acceptor_dict['numchannels'],
                                                        time_size_dict['start_time'],
                                                        time_size_dict['end_time'],
                                                         acceptor = acceptordat, buffersamples = np.ceil(SamplesPerSpike/2))
    # NOTE: The buffer ensures that the spikes can fit inside the acceptor dat file.
    #donorspike_timeseries.shape = (NumSpikes2Add, )
    NumSpikes2Add = donorspike_timeseries.shape[0] # Number of spikes to be added
    times_to_start_adding_spikes = donorspike_timeseries - SamplesPerSpike/2 #Subtract half the number of samples
    #per spike
    #print times_to_start_adding_spikes
    fractional_times = np.ceil(times_to_start_adding_spikes) - times_to_start_adding_spikes
    #print fractional_times
    
    
    
    #data_readin = DatRawDataReader([acceptordat],dtype = np.int16, dtype_to = np.int16, shape = (-1,nchannels))
    kwdoutputname = acceptor_dict['output_path']+hashD+'.kwd'
    print kwdoutputname

    prm = {'nchannels': acceptor_dict['numchannels']}
    create_kwd_from_dat(kwdoutputname,acceptordat,prm)
    #tb.close(kwdoutputname)
    with tb.openFile(kwdoutputname, mode = 'a') as kwdfile:
        rawdata = kwdfile.root.recordings._f_getChild('0').data
        #rawdata.shape = (Number of samples, nChannels)
        
    
        
        amp_gen_args = [NumSpikes2Add]
        amp_gen_args.extend(time_size_dict['amplitude_generating_function_args'])
        amplitude = time_size_dict['amplitude_generating_function'](*amp_gen_args)
        # amplitude.shape = (NumSpikes2Add,)
        
        print 'Adding ', NumSpikes2Add, ' spikes'
        for i in range(NumSpikes2Add): 
            if np.all(fractional_times[i]==0):
                hamspike = prehamspike
            else:
                knownsample_times = np.arange(SamplesPerSpike+1)
                lastrowprehamspike = prehamspike[SamplesPerSpike-1,:].reshape((1,acceptor_dict['numchannels']))
                
                #print prehamspike.shape
                #print lastrowprehamspike.shape
                appendedprehamspike = np.concatenate((prehamspike,lastrowprehamspike), axis = 0)
                #print appendedprehamspike.shape
                #Add one to prevent interpolation error: (ValueError: A value in x_new is above the interpolation range.)
                hamspike = interpolated_waveshift(acceptor_dict['numchannels'],appendedprehamspike,fractional_times[i],knownsample_times) 
            for j in range(SamplesPerSpike):        
                #print '(i,j)',i,j
                #rawdata[np.ceil(times_to_start_adding_spikes[i])+j,:] = rawdata[np.ceil(times_to_start_adding_spikes[i])+j,:]+ amplitude[i]*hamspike[j,:]
                rawdata[int(np.ceil(times_to_start_adding_spikes[i]))+j,:] += amplitude[i]*hamspike[j,:]
                
        rawdata.flush()
    #kwdfile.close()
    creation_groundtruth = donorspike_timeseries 
    return hybdatadict,kwdoutputname, creation_groundtruth, amplitude
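The key step above is tapering the average donor waveform with a Hamming window that is zero on the first and last sample, so the scaled spike blends smoothly into the acceptor trace. A standalone sketch of just that tapering, assuming avespike has shape (samples_per_spike, nchannels):

import numpy as np
import scipy.signal

def taper_with_hamming(avespike):
    # avespike.shape = (samples_per_spike, nchannels)
    samples_per_spike = avespike.shape[0]
    window = np.zeros((samples_per_spike, 1))
    # Zero the end points so the inserted spike starts and ends at zero amplitude.
    window[1:samples_per_spike - 1, 0] = scipy.signal.hamming(samples_per_spike - 2)
    return avespike * window  # broadcasts across channels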
Example #6
def make_KKfiles_viewer(hybdatadict, SDparams,prb, detectioncrit, KKparams):
    
    argSD = [hybdatadict,SDparams,prb]
    if ju.is_cached(rsd.run_spikedetekt,*argSD):
        print 'Yes, SD has been run \n'
        hash_hyb_SD = rsd.run_spikedetekt(hybdatadict,SDparams,prb)
    else:
        print 'You need to run Spikedetekt before attempting to analyse results ' 
    
    
    argTD = [hybdatadict, SDparams,prb, detectioncrit]      
    if ju.is_cached(ds.test_detection_algorithm,*argTD):
        print 'Yes, you have run detection_statistics.test_detection_algorithm() \n'
        detcrit_groundtruth = ds.test_detection_algorithm(hybdatadict, SDparams,prb, detectioncrit)
    else:
        print 'You need to run detection_statistics.test_detection_algorithm() \n in order to obtain a groundtruth'
        
    argKKfile = [hybdatadict, SDparams,prb, detectioncrit, KKparams]
    if ju.is_cached(make_KKfiles_Script,*argKKfile):
        print 'Yes, make_KKfiles_Script  has been run \n'
        
    else:
        print 'Need to run make_KKfiles_Script first, running now ' 
    basefilename = make_KKfiles_Script(hybdatadict, SDparams,prb, detectioncrit, KKparams)    
        
    mainbasefilelist = [hash_hyb_SD, detcrit_groundtruth['detection_hashname']]
    mainbasefilename = hash_utils.make_concatenated_filename(mainbasefilelist)    
    
    DIRPATH = hybdatadict['output_path']
    os.chdir(DIRPATH)
    with Experiment(hash_hyb_SD, dir= DIRPATH, mode='r') as expt:
        if KKparams['numspikesKK'] is not None: 
            #spk = expt.channel_groups[0].spikes.waveforms_filtered[0:KKparams['numspikesKK'],:,:]
            res = expt.channel_groups[0].spikes.time_samples[0:KKparams['numspikesKK']]
            #fets = expt.channel_groups[0].spikes.features[0:KKparams['numspikesKK']]
            #fmasks = expt.channel_groups[0].spikes.features_masks[0:KKparams['numspikesKK'],:,1]
            
           # masks = expt.channel_groups[0].spikes.masks[0:KKparams['numspikesKK']]

        else: 
            #spk = expt.channel_groups[0].spikes.waveforms_filtered[:,:,:]
            res = expt.channel_groups[0].spikes.time_samples[:]
            #fets = expt.channel_groups[0].spikes.features[:]
            #fmasks = expt.channel_groups[0].spikes.features_masks[:,:,1]
            #print fmasks[3,:]
            #masks = expt.channel_groups[0].spikes.masks[:]
            
        mainresfile = DIRPATH + mainbasefilename + '.res.1' 
        mainspkfile = DIRPATH + mainbasefilename + '.spk.1'
        detcritclufilename = DIRPATH + mainbasefilename + '.detcrit.clu.1'
        trivialclufilename = DIRPATH + mainbasefilename + '.clu.1'
        write_res(res,mainresfile)
        write_trivial_clu(res,trivialclufilename)
        
       # write_spk_buffered(exptable,filepath, indices,
       #                buffersize=512)
        write_spk_buffered(expt.channel_groups[0].spikes.waveforms_filtered,
                            mainspkfile,
                           np.arange(len(res)))
        
        write_clu(detcrit_groundtruth['detected_groundtruth'], detcritclufilename)
            
        #s_total = SDparams['extract_s_before']+SDparams['extract_s_after']
            
        #write_xml(prb,
        #          n_ch = SDparams['nchannels'],
        #          n_samp = SDparams['S_TOTAL'],
        #          n_feat = s_total,
        #          sample_rate = SDparams['sample_rate'],
        #          filepath = basename+'.xml')
    mainxmlfile =  hybdatadict['donor_path'] + hybdatadict['donor']+'_afterprocessing.xml'   
    
    #os.system('ln -s %s %s.clu.1 ' %(trivialclufilename,basefilename))
    os.system('ln -s %s %s.spk.1 ' %(mainspkfile,basefilename))
    os.system('ln -s %s %s.res.1 ' %(mainresfile,basefilename))
    os.system('cp %s %s.xml ' %(mainxmlfile,basefilename))
    
    return basefilename
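write_res and write_trivial_clu are not shown in this example. In the standard Klusters/Neuroscope convention a .res file holds one spike time (in samples) per line, and a .clu file holds the number of clusters on its first line followed by one cluster label per spike. A sketch of writers with that behaviour (the project's own implementations may differ):

import numpy as np

def write_res_sketch(res, filepath):
    # One spike time per line.
    np.savetxt(filepath, np.asarray(res, dtype=np.int64), fmt='%i')

def write_trivial_clu_sketch(res, filepath):
    # Every spike assigned to a single placeholder cluster;
    # the first line gives the number of clusters.
    with open(filepath, 'w') as f:
        f.write('1\n')
        np.savetxt(f, np.zeros(len(res), dtype=np.int64), fmt='%i')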
Example #7
def make_KKfiles_Script_detindep_full(hybdatadict, SDparams,prb, KKparams):
    '''Creates the files required to run KlustaKwik'''
    argSD = [hybdatadict,SDparams,prb]
    if ju.is_cached(rsd.run_spikedetekt,*argSD):
        print 'Yes, SD has been run \n'
        hash_hyb_SD = rsd.run_spikedetekt(hybdatadict,SDparams,prb)
    else:
        print 'You need to run Spikedetekt before attempting to analyse results ' 
   
    KKhash = hash_utils.hash_dictionary_md5(KKparams)
    baselist = [hash_hyb_SD, KKhash]
    KKbasefilename =  hash_utils.make_concatenated_filename(baselist)
    
    mainbasefilename = hash_hyb_SD
    
    DIRPATH = hybdatadict['output_path']
    os.chdir(DIRPATH)
    
    mainresfile = DIRPATH + mainbasefilename + '.res.1' 
    mainspkfile = DIRPATH + mainbasefilename + '.spk.1'        
    trivialclufilename = DIRPATH + mainbasefilename + '.clu.1'
    mainfetfile = DIRPATH + mainbasefilename+'.fet.1'
    mainfmaskfile = DIRPATH + mainbasefilename+'.fmask.1'
    mainmaskfile = DIRPATH + mainbasefilename+'.mask.1'
    
    #arg_spkresdetclu = [expt,res,mainresfile, mainspkfile, detcritclufilename, trivialclufilename]
        #if ju.is_cached(make_spkresdetclu_files,*arg_spkresdetclu):
    if os.path.isfile(mainspkfile):
        print 'miscellaneous files probably already exist, moving on, saving time'
    else:
        with Experiment(hash_hyb_SD, dir= DIRPATH, mode='r') as expt:
            if KKparams['numspikesKK'] is not None: 
                feats = expt.channel_groups[0].spikes.features[0:KKparams['numspikesKK']]
                prefmasks = expt.channel_groups[0].spikes.features_masks[0:KKparams['numspikesKK'],:,1]
                
                premasks = expt.channel_groups[0].spikes.masks[0:KKparams['numspikesKK']]
                res = expt.channel_groups[0].spikes.time_samples[0:KKparams['numspikesKK']]
            else: 
                feats = expt.channel_groups[0].spikes.features[:]
                prefmasks = expt.channel_groups[0].spikes.features_masks[:,:,1]
                #print fmasks[3,:]
                premasks = expt.channel_groups[0].spikes.masks[:]
                res = expt.channel_groups[0].spikes.time_samples[:]    
                
            
            
            #arg_spkresdetclu = [expt,res,mainresfile, mainspkfile, detcritclufilename, trivialclufilename]
            #if ju.is_cached(make_spkresdetclu_files,*arg_spkresdetclu):
            #if os.path.isfile(mainspkfile):
            #    print 'miscellaneous files probably already exist, moving on, saving time'
            #else:
            make_spkresclu_files(expt,res,mainresfile, mainspkfile, trivialclufilename)
            
            #write_res(res,mainresfile)
            #write_trivial_clu(res,trivialclufilename)
            #write_spk_buffered(expt.channel_groups[0].spikes.waveforms_filtered,
            #                    mainspkfile,
            #                   np.arange(len(res)))
            #write_clu(detcrit_groundtruth['detected_groundtruth'], detcritclufilename)
            
            times = np.expand_dims(res, axis =1)
            masktimezeros = np.zeros_like(times)
            fets = np.concatenate((feats, times),axis = 1)
            fmasks = np.concatenate((prefmasks, masktimezeros),axis = 1)
            masks = np.concatenate((premasks, masktimezeros),axis = 1)
       
        #print fets
        #embed()
        
        if not os.path.isfile(mainfetfile):
            write_fet(fets,mainfetfile )
        else: 
            print mainfetfile, ' already exists, moving on \n '
            
        if not os.path.isfile(mainfmaskfile):
            write_mask(fmasks,mainfmaskfile,fmt='%f')
        else: 
            print mainfmaskfile, ' already exists, moving on \n '  
        
        if not os.path.isfile(mainmaskfile):
            write_mask(masks,mainmaskfile,fmt='%f')
        else: 
            print mainmaskfile, ' already exists, moving on \n '    
        
    
    mainxmlfile =  hybdatadict['donor_path'] + hybdatadict['donor']+'_afterprocessing.xml'   
    os.system('ln -s %s %s.fet.1 ' %(mainfetfile,KKbasefilename))
    os.system('ln -s %s %s.fmask.1 ' %(mainfmaskfile,KKbasefilename))
    os.system('ln -s %s %s.mask.1 ' %(mainmaskfile,KKbasefilename))
    os.system('ln -s %s %s.trivial.clu.1 ' %(trivialclufilename,KKbasefilename))
    os.system('ln -s %s %s.spk.1 ' %(mainspkfile,KKbasefilename))
    os.system('ln -s %s %s.res.1 ' %(mainresfile,KKbasefilename))
    os.system('cp %s %s.xml ' %(mainxmlfile,mainbasefilename))
    os.system('cp %s %s.xml ' %(mainxmlfile,KKbasefilename))
    
    KKscriptname = KKbasefilename
    make_KKscript(KKparams,KKbasefilename,KKscriptname)
    
    return KKbasefilename
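Before the .fet/.fmask/.mask files are written, the spike time is appended as one extra feature column with a zero mask, so KlustaKwik carries the time along without treating it as an informative feature. The same concatenation in isolation, assuming feats has shape (nspikes, nfeatures), the mask arrays have one column per feature or channel, and res has shape (nspikes,):

import numpy as np

def append_time_feature(feats, prefmasks, premasks, res):
    times = np.expand_dims(res, axis=1)   # (nspikes, 1)
    zeros = np.zeros_like(times)          # mask of 0 => column is ignored
    fets = np.concatenate((feats, times), axis=1)
    fmasks = np.concatenate((prefmasks, zeros), axis=1)
    masks = np.concatenate((premasks, zeros), axis=1)
    return fets, fmasks, masks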
Example #8
def make_KKfiles_Script(hybdatadict, SDparams,prb, detectioncrit, KKparams):
    '''Creates the files required to run KlustaKwik'''
    argSD = [hybdatadict,SDparams,prb]
    if ju.is_cached(rsd.run_spikedetekt,*argSD):
        print 'Yes, SD has been run \n'
        hash_hyb_SD = rsd.run_spikedetekt(hybdatadict,SDparams,prb)
    else:
        print 'You need to run Spikedetekt before attempting to analyse results ' 
    
    
    argTD = [hybdatadict, SDparams,prb, detectioncrit]      
    if ju.is_cached(ds.test_detection_algorithm,*argTD):
        print 'Yes, you have run detection_statistics.test_detection_algorithm() \n'
        detcrit_groundtruth = ds.test_detection_algorithm(hybdatadict, SDparams,prb, detectioncrit)
    else:
        print 'You need to run detection_statistics.test_detection_algorithm() \n in order to obtain a groundtruth' 
    
    KKhash = hash_utils.hash_dictionary_md5(KKparams)
    baselist = [hash_hyb_SD, detcrit_groundtruth['detection_hashname'], KKhash]
    basefilename =  hash_utils.make_concatenated_filename(baselist)
    
    mainbasefilelist = [hash_hyb_SD, detcrit_groundtruth['detection_hashname']]
    mainbasefilename = hash_utils.make_concatenated_filename(mainbasefilelist)
    
    DIRPATH = hybdatadict['output_path']
    os.chdir(DIRPATH)
    with Experiment(hash_hyb_SD, dir= DIRPATH, mode='r') as expt:
        if KKparams['numspikesKK'] is not None: 
            fets = expt.channel_groups[0].spikes.features[0:KKparams['numspikesKK']]
            fmasks = expt.channel_groups[0].spikes.features_masks[0:KKparams['numspikesKK'],:,1]
            
            masks = expt.channel_groups[0].spikes.masks[0:KKparams['numspikesKK']]
        else: 
            fets = expt.channel_groups[0].spikes.features[:]
            fmasks = expt.channel_groups[0].spikes.features_masks[:,:,1]
            #print fmasks[3,:]
            masks = expt.channel_groups[0].spikes.masks[:]
    
    mainfetfile = DIRPATH + mainbasefilename+'.fet.1'
    mainfmaskfile = DIRPATH + mainbasefilename+'.fmask.1'
    mainmaskfile = DIRPATH + mainbasefilename+'.mask.1'
    
    if not os.path.isfile(mainfetfile):
        write_fet(fets,mainfetfile )
    else: 
        print mainfetfile, ' already exists, moving on \n '
        
    if not os.path.isfile(mainfmaskfile):
        write_mask(fmasks,mainfmaskfile,fmt='%f')
    else: 
        print mainfmaskfile, ' already exists, moving on \n '  
    
    if not os.path.isfile(mainmaskfile):
        write_mask(masks,mainmaskfile,fmt='%f')
    else: 
        print mainmaskfile, ' already exists, moving on \n '    
        
    
    
    os.system('ln -s %s %s.fet.1 ' %(mainfetfile,basefilename))
    os.system('ln -s %s %s.fmask.1 ' %(mainfmaskfile,basefilename))
    os.system('ln -s %s %s.mask.1 ' %(mainmaskfile,basefilename))
    
    KKscriptname = basefilename
    make_KKscript(KKparams,basefilename,KKscriptname)
    
    return basefilename
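The aliasing at the end shells out to ln -s for each file. Below is a sketch of the same aliasing done with os.symlink, which avoids spawning a shell and skips links that already exist (an alternative formulation, not what the example itself does):

import os

def link_kk_inputs(mainbasefilename, basefilename, dirpath,
                   extensions=('.fet.1', '.fmask.1', '.mask.1')):
    # Expose the shared main files under the per-KKparams basename.
    for ext in extensions:
        source = os.path.join(dirpath, mainbasefilename + ext)
        dest = os.path.join(dirpath, basefilename + ext)
        if not (os.path.islink(dest) or os.path.isfile(dest)):
            os.symlink(source, dest)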