def plotSaveROISignals(dataDir, saveFig=False, mode='Stimulus'):
    """ Extracts signals from a selected ROI set and creates plots for all
    ROI signals according to their tags along with the stimulus.

    Parameters
    ==========
    dataDir : str
        Path containing the ROIs and sima dataset structure
    saveFig : bool, optional
        Default: False
        Whether to save figures or not.
    mode : str, optional
        Default: Stimulus
        Determines what kind of plotting will be done according to the given
        data type. If stimulus is used then it will plot traces with
        stimulus, if MarkPoints are used then it will plot with markpoints.

    Returns
    =======
    None
    """
    # Get parent dir
    t_series_dir = os.path.dirname(dataDir)

    # Get signal file if it exists, if not extract it from the ROIs.
    try:
        signalFile_path = os.path.join(dataDir, '*.csv')
        signalFile = (glob.glob(signalFile_path))[0]
        print('Signal file found...')
    except IndexError:
        # The glob matched no csv file. NOTE(review): this was a bare
        # "except:" before, which would also have swallowed
        # KeyboardInterrupt/SystemExit.
        print('Signal file not found proceeding with extraction from ROIs...')
        print('File: %s' % dataDir)
        (signalFile, chNames, usedChannel, roiKeys, usedRoiKey,
         usedExtLabel) = extractRawSignal(motCorrDir=dataDir)

    if mode == 'MarkPoints':
        xmlPath = os.path.join(t_series_dir, '*_MarkPoints.xml')
        xmlFile = (glob.glob(xmlPath))[0]

    # Read the file and organize the data frame for plotting
    # Data comes from the bg subtracted traces and tags comes from the csv
    # file which has the no bg subtracted traces.
    ROI_data = pd.read_csv(signalFile, sep='\t', header=2, dtype='float')
    ROI_data = ROI_data.drop(['Unnamed: 0', 'tags'], axis=1)

    # Background subtraction by finding the 'bg' tag as background
    bg_data = ROI_data['bg']
    signal_data = ROI_data.subtract(bg_data, axis=0)
    signal_data = signal_data.drop(['bg'], axis=1)  # Get rid of bg

    # dF/F by mean of traces
    mean_data = signal_data.mean(axis=0)
    signal_data = (signal_data - mean_data) / mean_data

    # Checking the tags of ROIs, while importing a pandas df the column
    # names which in our case are tags, that have the same name are altered
    # with a dot '.' and a number -> e.g. Layer1 & Layer1 -> Layer1 & Layer1.1
    # Here label the same type of ROIs the same again for convenient indexing
    signal_data = signal_data.T
    signal_data.index = [this.split(".")[0] for this in signal_data.index]
    signal_data = signal_data.T

    # Finding the unique tags and their occurences for plotting
    unique_columns, column_occurences = np.unique(signal_data.columns.values,
                                                  return_counts=True)

    if mode == 'Stimulus':
        # Finding stimulus
        stimOutPath = os.path.join(t_series_dir, '_stimulus_output_*')
        stimOutFile_path = (glob.glob(stimOutPath))[0]
        (stimType, rawStimData) = readStimOut(stimOutFile=stimOutFile_path,
                                              skipHeader=1)
        stim_name = stimType.split('\\')[-1]
        stim_frames = rawStimData[:, 7]  # Frame information
        stim_vals = rawStimData[:, 3]  # Stimulus value
        # Keep only the first stimulus value of each imaging frame, then
        # truncate to the trace length.
        uniq_frame_id = np.unique(stim_frames, return_index=True)[1]
        stim_vals = stim_vals[uniq_frame_id]
        stim_vals = stim_vals[:signal_data.shape[0]]
        stim_df = pd.DataFrame(stim_vals, columns=['Stimulus'], dtype='float')
        # Make normalized values of stimulus values for plotting
        stim_df = (stim_df / np.max(np.unique(stim_vals))) * 5
    elif mode == 'MarkPoints':
        # BUG FIX: this branch used to be the placeholder "a = 5", which
        # left stim_name and stim_df undefined and crashed with a NameError
        # at fig.suptitle / stim_df.plot below.
        stim_name = 'MarkPoints'
        stim_df = None

    # Some color maps for plotting
    cmaps = OrderedDict()
    cmaps['Sequential'] = [
        'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
        'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
        'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn'
    ]

    # Figure size etc.
    fig = plt.figure(1, figsize=(14, 12), facecolor='w', edgecolor='k')
    fig.suptitle('%s, stim: %s' % (os.path.basename(t_series_dir),
                                   stim_name), fontsize=16)
    subPlotNumbers = unique_columns.shape[0]
    # BUG FIX: "round(subPlotNumbers / 2.0)" rounds halves to even under
    # Python 3 (round(0.5) == 0, round(2.5) == 2), yielding too few rows.
    # ceil is correct on both Python 2 and 3.
    nrows = int(np.ceil(float(subPlotNumbers) / float(2)))
    if nrows == 1:
        ncols = 1
    else:
        ncols = 2

    for iSubplot, column_name in enumerate(unique_columns):
        # Add linear values 1-2-3 to traces to shift them for visualization
        add_to_shift_traces = np.linspace(1, column_occurences[iSubplot],
                                          column_occurences[iSubplot])
        curr_plot_data = signal_data[[column_name]] + add_to_shift_traces
        iSubplot = iSubplot + 1
        ax = fig.add_subplot(nrows, ncols, iSubplot)
        # Wrap around the colormap list so more than 17 tag groups cannot
        # raise an IndexError.
        cmap_name = cmaps['Sequential'][iSubplot % len(cmaps['Sequential'])]
        curr_plot_data.plot(ax=ax, legend=False, colormap=cmap_name,
                            alpha=0.8)
        if stim_df is not None:
            stim_df.plot(dashes=[6, 2], ax=ax, color='k')
        plt.title(column_name)
        plt.ylabel('dF/F')

    # Ask the user how long the interactive figure should stay open.
    try:
        user_prompt = raw_input  # Python 2
    except NameError:
        user_prompt = input  # Python 3
    try:
        a = int(user_prompt("How long you want to inspect this image?"))
    except ValueError:
        a = 5  # Non-numeric answer: fall back to a short pause
    plt.pause(a)

    # Save the figure if desired
    if saveFig:
        # Saving figure
        exp_ID = os.path.split(os.path.split(t_series_dir)[0])[1]
        save_name = 'dF-%s-%s' % (exp_ID, os.path.basename(t_series_dir))
        # Use an absolute target path instead of os.chdir() so the process
        # working directory is not mutated as a side effect.
        plt.savefig(os.path.join(dataDir, '%s.png' % save_name),
                    bbox_inches='tight')
        print('Figure saved')
    plt.close(fig)

    return None
# Finding the xml file and retrieving relevant information t_series_path = os.path.dirname(flyPath) xmlPath = os.path.join(t_series_path, '*-???.xml') xmlFile = (glob.glob(xmlPath))[0] # Finding the frame period (1/FPS) and layer position framePeriod = getFramePeriod(xmlFile=xmlFile) frameRate = 1/framePeriod layerPosition = getLayerPosition(xmlFile=xmlFile) depth = layerPosition[2] # Stimulus output information stimOutPath = os.path.join(t_series_path, '_stimulus_output_*') stimOutFile_path = (glob.glob(stimOutPath))[0] (stimType, rawStimData) = readStimOut(stimOutFile=stimOutFile_path, skipHeader=1) stim_name = stimType.split('\\')[-1] stim_frames = rawStimData[:,7] # Frame information stim_vals = rawStimData[:,3] # Stimulus value uniq_frame_id = np.unique(stim_frames,return_index=True)[1] stim_vals = stim_vals[uniq_frame_id] stim_vals = stim_vals[:frame_num] # Stimulus information (stimInputFile,stimInputData) = readStimInformation(stimType=stimType, stimInputDir=stimInputDir) isRandom = int(stimInputData['Stimulus.randomize'][0])
def dataProcessSave(t_series_path, stimInputDir, saveOutputDir, imageID,
                    current_exp_ID, use_aligned=True, intRate=10):
    """ Processes the data and saves the necessary variables

    Parameters
    ==========
    t_series_path : str
        Path of the T-series that includes the motion correction directory
        along with stimulus output and xml file.
    stimInputDir : str
        Path of the folder where stimulus input files are located. These
        files contain information about all the stimuli used in the
        experiments.
    saveOutputDir : str
        Path of the folder where the data output files will be saved
    imageID : str
        The unique ID of the image data to be saved
    current_exp_ID : str
        The experiment ID of the image data to be saved
    use_aligned : bool, optional
        Default: True
        Defines if aligned 'motCorr.sima' or non-aligned 'TIFFs.sima' will
        be used.
    intRate : int, optional
        Default: 10
        The rate which data will be interpolated.

    Returns
    =======
    None
    """
    # NOTE: local variable names in this function are significant -- they
    # are captured via locals() at the bottom and saved by name according
    # to 'variablesToSave.txt'. Do not rename them.
    if use_aligned:
        print('Using the aligned sequences for extraction')
        dataDir = os.path.join(t_series_path, 'motCorr.sima')
    else:
        print('Using the non-aligned sequences for extraction')
        dataDir = os.path.join(t_series_path, 'TIFFs.sima')
    t_series_name = os.path.basename(t_series_path)

    # Finding the xml file and retrieving relevant information
    xmlPath = os.path.join(t_series_path, '*-???.xml')
    xmlFile = (glob.glob(xmlPath))[0]
    micRelTimes = getMicRelativeTime(xmlFile)

    # Finding the frame period (1/FPS) and layer position
    framePeriod = getFramePeriod(xmlFile=xmlFile)
    layerPosition = getLayerPosition(xmlFile=xmlFile)

    # Finding and reading the stimulus output file, extracting relevant info
    stimOutPath = os.path.join(t_series_path, '_stimulus_output_*')
    stimOutFile = (glob.glob(stimOutPath))[0]
    (stimType, rawStimData) = readStimOut(stimOutFile=stimOutFile,
                                          skipHeader=1)
    (stimInputFile, stimInputData) = readStimInformation(
        stimType=stimType, stimInputDir=stimInputDir)
    stimName = os.path.basename(stimInputFile)
    isRandom = int(stimInputData['Stimulus.randomize'][0])
    epochDur = stimInputData['Stimulus.duration']
    epochDur = [float(sec) for sec in epochDur]

    # Finding epoch coordinates and number of trials
    epochCount = getEpochCount(rawStimData=rawStimData, epochColumn=3)
    (trialCoor, trialCount, isRandom) = divideEpochs(rawStimData=rawStimData,
                                                     epochCount=epochCount,
                                                     isRandom=isRandom,
                                                     framePeriod=framePeriod,
                                                     trialDiff=0.20,
                                                     overlappingFrames=0,
                                                     firstEpochIdx=0,
                                                     epochColumn=3,
                                                     imgFrameColumn=7,
                                                     incNextEpoch=True,
                                                     checkLastTrialLen=True)

    # Signal extraction and background subtraction
    print('Signal extraction...')
    (signalFile, chNames, usedChannel, roiKeys, usedRoiKey,
     usedExtLabel) = extractRawSignal(motCorrDir=dataDir)

    # ROI information, header includes the ROI numbers
    # tags include the types of ROIs e.g. Layer1
    (header, bgIndex, tags) = getROIinformation(signalFile=signalFile,
                                                bgLabel=['bg', 0])
    (bgSub, rawTrace) = subtractBg(signalFile=signalFile, bgIndex=bgIndex,
                                   skipHeader=3)

    # Calculating dF/F according to the baseline type
    if isRandom == 1:
        # There is an epoch used for baseline
        baselineEpochPresent = True
        baselineDurationBeforeEpoch = 1.5  # In seconds
        baseDur = int(baselineDurationBeforeEpoch / framePeriod)  # In frames
    else:
        # Presumably no epoch present for baseline, taking the mean of trial
        baselineEpochPresent = False
        baselineDurationBeforeEpoch = np.nan
        baseDur = np.nan
    (dffTraceAllRoi, baselineStdAllRoi,
     baselineMeanAllRoi) = dff(trialCoor=trialCoor, header=header,
                               bgIndex=bgIndex, bgSub=bgSub,
                               baselineEpochPresent=baselineEpochPresent,
                               baseDur=baseDur)

    # Trial averaging
    trialAvgAllRoi = trialAverage(dffTraceAllRoi=dffTraceAllRoi,
                                  bgIndex=bgIndex)

    # Correlation with stimulus
    (corrHeader, pvalHeader) = corrTrialAvg(trialAvgAllRoi=trialAvgAllRoi,
                                            epochDur=epochDur,
                                            bgIndex=bgIndex,
                                            framePeriod=framePeriod)

    # Interpolation of responses to a certain frequency
    # BUG FIX: was print('Interpolating to %d Hz', intRate) -- print() has
    # no logging-style %-args, so it printed the literal format string and
    # the rate as two separate items.
    print('Interpolating to %d Hz' % intRate)
    interpolationRate = intRate  # Interpolation rate in Hz
    interpolatedAllRoi = interpolateTrialAvgROIs(trialAvgAllRoi=trialAvgAllRoi,
                                                 framePeriod=framePeriod,
                                                 intRate=interpolationRate)

    # locals() needs to be called within the script that
    # generates the variables to be saved
    varDict = locals()
    savePath = saveWorkspace(outDir=saveOutputDir, baseName=imageID,
                             varDict=varDict, varFile='variablesToSave.txt',
                             extension='.pickle')