import numpy as np
# PairWiseRoi is assumed to be imported at module level in the original file.


def main():
    from Behavior.General.ExpDir import ExpDir

    firstDir = '/mnt/storageNASRe/tph1/Results/09-Feb-2020/TPH_1_ATR_TRAIN_75M_D0.avi_11.05.51'
    secondDir = '/mnt/storageNASRe/tph1/Results/09-Feb-2020/TPH_1_NO_ATR_TRAIN_75M_D0.avi_11.04.57'

    # Load the pickled experiment objects and run the pairwise ROI comparison.
    exp1 = np.load(ExpDir(firstDir).getExpFile())[0]
    exp2 = np.load(ExpDir(secondDir).getExpFile())[0]

    PairWiseRoi('ATR+ (Experiment)', exp1, 'ATR- (Control)', exp2)
import numpy as np
from os import path
# ExpDir, filterTracksForAnalyses and SegmentedTracker are assumed to be
# imported from the project's own modules in the original file.


def main(filename):  # fixed: the parameter was misspelled 'filenmame' but used as 'filename'
    expDir = ExpDir(expDir=path.dirname(filename))
    exp = np.load(filename)[0]

    # Keep only tracks that are long enough, and travelled far enough, for analysis.
    tracks = filterTracksForAnalyses(exp._tracks, minSteps=18, minDistance=80)
    tracks = [t.getTrackDict() for t in tracks]

    st = SegmentedTracker(expDir.getExpSegVid(), expDir.getVidFile())
    st._tracks = tracks
    st.createTrackedMovie()
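# filterTracksForAnalyses is called above but not defined in this snippet.
# A minimal sketch under stated assumptions: tracks expose the _trackCords
# array and getMaxDistTravelled() used elsewhere in this code; the real
# helper may apply additional criteria.
def filterTracksForAnalyses(tracks, minSteps, minDistance):
    # Keep tracks with at least minSteps samples whose maximum travelled
    # distance is at least minDistance.
    return [t for t in tracks
            if t._trackCords.shape[0] >= minSteps
            and t.getMaxDistTravelled() >= minDistance]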
import numpy as np
from copy import deepcopy
from os import path
from Behavior.General.ExpDir import ExpDir
# subset_track is assumed to be defined alongside this function (see the sketch below).


def seiveTracks(exp):
    SMALL_PROP = 0.3
    BIG_PROP = 1 - SMALL_PROP
    FRAMES = 4500
    ANGLE = np.pi / 3

    small_radius = SMALL_PROP * exp._scale
    big_radius = BIG_PROP * exp._scale

    # Getting the tracks.
    tracks = exp._tracks

    # Horizontal line: unit vector from the start region to the end region.
    horiz_line = np.array(exp._regionsOfInterest['startReg']['pos']) - \
                 np.array(exp._regionsOfInterest['endReg']['pos'])
    horiz_line /= np.linalg.norm(horiz_line)

    # Perpendicular line. (The original guarded on horiz_line[0] but divided
    # by horiz_line[1]; guard on the divisor instead.)
    if horiz_line[1] != 0:
        perp_line = -np.ones((2,))
        perp_line[1] = horiz_line[0] / horiz_line[1]
    else:
        perp_line = np.array([0.0, 1.0])
    perp_line /= -np.linalg.norm(perp_line)

    # Change of basis into (perpendicular, horizontal) coordinates.
    new_basis = np.array([perp_line, horiz_line]).T
    trans = np.linalg.inv(new_basis)
    new_end_point = np.matmul(trans, exp._regionsOfInterest['endReg']['pos'])

    newTracks = []
    for i, t in enumerate(tracks):
        # Filter in time and by distance travelled.
        if t._trackCords.shape[0] < 50:
            continue
        if t.getMaxDistTravelled() < 75:
            continue

        # First, trim the track.
        t = t.trimTrack(FRAMES)

        # Check whether there are any coordinates left.
        if t is None or t._trackCords.shape[0] == 0:
            continue

        # Sieve by position: keep points inside the annulus around the end region.
        distances = np.linalg.norm(t._trackCords - exp._regionsOfInterest['endReg']['pos'], axis=1)
        t = subset_track(t, distances > small_radius)
        if t._trackCords.shape[0] == 0:
            continue

        distances = np.linalg.norm(t._trackCords - exp._regionsOfInterest['endReg']['pos'], axis=1)
        t = subset_track(t, distances < big_radius)
        if t._trackCords.shape[0] == 0:
            continue

        # Keep points inside a cone of half-angle ANGLE around the
        # start-to-end axis through the end point.
        newCords = np.matmul(trans, t._trackCords.T).T
        y_boundaries = (newCords[:, 1] - new_end_point[1]) * np.tan(ANGLE)
        t = subset_track(t, np.abs(newCords[:, 0] - new_end_point[0]) < y_boundaries)
        if t._trackCords.shape[0] == 0:
            continue

        # Trim again so the per-track metrics are recomputed.
        t = t.trimTrack(FRAMES)
        if t is not None:
            newTracks.append(t)

        print('Gone over track: %d' % (i,))

    exp._regionsOfInterest['endReg']['pos'] = new_end_point
    exp._tracks = None
    exp._cap = None

    new_exp = deepcopy(exp)
    new_exp._tracks = np.array(newTracks)
    new_exp.initialize(ExpDir(path.dirname(new_exp._videoFilename)))
    return new_exp
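# subset_track is used above but not defined in this snippet. A minimal
# sketch, assuming a track only needs its coordinate array masked; the real
# helper may also subset per-frame metadata kept on the track object.
def subset_track(track, mask):
    # Keep only the coordinates selected by the boolean mask.
    track._trackCords = track._trackCords[mask]
    return track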
import re
import cv2
import numpy as np
import matplotlib.pyplot as plt
from os import path
from PIL import Image, ImageDraw, ImageColor, ImageFont
from matplotlib.animation import FuncAnimation
from matplotlib.font_manager import FontProperties
from Behavior.General.ExpDir import ExpDir
# RoiAnalysis is assumed to be imported from the project's own modules.


class ExpPair:
    def __init__(self, firstExpPath, secExpPath, targetPath=None, cond1=None, cond2=None):
        self.FIRST_COLOR = "#3C65B7"
        self.SECOND_COLOR = "#00A99C"

        self._firstExp = np.load(firstExpPath)[0]
        self._secondExp = np.load(secExpPath)[0]

        self._firstExpDir = ExpDir(path.dirname(firstExpPath))
        self._secondExpDir = ExpDir(path.dirname(secExpPath))
        self._targetDir = path.dirname(firstExpPath)

        # Try to get the name of the condition from one of the experiment files.
        if cond1 is None:
            fn1 = path.basename(self._firstExpDir.getExpSegVid())
            self._cond1 = re.search(r'.+-(.+)\.avi.*', fn1)[1]
        else:
            self._cond1 = cond1

        if cond2 is None:
            fn2 = path.basename(self._secondExpDir.getExpSegVid())
            self._cond2 = re.search(r'.+-(.+)\.avi.*', fn2)[1]
        else:
            self._cond2 = cond2

        if targetPath is None:
            self._targetPath = path.dirname(firstExpPath)
        else:
            self._targetPath = targetPath

    def save(self):
        fileName = path.join(self._targetDir, 'expsPair.npy')
        np.save(fileName, self)

    def alignImage(self, img):
        # Compute the horizontal shift (for np.roll) that centers the plate.
        rightBorder = np.min(np.where(img > 0)[1])
        leftBorder = img.shape[1] - np.max(np.where(img > 0)[1])
        allBorders = rightBorder + leftBorder
        correctBorder = np.floor(allBorders / 2)
        return int(correctBorder - rightBorder)

    def createPairVisualization(self, numberOfFrames=0, dpi=200):
        firstCap = cv2.VideoCapture(self._firstExpDir.getExpSegVid())
        secCap = cv2.VideoCapture(self._secondExpDir.getExpSegVid())

        firstMovieLength = int(firstCap.get(cv2.CAP_PROP_FRAME_COUNT))
        secondMovieLength = int(secCap.get(cv2.CAP_PROP_FRAME_COUNT))

        firstCap.set(cv2.CAP_PROP_POS_FRAMES, 1)
        secCap.set(cv2.CAP_PROP_POS_FRAMES, 1)

        # The minimum frame count of the two videos, capped by the request.
        frameLength = np.min((firstMovieLength, secondMovieLength))
        frameLength = np.min((numberOfFrames, frameLength))

        # First, run the ROI analyses.
        firstRoi = RoiAnalysis(self._firstExp)
        secondRoi = RoiAnalysis(self._secondExp)
        firstRoi.execute()
        secondRoi.execute()

        fig = plt.figure(facecolor='black')
        plt.style.use('dark_background')
        ax_vid = fig.add_subplot(2, 1, 1)
        ax_fig = fig.add_subplot(2, 1, 2)
        ax_vid.axis('off')

        im = None
        vertLine = None

        # The font for the ImageDraw.
        fnt = ImageFont.truetype("DejaVuSans-Bold.ttf", 96)

        firstAlign = 0
        secondAlign = 0

        def updateMovie(frameNum):
            nonlocal im, vertLine, firstAlign, secondAlign
            fig.sca(ax_vid)

            _, firstFrame = firstCap.read()
            _, secondFrame = secCap.read()

            # frameNum == -1 is the init pass: compute the alignments once.
            if frameNum == -1:
                firstAlign = self.alignImage(firstFrame)
                secondAlign = self.alignImage(secondFrame)

            firstFrame = np.roll(firstFrame, firstAlign, axis=1)
            secondFrame = np.roll(secondFrame, secondAlign, axis=1)

            firstImSeg = Image.fromarray(firstFrame)
            firstImRawDraw = ImageDraw.Draw(firstImSeg)
            firstImRawDraw.text((0, 0), self._cond1, font=fnt,
                                fill=ImageColor.getrgb(self.FIRST_COLOR))
            countStr = "%d / %d (%d%%)" % \
                       (int(firstRoi._results['arrived'][frameNum]),
                        int(firstRoi._results['wormCount']),
                        int((firstRoi._results['arrived'][frameNum] * 100) / firstRoi._results['wormCount']))
            firstImRawDraw.text((0, 120), countStr, font=fnt,
                                fill=ImageColor.getrgb(self.FIRST_COLOR))

            # End-region centers, flipped to (x, y) order and shifted by the alignment.
            firstChemoPos = np.ravel(np.fliplr(np.atleast_2d(
                self._firstExp._regionsOfInterest['endReg']['pos'])))
            firstChemoPos[0] += firstAlign
            firstRad = self._firstExp._regionsOfInterest['endReg']['rad']

            secChemoPos = np.ravel(np.fliplr(np.atleast_2d(
                self._secondExp._regionsOfInterest['endReg']['pos'])))
            secChemoPos[0] += secondAlign
            # Fixed: the original read the second radius from self._firstExp.
            secRad = self._secondExp._regionsOfInterest['endReg']['rad']

            secondImSeg = Image.fromarray(secondFrame)
            secondImRawDraw = ImageDraw.Draw(secondImSeg)
            secondImRawDraw.text((0, 0), self._cond2, font=fnt,
                                 fill=ImageColor.getrgb(self.SECOND_COLOR))
            countStr = "%d / %d (%d%%)" % \
                       (int(secondRoi._results['arrived'][frameNum]),
                        int(secondRoi._results['wormCount']),
                        int((secondRoi._results['arrived'][frameNum] * 100) / secondRoi._results['wormCount']))
            secondImRawDraw.text((0, 120), countStr, font=fnt,
                                 fill=ImageColor.getrgb(self.SECOND_COLOR))

            # Draw a thick circle around each end region.
            width = 10
            for d in range(width):
                firstImRawDraw.arc((firstChemoPos[0] - (firstRad + d),
                                    firstChemoPos[1] - (firstRad + d),
                                    firstChemoPos[0] + (firstRad + d),
                                    firstChemoPos[1] + (firstRad + d)),
                                   0, 360, fill=ImageColor.getrgb(self.FIRST_COLOR))
                secondImRawDraw.arc((secChemoPos[0] - (secRad + d),
                                     secChemoPos[1] - (secRad + d),
                                     secChemoPos[0] + (secRad + d),
                                     secChemoPos[1] + (secRad + d)),
                                    0, 360, fill=ImageColor.getrgb(self.SECOND_COLOR))

            firstFrame = np.asarray(firstImSeg)
            secondFrame = np.asarray(secondImSeg)
            fullFrame = np.concatenate((firstFrame, secondFrame), axis=1)

            if frameNum == -1:
                im = plt.imshow(fullFrame, aspect='auto')
            else:
                im.set_data(fullFrame)

            if frameNum == -1:
                # Init pass: draw the arrival curves once.
                fig.sca(ax_fig)
                plt.plot(firstRoi._results['arrivedFrac'],
                         label=" %s, %d worms" % (self._cond1, firstRoi._results['wormCount']),
                         color=self.FIRST_COLOR)
                plt.plot(secondRoi._results['arrivedFrac'],
                         label=" %s, %d worms" % (self._cond2, secondRoi._results['wormCount']),
                         color=self.SECOND_COLOR)
                plt.xlabel('Frames (2Hz)')
                plt.ylabel('Worms Arrived')
                fontP = FontProperties()
                fontP.set_size('x-small')
                ax_fig.grid(alpha=0.2)
                # plt.legend(prop=fontP)
            else:
                # Move the progress cursor along the curves.
                if vertLine is not None:
                    vertLine.remove()
                vertLine = ax_fig.axvline(x=frameNum, ymin=0, ymax=1)

            print('\rProcessed Frame %d / %d' % (frameNum, frameLength), end="")
            return (ax_vid, ax_fig)

        anim = FuncAnimation(fig, updateMovie, frames=range(frameLength - 1),
                             init_func=lambda: updateMovie(-1))
        anim.save(path.join(self._targetPath, 'exp_pair_vis.mp4'),
                  fps=70, extra_args=['-vcodec', 'libx264'], dpi=dpi)
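# A minimal usage sketch for ExpPair, following the pattern of main() above.
# The paths are placeholders, not real data; condition labels can also be
# parsed from the segmented-video filenames by leaving cond1/cond2 unset.
pair = ExpPair('/path/to/first/exp.npy', '/path/to/second/exp.npy',
               cond1='ATR+ (Experiment)', cond2='ATR- (Control)')
pair.createPairVisualization(numberOfFrames=4500, dpi=200)
pair.save()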
import cv2
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from Behavior.General.ExpDir import ExpDir
# Artifacts, and the free function alignImage (sketched below), are assumed
# to come from the project's own modules.


def createDemMovie(exp_dir):
    expDir = ExpDir(exp_dir)
    artifacts = Artifacts(expLocation=exp_dir)

    # Getting the frame intensity values, normalized to [0, 1].
    frame_intensities = artifacts.getArtifact('frame_intensities')
    frame_intensities /= np.max(frame_intensities)

    frame_range = range(1200, 3000)

    # cap = cv2.VideoCapture(expDir.getVidFile())
    cap = cv2.VideoCapture(expDir.getExpSegVid())
    cap.set(cv2.CAP_PROP_POS_FRAMES, np.min(frame_range))

    frame_intensities = frame_intensities[frame_range]

    plt.style.use('dark_background')
    plt.tight_layout()
    sns.set_context('paper')

    # A 4-to-1 height ratio between the video panel and the intensity figure.
    fig, axs = plt.subplots(2, 3, gridspec_kw={'height_ratios': [4, 1],
                                               'width_ratios': [1, 3, 1]})
    ax_vid = axs[0][1]
    ax_fig = axs[1][1]
    ax_vid.axis('off')
    axs[0][0].axis('off')
    axs[0][2].axis('off')
    axs[1][0].axis('off')
    axs[1][2].axis('off')

    # State shared with the animation callback.
    im = None
    vidAlign = 0
    vertLine = None

    def updateMovie(frameNumber):
        nonlocal im, vidAlign, vertLine

        print('Frame number: %d' % frameNumber)  # DEBUG
        cap.set(cv2.CAP_PROP_POS_FRAMES, frameNumber)  # DEBUG

        # Updating the video panel.
        fig.sca(ax_vid)
        _, frame = cap.read()
        frame = frame[480:1440, 1100:2300]
        if frameNumber == -1:
            # Init pass: compute the alignment and create the image artist.
            vidAlign = alignImage(frame)
            frame = np.roll(frame, vidAlign, axis=1)
            im = plt.imshow(frame)
            cap.set(cv2.CAP_PROP_POS_FRAMES, np.min(frame_range))
        else:
            frame = np.roll(frame, vidAlign, axis=1)
            im.set_data(frame)

        # Updating the intensity figure.
        fig.sca(ax_fig)
        if frameNumber == -1:
            plt.gca().grid(alpha=0.2)
            plt.plot(np.array(frame_range) * 0.5, frame_intensities,
                     linewidth=1.5, color=sns.xkcd_rgb['windows blue'], alpha=0.75)
            plt.xlabel('Time [s]')
            h = plt.ylabel('Power [au]')
            # h.set_rotation(0)
            plt.xlim(np.min(frame_range) * 0.5, np.max(frame_range) * 0.5)
        else:
            if vertLine is not None:
                vertLine.remove()
            vertLine = ax_fig.axvline(x=frameNumber * 0.5, ymin=0, ymax=1)

        return (ax_vid, ax_fig)

    anim = FuncAnimation(fig, updateMovie, frames=[1905],
                         init_func=lambda: updateMovie(-1))
    anim.save('/home/itskov/Dropbox/DemMovie2.mp4', fps=50,
              extra_args=['-vcodec', 'libx264'], dpi=250)
    cap.release()
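# alignImage is called as a free function here and in
# demonstrateLightingSegmentation below, but is not defined in this snippet.
# A minimal standalone sketch, adapted from the ExpPair.alignImage method above.
def alignImage(img):
    # Shift (for np.roll) that centers the nonzero content horizontally.
    left = np.min(np.where(img > 0)[1])
    right = img.shape[1] - np.max(np.where(img > 0)[1])
    return int(np.floor((left + right) / 2) - left)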
import cv2
import numpy as np
from PIL import Image, ImageDraw
from skvideo.io import FFmpegWriter
from Behavior.General.ExpDir import ExpDir
# Artifacts and alignImage are assumed to come from the project's own modules.


def demonstrateLightingSegmentation(exp_dir):
    expDir = ExpDir(exp_dir)
    artifacts = Artifacts(expLocation=exp_dir)

    # Getting the light-intensity artifact and a baseline from the first frames.
    light_intensities = artifacts.getArtifact('frame_intensities')
    base_line = np.mean(light_intensities[1:10])

    # vid_cap = cv2.VideoCapture(expDir.getVidFile())
    vid_cap = cv2.VideoCapture('/mnt/storageNASRe/tph1/12.03.20/12-Mar-2020-21.16.44-MIC2-TPH_1_ATR_ONLINE[NO_IAA]_5S.avi.mj2')
    seg_cap = cv2.VideoCapture(expDir.getExpSegVid())

    spike_count = 0
    in_spike = False
    in_spike_count = 0

    full_vid_handle = FFmpegWriter('/home/itskov/Dropbox/dem.mp4', outputdict={'-crf': '0'})

    # Reading the first frames; the segmented one is used to compute the alignment.
    _, first_frame = vid_cap.read()
    _, second_frame = seg_cap.read()

    # Aligning only the segmented version.
    seg_frame_align = alignImage(second_frame)

    start_frame = 700
    vid_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)
    seg_cap.set(cv2.CAP_PROP_POS_FRAMES, start_frame)

    for i in range(start_frame, 2000):
        _, vid_frame = vid_cap.read()
        _, seg_frame = seg_cap.read()

        vid_frame = cv2.cvtColor(vid_frame, cv2.COLOR_BGR2GRAY)
        vid_frame = cv2.cvtColor(vid_frame, cv2.COLOR_GRAY2BGR)

        vid_frame = np.roll(vid_frame, seg_frame_align, axis=1)
        seg_frame = np.roll(seg_frame, seg_frame_align, axis=1)

        if light_intensities[i] > (base_line + 7):
            in_spike = True
            in_spike_count += 1

            if in_spike_count == 6 and spike_count % 2 == 0 and spike_count > 0:
                # Wipe transition: slide the segmented frame over the raw frame
                # in vertical strips, then slide it back.
                splits_num = 80
                width = vid_frame.shape[1]
                segments = [tuple(s) for s in np.split(np.array(range(width)), splits_num)]
                # Fixed: the inner loop variable was also named 'i', shadowing
                # the frame index used by light_intensities[i].
                for j in range(2 * splits_num - 1):
                    bar_step = j % (splits_num * 2)
                    if bar_step > splits_num:
                        bar_step = splits_num - (bar_step - splits_num)
                    if 0 < bar_step < splits_num:
                        print(bar_step)
                        first_part = vid_frame[:, np.ravel(segments[bar_step:]), :]
                        second_part = seg_frame[:, np.ravel(segments[0:bar_step]), :]
                        new_frame = np.concatenate((second_part, first_part), axis=1)

                        cur_img = Image.fromarray(new_frame)
                        cur_imgdraw = ImageDraw.Draw(cur_img)
                        cur_imgdraw.ellipse((40, 40, 180, 180), fill='red', outline='red')
                        full_vid_handle.writeFrame(np.array(cur_img))
            else:
                # During a spike, mark the raw frame with a red indicator.
                cur_img = Image.fromarray(vid_frame)
                cur_imgdraw = ImageDraw.Draw(cur_img)
                cur_imgdraw.ellipse((40, 40, 180, 180), fill='red', outline='red')
                full_vid_handle.writeFrame(np.array(cur_img))
        else:
            if in_spike:
                in_spike = False
                spike_count += 1
                in_spike_count = 0
            full_vid_handle.writeFrame(vid_frame)

        print('Frame %d. Spike Count: %d' % (i, spike_count))

    vid_cap.release()
    seg_cap.release()
    full_vid_handle.close()
from pathlib import Path
from os import path
import numpy as np
from Behavior.General.ExpDir import ExpDir
# rootDir and the Artifacts / RoiAnalysis / ProjectionAnalyses / OccupVisualizer
# classes are assumed to be defined earlier in the original script.

for fileName in Path(rootDir).rglob('exp.npy'):
    try:
        print(fileName)
        dirName = path.dirname(fileName)

        # Load the experiment.
        exp = np.load(fileName, allow_pickle=True)[0]
        print(exp._scale)
        print(exp._regionsOfInterest)

        if exp._scale == 1:
            print('Bad scale. Skipping.')
            continue

        exp.initialize(ExpDir(dirName))
        exp.trimExperiment(4500)

        # Create an artifact folder and check for artifact dirs.
        art = Artifacts(exp, dirName)
        art.checkForArtifactsDir()

        roiAnalyses = RoiAnalysis(exp)
        projectionAnalyses = ProjectionAnalyses(exp)
        occupAnalyses = OccupVisualizer(exp)

        roiAnalyses.execute()
        projectionAnalyses.execute()
        occupAnalyses.execute(showPlot=False)
    except Exception as err:
        # The original except clause was not captured in this snippet;
        # assumed to log the failure and continue with the next experiment.
        print('Failed processing %s: %s' % (fileName, err))