Example #1
    def update_kymograph(self):
        tif = self.window.image
        if tif.ndim != 3:
            g.alert("Can only kymograph a 3d movie")
            return

        xx, yy = self.getMask()
        mt = len(tif)
        xx = np.array(xx)
        yy = np.array(yy)
        if len(xx) == 0:
            return
        mn = np.zeros((mt, len(xx)))
        for t in np.arange(mt):
            mn[t] = tif[t, xx, yy]
        mn = mn.T
        if self.kymograph is None:
            self.createKymograph(mn)
        else:
            self.kymograph.imageview.setImage(mn,
                                              autoLevels=False,
                                              autoRange=False)
Example #2
 def __call__(self,
              nSteps,
              first_volume,
              nVolumes,
              ratio_type,
              keepSourceWindow=False):
     self.start(keepSourceWindow)
     A = np.copy(self.tif).astype(np.float64)  # np.float was removed from NumPy; use float64
     mt, mx, my = A.shape
     mv = mt // nSteps  # number of volumes
     for i in range(nSteps):
         baseline = A[i + first_volume * nSteps:nVolumes * nSteps:nSteps]
         if ratio_type == 'average':
             baseline = np.average(baseline, 0)
         elif ratio_type == 'standard deviation':
             baseline = np.std(baseline, 0)
         else:
             g.alert(
                 "'{}' is an unknown ratio_type.  Try 'average' or 'standard deviation'"
                 .format(ratio_type))
             return None
         A[i::nSteps] = A[i::nSteps] / baseline
     self.newtif = A
     self.newname = self.oldname + ' - Ratioed by ' + str(ratio_type)
     return self.end()
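
A short worked sketch (hypothetical shapes and values, not part of the plugin) of the strided indexing used above: with nSteps = 2 the frames of step 0 are A[0::2] and the frames of step 1 are A[1::2], so each step is ratioed against a baseline built only from its own frames.

import numpy as np

nSteps, nVolumes, first_volume = 2, 5, 1
A = np.arange(nSteps * nVolumes * 4, dtype=np.float64).reshape(nSteps * nVolumes, 2, 2)
i = 0  # step index
# frames of step i, skipping the first `first_volume` volumes, as in the loop above
baseline = np.average(A[i + first_volume * nSteps:nVolumes * nSteps:nSteps], 0)
ratioed = A[i::nSteps] / baseline  # shape (nVolumes, 2, 2)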
Example #3
    def getTrace(self, bounds=None):
        '''Compute the average of the pixels within this ROI in its window

        Returns:
            Average value within ROI mask, as an array. Cropped to bounds if specified
        '''
        trace = None
        if self.window.image.ndim == 4 or self.window.metadata['is_rgb']:
            g.alert(
                "Plotting trace of RGB movies is not supported. Try splitting the channels."
            )
            return None
        s1, s2 = self.getMask()
        if np.size(s1) == 0 or np.size(s2) == 0:
            trace = np.zeros(self.window.mt)

        elif self.window.image.ndim == 3:
            trace = self.window.image[:, s1, s2]
            while trace.ndim > 1:
                trace = np.average(trace, 1)
        elif self.window.image.ndim == 2:
            trace = self.window.image[s1, s2]
            trace = [np.average(trace)]

        if bounds:
            trace = trace[bounds[0]:bounds[1]]
        return trace
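
A minimal usage sketch (hypothetical; `roi` is an instance of this class drawn on a 3D movie window); the bounds argument crops the returned trace to a frame range:

trace = roi.getTrace(bounds=(0, 100))  # per-frame ROI average for frames 0..99
if trace is not None:
    print(len(trace))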
Example #4
def refine_pts(pts, blur_window, sigma, amplitude):
    global halt_current_computation
    if blur_window is None:
        g.alert("Before refining points, you must select a 'blurred window'")
        return None, False
    new_pts = []
    old_frame = -1
    for pt in pts:
        new_frame = int(pt[0])
        if old_frame != new_frame:
            old_frame = new_frame
            blur_window.imageview.setCurrentIndex(old_frame)
            qApp.processEvents()
            if halt_current_computation:
                halt_current_computation = False
                return new_pts, False
        # fit a 2D Gaussian to a width x width patch around the point, then map the
        # fitted centre back into full-image coordinates using the patch corner
        width = 9
        mid = int(np.floor(width / 2))
        I, corner = cutout(pt, blur_window.image, width)
        xorigin = mid
        yorigin = mid
        p0 = [xorigin, yorigin, sigma, amplitude]
        fit_bounds = [(0, 9), (0, 9), (0, 4), (0, 1000)]
        p, I_fit, _ = fitGaussian(I, p0, fit_bounds)
        xfit = p[0] + corner[0]
        yfit = p[1] + corner[1]
        #                t,  old x, old y, new_x, new_y, sigma, amplitude
        new_pts.append([pt[0], pt[1], pt[2], xfit, yfit, p[2], p[3]])
    new_pts = np.array(new_pts)
    return new_pts, True
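
The coordinate bookkeeping above depends on a cutout() helper that is not shown here. A plausible sketch of its contract (an assumption, not the original implementation), treating the image as indexed [t, x, y]:

import numpy as np

def cutout_sketch(pt, image, width):
    """Return a width x width patch around (x, y) at frame t, plus the patch's
    top-left corner, so fitted coordinates can be mapped back to the full image."""
    t, x, y = int(pt[0]), int(pt[1]), int(pt[2])
    half = width // 2
    x0, y0 = max(x - half, 0), max(y - half, 0)
    return image[t, x0:x0 + width, y0:y0 + width], (x0, y0)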
Example #5
def makeROI(kind, pts, window=None, color=None, **kargs):
    """Create an ROI object in window with the given points

    Args:
        kind (str): one of ['line', 'rectangle', 'freehand', 'rect_line']
        pts ([N, 2] list of coords): points used to draw the ROI, differs by kind
        window (window.Window): window to draw the ROI in, or currentWindow if not specified
        color (QtGui.QColor): pen color of the new ROI
        **kargs: additional arguments to pass to the ROI __init__ function

    Returns:
        ROI Object extending ROI_Base
    """
    if window is None:
        window = g.win
        if window is None:
            g.alert(
                'ERROR: In order to make an ROI, a window needs to be selected'
            )
            return None

    if kind == 'freehand':
        roi = ROI_freehand(window, pts, **kargs)
    elif kind == 'rectangle':
        if len(pts) > 2:
            size = np.ptp(pts, 0)
            top_left = np.min(pts, 0)
        else:
            size = pts[1]
            top_left = pts[0]
        roi = ROI_rectangle(window, top_left, size, **kargs)
    elif kind == 'line':
        roi = ROI_line(window, pts, **kargs)
    elif kind == 'rect_line':
        roi = ROI_rect_line(window, pts, **kargs)
    elif kind == 'surround':
        if len(pts) > 2:
            size = np.ptp(pts, 0)
            top_left = np.min(pts, 0)
        else:
            size = pts[1]
            top_left = pts[0]
        roi = ROI_surround(window, top_left, size, **kargs)

    else:
        g.alert("ERROR: THIS KIND OF ROI COULD NOT BE FOUND: {}".format(kind))
        return None

    if color is None or not isinstance(color, QtGui.QColor):
        pen = QtGui.QPen(
            QtGui.QColor(g.settings['roi_color'])
            if g.settings['roi_color'] != 'random' else random_color())
    else:
        pen = QtGui.QPen(color)
    pen.setWidth(0)

    roi.drawFinished()
    roi.setPen(pen)
    return roi
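
A short usage sketch of the API described in the docstring (hypothetical window `win`; for a rectangle with two points, pts[0] is the top-left corner and pts[1] is the size):

roi = makeROI('rectangle', [[10, 10], [40, 30]], window=win)
if roi is not None:
    trace = roi.getTrace()  # getTrace as in Example #3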
Example #6
 def getPoints(self):
     if self.binary_window_selector.window is None:
         g.alert(
             'You must select a Binary Window before using it to determine where the points are.'
         )
     else:
         self.txy_pts = get_points(self.binary_window_selector.window.image)
         self.algorithm_gui.showPointsButton.setEnabled(True)
         nPoints = len(self.txy_pts)
         self.algorithm_gui.num_pts_label.setText(str(nPoints))
Example #7
    def start(self):
        #select window
        self.currentWin = self.getValue('active_window')
        if self.currentWin is None:
            g.alert('First select window')
            return

        #disconnect previous ROI (if exists)
        try:
            self.currentROI.sigRegionChanged.disconnect()
        except Exception:
            pass

        #disconnect previous time update (if exists)
        try:
            self.currentWin.sigTimeChanged.disconnect(self.update)
        except Exception:
            pass

        try:
            self.ROIwindow.close()
        except Exception:
            pass

        try:
            self.histoWindow.close()
        except Exception:
            pass

        #select current ROI
        self.currentROI = self.currentWin.currentROI

        if self.currentWin.currentROI is None:
            g.alert('First draw an ROI')
            return

        #set updates
        self.currentROI.sigRegionChanged.connect(self.update)
        self.currentWin.sigTimeChanged.connect(self.update)
        self.currentWin.imageview.scene.sigMouseClicked.connect(self.update)

        #start plotting
        self.startPlot()
        self.displayStarted = True

        #get stack data
        self.data = np.array(deepcopy(self.currentWin.image))

        #get histo range from whole stack
        self.startScale = np.min(self.data)
        self.endScale = np.max(self.data)

        self.update()
Example #8
def get_text_file(filename=None):
    if filename is None:
        filetypes = '.txt'
        prompt = 'Open File'
        filename = open_file_gui(prompt, filetypes=filetypes)
        if filename is None:
            return None
    else:
        # a non-None argument is ignored here; the filename stored in the global settings is used instead
        filename = g.settings['filename']
        if filename is None:
            g.alert('No filename selected')
            return None
    print("Filename: {}".format(filename))
    g.m.statusBar().showMessage('Loading {}'.format(
        os.path.basename(filename)))
    return filename
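
A minimal usage sketch (hypothetical caller code): prompt for a .txt file and read it.

filename = get_text_file()
if filename is not None:
    with open(filename) as f:
        text = f.read()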
Example #9
    def load_classifications(self, filename=None):
        if filename is None:
            filename = open_file_gui("Open classifications",
                                     filetypes='*.json')
        if filename is None:
            return None
        with codecs.open(filename, 'r', encoding='utf-8') as f:
            obj_text = f.read()
        data = json.loads(obj_text)
        roi_states = np.array(data['states'])

        if len(roi_states) != len(self.window_states):
            g.alert(
                'The number of ROIs in this file does not match the number of ROIs in the image. Cannot import classifications'
            )
        else:
            g.quantimus.roiStates = np.copy(roi_states)
            self.window_states = np.copy(roi_states)
            self.set_roi_states()
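
For reference, the JSON layout this method expects: a single 'states' array with one entry per ROI, in the same order as the ROIs in the image (hypothetical values):

# contents of a classifications .json file:
# {"states": [0, 1, 1, 0, 2]}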
Example #10
    def getTrace(self, bounds=None):
        '''Compute the average of the pixels within this ROI in its window, cropped to bounds if specified'''
        if self.window.image.ndim > 3 or self.window.metadata['is_rgb']:
            g.alert(
                "Plotting trace of RGB movies is not supported. Try splitting the channels."
            )
            return None
        if self.window.image.ndim == 3:
            region = self.getArrayRegion(self.window.imageview.image,
                                         self.window.imageview.getImageItem(),
                                         (1, 2))
            while region.ndim > 1:
                region = np.average(region, 1)
        elif self.window.image.ndim == 2:
            region = self.getArrayRegion(self.window.imageview.image,
                                         self.window.imageview.getImageItem(),
                                         (0, 1))
            region = [np.average(region)]

        if bounds:
            region = region[bounds[0]:bounds[1]]
        return region
Example #11
    def plotData(self):
        ### plot test result
        result_data = self.result_dict[0]  # just results for 1st trace
        numChunks = int(len(list(result_data.keys())) / 3)
        if numChunks > 40:
            g.alert(
                'More than 40 plots would be generated - aborting plotting')
            return

        for i in range(1, numChunks + 1):
            print('Plotting chunk {}'.format(str(i)))
            plt.figure(i)
            plt.subplot(211)
            plt.plot(result_data['chunk_{}'.format(str(i))])
            plt.xlabel("frame")
            plt.ylabel("DF/F0")
            plt.ylim(self.minTime, self.maxTime)  # the ymin/ymax keywords were removed from matplotlib

            plt.subplot(212)
            x = result_data['frequency_{}'.format(str(i))]
            y = result_data['power_{}'.format(str(i))]

            plt.scatter(x, y, s=8, c='blue')
            #plt.plot(result_data['frequency_{}'.format(str(i))],result_data['power_{}'.format(str(i))])
            plt.title("FFT analysis - chunk {}".format(str(i)))
            plt.xlabel("frequency")
            plt.ylabel("power")

            #add average line
            #y_mean = [np.mean(y)]*len(x)
            #plt.plot(x,y_mean, label='Mean', color='red')

            #plt.xscale('log')
            plt.yscale('log')
            #plt.xlim(xmin= 0.0001, xmax=self.X_max)
            plt.ylim(self.Y_min, self.Y_max)

            plt.show()

        return
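
For reference, the layout of result_dict[0] that the loop above assumes (hypothetical keys for the first trace; three keys per chunk, which is why numChunks divides the key count by 3):

# result_dict[0] = {
#     'chunk_1': [...], 'frequency_1': [...], 'power_1': [...],
#     'chunk_2': [...], 'frequency_2': [...], 'power_2': [...],
#     ...
# }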
Example #12
    def update_kymograph(self):
        tif = self.window.image
        if tif.ndim != 3:
            g.alert("Can only kymograph on 3D movies")
            return

        if self.width == 1:
            xx, yy = self.getMask()
            mn = tif[:, xx, yy].T
        else:
            region = self.getArrayRegion(self.window.imageview.image,
                                         self.window.imageview.getImageItem(),
                                         (1, 2))
            mn = np.average(region, 2).T

        if self.kymograph is None:
            self.createKymograph(mn)
        else:
            if mn.size > 0:
                self.kymograph.imageview.setImage(mn,
                                                  autoLevels=False,
                                                  autoRange=False)
Example #13
    def openTiff(self, filename):
        ext = os.path.splitext(filename)[-1]

        if ext in ['.tif', '.tiff', '.stk']:
            Tiff = tifffile.TiffFile(str(filename))

            A = Tiff.asarray()
            # read the axis labels before closing the file
            axes = [tifffile.AXES_LABELS[ax] for ax in Tiff.series[0].axes]
            Tiff.close()

        elif ext == '.czi':
            #import czifile
            czi = CziFile(filename)
            A = czi.asarray()
            czi.close()

            axes = [ax for ax in czi.axes]

            # remove singleton (length 1) axes from the array and drop their labels
            shape = A.shape
            A = A.squeeze()
            toRemove = []
            for n, i in enumerate(shape):
                if i == 1:
                    toRemove.append(n)

            delete_multiple_element(axes, toRemove)

            #convert labels to tiff format
            for n, i in enumerate(axes):
                if i == 'T':
                    axes[n] = 'time'
                elif i == 'C':
                    axes[n] = 'channel'
                elif i == 'Y':
                    axes[n] = 'height'
                elif i == 'X':
                    axes[n] = 'width'
                elif i == 'Z':
                    axes[n] = 'depth'

        else:
            msg = "Could not open.  Filetype for '{}' not recognized".format(
                filename)
            g.alert(msg)
            if filename in g.settings['recent_files']:
                g.settings['recent_files'].remove(filename)
            # make_recent_menu()
            return

        print(axes)

        if set(axes) == set(['time', 'depth', 'height',
                             'width']):  # single channel, multi-volume
            target_axes = ['time', 'depth', 'width', 'height']
            perm = get_permutation_tuple(axes, target_axes)
            A = np.transpose(A, perm)
            nScans, nFrames, x, y = A.shape

            #interleaved = np.zeros((nScans*nFrames,x,y))
            #
            #z = 0
            #for i in np.arange(nFrames):
            #    for j in np.arange(nScans):
            #        interleaved[z] = A[j%nScans][i]
            #        z = z +1
            #newWindow = Window(interleaved,'Loaded Tiff')

            A = A.reshape(nScans * nFrames, x, y)
            newWindow = Window(A, 'Loaded Tiff')

        elif set(axes) == set(['series', 'height',
                               'width']):  # single channel, single-volume
            target_axes = ['series', 'width', 'height']
            perm = get_permutation_tuple(axes, target_axes)
            A = np.transpose(A, perm)
            nFrames, x, y = A.shape
            A = A.reshape(nFrames, x, y)
            newWindow = Window(A, 'Loaded Tiff')

        elif set(axes) == set(['time', 'height',
                               'width']):  # single channel, single-volume
            target_axes = ['time', 'width', 'height']
            perm = get_permutation_tuple(axes, target_axes)
            A = np.transpose(A, perm)
            nFrames, x, y = A.shape
            A = A.reshape(nFrames, x, y)
            newWindow = Window(A, 'Loaded Tiff')

        elif set(axes) == set(['time', 'depth', 'channel', 'height',
                               'width']):  # multi-channel, multi-volume
            target_axes = ['channel', 'time', 'depth', 'width', 'height']
            perm = get_permutation_tuple(axes, target_axes)
            A = np.transpose(A, perm)
            B = A[0]
            C = A[1]

            n1Scans, n1Frames, x1, y1 = B.shape
            n2Scans, n2Frames, x2, y2 = C.shape

            B = B.reshape(n1Scans * n1Frames, x1, y1)
            C = C.reshape(n2Scans * n2Frames, x2, y2)

            channel_1 = Window(B, 'Channel 1')
            channel_2 = Window(C, 'Channel 2')

            #clear A array to reduce memory use
            A = np.zeros((2, 2))

        elif set(axes) == set(['depth', 'channel', 'height',
                               'width']):  # multi-channel, single volume
            target_axes = ['channel', 'depth', 'width', 'height']
            perm = get_permutation_tuple(axes, target_axes)
            A = np.transpose(A, perm)
            B = A[0]
            C = A[1]

            channel_1 = Window(B, 'Channel 1')
            channel_2 = Window(C, 'Channel 2')

            #clear A array to reduce memory use
            A = np.zeros((2, 2))

        elif set(axes) == set(['time', 'channel', 'height',
                               'width']):  # multi-channel, single volume
            target_axes = ['channel', 'time', 'width', 'height']
            perm = get_permutation_tuple(axes, target_axes)
            A = np.transpose(A, perm)
            B = A[0]
            C = A[1]

            channel_1 = Window(B, 'Channel 1')
            channel_2 = Window(C, 'Channel 2')

            #clear A array to reduce memory use
            A = np.zeros((2, 2))
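
A minimal sketch of what a helper like get_permutation_tuple is assumed to do here (an assumption based on how it is used with np.transpose, not the original implementation):

def get_permutation_tuple_sketch(src, dst):
    """Return the axis order that np.transpose needs to reorder `src` labels into `dst` labels."""
    return tuple(src.index(label) for label in dst)

# e.g. get_permutation_tuple_sketch(['channel', 'time', 'height', 'width'],
#                                   ['channel', 'time', 'width', 'height']) == (0, 1, 3, 2)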
Example #14
    def savetracksCSV(self):
        self.exportIntensity = True
        #Intensity value export option
        if self.blurred_window_selector.window is None:
            g.alert(
                'If you wish to export intensity values (the mean of a 3x3 pixel region around each x,y position), first set a Blurred Window. Otherwise only the x,y coordinates will be exported.'
            )
            self.exportIntensity = False

        if self.exportIntensity:
            self.dataArray = self.blurred_window_selector.window.imageArray()
            #get intensities
            self.getIntensities()

        tracks = self.points.tracks
        if isinstance(tracks[0][0], np.int64):
            tracks = [[a.item() for a in b] for b in tracks]  # np.asscalar was removed from NumPy; .item() is the replacement
        txy_pts = self.points.txy_pts.tolist()

        if self.exportIntensity:
            txy_intensities = self.points.intensities

        filename = save_file_gui("Save tracks as CSV", filetypes='*.csv')
        if filename is None:
            return

        #filter tracks list to only include linked tracks
        linkedTracks = [item for item in tracks if len(item) > 1]

        #get xy coordinates and intensities for linked tracks
        trackNumber = []

        txy_intensitiesByTrack = []

        # for i, indices in enumerate(linkedTracks):
        #     trackNumber.append( i )
        #     txy_ptsByTrack.append(list(txy_pts[j] for j in indices))
        #     #txy_intensitiesByTrack.append(list(txy_intensities[k] for k in indices))

        frameList = []
        xList = []
        yList = []

        for i, indices in enumerate(linkedTracks):
            ptsList = list(txy_pts[j] for j in indices)
            if self.exportIntensity:
                intensitiesList = list(txy_intensities[k] for k in indices)

            for pts in ptsList:
                trackNumber.append(i)
                frameList.append(pts[0])
                xList.append(pts[1])
                yList.append(pts[2])

            if self.exportIntensity:
                for intensity in intensitiesList:
                    txy_intensitiesByTrack.append(intensity)

        #make dataframe of tracks, xy coordinates and intensities for linked tracks

        if self.exportIntensity:
            data = {
                'track_number': trackNumber,
                'frame': frameList,
                'x': xList,
                'y': yList,
                'intensities': txy_intensitiesByTrack
            }

        else:
            data = {
                'track_number': trackNumber,
                'frame': frameList,
                'x': xList,
                'y': yList
            }

        self.linkedtrack_DF = pd.DataFrame(data)

        #save df as csv
        self.linkedtrack_DF.to_csv(filename, index=False)

        print('CSV file {} saved'.format(filename))
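
For reference, the CSV written above has one row per point of each linked track (hypothetical values; the 'intensities' column is only present when a blurred window was set):

# track_number, frame, x,    y,    intensities
# 0,            3,     12.4, 55.1, 830.0
# 0,            4,     12.9, 55.6, 812.5
# 1,            7,     80.2, 14.3, 640.1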
Example #15
 def save(self):
     self.nosePointsArray
     g.alert(
         """Save not implemented. To access the data, in a console use the variables \n\ng.rodentTracker.nosePointsArray\ng.rodentTracker.headVectors """
     )