Example No. 1
def plot_image(fig, ax, imageId, img_key, selected_channels=None):
    '''
    Plot get_images(imageId)[img_key] on the given figure/axis.
    Optional: select which channels of the image are used (useful for sixteen_band/ images).
    Parameters
    ----------
    img_key : str, {'3', 'P', 'N', 'A'}
        See get_images for description.
    '''
    images = get_images(imageId, img_key)
    img = images[img_key]
    title_suffix = ''
    if selected_channels is not None:
        img = img[selected_channels]
        title_suffix = ' (' + ','.join([repr(i)
                                        for i in selected_channels]) + ')'
    if len(img.shape) == 2:
        new_img = np.zeros((3, img.shape[0], img.shape[1]))
        new_img[0] = img
        new_img[1] = img
        new_img[2] = img
        img = new_img

    tiff.imshow(img, figure=fig, subplot=ax)
    ax.set_title(imageId + ' - ' + img_key + title_suffix)
    ax.set_xlabel(img.shape[-2])
    ax.set_ylabel(img.shape[-1])
    ax.set_xticks([])
    ax.set_yticks([])
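A minimal usage sketch (assuming the module's get_images, np and tiff imports are in scope; the image id below is a hypothetical placeholder):

import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(8, 8))
# show the first three channels of the 'A' band stack for a placeholder image id
plot_image(fig, ax, '6120_2_2', 'A', selected_channels=[0, 1, 2])
plt.show()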
Example No. 2
def main(argv=None):
    """Command line usage main function."""
    if argv is None:
        argv = sys.argv
    if len(argv) < 2:
        filename = askopenfilename(title='Select a CZI file',
                                   multiple=False,
                                   filetypes=[('CZI files', '*.czi')])
    else:
        filename = argv[1]
    if not filename:
        return

    timer = Timer()
    with CziFile(filename) as czi:
        timer.stop()
        print(czi)
        print()
        timer.print('Opening file: ')
        timer.start('Reading image:')
        data = czi.asarray()
        timer.print()

    from matplotlib import pyplot  # NOQA: delay import

    imshow(data, title=os.path.split(filename)[-1])
    pyplot.show()
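A hedged usage note: the reader can be driven from a shell or called directly; the script and file names below are placeholders:

# from a shell (a file dialog opens if no argument is given):
#   python czi_view.py sample.czi
# or programmatically, bypassing sys.argv:
main(['czi_view.py', 'sample.czi'])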
Example No. 3
 def StackInt(self, start, stop, step=0.2):
     no = int((stop - start) / step) + 1
     pos = start
     xs = self.ccd.image_size[0]
     ys = self.ccd.image_size[1]
     self.data = N.zeros((no, xs, ys), dtype=N.uint16)
     self.ccd.SetShutterMode(1)
     for p in range(no):
         self.zst.setPositionf(pos)
         q = self.ccd.Acquire()
         q = self.ccd.WaitForNewData()
         q = self.ccd.AbortAcquisition()
         self.data[p] = self.ccd.images
         pos += step
     self.ccd.AbortAcquisition()
     self.ccd.SetShutterMode(2)
     cur_pos = self.prior.getPosition()
     self.stackTags(cur_pos[0],
                    cur_pos[1],
                    start,
                    stop,
                    step,
                    function='Z-Stack')
     T.imshow(self.data, vmin=self.data.min(), vmax=self.data.max())
     return True
Example No. 4
 def singleSnapExt(self, verbose=True):
     pos = self.zst.getPosition()
     xs = self.ccd.image_size[0]
     ys = self.ccd.image_size[1]
     self.data = N.zeros((xs, ys), dtype=N.uint16)
     self.ccd.SetShutterMode(1)
     self.ccd.Acquire()
     time.sleep(0.2)  # was 0.05
     daq.CCDTrig_run(self.handleA, self.handleB)
     self.ccd.WaitForNewData()
     self.data[:, :] = self.ccd.images
     self.ccd.AbortAcquisition()
     self.ccd.SetShutterMode(2)
     if verbose:
         T.imshow(self.data, vmin=self.data.min(), vmax=self.data.max())
     cur_pos = self.prior.getPosition()
     xx = cur_pos[0]
     yy = cur_pos[1]
     self.stackTags(xx,
                    yy,
                    z1=pos,
                    z2=pos,
                    zs=0.,
                    function='Single snap',
                    ps=89)
     return True
Example No. 5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('in_path', help='input tiff file or directory')
    parser.add_argument('min_size',
                        help='minimum w*h*d',
                        nargs='?',
                        default=0,
                        type=int)
    args = parser.parse_args()

    files = []
    if os.path.isdir(args.in_path):
        for r, _, fnames in os.walk(args.in_path):
            for fname in fnames:
                files.append(r + '/' + fname)
    elif os.path.isfile(args.in_path):
        files.append(args.in_path)

    print('Found {} file{}'.format(len(files), '' if len(files) == 1 else 's'))
    for f in files:
        img = tifffile.imread(f)
        d, h, w = img.shape
        if d * h * w >= args.min_size:
            print(f, w, h, d)
            tifffile.imshow(img, title=f)
            # tifffile.imshow(img,title=f,cmap=plt.cm.magma)
            plt.show()
Example No. 6
def VecZernDecomp(bpp, nx, radius, verbose=False, phase=True):
    ''' The output is the amplitude of the different Zernike components in radians,
        where the Zernikes are normalized to an RMS amplitude of 1. '''
    #    if p==None:
    #        nx = bpp.shape[0]
    #        radius = nx/2
    #    else:
    #        nx = p.Nx#params['Nx']
    #        dx = p.dx#params['dx']
    #        wl = p.wl#params['wl']
    #        nap = p.na #params['na']
    #        n2 = p.n2# params['n2']
    #        dp = 1/(nx*dx)
    #        radius = (2*nap/wl)/2/dp
    #        factor = nx/2./radius
    #########################
    if phase:
        phi = bpp
    else:
        phi = N.angle(bpp)
    dx, dy = diff(phi, radius)
    if verbose:
        T.imshow(dx, vmax=dx.max(), vmin=dx.min())
        T.imshow(dy, vmax=dx.max(), vmin=dx.min())
    scoeff = [0]
    for j in range(1, Nzern):
        sx, sy = getS(j)
        t = (sx(nx, nx, radius) * dx - sy(nx, nx, radius) * dy).sum() / (
            N.pi * (radius)**2)  #changed to minus
        if verbose: print(j, t)
        scoeff.append(t)
    return N.array(getZc(scoeff))
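A usage sketch, assuming the module-level helpers used above (getS, diff, getZc, Nzern, and N as the numpy alias) are available; the 256 x 256 grid, 100-pixel pupil radius and quadratic test phase are arbitrary choices:

nx, radius = 256, 100
yy, xx = N.mgrid[-nx // 2:nx // 2, -nx // 2:nx // 2]
pupil = (xx ** 2 + yy ** 2) <= radius ** 2
phi = 1e-3 * (xx ** 2 + yy ** 2) * pupil       # defocus-like test phase in radians
coeffs = VecZernDecomp(phi, nx, radius, verbose=False, phase=True)
print(coeffs[:6])                              # first few Zernike amplitudes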
Example No. 7
def make_film(images):
    images = [normalise(i[:, 125:275, 125:275]) for i in images]

    print([i.max() for i in images])
    print([i.shape for i in images])
    film_data1 = np.concatenate(images[0:2], axis=2)
    print(film_data1.shape)
    film_data2 = np.concatenate(images[2:], axis=2)
    print(film_data2.shape)

    film_data = np.concatenate([film_data1, film_data2], axis=1)
    print(film_data.shape)

    film_data[film_data < 0] = 0
    print(film_data.shape)
    outpath = os.path.join(outdir, 'movie.gif')
    imageio.mimsave(outpath, film_data)
    quit()  # note: this early exit means the still-frame export below is never reached
    print(outpath)
    for frame in range(15, film_data.shape[0]):
        still_frame = film_data[frame]
        imshow(still_frame)
        plt.show()
        outpath = os.path.join(outdir, f'movie_still_{frame}.png')
        Image.fromarray(still_frame * 255).convert("L").save(outpath)

    print(outpath)
    quit()
Example No. 8
def view(args):
    import tifffile as tf
    import matplotlib.pyplot as plt

    if args.file.endswith(".tif") or args.file.endswith(".tiff"):
        print(f"loading {args.file}")
        im = tf.imread(args.file)

    elif os.path.isdir(args.file):
        from glob import glob

        im0 = glob(
            os.path.join(args.file, "**",
                         "*{:04d}*Pos0*.tif".format(args.t)))[0]
        print("loading timepoint {}, pos {}".format(args.t, args.p))
        im = tf.imread(im0, series=args.p)

    if args.max:
        tf.imshow(
            im.max(0),
            vmax=im.max() * args.contrast,
            cmap="gray",
            photometric="minisblack",
        )
    else:
        tf.imshow(im,
                  vmax=im.max() * args.contrast,
                  cmap="gray",
                  photometric="minisblack")
    plt.show()
Example No. 10
 def TimeLapseExt(self, no=200, pol=0, verbose=True):
     pos = self.zst.getPosition()
     xs = self.ccd.image_size[0]
     ys = self.ccd.image_size[1]
     self.data = N.zeros((no, xs, ys), dtype=N.uint16)
     self.ccd.SetShutterMode(1)
     q = self.ccd.Acquire()
     time.sleep(0.01)
     self.zst.setPositionf(pos)
     self.pol.MoveAbs(pol)
     for p in range(no):
         self.zst.setPositionf(pos)
         time.sleep(0.01)
         daq.CCDTrig_run(self.handleA, self.handleB)
         q = self.ccd.WaitForNewData()
         print(p, q)
         self.data[p] = self.ccd.images
         time.sleep(0.01)
     self.ccd.AbortAcquisition()
     self.ccd.SetShutterMode(2)
     if verbose:
         T.imshow(self.data, vmin=self.data.min(), vmax=self.data.max())
     cur_pos = self.prior.getPosition()
     xx = cur_pos[0]
     yy = cur_pos[1]
     self.stackTags(xx,
                    yy,
                    pos,
                    pos,
                    zs=0.,
                    function='Time-Lapse widefield',
                    ps=89)
     return True
Example No. 11
def lastimage(n):
    hdr = db[-n]
    for doc in hdr.documents(fill=True):
        data1 = doc[1].get('data')
        if data1 is not None:
            light_img = data1['pe1c_image']

    dark_uid = hdr.start.get('sc_dk_field_uid')
    dk_hdrs = db(uid=dark_uid)
    for dk_hdr in dk_hdrs:
        for doc in dk_hdr.documents(fill=True):
            dk_data1 = doc[1].get('data')
            if dk_data1 is not None:
                dk_img = dk_data1['pe1c_image']

    I = light_img - dk_img
    imshow(I, vmax=(I.sum() / (2048 * 2048)), cmap='jet')
    imsave(
        "/nsls2/xf28id1/xpdacq_data/user_data/tiff_base/" + "dark_sub_image" +
        ".tiff", light_img - dk_img)
    imsave(
        "/nsls2/xf28id1/xpdacq_data/user_data/tiff_base/" + "dark_image" +
        ".tiff", dk_img)
    imsave(
        "/nsls2/xf28id1/xpdacq_data/user_data/tiff_base/" + "light_image" +
        ".tiff", light_img)
Example No. 12
 def show(self):
     if self.img is not None:
         tiff.imshow(self.img)
         plt.show()
     if self.RGB_mask is not None:
         tiff.imshow(self.RGB_mask)
         plt.show()
Example No. 13
def noise_movie(frame_filter, width_filter, height_filter, is_plot=False):
    """
    creating a numpy array with shape [len(frame_filter), len(height_filter), len(width_filter)]

    this array is random noise filtered by these three filters in Fourier domain
    each pixel of the movie have the value in [0. - 1.]
    """

    raw_mov = np.random.rand(len(frame_filter), len(height_filter), len(width_filter))

    raw_mov_fft = np.fft.fftn(raw_mov)

    filter_x = np.repeat(np.array([width_filter]), len(height_filter), axis=0)
    filter_y = np.repeat(np.transpose(np.array([height_filter])), len(width_filter), axis=1)

    filter_xy = filter_x * filter_y

    for i in range(raw_mov_fft.shape[0]):
        raw_mov_fft[i] = frame_filter[i] * (raw_mov_fft[i] * filter_xy)

    filtered_mov = np.real(np.fft.ifftn(raw_mov_fft))

    movie = bas.array_nor(filtered_mov)

    if is_plot:
        tf.imshow(movie, vmin=0, vmax=1, cmap='gray')

    return movie
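A usage sketch for noise_movie, under the assumption that each filter is a 1-D gain curve defined over the FFT frequencies of its axis (the Gaussian width below is arbitrary):

import numpy as np

n_frames, height, width = 64, 128, 128
sigma = 0.05  # arbitrary low-pass width in cycles/sample
frame_filter = np.exp(-np.fft.fftfreq(n_frames) ** 2 / (2 * sigma ** 2))
height_filter = np.exp(-np.fft.fftfreq(height) ** 2 / (2 * sigma ** 2))
width_filter = np.exp(-np.fft.fftfreq(width) ** 2 / (2 * sigma ** 2))
movie = noise_movie(frame_filter, width_filter, height_filter, is_plot=True)
print(movie.shape)  # (64, 128, 128)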
Example No. 14
def P(image_id):

    filename = os.path.join(inDir, 'sixteen_band', '{}_P.tif'.format(image_id))
    img = tiff.imread(filename)
    img = np.rollaxis(img, 0, 3)
    tiff.imshow(stretch_n(img.reshape(img.shape[0], img.shape[1], 1)))  # 'pan' was undefined in the original; 'img' is the array read above
    return img
Example No. 15
def orthomat(jm):
    q = F.zeroArrF((jm, jm))
    for m in range(1, jm):
        for n in range(1, jm):
            q[m, n] = testortho(m, n)
    T.imshow(q)
    return q
Example No. 16
 def StackExt(self, start, stop, step=0.2, verbose=True):
     init_loc = self.zst.getPosition()
     no = int((stop - start) / step) + 1
     pos = start
     xs = self.ccd.image_size[0]
     ys = self.ccd.image_size[1]
     self.data = N.zeros((no, xs, ys), N.uint16)
     self.ccd.SetShutterMode(1)
     q = self.ccd.Acquire()
     time.sleep(0.2)  # was 0.05
     for p in range(no):
         self.zst.setPositionf(pos)
         daq.CCDTrig_run(self.handleA, self.handleB)
         q = self.ccd.WaitForNewData()
         print(p, q)
         self.data[p] = self.ccd.images
         pos += step
         time.sleep(self.delay)
     self.ccd.AbortAcquisition()
     self.ccd.SetShutterMode(2)
     if verbose:
         T.imshow(self.data, vmin=self.data.min(), vmax=self.data.max())
     cur_pos = self.prior.getPosition()
     self.stackTags(cur_pos[0], cur_pos[1], start, stop, step, function='Z-Stack')
     self.zst.setPositionf(init_loc)
     return True
Example No. 17
    def Stack_Sectioning(self, start, stop, step=0.2, verbose=True):
        no = int((stop - start) / step) + 1
        pos = start
        xs = self.ccd.image_size[0]
        ys = self.ccd.image_size[1]
#        psz = qx.getordernum()
        psz = 3
        self.data = N.zeros((psz * no, xs, ys), dtype=N.uint16)
        self.ccd.SetShutterMode(1)
        q = self.ccd.Acquire()
        self.pol.MoveAbs(0)
        time.sleep(0.4)
        for p in range(no):
            self.zst.setPositionf(pos)
            for m in range(psz):
                qx.selecteorder(15 + m)
                qx.activate()
                time.sleep(0.02)
                daq.CCDTrig_run(self.handleA, self.handleB)
                q = self.ccd.WaitForNewData()
                print(p, q)
                self.data[psz * p + m] = self.ccd.images
                qx.deactivate()
                time.sleep(0.02)
            pos += step
        self.ccd.AbortAcquisition()
        self.ccd.SetShutterMode(2)
        if verbose:
            T.imshow(self.data, vmin=self.data.min(), vmax=self.data.max())
        cur_pos = self.zst.getPosition()
        self.stackTags(cur_pos, start, stop, step, function='Z-Stack patterns')
        return True
Example No. 18
def show_image(im, ms, number, name=""):
    """
    Outputs a plot with multiple subplots showing the original crop in RGB and the masks for all 10 classes.
    """
    image = np.zeros((im.shape[0], im.shape[1], 3))
    image[:, :, 0] = im[:, :, 0]  # red
    image[:, :, 1] = im[:, :, 1]  # green
    image[:, :, 2] = im[:, :, 2]  # blue
    classes = ["Buildings", "Misc. structures", "Road", "Track", "Trees", "Crops", "Waterway",
               "Standing Water", "Vehicle Large", "Vehicle Small"]
    f, (axarr) = plt.subplots(3, 4, sharey=True, figsize=(20,15))
    counter = 0
    for j in range(3):
        for k in range(4):
            if (j==0) & (k==0):
                tiff.imshow(image, figure=f, subplot=axarr[0,0])
                plt.grid("off")
                plt.title("Raw Image", size=22)
                continue
            elif (j==2) & (k==3):
                pass
            else:
                msk = ms[:,:,counter]
                tiff.imshow(255*np.stack([msk,msk,msk]), figure=f, subplot=axarr[j,k])
                plt.grid("off")
                # plt.title(name, size=22)
                plt.title("{} Mask".format(classes[counter]), size=22)
                counter += 1
    plt.grid("off")
    os.makedirs("../plots", exist_ok=True)
    plt.savefig("../plots/Crop_{}_{}.png".format(number, name), bbox_inches="tight", pad_inches=1)
    plt.clf()
    plt.cla()
    plt.close()
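A call sketch with stand-in data, assuming im is an H x W x 3 crop, ms an H x W x 10 stack of per-class masks, and the module's tiff/plt/os/np imports are in scope; the arrays below are synthetic placeholders:

import numpy as np

im = np.random.rand(160, 160, 3)                              # stand-in RGB crop
ms = (np.random.rand(160, 160, 10) > 0.9).astype(np.float32)  # stand-in binary class masks
show_image(im, ms, number=0, name="demo")                     # writes ../plots/Crop_0_demo.png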
Example No. 19
def show_raster(raster):
    import tifffile as tiff
    from matplotlib import pyplot as plt

    fig, ax = plt.subplots(figsize=(8, 8))

    tiff.imshow(raster, figure=fig, subplot=ax)
    plt.show()
Example No. 20
 def compare_masks(self):
     if self.predicted_RGB_mask is not None:
         tiff.imshow(self.predicted_RGB_mask)
         plt.show()
         tiff.imshow(self.RGB_mask)
         plt.show()
     else:
         print('There is no predicted mask!')
Example No. 21
 def show_grid(self, part='delta'):
     import tifffile
     if part == 'delta':
         tifffile.imshow(self.grid_delta)
     elif part == 'beta':
         tifffile.imshow(self.grid_beta)
     else:
         warnings.warn('Wrong part specified for show_grid.')
Example No. 22
 def averageAll(self, verbose=True):
     nz, ny, nx = self.tmp.shape
     avg = N.zeros((ny, nx), dtype=N.float64)
     for i in range(nz):
         avg += self.tmp[i] / nz
     if verbose:
         T.imshow(avg)
     self.avg = avg
Example No. 23
def show_image_with_mask(imageId, maskAlpha=0.4, figsize=(10, 10)):
    fig, ax = plt.subplots(figsize=figsize)

    img = stretch_n(M(imageId))
    mask = generate_mask_for_image_and_class(img.shape[:2], imageId)

    tifffile.imshow(img, figure=fig, subplot=ax)
    ax.imshow(mask, alpha=maskAlpha)
    ax.axis("off")
    plt.show()
Example No. 24
def testortho(j1, j2, verbose=False):
    nx = 256
    ny = 256
    rad = 100
    sx1, sy1 = getS(j1)
    sx2, sy2 = getS(j2)
    dp = (sx1(nx, ny, rad) * sx2(nx, ny, rad) +
          sy1(nx, ny, rad) * sy2(nx, ny, rad))
    if verbose: T.imshow(dp)
    return (dp.sum() / (N.pi * rad**2))  #(nx*ny))
Example No. 25
def imshowpair(im1, im2, method=None, mip=False, **kwargs):
    # normalize
    if not im1.shape == im2.shape:
        raise ValueError("images must be same shape")

    if not mip:
        try:
            from tifffile import imshow
        except ImportError:
            imshow = plt.imshow
            mip = True
    else:
        imshow = plt.imshow
        if im1.ndim < 3:
            mip = False

    if method == "diff":
        imshow(imoverlay(im1, im2, "diff"), cmap="gray", vmin=0.2, vmax=0.8)
    elif method == "3D":
        im3 = imoverlay(im1, im2)
        fig, subpl, ax = imshow(im3, subplot=221)
        imshow(np.rot90(im3.max(1)), figure=fig, subplot=222)
        imshow(im3.max(2), figure=fig, subplot=223)
    else:  # falsecolor
        imshow(imoverlay(im1, im2))
    plt.show()
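A usage sketch with synthetic stacks; imoverlay is assumed to be the companion helper from the same module:

import numpy as np

a = np.random.rand(32, 64, 64)    # hypothetical 3-D stack
b = np.roll(a, 3, axis=1)         # shifted copy so the overlay shows a visible offset
imshowpair(a, b, method="diff")   # grayscale difference view
imshowpair(a, b, method="3D")     # orthogonal max-projection overlays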
Example No. 26
def show_image(imageId, fig=None, ax=None):
    do_show = False
    if fig is None or ax is None:
        fig, ax = plt.subplots(figsize=(10, 10))
        do_show = True
    img = M(imageId)
    img_color_stretched = stretch_n(img)
    tifffile.imshow(img_color_stretched, figure=fig, subplot=ax)
    ax.axis("off")
    if do_show:
        plt.show()
Example No. 27
def show_image_channel(imageId, channel="M", fig=None, ax=None):
    do_show = False
    if fig is None or ax is None:
        fig, ax = plt.subplots(figsize=(10, 10))
        do_show = True
    img = M_sixteen_band(imageId, channel=channel)
    img_color_stretched = stretch_n(img)
    tifffile.imshow(img_color_stretched, figure=fig, subplot=ax)
    ax.axis("off")
    if do_show:
        plt.show()
Example No. 28
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('in_path', help='input tiff file or directory')
    parser.add_argument('min_size', help='minimum w*h*d', nargs='?', default=0, type=int)
    args = parser.parse_args()

    files = []
    if os.path.isdir(args.in_path):
        for r, _, fnames in os.walk(args.in_path):
            for fname in fnames:
                files.append(r + '/' + fname)
    elif os.path.isfile(args.in_path):
        files.append(args.in_path)

    resol, stepsize, C = 256, 64, 0

    print('Found {} file{}'.format(len(files), '' if len(files) == 1 else 's'))
    for f in files:

        img = tf.imread(f)
        d, h, w = img.shape
        print(f, w, h, d, args.min_size, img.nbytes // 1e6)
        img = tf.transpose_axes(img, 'YXZ', 'XYZ')
        d, h, w = img.shape
        aux = 1
        cmap = matplotlib.cm.get_cmap('inferno', stepsize)
        bounds = list(range(0, resol, resol // stepsize))
        bounds.append(resol)
        norm = matplotlib.colors.BoundaryNorm(bounds, cmap.N)

        if C:
            npz = np.arange(resol).astype('uint8')
            z11 = np.array([3.17108944e-04, 9.92360110e-01, 1.61116136e+00])
            for i in range(len(npz)):
                aux = round(z11[0] * npz[i] * npz[i] + z11[1] * npz[i] + z11[2])
                if aux < 256 and aux > 0:
                    npz[i] = int(aux)
                elif aux > 255:
                    npz[i] = 255
                else:
                    npz[i] = 0

            with np.nditer(img, flags=['external_loop'], op_flags=['readwrite']) as it:
                for x in it:
                    x[...] = npz[x]

        print(f, w, h, d, args.min_size)

        if d * h * w >= args.min_size:
            tf.imshow(img, cmap=cmap, norm=norm, title=f, origin='lower')
            # tf.imshow(img,title=f,cmap=plt.cm.magma)
            plt.show()

        C = 1
Example No. 29
def process_challenge_data(imname, image):
    image = normalise(image)
    image = np.fliplr(np.flipud(image))

    # lateral crop
    image = image[:, 250:400, 125:275]
    # sum along y
    image = image.mean(axis=1)
    image = image.astype(np.float32)
    imshow(image)
    plt.show()
    return image
Example No. 30
def transform_img(impath, im_type):
    img = imread(impath)
    img = img / img.max()
    img = img[:, :, :]
    img = img[40:110, 240, 180:320].transpose()

    imshow(img)
    plt.title(im_type)
    plt.show()

    # 'imname' is assumed to be defined at module level (e.g. derived from impath)
    outname = os.path.join(outdir, f"{imname.replace('out.tif', '')}{im_type}_yz.png")
    Image.fromarray(img * 255).convert("L").save(outname)
    print(outname)
Example No. 31
def plot_mask(mask_data, figure=None, subplot=111, title=None):
    """Adopted from https://www.kaggle.com/lopuhin/dstl-satellite-imagery-feature-detection/full-pipeline-demo-poly-pixels-ml-poly"""
    import matplotlib.pyplot as plt
    import tifffile as tiff

    mask_plot_data = 255 * np.stack([mask_data, mask_data, mask_data])

    tiff.imshow(mask_plot_data, figure=figure, subplot=subplot)

    if title is not None:
        plt.title(title)

    plt.show()
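Usage sketch with a toy binary mask (the array and title are placeholders):

import numpy as np

mask = np.zeros((256, 256), dtype=np.uint8)
mask[64:192, 64:192] = 1                 # a square footprint
plot_mask(mask, title='Buildings mask')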
Example No. 32
    def show_image(self, name):
        """Show image on image view

        :param name: image file path
        :type name: str
        """
        # size = self.imageView.get_allocation()
        # pixbuf = GdkPixbuf.Pixbuf.new_from_file_at_size(name, size.width, size.height)
        # self.imageView.set_from_pixbuf(pixbuf)
        old_viewport = self.imageScrolled.get_child()
        if old_viewport:
            old_viewport.destroy()
        old_viewport = self.imageBox.get_child()
        if old_viewport:
            old_viewport.destroy()
        with TiffFile(name) as img:
            fig = imshow(img.asarray())[0]
            canvas = FigureCanvas(fig)
            self.imageScrolled.add_with_viewport(canvas)
            toolbar = NavigationToolbar(canvas, self.win)
            self.imageBox.add_with_viewport(toolbar)
            pyplot.close(fig)

            self.shape = img.asarray().shape
            self.xmax.set_range(self.xmin.get_value_as_int() + 1, self.shape[0])
            self.ymax.set_range(self.ymin.get_value_as_int() + 1, self.shape[1])

        self.imageScrolled.show_all()
Example No. 33
def run(config, show_output=False):
    """
    Runs the simulation using the given config.
    For more information on the available parameters, see configspecs.ini and the readme.

    Args:
        config: dictionary
            the configuration dict output by the configobj library
        show_output: boolean
            if True, shows the output in a new window
    """
    gt_params = config['groundtruth']
    volume_dim = gt_params['bounds']
    voxel_dim = gt_params['voxel_dim']
    #Remove voxel dim so that we can pass gt_params to the load_gt function
    del gt_params['voxel_dim']

    print "Loading data..."
    gt_dataset = load_gt(**gt_params)

    print "Labeling..."
    labeling_params = config['labeling']
    labeled_volumes, labeled_cells = label(gt_dataset, volume_dim, voxel_dim, labeling_params)

    print "Imaging..."
    expansion_params = config['expansion']
    optics_params = config['optics']
    volumes = resolve(labeled_volumes, volume_dim, voxel_dim, expansion_params, optics_params)
    print "Saving..."
    #Save to desired output
    output_params = config['output']
    save(volumes, **output_params)
    save_gt(gt_dataset, labeled_cells, volume_dim, volumes[0].shape, voxel_dim,\
            expansion_params, optics_params, **output_params)
    print "Done!"
    if show_output:
        imshow(np.moveaxis(np.array(volumes), 0, 3))
        plt.show()
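A hedged driver sketch using configobj, with placeholder file names taken from the docstring; in practice the config would also be validated against configspecs.ini so that numeric options are coerced to the right types:

from configobj import ConfigObj

config = ConfigObj('simulation.ini', configspec='configspecs.ini')
run(config, show_output=True)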
Example No. 34
import numpy as np
import pandas.io.sql as psql
import pandas.io.parsers as pp
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import tifffile as tiff
import scipy.signal as signal
from matplotlib.colors import LogNorm


inPath = '/Users/mpopovic/Documents/Work/Projects/drosophila_wing_analysis/height_maps/'
inFile = 'HeightMa.png'
inFile = 'HM_Stitch_Time_196.tif'

a = tiff.imread(inPath+inFile)


tiff.imshow(a)
plt.show()

gx, gy = np.gradient(a)
gg = np.sqrt(gx**2+gy**2)
gx_cut = gx
gx.max()
bin = np.arange(18)
hist, bins = np.histogram(gx, bin)

# smooth_20_gx / smooth_20_gy / smooth_30_gx are assumed to be smoothed gradient maps computed elsewhere
tiff.imshow(smooth_30_gx[500:2000, 500:1500], cmap='hot')
tiff.imshow(np.exp(smooth_20_gx), cmap='gist_rainbow')
tiff.imshow(np.exp(smooth_20_gy), cmap='gist_rainbow')
tiff.imshow(a, cmap='gist_rainbow')
plt.show()
Example No. 35
# assuming standalone Keras; tensorflow.keras.callbacks provides the same classes
from keras.callbacks import ModelCheckpoint, CSVLogger

N_EPOCHS = 100
BATCH_SIZE = 100
# ask Keras to save best weights (in terms of validation loss) into file:
model_checkpoint = ModelCheckpoint(filepath='weights_simple_unet_2.hdf5', monitor='val_loss', save_best_only=True)
# ask Keras to log each epoch loss:
csv_logger = CSVLogger('log_2.csv', append=True, separator=';')
# ask Keras to log info in TensorBoard format:, but right now we dont need to check the TF graph
#tensorboard = TensorBoard(log_dir='tensorboard_simple_unet/', write_graph=True, write_images=True)
# Fit:
np.random.seed(1)
model.fit(x_aug_input, y_aug_input, batch_size=BATCH_SIZE, epochs=N_EPOCHS,
          verbose=2, shuffle=True,
          callbacks=[model_checkpoint, csv_logger],
          validation_data=(x_val_aug_input, y_val_aug_input))


# In[ ]:

test_img_normalized = normalize(test_img)
test_img_t = test_img_normalized.transpose([1,2,0])  # keras uses last dimension for channels by default
predicted_mask = predict(test_img_t, model).transpose([2,0,1])  # channels first to plot
y_pict_2 = picture_from_mask(predicted_mask, threshold = 0.5)
tiff.imshow(y_pict_2)


# In[ ]:

tiff.imsave('predicted_mask.tif', (255*predicted_mask).astype('uint8'))
tiff.imsave('y_pict_2.tif', y_pict_2)