Example #1
 def __call__(self, chunk, seg=None):
     """View a chunk (and an optional segmentation) in cloudvolume's viewer.

     Parameters
     ----------
     chunk : np.ndarray
         Image or label volume; transposed before display.
     seg : np.ndarray, optional
         Segmentation volume shown together with ``chunk`` when given.
     """
     # cloudvolume uses Fortran (column-major) order
     chunk = np.transpose(chunk)
     # NOTE: `if seg:` raises ValueError for multi-element numpy arrays
     # ("truth value of an array ... is ambiguous"), so test against None.
     if seg is not None:
         seg = np.transpose(seg)
         hyperview(chunk, seg)
     elif np.issubdtype(chunk.dtype,
                        np.floating) or chunk.dtype == np.uint8:
         # float / uint8 volumes are treated as raw images
         view(chunk)
     else:
         # other integer dtypes are treated as label volumes
         view(chunk, segmentation=True)
Example #2
def view(filename):
    """Visualize a .swc skeleton or a .npy label volume.

    Parameters
    ----------
    filename : str
        Path to a ``.swc`` or ``.npy`` file; any other extension prints
        an error message and returns.
    """
    basename, ext = os.path.splitext(filename)

    if ext == ".swc":
        with open(filename, "rt") as swc:
            skel = Skeleton.from_swc(swc.read())

        skel.viewer()
    elif ext == ".npy":
        labels = np.load(filename)
        cloudvolume.view(labels, segmentation=True)
    else:
        # Interpolate the offending filename (the previous message held a
        # fixed "(unknown)" placeholder) so the user can see which input
        # was rejected.
        print("kimimaro: {} was not a .swc or .npy file.".format(filename))
Example #3
def twod():
    """Load 'test2d.png' and return its transposed red channel as a 2D array."""
    image = Image.open('test2d.png')
    red_channel = np.array(image)[:, :, 0]
    return red_channel.T


def threed():
    """Stack the red channels of test3d-1..3.png into a 3D label volume.

    Returns
    -------
    np.ndarray
        ``(480, 480, 3)`` uint8 array; slice ``z`` holds the transposed
        red channel of ``test3d-{z+1}.png``.
    """
    labels = np.zeros((480, 480, 3), dtype=np.uint8)
    # The three slices get identical processing — loop instead of the
    # original copy-pasted open/assign blocks.
    for z in range(3):
        img = Image.open('test3d-{}.png'.format(z + 1))
        labels[:, :, z] = np.array(img)[:, :, 0].T
    return labels


# Connected-components demo: label a test image and view the result.
# Switch to threed() for the 3D volume variant.
labels = twod()
# labels = threed()

print(np.max(labels))
# presumably cc3d expects Fortran (column-major) order — TODO confirm
labels = np.asfortranarray(labels)

start = time.time()
# scipy alternative kept commented out for timing comparison
# labels = scipy.ndimage.measurements.label(labels)[0]
labels = cc3d.connected_components(labels)
print(time.time() - start, "sec")

# number of distinct labels found (background included)
print(np.unique(labels).shape)

view(labels, segmentation=True)
# Demo: download a cutout of the Kasthuri2011 EM image layer and view it.
from cloudvolume import CloudVolume, view

# 1. Initialize a CloudVolume object which will know how to read from this dataset layer.
cv = CloudVolume(
    'https://storage.googleapis.com/neuroglancer-public-data/kasthuri2011/image_color_corrected',
    progress=True,  # shows progress bar
    cache=True,  # cache to disk to avoid repeated downloads
    # parallel=True, # uncomment to try parallel download!
)

# 2. Download context around the point in the Neuroglancer link above
#    into a numpy array.
# argument one is the (x,y,z) coordinate from neuroglancer
# mip=resolution level (smaller mips are higher resolution, highest is 0)
# size is in voxels
img = cv.download_point((5188, 9096, 1198), mip=0, size=(512, 512, 64))

# 3. Visualize the image!
# Open your browser to https://localhost:8080 to view
# Press ctrl-C to continue script execution.
view(img)

# 4. When you're done experimenting, clean up the space we used on disk.
cv.cache.flush()
Example #5
# Demo: download a cutout of the Kasthuri2011 ground-truth segmentation,
# view it, and export one labeled object as a mesh.
from cloudvolume import CloudVolume, view

cv = CloudVolume(
    'https://storage.googleapis.com/neuroglancer-public-data/kasthuri2011/ground_truth',
    progress=True,  # shows progress bar
    cache=True,  # cache to disk to avoid repeated downloads
    # parallel=True, # uncomment to try parallel download!
)

# (x,y,z) voxel coordinate at mip 0; size is the cutout extent in voxels.
img = cv.download_point((5188, 9096, 1198), mip=0, size=(512, 512, 64))

# segmentation=True activates the segmentation mode
# of the microviewer. If it was False, it would display
# as a raw image, which might be very dark if the label
# values are small.
view(img, segmentation=True)

# Get as mesh object
mesh = cv.mesh.get(13)
# Save to disk at ./13.obj which can be visualized in MeshLab or Blender
cv.mesh.save(13, file_format='obj')

cv.cache.flush()