Code example #1
import numpy as np
from pycromanager import Acquisition


def acquire_data(z_range):
    """ Micro-Manager data acquisition. Creates acquisition events for a z-stack.
        This example uses custom events rather than multi_d_acquisition_events, because
        the z-stage is not run from Micro-Manager but is controlled via an external DAQ."""
    with Acquisition(directory=None,
                     name=None,
                     show_display=True,
                     image_process_fn=grab_image) as acq:
        events = []
        for index, z_um in enumerate(
                np.linspace(z_range[0], z_range[1], z_range[2])):
            evt = {"axes": {"z_ext": index}, "z_ext": z_um}
            events.append(evt)
        acq.acquire(events)
Code example #2
from pycromanager import Acquisition, multi_d_acquisition_events


def acquire_multid(z_range):
    """ Micro-Manager data acquisition. Creates acquisition events for a z-stack.
        This example uses multi_d_acquisition_events because the z-stage is run
        from Micro-Manager.
        Unless hardware triggering is set up in Micro-Manager, this will be fairly slow:
        Micro-Manager does not sweep the z-stage, but acquires plane by plane. """
    with Acquisition(directory=None,
                     name=None,
                     show_display=False,
                     image_process_fn=grab_image) as acq:
        events = multi_d_acquisition_events(z_start=z_range[0],
                                            z_end=z_range[1],
                                            z_step=(z_range[1] - z_range[0]) /
                                            (z_range[2] - 1))
        acq.acquire(events)
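Neither snippet above defines grab_image, the image_process_fn both acquisitions rely on. A minimal sketch, assuming the goal is simply to collect incoming frames for processing outside Micro-Manager while keeping the default display behavior (the acquired_frames buffer is hypothetical, not part of the original project):

acquired_frames = []


def grab_image(image, metadata):
    # keep a copy of the pixel data; metadata carries the event axes (e.g. 'z_ext')
    acquired_frames.append(image.copy())
    # returning (image, metadata) passes the frame on to the default viewer / storage
    return image, metadata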
Code example #3
File: trigger_z_stack.py Project: koschink/OPM
from pathlib import Path

from pycromanager import Acquisition, Bridge


def main():

    bridge = Bridge()
    mmc = bridge.get_core()
    
    # Data set parameters
    path = Path('E://20201023//')
    name = 'test'

    # z stack parameters
    start_end_pos = -5
    mid_pos = 5
    step_size = .25
    relative = True

    # time series parameters
    exposure_time = 200  # in milliseconds
    
    num_z_positions = int(abs(mid_pos - start_end_pos)/step_size + 1)
    z_idx = list(range(num_z_positions))
    num_time_points = 10

    # setup cameras
    mmc.set_exposure(exposure_time)
    
    # setup z stage
    z_stage = mmc.get_focus_device()
    z_pos, pos_sequence = upload_stage_sequence(bridge, start_end_pos, mid_pos, step_size, relative)
    num_z_positions = len(pos_sequence)

    print(pos_sequence)

    # move to first position
    mmc.set_position(z_stage, pos_sequence[0])

    events = []
    z_idx_ = z_idx.copy()
    for i in range(num_time_points):
        for j in z_idx_:
            events.append({'axes': {'time':i, 'z': j}})
        z_idx_.reverse()

    with Acquisition(directory=path, name=name) as acq:
        acq.acquire(events)

    # turn off sequencing
    mmc.set_property(z_stage, "UseFastSequence", "No")
    mmc.set_property(z_stage, "UseSequence", "No")
Code example #4
#     events = getattr(img_process_fn, 'events')
#     if len(events) != 0:
#         event_queue.put(events.pop(0))
#     return image, metadata

# with Acquisition('/Users/henrypinkard/megllandump', 'pythonacqtest',
#                   image_process_fn=img_process_fn,
#                  post_hardware_hook_fn=hook_fn) as acq:
#
#     acq.acquire(event_list[0])

# acq.await_completion()

# magellan example
from pycromanager import Acquisition, multi_d_acquisition_events

with Acquisition(magellan_acq_index=0,
                 post_hardware_hook_fn=hook_fn,
                 image_process_fn=img_process_fn,
                 debug=True) as acq:
    pass
acq.await_completion()


# with Acquisition(directory='/Users/henrypinkard/megllandump', name='tcz_acq') as acq:
#     # Generate the events for a single z-stack
#     events = multi_d_acquisition_events(
#         num_time_points=3, time_interval_s=0,
#         channel_group='channel', channels=['DAPI', 'FITC'],
#         z_start=0, z_end=6, z_step=0.4,
#         order='tcz')
#     acq.acquire(events)
Code example #5
import numpy as np
from pycromanager import Acquisition, multi_d_acquisition_events

with Acquisition('/Users/henrypinkard/megllandump', 'l_axis') as acq:
    #create one event for the image at each z-slice
    for time in range(5):
        z_stack = []
        for index, z_um in enumerate(np.arange(start=0, stop=10, step=0.5)):
            z_stack.append({
                'axes': {
                    'z': index,
                    'time': time
                },
                'z': z_um,
                'min_start_time': 5 * time
            })

        acq.acquire(z_stack, keep_shutter_open=True)
Code example #6
import math
import os
import re
from math import sqrt
from pathlib import Path

import cv2
import numpy as np
from pycromanager import Acquisition, Dataset, multi_d_acquisition_events


def acquireImage(channelGroup, channelName, hook):
    # relies on module-level globals defined elsewhere in the project:
    # pos_list, directoryPATH, nameofSAVEDFILE, hook_fn

    x_array = []
    y_array = []
    z_array = []

    for idx in range(pos_list.get_number_of_positions()):
        pos = pos_list.get_position(idx)
        #pos.go_to_position(pos, mmc)

        x = pos_list.get_position(idx).get(0).x
        y = pos_list.get_position(idx).get(0).y
        z = pos_list.get_position(idx).get(1).x

        x_array.append(x)
        y_array.append(y)
        z_array.append(z)

    x_array = np.array(x_array)
    y_array = np.array(y_array)
    z_array = np.array(z_array)

    with Acquisition(directory=directoryPATH,
                     name=nameofSAVEDFILE,
                     post_hardware_hook_fn=hook,
                     post_camera_hook_fn=hook_fn) as acq:
        x = np.hstack([x_array[:, None]])
        y = np.hstack([y_array[:, None]])
        z = np.hstack([z_array[:, None]])
        #Generate the events for a single z-stack
        xyz = np.hstack([x_array[:, None], y_array[:, None], z_array[:, None]])
        events = multi_d_acquisition_events(xyz_positions=xyz,
                                            channel_group=channelGroup,
                                            channels=[channelName])
        acq.acquire(events)
        #acquire a 2 x 1 grid
        #acq.acquire({'row': 0, 'col': 0})
        #acq.acquire({'row': 1, 'col': 0})

    stackfolder = "**/*"
    folder = Path(directoryPATH)
    foldernames = []
    for name in folder.glob(nameofSAVEDFILE + '_*'):
        print(name.stem)
        foldernames.append(name.stem)
    maximum = 1
    for file in foldernames:
        # extract the numeric suffix that the acquisition appends to the save name
        number = int(
            re.search(nameofSAVEDFILE + "_" + r'(\d*)', file).group(1))
        # keep the highest-numbered (most recent) acquisition folder
        maximum = number if number > maximum else maximum
        print(number)

    highest = nameofSAVEDFILE + "_" + str(maximum)

    data_path = os.path.join(folder, highest)

    # open the saved dataset (alternatively: dataset = acq.get_dataset())
    dataset = Dataset(data_path)
    print(dataset.axes)
    print("data_path", data_path)

    length = (len(xyz))

    dataset_metadata = dataset.read_metadata(channel=0, position=1)
    print(dataset_metadata)
    pos = dataset_metadata["Axes"]["position"]
    print(pos)
    if (dataset):

        sizeimg = dataset.read_image(channel=0, position=0)
        sizeimg = cv2.cvtColor(sizeimg, cv2.COLOR_GRAY2RGB)
        h, w, c = sizeimg.shape
    # side length of the position grid (rows and columns should be equal)
    length = int(sqrt(length))
    blank_image = np.zeros((h * (math.ceil(math.sqrt(length)) + 2),
                            w * (math.ceil(math.sqrt(length)) + 2), 3),
                           np.uint16)

    print("image size ", blank_image.shape)

    pixelsizeinum = dataset_metadata["PixelSizeUm"]  #get size of pixel in um
    print(pixelsizeinum)
    """
    for datarow in range(10):
        for datacolumn in range(10):
            metadata = dataset.read_metadata(row=datarow, col=datacolumn)
            if(metadata["Axes"]["position"]>=0):
                pos=metadata["Axes"]["position"]
                #print(pos)
                img = dataset.read_image(position=pos)
            
    
                img = cv2.cvtColor(img,cv2.COLOR_GRAY2RGB)
                cv2.imshow("test",img)
    """
    xtotaloffset = 0
    ytotaloffset = 0
    for dataposition in range(
            len(xyz)):  #do range for all positions in micromanager
        print(dataposition)
        metadata = dataset.read_metadata(channel=0, position=dataposition)
        img = dataset.read_image(channel=0, position=dataposition)
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        img = cv2.flip(img, 1)
        xoffset_um = metadata["XPosition_um_Intended"]
        yoffset_um = metadata["YPosition_um_Intended"]

        print("Intended location is : ", xoffset_um, yoffset_um)
        # cv2.imshow("test",img)
        # cv2.waitKey(0)
        xoffset_px = (xoffset_um - dataset.read_metadata(
            channel=0, position=0)['XPosition_um_Intended']) / pixelsizeinum
        yoffset_px = (yoffset_um - dataset.read_metadata(
            channel=0, position=0)['YPosition_um_Intended']) / pixelsizeinum
        xoffset_px = int(xoffset_px)
        print("Xoffset ", xoffset_px)
        #print("img max X ",blank_image.shape[0])
        yoffset_px = int(yoffset_px)
        print("Yoffset ", yoffset_px)
        #print("img max Y ",blank_image.shape[1])

        alpha = 0
        blank_image[xoffset_px:xoffset_px + (img.shape[1]),
                    yoffset_px:yoffset_px + (img.shape[0])] = cv2.addWeighted(
                        blank_image[xoffset_px:xoffset_px + (img.shape[1]),
                                    yoffset_px:yoffset_px + (img.shape[0])],
                        alpha, img, 1 - alpha, 0)
        #blank_image[:yoffset_px+img.shape[0], :xoffset_px+img.shape[1]] = img
        #blank_image = cv2.addWeighted(blank_image[yoffset_px:yoffset_px+img.shape[0], xoffset_px:xoffset_px+img.shape[1]],img)

    ####################
    #printout only ignore
    ####################
    scale_percent = 5
    width = int(blank_image.shape[1] * scale_percent / 100)
    height = int(blank_image.shape[0] * scale_percent / 100)
    dim = (width, height)

    resized = cv2.resize(blank_image, dim, interpolation=cv2.INTER_AREA)
    '''
    #show image
    winname = "test"
    cv2.namedWindow(winname)        # Create a named window
    cv2.moveWindow(winname, 1000,1000)  # Move it to (40,30)

    cv2.imshow(winname, resized)
    cv2.waitKey(0)
    '''
    blank_image = cv2.cvtColor(blank_image, cv2.COLOR_BGR2GRAY)
    return blank_image, pixelsizeinum
Code example #7
from pycromanager import Acquisition, multi_d_acquisition_events


def storage_monitor_fn(axes):
    dataset = acq.get_dataset()
    pixels = dataset.read_image(**axes)
    print(pixels)


dire = 'C:\\Users\\henry\\Desktop\\datadump'
with Acquisition(directory=dire,
                 name="tcz_acq",
                 debug=False,
                 storage_monitor_callback_fn=storage_monitor_fn) as acq:
    events = multi_d_acquisition_events(
        num_time_points=5,
        time_interval_s=0,
        order="tcz",
    )
    dataset = acq.get_dataset()
    acq.acquire(events)
Code example #8
File: multi_d_acq.py Project: rmeit/pycro-manager
from pycromanager import Acquisition, multi_d_acquisition_events

with Acquisition(directory='/Users/henrypinkard/megllandump',
                 name='tcz_acq',
                 debug=True) as acq:
    # Generate the events for a single z-stack
    events = multi_d_acquisition_events(num_time_points=5,
                                        time_interval_s=0,
                                        channel_group='Channel',
                                        channels=['DAPI', 'FITC'],
                                        z_start=0,
                                        z_end=6,
                                        z_step=0.4,
                                        order='tcz')
    acq.acquire(events)
Code example #9
from pycromanager import Bridge, Acquisition
import numpy as np

bridge = Bridge()
# get object representing micro-magellan API
magellan = bridge.get_magellan()


def hook_fn(event):
    # read the stage coordinates that Magellan attached to this event
    coordinates = np.array([event["x"], event["y"], event["z"]])

    return event


if __name__ == "__main__":
    # magellan example
    acq = Acquisition(magellan_acq_index=0, post_hardware_hook_fn=hook_fn)
    acq.await_completion()

Code example #10
File: test_demo_config.py Project: koschink/OPM
from pathlib import Path

from pycromanager import Acquisition, Bridge


def main():

    bridge = Bridge()
    core = bridge.get_core()

    # FOV parameters
    ROI = [1024, 0, 256, 1024]  #unit: pixels

    # camera exposure
    exposure_ms = 5  #unit: ms

    # set to high-res camera
    core.set_config('Camera', 'HighRes')

    # crop FOV
    core.set_roi(*ROI)

    # set exposure
    core.set_exposure(exposure_ms)

    # setup file name
    save_directory = Path('C:/data/test/')
    save_name = 'test_stages'

    # get handle to xy and z stages
    xy_stage = core.get_xy_stage_device()
    z_stage = core.get_focus_device()

    # move the stages to verify core can talk to them
    # positions chosen at random
    core.set_xy_position(100., 100.)
    core.wait_for_device(xy_stage)
    core.set_position(50.)
    core.wait_for_device(z_stage)

    # create events to hold all of the scan axis images during constant speed stage scan
    channel_configs = ['DAPI', 'FITC', 'Rhodamine', 'Cy5']
    events = []
    for y in range(2):
        for z in range(2):
            for c in range(len(channel_configs)):
                for x in range(2):
                    evt = {
                        'axes': {
                            'x': x,
                            'y': y,
                            'z': z
                        },
                        'x': 100,
                        'y': y * 1000,
                        'z': z * 100,
                        'channel': {
                            'group': 'Channel',
                            'config': channel_configs[c]
                        }
                    }
                    events.append(evt)

    # run acquisition
    # TO DO: properly handle an error here if camera driver fails to return expected number of images.
    with Acquisition(directory=save_directory,
                     name=save_name,
                     post_hardware_hook_fn=setup_scan_fn,
                     post_camera_hook_fn=hook_fn,
                     show_display=False,
                     max_multi_res_index=0,
                     debug=False) as acq:
        acq.acquire(events)
        acq.acquire(None)
        acq.await_completion()
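setup_scan_fn and hook_fn are defined elsewhere in this project. pycromanager hooks receive each event and must return it (or None to discard it), so minimal pass-through placeholders are enough to run this demo-config test; the bodies below are assumptions, not the project's actual hooks:

def setup_scan_fn(event):
    # post-hardware hook: the place to configure external hardware for this event
    return event


def hook_fn(event):
    # post-camera hook: the camera is armed here; an external trigger could be started
    return event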
Code example #11
File: democam.py Project: rmeit/pycro-manager
"""
This simple example uses pycromanager to vary exposure times with three repetitions
and can be run with Micro-manager's virtual DemoCamera / DCam device. The resulting
dataset is saved to `democam_X/Full Resolution/democam_MagellanStack.tif` within the
current folder; the consecutively numbered `X` separates individual runs of this script.
"""
from pycromanager import Acquisition

if __name__ == '__main__':

    exposures = [100, 200, 300, 400]
    with Acquisition(directory='.', name='democam') as acq:
        events = []
        for rep in range(3):
            for idx, exposure in enumerate(exposures):
                evt = {'axes': {'repetition': rep, 'exposure': idx},
                       'exposure': exposure}
                events.append(evt)

        acq.acquire(events)
Code example #12
import numpy as np
from pycromanager import Acquisition, multi_d_acquisition_events

if __name__ == "__main__":

    with Acquisition("/Users/henrypinkard/megllandump", "l_axis") as acq:
        # create one event for the image at each z-slice
        events = []
        for time in range(5):
            for index, z_um in enumerate(np.arange(start=0, stop=10,
                                                   step=0.5)):
                evt = {
                    #'axes' is required. It is used by the image viewer and data storage to
                    # identify the acquired image
                    "axes": {
                        "l": index,
                        "time": time
                    },
                    # the 'z' field provides the z position in µm
                    "z": z_um,
                }
                events.append(evt)

        acq.acquire(events)
Code example #13
import copy

import numpy as np
from pycromanager import Acquisition, multi_d_acquisition_events

if __name__ == "__main__":

    def img_process_fn(image, metadata):

        image2 = np.array(image, copy=True)
        image2 = np.swapaxes(image2, 0, 1)
        md_2 = copy.deepcopy(metadata)

        image[250:350, 100:300] = np.random.randint(0, 4999)

        if metadata["Channel"] == "DAPI":
            image[:100, :100] = 0
            image2[:100, :100] = 0
        else:
            image[-100:, -100:] = 0
            image2[-100:, -100:] = 0

        # metadata['Axes']['l'] = 0
        md_2["Channel"] = "A_new_channel"

        return [(image, metadata), (image2, md_2)]

    with Acquisition(directory="/Users/henrypinkard/megllandump",
                     name="tcz_acq",
                     image_process_fn=img_process_fn) as acq:
        # Generate the events for a single z-stack
        events = multi_d_acquisition_events(
            num_time_points=10,
            time_interval_s=0,
            channel_group="Channel",
            channels=["DAPI", "FITC"],
            order="tc",
        )
        acq.acquire(events)
Code example #14
from pycromanager import Acquisition, multi_d_acquisition_events
import numpy as np


def hook_fn(event):

    return event


with Acquisition(
        directory="/Users/henrypinkard/megllandump",
        name="acquisition_name",
        pre_hardware_hook_fn=hook_fn,
) as acq:
    acq.acquire(multi_d_acquisition_events(10))
Code example #15
from pycromanager import Acquisition, multi_d_acquisition_events
import numpy as np

if __name__ == "__main__":

    # this image processor gets access to the core (via bridge) and can queue new events
    def img_process_fn(image, metadata, bridge, event_queue):

        if not hasattr(img_process_fn, "counter"):
            img_process_fn.counter = 0

        if img_process_fn.counter < 10:
            evt = {"axes": {"time": 0, "n": img_process_fn.counter}}
            img_process_fn.counter += 1
            image[250:350, 100:300] = img_process_fn.counter * 10

        else:
            evt = None
        event_queue.put(evt)

        return image, metadata

    acq = Acquisition(
        directory="/Users/henrypinkard/megllandump",
        name="acquisition_name",
        image_process_fn=img_process_fn,
    )

    # kick it off with a single event
    acq.acquire({"axes": {"time": 0}})
Code example #16
from pycromanager import Acquisition

if __name__ == '__main__':

    # this image processor gets access to the core (via bridge) and can queue new events
    def img_process_fn(image, metadata, bridge, event_queue):

        if not hasattr(img_process_fn, 'counter'):
            img_process_fn.counter = 0

        if img_process_fn.counter < 10:
            evt = {
                'axes': {'time': 0, 'n': img_process_fn.counter}
            }
            img_process_fn.counter += 1
            image[250:350, 100:300] = img_process_fn.counter * 10

        else:
            evt = None
        event_queue.put(evt)

        return image, metadata

    acq = Acquisition(directory='/Users/henrypinkard/megllandump', name='acquisition_name',
                      image_process_fn=img_process_fn)

    #kick it off with a single event
    acq.acquire({'axes': {'time': 0}})



Code example #17
from pycromanager import Acquisition


def image_callback_fn(image, metadata):
    row = metadata["GridRowIndex"]
    col = metadata["GridColumnIndex"]
    channel = metadata["Channel"]
    # other image axes (e.g. a z axis). 'position' axis is redundant to row and column indices
    axes = metadata["Axes"]
    # numpy array
    image

    # TODO: run callback function

    return image, metadata


with Acquisition(
        "/Users/henrypinkard/tmp",
        "tiled",
        tile_overlap=10,
        image_process_fn=image_callback_fn,
        pre_hardware_hook_fn=event_edit_fn,
        debug=True,
) as acq:
    # 10 pixel overlap between adjacent tiles
    acq.acquire({
        "row": 0,
        "col": 0,
        "Channel": {
            "group": "channel",
            "config": "DAPI"
        }
    })
    acq.acquire({
        "row": 0,
        "col": 0,
    })
Code example #18
File: bug_test.py Project: rmeit/pycro-manager
from pycromanager import Acquisition, multi_d_acquisition_events


def img_process_fn(image, metadata):
    # modify the pixels by setting a 100 pixel square at the top left to 0

    image[:100, :100] = 0

    # propagate the image and metadata to the default viewer and saving classes

    return image, metadata


z_shg_center = 0

if __name__ == '__main__':  # this is important, don't forget it

    with Acquisition(directory='/Users/henrypinkard/megellandump/',
                     name='exp_2_mda',
                     image_process_fn=img_process_fn) as acq:
        events = multi_d_acquisition_events(z_start=z_shg_center - 20,
                                            z_end=z_shg_center + 20,
                                            z_step=5)

        acq.acquire(events)

#       acq.acquire()
Code example #19
from pycromanager import Acquisition, Bridge, multi_d_acquisition_events, start_headless

mm_app_path = '/Applications/Micro-Manager-2.0.0-gamma1'
# mm_app_path = 'C:/Program Files/Micro-Manager-2.0gamma'

config_file = mm_app_path + "/MMConfig_demo.cfg"

#Optional: specify your own version of java to run with
java_loc = "/Library/Internet Plug-Ins/JavaAppletPlugin.plugin/Contents/Home/bin/java"
# java_loc = None
start_headless(mm_app_path, config_file, java_loc=java_loc)

b = Bridge()
b.get_core().snap_image()
print(b.get_core().get_image())

save_dir = "/Users/henrypinkard/tmp"
# save_dir = "C:/Users/Henry Pinkard/Desktop/datadump"

with Acquisition(directory=save_dir, name="tcz_acq", debug=True) as acq:
    # Generate the events for a single z-stack
    events = multi_d_acquisition_events(
        num_time_points=5,
        time_interval_s=0,
        channel_group="Channel",
        channels=["DAPI", "FITC"],
        z_start=0,
        z_end=6,
        z_step=0.4,
        order="tcz",
    )
    acq.acquire(events)
Code example #20
# %% Imports
from pycromanager import Core

from pycromanager import Bridge
from pycromanager import Acquisition, multi_d_acquisition_events
import numpy as np

core = Core()
print(core)
core.get_version_info()

# %% Acquisition
with Acquisition(directory=r'D:\Downloads', name='test') as acq:
    events = multi_d_acquisition_events(channel_group='Channel',
                                        channels=['DAPI', 'FITC'],
                                        z_start=0,
                                        z_end=6,
                                        z_step=0.4,
                                        order='tcz')
    acq.acquire(events)

# %% Positions
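The script is cut off at the '# %% Positions' cell. One plausible continuation, reading back the current stage coordinates through the same Core handle (an assumption about intent, not the original code):

x_um = core.get_x_position()
y_um = core.get_y_position()
z_um = core.get_position()
print('stage position (um):', x_um, y_um, z_um)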
Code example #21
import numpy as np
from pycromanager import multi_d_acquisition_events, Acquisition


def img_process_fn(image, metadata):
    print(image)
    pass  # send them somewhere else, not default saving and display


with Acquisition(image_process_fn=img_process_fn) as acq:
    # Generate the events for a single z-stack
    events = multi_d_acquisition_events(
        num_time_points=10,
        time_interval_s=0,
        channel_group="channel",
        channels=["DAPI", "FITC"],
        z_start=0,
        z_end=6,
        z_step=0.4,
        order="tcz",
    )
    acq.acquire(events)
Code example #22
import copy

import numpy as np
from pycromanager import Acquisition, multi_d_acquisition_events

if __name__ == '__main__':

    def img_process_fn(image, metadata):

        image2 = np.array(image, copy=True)
        image2 = np.swapaxes(image2, 0, 1)
        md_2 = copy.deepcopy(metadata)

        image[250:350, 100:300] = np.random.randint(0, 4999)

        if metadata['Channel'] == 'DAPI':
            image[:100, :100] = 0
            image2[:100, :100] = 0
        else:
            image[-100:, -100:] = 0
            image2[-100:, -100:] = 0

        # metadata['Axes']['l'] = 0
        md_2['Channel'] = 'A_new_channel'

        return [(image, metadata), (image2, md_2)]

    with Acquisition(directory='/Users/henrypinkard/megllandump',
                     name='tcz_acq',
                     image_process_fn=img_process_fn) as acq:
        # Generate the events for a single z-stack
        events = multi_d_acquisition_events(num_time_points=10,
                                            time_interval_s=0,
                                            channel_group='channel',
                                            channels=['DAPI', 'FITC'],
                                            order='tc')
        acq.acquire(events)
Code example #23
import time
from pathlib import Path

import numpy as np
from pycromanager import Acquisition, Bridge


def main():

    #------------------------------------------------------------------------------------------------------------------------------------
    #----------------------------------------------Begin setup of scan parameters--------------------------------------------------------
    #------------------------------------------------------------------------------------------------------------------------------------

    # lasers to use
    # 0 -> inactive
    # 1 -> active
    state_405 = 1
    state_488 = 0
    state_561 = 0
    state_635 = 1
    state_730 = 0

    # laser powers (0 -> 100%)
    power_405 = 10
    power_488 = 0
    power_561 = 0
    power_635 = 5
    power_730 = 0

    # exposure time
    exposure_ms = 5.

    # scan axis limits. Use stage positions reported by MM
    scan_axis_start_um = 6000.  #unit: um
    scan_axis_end_um = 16000.  #unit: um

    # tile axis limits. Use stage positions reported by MM
    tile_axis_start_um = -1300  #unit: um
    tile_axis_end_um = -1350.  #unit: um

    # height axis limits. Use stage positions reported by MM
    height_axis_start_um = 320.  #unit: um
    height_axis_end_um = 360.  #unit:  um

    # FOV parameters
    # ONLY MODIFY IF NECESSARY
    ROI = [0, 1024, 1599, 255]  #unit: pixels

    # setup file name
    save_directory = Path('E:/20201211/')
    save_name = 'shaffer_lung'

    #------------------------------------------------------------------------------------------------------------------------------------
    #----------------------------------------------End setup of scan parameters----------------------------------------------------------
    #------------------------------------------------------------------------------------------------------------------------------------

    bridge = Bridge()
    core = bridge.get_core()

    # turn off lasers
    core.set_config('Coherent-State', 'off')
    core.wait_for_config('Coherent-State', 'off')

    # set camera into 16bit readout mode
    # give camera time to change modes if necessary
    core.set_property('Camera', 'ReadoutRate', '100MHz 16bit')
    time.sleep(1)

    # set camera into low noise readout mode
    # give camera time to change modes if necessary
    core.set_property('Camera', 'Gain', '2-CMS')
    time.sleep(1)

    # set camera trigger timeout large enough for the long stage scan
    # give camera time to change modes if necessary
    core.set_property('Camera', 'Trigger Timeout (secs)', 300)
    time.sleep(1)

    # set camera to internal trigger
    # give camera time to change modes if necessary
    core.set_property('Camera', 'TriggerMode', 'Internal Trigger')
    time.sleep(1)

    # change core timeout for long stage moves
    time.sleep(1)

    # crop FOV
    #core.set_roi(*ROI)

    # set exposure
    core.set_exposure(exposure_ms)

    # get actual framerate from micromanager properties
    # TO DO: fix need for user to have manually run an exposure with correct crop to get this value
    actual_readout_ms = float(core.get_property(
        'Camera', 'ActualInterval-ms'))  #unit: ms

    # camera pixel size
    pixel_size_um = .115  # unit: um

    # scan axis setup
    scan_axis_step_um = 0.2  # unit: um
    scan_axis_step_mm = scan_axis_step_um / 1000.  #unit: mm
    scan_axis_start_mm = scan_axis_start_um / 1000.  #unit: mm
    scan_axis_end_mm = scan_axis_end_um / 1000.  #unit: mm
    scan_axis_range_um = np.abs(scan_axis_end_um -
                                scan_axis_start_um)  # unit: um
    scan_axis_range_mm = scan_axis_range_um / 1000  #unit: mm
    actual_exposure_s = actual_readout_ms / 1000.  #unit: s
    scan_axis_speed = np.round(scan_axis_step_mm / actual_exposure_s,
                               2)  #unit: mm/s
    scan_axis_positions = np.rint(scan_axis_range_mm /
                                  scan_axis_step_mm).astype(
                                      int)  #unit: number of positions

    # tile axis setup
    tile_axis_overlap = 0.2  #unit: percentage
    tile_axis_range_um = np.abs(tile_axis_end_um -
                                tile_axis_start_um)  #unit: um
    tile_axis_range_mm = tile_axis_range_um / 1000  #unit: mm
    tile_axis_ROI = ROI[2] * pixel_size_um  #unit: um
    tile_axis_step_um = np.round((tile_axis_ROI) * (1 - tile_axis_overlap),
                                 2)  #unit: um
    tile_axis_step_mm = tile_axis_step_um / 1000  #unit: mm
    tile_axis_positions = np.rint(tile_axis_range_mm /
                                  tile_axis_step_mm).astype(
                                      int)  #unit: number of positions
    # if tile_axis_positions rounded to zero, make sure we acquire at least one position
    if tile_axis_positions == 0:
        tile_axis_positions = 1

    # height axis setup
    height_axis_overlap = 0.2  #unit: percentage
    height_axis_range_um = np.abs(height_axis_end_um -
                                  height_axis_start_um)  #unit: um
    height_axis_range_mm = height_axis_range_um / 1000  #unit: mm
    #height_axis_ROI = ROI[3]*pixel_size_um*np.sin(30*(np.pi/180.)) #unit: um TO DO: Why is overlap so large when using oblique pixel height??
    height_axis_ROI = ROI[3] * pixel_size_um  #unit: um
    height_axis_step_um = np.round(
        (height_axis_ROI) * (1 - height_axis_overlap), 2)  #unit: um
    height_axis_step_mm = height_axis_step_um / 1000  #unit: mm
    height_axis_positions = np.rint(height_axis_range_mm /
                                    height_axis_step_mm).astype(
                                        int)  #unit: number of positions
    # if height_axis_positions rounded to zero, make sure we acquire at least one position
    if height_axis_positions == 0:
        height_axis_positions = 1

    # get handle to xy and z stages
    xy_stage = core.get_xy_stage_device()
    z_stage = core.get_focus_device()

    # Set up the Tiger controller to pass a signal to the PLC when the scan stage crosses the start position
    plcName = 'PLogic:E:36'
    propPosition = 'PointerPosition'
    propCellConfig = 'EditCellConfig'
    #addrOutputBNC3 = 35 # BNC3 on the PLC front panel
    addrOutputBNC1 = 33  # BNC1 on the PLC front panel
    addrStageSync = 46  # TTL5 on Tiger backplane = stage sync signal

    # connect stage sync signal to BNC output
    core.set_property(plcName, propPosition, addrOutputBNC1)
    core.set_property(plcName, propCellConfig, addrStageSync)

    # turn on 'transmit repeated commands' for Tiger
    core.set_property('TigerCommHub', 'OnlySendSerialCommandOnChange', 'No')

    # set tile axis speed for all moves
    command = 'SPEED Y=.1'
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # set scan axis speed for large move to initial position
    command = 'SPEED X=.1'
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # turn off 'transmit repeated commands' for Tiger
    core.set_property('TigerCommHub', 'OnlySendSerialCommandOnChange', 'Yes')

    # move scan stage to its initial position
    core.set_xy_position(scan_axis_start_um, tile_axis_start_um)
    core.wait_for_device(xy_stage)
    core.set_position(height_axis_start_um)
    core.wait_for_device(z_stage)

    # turn on 'transmit repeated commands' for Tiger
    core.set_property('TigerCommHub', 'OnlySendSerialCommandOnChange', 'No')

    # set scan axis speed to correct speed for continuous stage scan
    # expects mm/s
    command = 'SPEED X=' + str(scan_axis_speed)
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # set scan axis to true 1D scan with no backlash
    command = '1SCAN X? Y=0 Z=9 F=0'
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # set range and return speed (5% of max) for scan axis
    # expects mm
    command = '1SCANR X=' + str(scan_axis_start_mm) + ' Y=' + str(
        scan_axis_end_mm) + ' R=10'
    core.set_property('TigerCommHub', 'SerialCommand', command)

    # check to make sure Tiger is not busy
    ready = 'B'
    while (ready != 'N'):
        command = 'STATUS'
        core.set_property('TigerCommHub', 'SerialCommand', command)
        ready = core.get_property('TigerCommHub', 'SerialResponse')
        time.sleep(.500)

    # turn off 'transmit repeated commands' for Tiger
    core.set_property('TigerCommHub', 'OnlySendSerialCommandOnChange', 'Yes')

    # construct boolean array for lasers to use
    channel_states = [state_405, state_488, state_561, state_635, state_730]
    channel_powers = [power_405, power_488, power_561, power_635, power_730]

    # set lasers to user defined power
    core.set_property('Coherent-Scientific Remote',
                      'Laser 405-100C - PowerSetpoint (%)', channel_powers[0])
    core.set_property('Coherent-Scientific Remote',
                      'Laser 488-150C - PowerSetpoint (%)', channel_powers[1])
    core.set_property('Coherent-Scientific Remote',
                      'Laser OBIS LS 561-150 - PowerSetpoint (%)',
                      channel_powers[2])
    core.set_property('Coherent-Scientific Remote',
                      'Laser 637-140C - PowerSetpoint (%)', channel_powers[3])
    core.set_property('Coherent-Scientific Remote',
                      'Laser 730-30C - PowerSetpoint (%)', channel_powers[4])

    print('Number of X positions: ' + str(scan_axis_positions))
    print('Number of Y tiles: ' + str(tile_axis_positions))
    print('Number of Z slabs: ' + str(height_axis_positions))

    #time.sleep(10)

    for y in range(tile_axis_positions):
        # calculate tile axis position
        tile_position_um = tile_axis_start_um + (tile_axis_step_um * y)

        # move XY stage to new tile axis position
        core.set_xy_position(scan_axis_start_um, tile_position_um)
        core.wait_for_device(xy_stage)

        for z in range(height_axis_positions):
            # calculate height axis position
            height_position_um = height_axis_start_um + (height_axis_step_um *
                                                         z)

            # move Z stage to new height axis position
            core.set_position(height_position_um)
            core.wait_for_device(z_stage)

            # create events to execute scan across this z plane
            events = []

            # map laser index to its Coherent-State channel preset
            channel_configs = ['405nm', '488nm', '561nm', '637nm', '730nm']
            for c in range(len(channel_states)):
                if channel_states[c] != 1:
                    continue
                for x in range(
                        scan_axis_positions + 10
                ):  #TO DO: Fix need for extra frames in ASI setup, not here.
                    evt = {
                        'axes': {
                            'z': x
                        },
                        'channel': {
                            'group': 'Coherent-State',
                            'config': channel_configs[c]
                        }
                    }
                    events.append(evt)

            # set camera to trigger first mode for stage synchronization
            # give camera time to change modes
            core.set_property('Camera', 'TriggerMode', 'Trigger first')
            time.sleep(1)

            # update save_name with the current tile (y) and height (z) indices
            save_name_z = save_name + '_y' + str(y).zfill(4) + '_z' + str(
                z).zfill(4)

            # run acquisition at this Z plane
            with Acquisition(directory=save_directory,
                             name=save_name_z,
                             post_hardware_hook_fn=post_hook_fn,
                             post_camera_hook_fn=camera_hook_fn,
                             show_display=False,
                             max_multi_res_index=0) as acq:
                acq.acquire(events)

                # added this code in an attempt to clean up resources, given the ZMQ error we are getting when using two hooks
                acq.acquire(None)
                acq.await_completion()

            # try to clean up acquisition so that AcqEngJ releases directory. This way we can move it to the network storage
            # in the background.
            acq = None

            # turn off lasers
            core.set_config('Coherent-State', 'off')
            core.wait_for_config('Coherent-State', 'off')

            # set camera to internal trigger
            # this is necessary to avoid PVCAM driver issues that we keep having for long acquisitions.
            # give camera time to change modes
            core.set_property('Camera', 'TriggerMode', 'Internal Trigger')
            time.sleep(1)
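post_hook_fn and camera_hook_fn are defined elsewhere in the OPM project. In this constant-speed stage-scan scheme, the post-camera hook is the natural place to start the ASI scan once the camera is armed and waiting for the stage-sync TTL. Sketches under that assumption (the '1SCAN' serial command and both hook bodies are assumptions, not the original code):

def post_hook_fn(event, bridge, event_queue):
    # post-hardware hook: nothing extra to configure per event here
    return event


def camera_hook_fn(event, bridge, event_queue):
    # camera is armed; issue the serial command that starts the constant-speed scan
    core = bridge.get_core()
    core.set_property('TigerCommHub', 'SerialCommand', '1SCAN')
    return event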
Code example #24
import numpy as np
from pycromanager import Acquisition, multi_d_acquisition_events

with Acquisition("/Users/henrypinkard/tmp", "l_axis") as acq:
    # create one event for the image at each z-slice
    events = []
    for time in range(5):
        for index, z_um in enumerate(np.arange(start=0, stop=10, step=0.5)):
            evt = {
                #'axes' is required. It is used by the image viewer and data storage to
                # identify the acquired image
                "axes": {
                    "l": index,
                    "time": time
                },
                # the 'z' field provides the z position in µm
                "z": z_um,
            }
            events.append(evt)

    acq.acquire(events)
Code example #25
from pycromanager import Acquisition, Bridge, multi_d_acquisition_events, start_headless

mm_app_path = '/Applications/Micro-Manager-2.0.0-gamma1'
# mm_app_path = 'C:/Program Files/Micro-Manager-2.0gamma'

config_file = mm_app_path + "/MMConfig_demo.cfg"

#Optional: specify your own version of java to run with
java_loc = "/Library/Internet Plug-Ins/JavaAppletPlugin.plugin/Contents/Home/bin/java"
# java_loc = None
start_headless(mm_app_path, config_file, java_loc=java_loc, port=5000)

with Bridge(port=5000) as b:
    b.get_core().snap_image()
    print(b.get_core().get_image())

save_dir = "/Users/henrypinkard/tmp"
# save_dir = "C:/Users/Henry Pinkard/Desktop/datadump"

with Acquisition(directory=save_dir, name="tcz_acq", port=5000) as acq:
    # Generate the events for a single z-stack
    events = multi_d_acquisition_events(
        num_time_points=5,
        time_interval_s=0,
        channel_group="Channel",
        channels=["DAPI", "FITC"],
        z_start=0,
        z_end=6,
        z_step=0.4,
        order="tcz",
    )
    acq.acquire(events)
Code example #26
from pycromanager import Acquisition

if __name__ == '__main__':

    def image_callback_fn(image, metadata):
        row = metadata['GridRowIndex']
        col = metadata['GridColumnIndex']
        channel = metadata['Channel']
        # other image axes (e.g. a z axis). 'position' axis is redundant to row and column indices
        axes = metadata['Axes']
        # numpy array
        image

        # TODO: run callback function

        return image, metadata

    with Acquisition('/Users/henrypinkard/megllandump',
                     'tiled',
                     tile_overlap=10,
                     image_process_fn=image_callback_fn,
                     pre_hardware_hook_fn=event_edit_fn) as acq:
        # 10 pixel overlap between adjacent tiles

        acq.acquire({
            'row': 0,
            'col': 0,
            'channel': {
                'group': 'channel',
                'config': 'DAPI'
            }
        })
        acq.acquire({
            'row': 0,
            'col': 0,
        })
Code example #27
import numpy as np
from pycromanager import multi_d_acquisition_events, Acquisition

if __name__ == '__main__':

    def external_trigger_fn(event):

        #TODO: send signal to external device here

        return event

    with Acquisition(directory='/Users/henrypinkard/megllandump',
                     name='tcz_acq',
                     post_camera_hook_fn=external_trigger_fn) as acq:
        # Generate the events for a single z-stack
        events = multi_d_acquisition_events(num_time_points=10,
                                            time_interval_s=0,
                                            channel_group='channel',
                                            channels=['DAPI', 'FITC'],
                                            z_start=0,
                                            z_end=6,
                                            z_step=0.4,
                                            order='tcz')
        acq.acquire(events)
Code example #28
from pycromanager import Acquisition
import numpy as np

if __name__ == "__main__":

    def hook_fn(event):
        # if np.random.randint(4) < 2:
        #     return event
        return event

    def img_process_fn(image, metadata):
        image[250:350, 100:300] = np.random.randint(0, 4999)
        return image, metadata

    # magellan example
    acq = Acquisition(
        magellan_acq_index=0,
        post_hardware_hook_fn=hook_fn,
        image_process_fn=img_process_fn,
        debug=True,
    )
    acq.await_completion()
Code example #29
File: multi_d_acq.py Project: syaffa/pycro-manager
from pycromanager import Acquisition, multi_d_acquisition_events


with Acquisition(directory="/Users/henrypinkard/tmp", name="tcz_acq", debug=True) as acq:
    # Generate the events for a single z-stack
    events = multi_d_acquisition_events(
        num_time_points=5,
        time_interval_s=0,
        channel_group="Channel",
        channels=["DAPI", "FITC"],
        z_start=0,
        z_end=6,
        z_step=0.4,
        order="tcz",
    )
    acq.acquire(events)
Code example #30
from pycromanager import Acquisition

if __name__ == '__main__':

    with Acquisition('/Users/henrypinkard/megllandump', 'tiled', tile_overlap=10) as acq:
        #10 pixel overlap between adjacent tiles

        acq.acquire({'row': 0, 'col': 0})
        acq.acquire({'row': 1, 'col': 0})
        acq.acquire({'row': 0, 'col': 1})

        dataset = acq.get_dataset()
        # check for and read the downsampled (resolution_level=1) version of the first tile
        dataset.has_image(row=0, col=0, resolution_level=1)
        dataset.read_image(row=0, col=0, resolution_level=1)