Example #1
    def __init__(self, image_path, th_value):

        self.readImage(image_path)
        self.height, self.width, _ = self.im.shape
        self.changed = np.zeros((self.height, self.width), np.double)
        self.currentRegion = 0
        self.iterations = 0

        # segmentation result image (RGB)
        self.segmentation_s = np.zeros((self.height, self.width, 3),
                                       dtype='uint8')

        self.heap = Basic.Basic()
        self.thresh = float(th_value)
Example #2
def createNewBasic(self, wherex, wherey, screenCoordinates=1):
    self.fromClass = None
    self.toClass = None
    # try the global constraints...
    res = self.ASGroot.preCondition(ASG.CREATE)
    if res:
        self.constraintViolation(res)
        self.mode = self.IDLEMODE
        return

    new_semantic_obj = Basic(self)
    res = new_semantic_obj.preCondition(ASGNode.CREATE)
    if res: return self.constraintViolation(res)
    new_semantic_obj.preAction(ASGNode.CREATE)

    ne = len(self.ASGroot.listNodes["Basic"])
    if new_semantic_obj.keyword_:
        new_semantic_obj.keyword_.setValue(
            new_semantic_obj.keyword_.toString() + str(ne))
    if screenCoordinates:
        new_obj = graph_Basic(self.UMLmodel.canvasx(wherex),
                              self.UMLmodel.canvasy(wherey), new_semantic_obj)
    else:  # already in canvas coordinates
        new_obj = graph_Basic(wherex, wherey, new_semantic_obj)
    new_obj.DrawObject(self.UMLmodel, self.editGGLabel)
    self.UMLmodel.addtag_withtag("Basic", new_obj.tag)
    new_semantic_obj.graphObject_ = new_obj
    self.ASGroot.addNode(new_semantic_obj)
    res = self.ASGroot.postCondition(ASG.CREATE)
    if res:
        self.constraintViolation(res)
        self.mode = self.IDLEMODE
        return

    res = new_semantic_obj.postCondition(ASGNode.CREATE)
    if res:
        self.constraintViolation(res)
        self.mode = self.IDLEMODE
        return
    new_semantic_obj.postAction(ASGNode.CREATE)

    self.mode = self.IDLEMODE
    if self.editGGLabel:
        self.statusbar.event(StatusBar.TRANSFORMATION, StatusBar.CREATE)
    else:
        self.statusbar.event(StatusBar.MODEL, StatusBar.CREATE)
    return new_semantic_obj
Example #3
# For extracting better edges, we combine several traditional algorithms.
# We provide more flexible parameters.

import cv2
import numpy
import Basic
import CloseAreaDetect
import CannyOperator

if __name__ == "__main__":
    OpBasic = Basic.Basic(IMG_PATH="G:/Deecamp/building.jpg",
                          Save_PATH="G:/Deecamp/test/gradient.jpg")
    OpCanny = CannyOperator.ImageChange(Static_Low_Threshold=30,
                                        Static_High_Threshold=80)
    OpDetect = CloseAreaDetect.CloseAreaDetect(
        Save_PATH="G:/Deecamp/test/closeArea.jpg", Gray_Threshold=50)

    img = cv2.imread(OpBasic.IMG_PATH)
    # 1. Do 'Gradient'(dilation - erosion).
    img = OpBasic.Gradient(img, KernelSize=3, SaveImage=True)
    # img = OpBasic.Closing( img, ErodeIter = 1, DilateIter = 1, KernelSize = 3)
    # 2. Do 'Canny'. Press 'Space' to move on.
    # img = OpCanny.StaticCanny(img)
    img = OpCanny.DynamicCanny(img, img_path=OpBasic.Save_PATH)
    # 3. Detect closed areas.
    img = OpDetect.CloseArea(img,
                             GrayWhiteChange=True,
                             MinAreaSize=100,
                             SaveImage=False)
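The 'Gradient' step above (dilation minus erosion) is a standard morphological gradient. As a rough equivalent of what OpBasic.Gradient presumably does internally, it can be reproduced with plain OpenCV; the file paths here are placeholders:

import cv2
import numpy as np

img = cv2.imread("building.jpg", cv2.IMREAD_GRAYSCALE)   # placeholder input path
kernel = np.ones((3, 3), np.uint8)                        # matches KernelSize=3 above
# Morphological gradient = dilation - erosion; it highlights object outlines,
# which gives the later Canny step cleaner edges to work with.
gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
cv2.imwrite("gradient.jpg", gradient)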
Example #4
def main():

    Interpreter.batchMode = True

    if (lambda_flat == 0) ^ (lambda_dark == 0):
        print ("ERROR: Both of lambda_flat and lambda_dark must be zero,"
               " or both non-zero.")
        return
    lambda_estimate = "Automatic" if lambda_flat == 0 else "Manual"

    print "Loading images..."

    # Use BioFormats reader directly to determine dataset dimensions without
    # reading every single image. The series count (num_images) is the one value
    # we can't easily get any other way, but we might as well grab the others
    # while we have the reader available.
    bfreader = ImageReader()
    bfreader.id = str(filename)
    num_images = bfreader.seriesCount
    num_channels = bfreader.sizeC
    width = bfreader.sizeX
    height = bfreader.sizeY
    bfreader.close()

    # The internal initialization of the BaSiC code fails when we invoke it via
    # scripting, unless we explicitly set the private 'noOfSlices' field.
    # Since it's private, we need to use Java reflection to access it.
    Basic_noOfSlices = Basic.getDeclaredField('noOfSlices')
    Basic_noOfSlices.setAccessible(True)
    basic = Basic()
    Basic_noOfSlices.setInt(basic, num_images)

    # Pre-allocate the output profile images, since we have all the dimensions.
    ff_image = IJ.createImage("Flat-field", width, height, num_channels, 32)
    df_image = IJ.createImage("Dark-field", width, height, num_channels, 32)

    print("\n\n")

    # BaSiC works on one channel at a time, so we only read the images from one
    # channel at a time to limit memory usage.
    for channel in range(num_channels):
        print "Processing channel %d/%d..." % (channel + 1, num_channels)
        print "==========================="

        options = ImporterOptions()
        options.id = str(filename)
        options.setOpenAllSeries(True)
        # concatenate=True gives us a single stack rather than a list of
        # separate images.
        options.setConcatenate(True)
        # Limit the reader to the channel we're currently working on. This loop
        # is mainly why we need to know num_images before opening anything.
        for i in range(num_images):
            options.setCBegin(i, channel)
            options.setCEnd(i, channel)
        # openImagePlus returns a list of images, but we expect just one (a
        # stack).
        input_image = BF.openImagePlus(options)[0]

        # BaSiC seems to require that the input image be the ImageJ "current"
        # image; otherwise it prints an error and aborts.
        WindowManager.setTempCurrentImage(input_image)
        basic.exec(
            input_image, None, None,
            "Estimate shading profiles", "Estimate both flat-field and dark-field",
            lambda_estimate, lambda_flat, lambda_dark,
            "Ignore", "Compute shading only"
        )
        input_image.close()

        # Copy the pixels from the BaSiC-generated profile images to the
        # corresponding channel of our output images.
        ff_channel = WindowManager.getImage("Flat-field:%s" % input_image.title)
        ff_image.slice = channel + 1
        ff_image.getProcessor().insert(ff_channel.getProcessor(), 0, 0)
        ff_channel.close()
        df_channel = WindowManager.getImage("Dark-field:%s" % input_image.title)
        df_image.slice = channel + 1
        df_image.getProcessor().insert(df_channel.getProcessor(), 0, 0)
        df_channel.close()

        print("\n\n")

    template = '%s/%s-%%s.tif' % (output_dir, experiment_name)
    ff_filename = template % 'ffp'
    IJ.saveAsTiff(ff_image, ff_filename)
    ff_image.close()
    df_filename = template % 'dfp'
    IJ.saveAsTiff(df_image, df_filename)
    df_image.close()

    print "Done!"
Example #5
def main():

    Interpreter.batchMode = True

    if (lambda_flat == 0) ^ (lambda_dark == 0):
        print ("ERROR: Both of lambda_flat and lambda_dark must be zero,"
               " or both non-zero.")
        return
    lambda_estimate = "Automatic" if lambda_flat == 0 else "Manual"

    print "Loading images..."

    options = ImporterOptions()
    options.setId(str(filename))
    options.setOpenAllSeries(True)
    options.setConcatenate(True)
    options.setSplitChannels(True)
    imps = BF.openImagePlus(options)

    num_channels = len(imps)
    w = imps[0].getWidth()
    h = imps[0].getHeight()
    ff_imp = IJ.createImage("Flat-field", w, h, num_channels, 32)
    df_imp = IJ.createImage("Dark-field", w, h, num_channels, 32)

    basic = Basic()
    Basic_noOfSlices = Basic.getDeclaredField('noOfSlices')
    Basic_noOfSlices.setAccessible(True)

    for channel, imp in enumerate(imps):
        title = imp.getTitle()
        print "Processing:", title
        x, y, c, z, t = imp.getDimensions()
        assert z == 1 and c == 1
        imp.setDimensions(1, t, 1)

        WindowManager.setTempCurrentImage(imp)
        Basic_noOfSlices.setInt(basic, t)
        basic.exec(
            imp, None, None,
            "Estimate shading profiles", "Estimate both flat-field and dark-field",
            lambda_estimate, lambda_flat, lambda_dark,
            "Ignore", "Compute shading only"
        )
        ff_channel = WindowManager.getImage('Flat-field:' + title)
        ff_channel.copy()
        ff_imp.setSlice(channel + 1)
        ff_imp.paste()
        ff_channel.close()

        df_channel = WindowManager.getImage('Dark-field:' + title)
        df_channel.copy()
        df_imp.setSlice(channel + 1)
        df_imp.paste()
        df_channel.close()

        imp.close()

    # Setting the active slice back to 1 seems to fix an issue where
    # the last slice was empty in the saved TIFFs. Not sure why.
    ff_imp.setSlice(1)
    df_imp.setSlice(1)

    ff_filename = '%s/%s-ffp-basic.tif' % (output_dir, experiment_name)
    IJ.saveAsTiff(ff_imp, ff_filename)
    ff_imp.show()
    ff_imp.close()

    df_filename = '%s/%s-dfp-basic.tif' % (output_dir, experiment_name)
    IJ.saveAsTiff(df_imp, df_filename)
    df_imp.show()
    df_imp.close()

    print "Done!"
Example #6
    "#mobsters", "#nova"
]  # The first channel in the array is the active game channel; the second is the ops channel

#Two bots are required: the first is the main game bot and the second is the missions bot
bots = [[rizon, Utility.Identity("mobsters", "mobster", None, ajoin)],
        [rizon, Utility.Identity("DonVito", "dv", None, ajoin)]]

eng = Engine.Engine(event, bots, scheduler,
                    logger)  #event, bots, sched, logger
eng.soft_start()  # This is a mobster's mod
bots = eng.get_bots()
#(CORE) modules we are loading
scheduler.schedule_event(AuthSys.AuthSys(eng))
scheduler.schedule_event(KeepAlive.KeepAlive(eng))
event.add_mod(SpamFilter.SpamFilter(bots[0], eng))  #SpamFilter (mobsters only)
event.add_mod(Basic.Basic(eng))

## GAME WORK

#create the game instance
mobs = Mobster.Mobster(eng, bots[0], ajoin[0],
                       Security.Security(bots[0], ajoin))
msnclk = MissionClock.MissionClock(bots[1], mobs.host_channel)
regen = Regen.Regen(mobs, bots[0])

#Module Instances
event.add_mod(mobs)  #Mobster Game (mobsters only)
event.add_mod(LegalAffairs.LegalAffairs(bots[0], mobs,
                                        regen))  #Mobs (mobsters only)
event.add_mod(Missions.Missions(bots[1], mobs, msnclk))  #DonVito
Example #7
def main():

    Interpreter.batchMode = True

    if (lambda_flat == 0) ^ (lambda_dark == 0):
        print ("ERROR: Both of lambda_flat and lambda_dark must be zero,"
               " or both non-zero.")
        return
    lambda_estimate = "Automatic" if lambda_flat == 0 else "Manual"

    #import pdb; pdb.set_trace()
    print "Loading images..."
    filenames = enumerate_filenames(pattern)
    num_channels = len(filenames)
    num_images = len(filenames[0])
    image = Opener().openImage(filenames[0][0])
    width = image.width
    height = image.height
    image.close()

    # The internal initialization of the BaSiC code fails when we invoke it via
    # scripting, unless we explicitly set the private 'noOfSlices' field.
    # Since it's private, we need to use Java reflection to access it.
    Basic_noOfSlices = Basic.getDeclaredField('noOfSlices')
    Basic_noOfSlices.setAccessible(True)
    basic = Basic()
    Basic_noOfSlices.setInt(basic, num_images)

    # Pre-allocate the output profile images, since we have all the dimensions.
    ff_image = IJ.createImage("Flat-field", width, height, num_channels, 32)
    df_image = IJ.createImage("Dark-field", width, height, num_channels, 32)

    print("\n\n")

    # BaSiC works on one channel at a time, so we only read the images from one
    # channel at a time to limit memory usage.
    for channel in range(num_channels):
        print "Processing channel %d/%d..." % (channel + 1, num_channels)
        print "==========================="

        stack = ImageStack(width, height, num_images)
        opener = Opener()
        for i, filename in enumerate(filenames[channel]):
            print "Loading image %d/%d" % (i + 1, num_images)
            image = opener.openImage(filename)
            stack.setProcessor(image.getProcessor(), i + 1)
        input_image = ImagePlus("input", stack)

        # BaSiC seems to require that the input image be the ImageJ "current"
        # image; otherwise it prints an error and aborts.
        WindowManager.setTempCurrentImage(input_image)
        basic.exec(
            input_image, None, None,
            "Estimate shading profiles", "Estimate both flat-field and dark-field",
            lambda_estimate, lambda_flat, lambda_dark,
            "Ignore", "Compute shading only"
        )
        input_image.close()

        # Copy the pixels from the BaSiC-generated profile images to the
        # corresponding channel of our output images.
        ff_channel = WindowManager.getImage("Flat-field:%s" % input_image.title)
        ff_image.slice = channel + 1
        ff_image.getProcessor().insert(ff_channel.getProcessor(), 0, 0)
        ff_channel.close()
        df_channel = WindowManager.getImage("Dark-field:%s" % input_image.title)
        df_image.slice = channel + 1
        df_image.getProcessor().insert(df_channel.getProcessor(), 0, 0)
        df_channel.close()

        print("\n\n")

    template = '%s/%s-%%s.tif' % (output_dir, experiment_name)
    ff_filename = template % 'ffp'
    IJ.saveAsTiff(ff_image, ff_filename)
    ff_image.close()
    df_filename = template % 'dfp'
    IJ.saveAsTiff(df_image, df_filename)
    df_image.close()

    print "Done!"
Example #8
import Basic
import sys

Q = Basic.Basic()
Input = []

while True:
    line = sys.stdin.readline()
    if not line:  # EOF: stop instead of indexing into an empty token list
        break
    Input.extend(line.rstrip().split())
    if not Input:  # skip blank lines
        continue

    if Input[0] == "empty":
        print(Q.IsEmpty())
    elif Input[0] == "size":
        print(Q.Size())
    elif Input[0] == "front":
        print(Q.Front())
    elif Input[0] == "back":
        print(Q.Back())
    elif Input[0] == "pop":
        print(Q.Pop())
    else:
        print(Q.Push(int(Input[1])))

    Input.clear()
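The Basic module itself is not shown. A minimal sketch of a queue class matching the method names used by the driver above (the deque backing and the -1 returns for empty-queue queries are assumptions, loosely following the usual console-queue exercise):

from collections import deque

class Basic:
    """Hypothetical queue matching the driver above; details are assumed."""

    def __init__(self):
        self._items = deque()

    def Push(self, value):
        self._items.append(value)
        return value              # the driver prints whatever Push returns

    def Pop(self):
        return self._items.popleft() if self._items else -1

    def Front(self):
        return self._items[0] if self._items else -1

    def Back(self):
        return self._items[-1] if self._items else -1

    def IsEmpty(self):
        return 1 if not self._items else 0

    def Size(self):
        return len(self._items)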
Example #9
def main():

    Interpreter.batchMode = True

    if (lambda_flat == 0) ^ (lambda_dark == 0):
        print ("ERROR: Both of lambda_flat and lambda_dark must be zero,"
               " or both non-zero.")
        return
    lambda_estimate = "Automatic" if lambda_flat == 0 else "Manual"

    print "Loading images..."
    filenames = enumerate_filenames(pattern)
    if len(filenames) == 0:
        return
    # This is the number of channels inferred from the filenames. The number
    # of channels in an individual image file will be determined below.
    num_channels = len(filenames)
    num_images = len(filenames[0])
    image = Opener().openImage(filenames[0][0])
    if image.getNDimensions() > 3:
        print "ERROR: Can't handle images with more than 3 dimensions."
        image.close()
        return
    (width, height, channels, slices, frames) = image.getDimensions()
    # The third dimension could be any of these three, but the other two are
    # guaranteed to be equal to 1 since we know NDimensions is <= 3.
    image_channels = max((channels, slices, frames))
    image.close()
    if num_channels > 1 and image_channels > 1:
        print (
            "ERROR: Can only handle single-channel images with {channel} in"
            " the pattern, or multi-channel images without {channel}. The"
            " filename patterns imply %d channels and the images themselves"
            " have %d channels." % (num_channels, image_channels)
        )
        return
    if image_channels == 1:
        multi_channel = False
    else:
        print (
            "Detected multi-channel image files with %d channels"
            % image_channels
        )
        multi_channel = True
        num_channels = image_channels
        # Clone the filename list across all channels. We will handle reading
        # the individual image planes for each channel below.
        filenames = filenames * num_channels

    # The internal initialization of the BaSiC code fails when we invoke it via
    # scripting, unless we explicitly set the private 'noOfSlices' field.
    # Since it's private, we need to use Java reflection to access it.
    Basic_noOfSlices = Basic.getDeclaredField('noOfSlices')
    Basic_noOfSlices.setAccessible(True)
    basic = Basic()
    Basic_noOfSlices.setInt(basic, num_images)

    # Pre-allocate the output profile images, since we have all the dimensions.
    ff_image = IJ.createImage("Flat-field", width, height, num_channels, 32)
    df_image = IJ.createImage("Dark-field", width, height, num_channels, 32)

    print("\n\n")

    # BaSiC works on one channel at a time, so we only read the images from one
    # channel at a time to limit memory usage.
    for channel in range(num_channels):
        print "Processing channel %d/%d..." % (channel + 1, num_channels)
        print "==========================="

        stack = ImageStack(width, height, num_images)
        opener = Opener()
        for i, filename in enumerate(filenames[channel]):
            print "Loading image %d/%d" % (i + 1, num_images)
            # For multi-channel images the channel determines the plane to read.
            args = [channel + 1] if multi_channel else []
            image = opener.openImage(filename, *args)
            stack.setProcessor(image.getProcessor(), i + 1)
        input_image = ImagePlus("input", stack)

        # BaSiC seems to require that the input image be the ImageJ "current"
        # image; otherwise it prints an error and aborts.
        WindowManager.setTempCurrentImage(input_image)
        basic.exec(
            input_image, None, None,
            "Estimate shading profiles", "Estimate both flat-field and dark-field",
            lambda_estimate, lambda_flat, lambda_dark,
            "Ignore", "Compute shading only"
        )
        input_image.close()

        # Copy the pixels from the BaSiC-generated profile images to the
        # corresponding channel of our output images.
        ff_channel = WindowManager.getImage("Flat-field:%s" % input_image.title)
        ff_image.slice = channel + 1
        ff_image.getProcessor().insert(ff_channel.getProcessor(), 0, 0)
        ff_channel.close()
        df_channel = WindowManager.getImage("Dark-field:%s" % input_image.title)
        df_image.slice = channel + 1
        df_image.getProcessor().insert(df_channel.getProcessor(), 0, 0)
        df_channel.close()

        print("\n\n")

    template = '%s/%s-%%s.tif' % (output_dir, experiment_name)
    ff_filename = template % 'ffp'
    IJ.saveAsTiff(ff_image, ff_filename)
    ff_image.close()
    df_filename = template % 'dfp'
    IJ.saveAsTiff(df_image, df_filename)
    df_image.close()

    print "Done!"