Example #1
def main():
    print('executing...')
    im = util.open('in/image1.png')
    openingFile = "out/opening-" + datetime.now().strftime('%Y%m%d%H%M%S') + ".png"
    arr = util.toBytes(im)

    start = time.time()
    arr = grayScale.averaging(arr)
    print('grayscale : ' + str(time.time() - start))

    start = time.time()
    arr = smoothening.median(arr, im.width, im.height, 2)
    print('median : ' + str(time.time() - start))

    start = time.time()
    arr = util.uniformedThresholding(arr, im.width, im.height, (125, 125, 125))
    print('uniformedThresholding : ' + str(time.time() - start))

    start = time.time()
    window = [(0, 0, 0)] * 3 * 3
    opened = opening(arr, im.width, im.height, window, 3, 3, window, 3, 3)
    print('opening : ' + str(time.time() - start))

    util.save(openingFile, opened, im.mode, im.size)
    print('done executing...')
Example #2
def saveChunks(inputDir, outputDir, relevanceThreshold=255):
    print("saveChunks from %s to %s" % (inputDir, outputDir))
    imageIndex = 1
    for fileName in os.listdir(inputDir):
        path = inputDir + '/' + fileName
        f, file_extension = os.path.splitext(path)

        if file_extension == '.png':
            maskImage = ImageUtils.preprocessMask(Image.open(path), 210)
            chunks = ImageUtils.splitImageInChunks(maskImage, chunkWidth, chunkHeight)
            for index, c in enumerate(chunks):
                if chunkIsRelevant(c):
                    c.save("%s/chunk-%s-%s-%s%s" % (outputDir, imageIndex, int(index % 20), int(index // 20), file_extension))
            imageIndex += 1
            continue

        if not isfile(path) or file_extension != '.jpg':
            continue

        print("- opening " + path)

        srcImage = ImageUtils.preprocessImage(Image.open(path), 240)
        chunks = ImageUtils.splitImageInChunks(srcImage, chunkWidth, chunkHeight)

        for index, c in enumerate(chunks):
            savePath = "%s/chunk-%s-%s-%s%s" % (outputDir, imageIndex, int(index % 20), int(index // 20), file_extension)
            if chunkIsRelevant(c, relevanceThreshold):
                c.save(savePath, quality=95)

    print("savechunks - finished")
Example #3
    def transform(self):
        self.target = self.component.transform()
        hessian = Hessian()
        img_max_eigenvalues, img_min_eigenvalues = hessian.calculate(
            self.target, self.scale)
        utils = ImageUtils()
        return utils.max(img_max_eigenvalues, img_min_eigenvalues)
Example #4
def print_remove(image_path):
    src = teacher_remove(image_path)
    image = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)

    ret1, th1 = cv2.threshold(image, 200, 255, cv2.THRESH_BINARY)

    ImageUtils.show_images([image, th1], 1)
Example #6
    def getDuring(self):
        print("getDuring")
        timeInfo = ImageUtils.ocr(
            ImageUtils.image_to_bytes(
                ImageUtils.screenshotByImage("./boxue_img/play_player.png", 60,
                                             0, 45, 26)))
        during = timeUtils.t2s(timeInfo['words_result'][0]['words'])
        print(during)
        return during
Example #7
    def getDuring(self):
        timeInfo = ImageUtils.ocr(
            ImageUtils.image_to_bytes(
                ImageUtils.screenshotByImage('./img/player_during.png', 0, 0,
                                             150, 40)))
        during = timeUtils.handleOcrTime(
            timeInfo['words_result'][0]['words'].split('/'))
        print(during)
        return during
Example #8
def getImageAndMaskDifferenceChunks(imgPath):
    srcImage, maskImage = getImageAndMask(imgPath)

    srcImage = ImageUtils.preprocessImage(srcImage, 240)
    maskImage = ImageUtils.preprocessMask(maskImage, 210)

    imageChunks = ImageUtils.splitImageInChunks(srcImage, chunkWidth, chunkHeight)
    maskChunks = ImageUtils.splitImageInChunks(maskImage, chunkWidth, chunkHeight)

    return ImageUtils.getDifferenceChunks(imageChunks, maskChunks)
Example #9
def main():
    im = util.open('in/image1.png')
    arr = util.toBytes(im)
    out = []
    fileMedian = "out/m-" + datetime.now().strftime('%Y%m%d%H%M%S') + ".png"
    print("Executing... ")
    start = time.time()
    out = median(arr, im.width, im.height, 2)
    print('median : ' + str(time.time() - start))
    util.save(fileMedian, out, im.mode, im.size)
    print("done saving ")
Example #10
    def ComputeMeanStd(self, img):
        if ImageUtils.IsGrayImage(img):
            gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        else:
            gray = ImageUtils.GrayImage(img)
        th, binGray = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        blurBin = cv2.blur(binGray, (5, 5))  # smoothed binary mask (not used below)
        meanPre, stddevPre = ImageUtils.GetMeanStd(gray, binGray, 'pre')
        meanBack, stddevBack = ImageUtils.GetMeanStd(gray, binGray, 'back')
        return meanPre, stddevPre, meanBack, stddevBack
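
ImageUtils.GetMeanStd is not shown in these examples. As a rough sketch of what it plausibly computes (assuming 'pre' selects the foreground pixels kept by the Otsu mask and 'back' the rest; the helper name below is hypothetical), OpenCV's masked meanStdDev does the job:

import cv2

def get_mean_std_sketch(gray, bin_gray, which='pre'):
    # Select foreground ('pre') or background ('back') pixels via the mask.
    mask = bin_gray if which == 'pre' else cv2.bitwise_not(bin_gray)
    mean, stddev = cv2.meanStdDev(gray, mask=mask)
    return mean[0][0], stddev[0][0]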
Example #11
def teacher_remove(image_path):
    src = cv2.imread(image_path)  # shape: (1080, 1863, 3)
    image = src.copy()
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # CLAHE (Contrast Limited Adaptive Histogram Equalization)
    ImageUtils.clahe_hsv(hsv)
    mask = ImageUtils.get_hsvmask(hsv, 160, 70, 70, 30)
    mask = ImageUtils.erode_dilate(mask, 3)

    result = ImageUtils.swap_color(image, mask, 255)
    # ImageUtils.show_images([src, result], 1)
    return result
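
The CLAHE comment above refers to ImageUtils.clahe_hsv, whose body is not shown here. A minimal sketch of what such a helper commonly does (an assumption, not the repo's actual code) is to equalize the V channel of the HSV image in place:

import cv2

def clahe_hsv_sketch(hsv, clip_limit=2.0, tile=(8, 8)):
    # Apply Contrast Limited Adaptive Histogram Equalization to the
    # value (brightness) channel only, leaving hue and saturation untouched.
    clahe = cv2.createCLAHE(clipLimit=clip_limit, tileGridSize=tile)
    hsv[:, :, 2] = clahe.apply(hsv[:, :, 2])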
Example #12
    def process_item(self, item, spider):
        formate = item['cover'][item['cover'].rindex('.'):]

        dirPath = 'cover/' + item['artist_name']
        filePath = 'cover/' + item['artist_name'] + '/' + item['album_name'] + formate

        if os.path.exists(filePath):
            raise DropItem("Duplicate item found: %s" % item['album_name'])
        else:
            if not os.path.exists(dirPath):
                os.makedirs(dirPath)

            f = urllib2.urlopen(item['cover'])
            with open(filePath, "wb") as code:
                code.write(f.read())
            f.close()

            info = ImageUtils.get_image_info(filePath)

            item['cover_path'] = filePath
            item['cover_color'] = info[0]
            item['cover_width'] = info[1]
            item['cover_height'] = info[2]

            return item
Example #13
    def fft(self):
        """
        Converts the color to l-values, then calculates the 2 dimensional
        Fourier transform.
        Displays the values shifted to the center of the image, on a
        logarithmic scale.
        """

        gray = ImageUtils.rgb2grey_fixed(self.image_array)

        transformed = numpy.fft.fft2(gray)
        fshift = numpy.fft.fftshift(transformed)
        fshift = 20 * numpy.log(numpy.abs(fshift))
        fshift -= fshift.min()  # shift the minimum to zero before scaling
        fshift *= (255 / fshift.max())

        pyplot.figure(1)
        pyplot.subplot(121)
        pyplot.imshow(gray, cmap="gray")
        pyplot.title("Input image")

        pyplot.subplot(122)
        pyplot.imshow(fshift, cmap="Set1")
        pyplot.title("Fourier transformation")
        pyplot.show()
Example #14
def handle_extract_face_features():
    face = ImageUtils.decode_image_buffer(
        bytearray(request.json['face']['data']))
    aligned_face = RecogitionService.align_face(face)
    face_features = RecogitionService.extract_features(aligned_face)

    return jsonify({"model": list(face_features)})
Example #15
	def read_processed(self):
		frame = None
		with self.captureLock:
			frame = self.processing_frame	
		frame = ImageUtils.resize_mjpeg(frame)
		ret, jpeg = cv2.imencode('.jpg', frame)
		return jpeg.tostring()
Example #16
    def sobel(self):
        """
        Performs the sobel operator in x and y direction and combines the
        output. The output is normalized by using contrast_stretch.
        """

        gray = ImageUtils.rgb2grey_fixed(self.image_array) / 255

        sobel_x = ndimage.sobel(gray, 0)
        sobel_y = ndimage.sobel(gray, 1)
        sobel_xy = numpy.hypot(sobel_x, sobel_y)
        sobel_xy = ImageUtils.normalize_intensity_p298(sobel_xy)
        # sobel_xy *= (255 / sobel_xy.max())
        sobel_xy = ImageUtils.rgb2grey_fixed(sobel_xy)

        self._update_image_data(sobel_xy, "Sobel edge-detection")
Example #17
def trainOnImage(imageId, model, cbs, modelParams):

    logging.info("Training on image: {0}".format(imageId))

    (img, mask) = ImageUtils.loadImage(imageId)
    (imgs, classes, masks) = genPatches(img, mask, modelParams)

    (x_train, x_cv, y_train, y_cv) = scv.train_test_split(imgs,
                                                          masks,
                                                          test_size=0.2)
    y_train_cat = np_utils.to_categorical(y_train.flatten(),
                                          modelParams.nb_classes)
    y_train_cat = y_train_cat.reshape(
        (y_train.shape[0], y_train.shape[1] * y_train.shape[2],
         modelParams.nb_classes))

    model.fit(x_train,
              y_train_cat,
              nb_epoch=modelParams.epochs,
              batch_size=modelParams.batchSize,
              callbacks=cbs,
              validation_split=0.2)

    logging.info("Training completed, evaluating model")

    y_cv_cat = np_utils.to_categorical(y_cv.flatten(), modelParams.nb_classes)
    y_cv_cat = y_cv_cat.reshape(
        (y_cv.shape[0], y_cv.shape[1] * y_cv.shape[2], modelParams.nb_classes))

    loss = model.evaluate(x_cv, y_cv_cat, batch_size=modelParams.batchSize)

    logging.info("Loss: {0}".format(str(loss)))

    return model
Example #18
    def grayscale(self):
        """
        Converts RGB values of the image to luminosity values, thus giving
        the image a 'grayscale' effect.
        """

        grey = ImageUtils.rgb2grey_fixed(self.image_array)
        self._update_image_data(grey, "Grayscale")
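
The weighting used by rgb2grey_fixed is not shown; a common luminosity formula (ITU-R BT.601 coefficients, an assumption about what the helper uses) looks like this:

import numpy as np

def rgb_to_luminosity(image_array):
    # Weighted channel sum; the result is a 2-D array of grey levels.
    return np.asarray(image_array)[..., :3] @ np.array([0.299, 0.587, 0.114])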
Example #19
    def contrast_stretch(self):
        """
        Rescales the intensity values of the image based on the 2 and 98
        percentiles.
        """

        normalized = ImageUtils.normalize_intensity_p298(self.image_array)
        self._update_image_data(normalized, "Contrast stretch")
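
Assuming normalize_intensity_p298 implements the 2-98 percentile rescale the docstring describes, a minimal stand-alone sketch (skimage.exposure.rescale_intensity offers the same operation) would be:

import numpy as np

def stretch_p2_p98(arr):
    # Clip to the 2nd/98th percentiles, then rescale to the full 0-255 range.
    p2, p98 = np.percentile(arr, (2, 98))
    return np.clip((arr - p2) * 255.0 / (p98 - p2), 0, 255).astype(np.uint8)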
Example #20
def TraverseLine(img, w, h, fromDirection, linePointThresold, directions, count, continuesDo = None):
    def callContinus():
        if continuesDo is None:
            return True

        newDirections = []
        for direction in directions:
            newDirections.append((-direction[0], -direction[1]))

        return continuesDo(newDirections, count)

    if w <= 1 or w >= img.width - 1:
        return callContinus()

    if h <= 1 or h >= img.height - 1:
        return callContinus()

    count += 1
    c = img.getpixel((w, h))
    similars = ImageUtils.getSimilarNeighbors(img, c, (w, h), [(0,0), fromDirection])
    similarCount = len(similars)
    if similarCount > 1:
        if count >= linePointThresold:
            if continuesDo is None:
                img.putpixel((w, h), color.BLACK_COLOR)
                return True
            elif callContinus():
                img.putpixel((w, h), color.BLACK_COLOR)
                return True
        return False

    if 0 == similarCount:
        if callContinus():
            img.putpixel((w, h), color.BLACK_COLOR)
            return True
        return False

    sw, sh = similars[0]
    if 0 == len(directions):
        directions = similars
    elif 1 == len(directions):
        dw, dh = directions[0]
        distW, distH = (sw - dw, sh - dh)
        dist = distW * distW + distH * distH
        if 0 == dist:
            pass
        elif 1 == dist:
            directions.append((sw, sh))
        else:
            return False
    elif (sw, sh) in directions:
        pass
    else:
        return False

    if TraverseLine(img, w + sw, h + sh, (-sw, -sh), linePointThresold, directions, count, continuesDo):
        img.putpixel((w, h), color.BLACK_COLOR)
        return True
    return False
Example #21
def feature():
    req_parm = request.json
    print(type(req_parm))
    import faceUtils
    import ImageUtils
    image_array = ImageUtils.base64_to_array(req_parm[u'hello'])
    feature = faceUtils.face_feature(image_array)
    # face_cuts = faceUtils.face_cut(image_array)
    return json.dumps(feature, ensure_ascii=False)
Example #22
    def laplace(self, sigma=1):
        """
        Performs the laplacian filter based on gaussian derivatives.
        This allows for controlling the level of detail in the output image.
        """

        gray = ImageUtils.rgb2grey_fixed(self.image_array)
        laplace = ndimage.gaussian_laplace(gray, sigma)
        self._update_image_data(laplace, "Laplace edge-detection")
Example #23
    def getSplitRatio(self, remove=True):
        import ImageUtils as iu
        lowestMagnification = self.magnifications[
            -2]  # Sometimes the lowest magnification image is truncated
        imgPath = self.split(lowestMagnification, compression=COMPRESSION_NONE)
        splitRatio = iu.getSplitLeftRightColumnRatio(imgPath)
        if remove:
            os.remove(imgPath)
        return splitRatio
Example #24
def getImageMask(img, model):
    gall_t = ImageUtils.genPatches(img.shape[1:], (img_dim_y, img_dim_x), img_dim_x)
    (imgs_t, classes_t, masks_t) = ImageUtils.prepareDataSets(gall_t, img, mask)
    coords = [x for x in ImageUtils.genPatches(img.shape[1:], (img_dim_y, img_dim_x), img_dim_x)]
    all_rez = model.predict(imgs_t, batch_size=4)

    rez1 = np.array(all_rez)
    rez1[:,:,0:1] *= 0.1
    rez_img = np.argmax(rez1, axis = 2)
    rez_img = rez_img.reshape((-1, img_dim_y, img_dim_x))

    mask_rez = np.zeros(img.shape[1:])

    for i in range(len(coords)):
        (y, x, h, w) = coords[i]
        mask_rez[y:(y+h), x:(x+w)] = rez_img[i]

    return mask_rez
Example #25
def clean(img, linePointThresold = 5):
    for h in range(1, img.height - 1):
        for w in range(1, img.width - 1):
            c = img.getpixel((w, h))
            if color.isBackground(c):
                img.putpixel((w, h), color.BLACK_COLOR)
            else:
                similars = ImageUtils.getSimilarNeighbors(img, c, (w, h), [(0,0)])
                similarCount = len(similars)
                if 0 == similarCount:
                    if len(ImageUtils.getBackgrondNeighbors(img, c, (w, h))) == 7:
                        img.putpixel((w, h), color.BLACK_COLOR)
                    continue

                count = 1
                if 1 == similarCount:
                    ww, hh = similars[0]
                    if TraverseLine(img, w + ww, h + hh, (-ww, -hh), linePointThresold, similars, count):
                        img.putpixel((w, h), color.BLACK_COLOR)
                elif 2 == similarCount:
                    ww, hh = similars[0]
                    ww1, hh1 = similars[1]

                    if (ww * ww1 + hh * hh1) >= 0:
                        img.putpixel((w, h), color.BLACK_COLOR)
                        continue

                    if (1, 0) not in similars or (-1, 1) not in similars:
                        if hh1 > hh or (hh1 == hh and ww1 > ww):
                            ww, ww1 = ww1, ww
                            hh, hh1 = hh1, hh

                        if TraverseLine(img, w + ww, h + hh, (-ww, -hh), linePointThresold, similars, count):
                            img.putpixel((w, h), color.BLACK_COLOR)
                            img.putpixel((w + ww1, h + hh1), color.BLACK_COLOR)

                        continue


                    from functools import partial
                    continueDo = partial(TraverseLine, img, w + ww1, h + hh1, (-ww1, -hh1), linePointThresold)
                    if TraverseLine(img, w + ww, h + hh, (-ww, -hh), linePointThresold, [similars[0], (-ww1, -hh1)], count, continueDo):
                        img.putpixel((w, h), color.BLACK_COLOR)
Example #26
    def read_processed(self):
        frame = None
        with self.captureLock:
            frame = self.processing_frame
        while frame is None:  # If there are problems, keep retrying until an image can be read.
            with self.captureLock:
                frame = self.processing_frame

        frame = ImageUtils.resize_mjpeg(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tostring()
Example #27
    def read_jpg(self):
        """We are using Motion JPEG, and OpenCV captures raw images,
		so we must encode it into JPEG in order to stream frames to
		the client. It is nessacery to make the image smaller to
		improve streaming performance"""

        capture_blocker = self.captureEvent.wait()
        frame = self.captureFrame
        frame = ImageUtils.resize_mjpeg(frame)
        ret, jpeg = cv2.imencode('.jpg', frame)
        return jpeg.tostring()
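
To show where read_jpg() fits, here is a sketch of the multipart/x-mixed-replace response a Motion JPEG stream uses; the Flask app and the `camera` object are hypothetical, not part of the original class:

from flask import Flask, Response

app = Flask(__name__)
camera = None  # hypothetical: an instance of the capture class above

def mjpeg_generator(cam):
    # Each JPEG frame is wrapped in a multipart boundary; browsers render
    # successive parts as a live stream.
    while True:
        frame = cam.read_jpg()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')

@app.route('/video')
def video():
    return Response(mjpeg_generator(camera),
                    mimetype='multipart/x-mixed-replace; boundary=frame')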
Example #28
def handle_recognize_face_from_features():
    features = np.array(request.json['model'])
    face_to_recognize = ImageUtils.decode_image_buffer(
        bytearray(request.json['face']['data']))
    aligned_face = RecogitionService.align_face(face_to_recognize)
    face_to_recognize_features = RecogitionService.extract_features(
        aligned_face)
    are_same = RecogitionService.recognize(features,
                                           face_to_recognize_features)

    return jsonify({"areSame": are_same})
Example #29
    def __getitem__(self, index):
        fg_index = index
        bg_index = index % len(self.backgrounds)

        # fetching foreground and background images
        fg_img, label = self.foregrounds[fg_index]
        bg_img, _ = self.backgrounds[bg_index]

        # add alpha channel to foreground
        fg_img = self.add_alpha(fg_img)

        # random cropping background
        bg_height = np.random.randint(bg_img.shape[0] // 2, bg_img.shape[0])
        bg_width = np.random.randint(bg_img.shape[1] // 2, bg_img.shape[1])
        x0 = self.safe_random(0, bg_img.shape[1] - bg_width)
        y0 = self.safe_random(0, bg_img.shape[0] - bg_height)

        bg_crop = bg_img[y0:y0 + bg_height, x0:x0 + bg_width]
        bg_img = cv2.resize(bg_crop,
                            self.target_size,
                            interpolation=cv2.INTER_AREA)

        # random squeezing foreground
        fg_width = fg_img.shape[1]
        fg_height = fg_img.shape[0]

        random_w_squeeze = 1 - np.random.rand() * 0.2
        random_h_squeeze = 1 - np.random.rand() * 0.2
        tgt_fg_width = min(self.target_size[0],
                           int(fg_width * random_w_squeeze))
        tgt_fg_height = min(self.target_size[1],
                            int(fg_height * random_h_squeeze))

        fg_img = cv2.resize(fg_img, (tgt_fg_width, tgt_fg_height),
                            interpolation=cv2.INTER_AREA)

        # augmentation of BG/FG
        fg_img = self.augment(fg_img)
        bg_img = self.augment(bg_img)

        # alpha-blending
        blended = self.blend(fg_img, bg_img)

        # augmentation of composite
        blended = self.augment_blended(blended)

        rgb_tensor = ImageUtils.image_to_tensor(blended, unsqueeze=False)

        # getting label index
        label_index = self.foregrounds.labels.index(label)

        label_tensor = torch.LongTensor([label_index])

        return {'rgb': rgb_tensor, 'label': label_tensor}
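
self.blend is not shown; a sketch of standard alpha compositing, assuming the foreground carries the alpha channel that add_alpha attached and is pasted onto the resized background:

import numpy as np

def blend_sketch(fg_rgba, bg, x0=0, y0=0):
    # Composite an RGBA foreground over a 3-channel background at (x0, y0).
    h, w = fg_rgba.shape[:2]
    alpha = fg_rgba[:, :, 3:4].astype(np.float32) / 255.0
    roi = bg[y0:y0 + h, x0:x0 + w].astype(np.float32)
    fg = fg_rgba[:, :, :3].astype(np.float32)
    out = bg.copy()
    out[y0:y0 + h, x0:x0 + w] = (alpha * fg + (1 - alpha) * roi).astype(np.uint8)
    return out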
Example #30
def handleEdge(sortedWindow):
    output = sortedWindow
    idx = 0
    resort = 0
    while output[idx] == util.downSignal():
        output[idx] = output[len(output) - 1 - idx]
        idx = idx + 1
        resort = 1
    if resort == 1:
        output = sorted(output, key=itemgetter(0, 1, 2))
    return output
Example #31
def test(image_path):
    src = cv2.imread(image_path)  # shape: (1080, 1863, 3)
    image = src.copy()
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    #CLAHE (Contrast Limited Adaptive Histogram Equalization)
    ImageUtils.clahe_hsv(hsv)
    mask = ImageUtils.get_hsvmask(hsv, 170, 90, 64, 15)
    mask = ImageUtils.erode_dilate(mask)
    result = ImageUtils.swap_color(image, mask, 255)
    ImageUtils.show_images([src, mask, result], 3)
Example #32
def main():
    print('Executing Histograms.main')
    im = util.open('in/captcha.png')
    fileName = "out/normalized-" + datetime.now().strftime('%Y%m%d%H%M%S') + ".png"
    fileName2 = "out/equalized-" + datetime.now().strftime('%Y%m%d%H%M%S') + ".png"
    arr = util.toBytes(im)
    arr = grayScale.averaging(arr)
    util.save("out/tempGray.png", arr, im.mode, im.size)
    # doesn't matter which color index because the image is gray.
    normalized = normalize(arr, im.width, im.height, 0)
    equalized = equalize(arr, im.width, im.height, 0)
    util.save(fileName, normalized, im.mode, im.size)
    util.save(fileName2, equalized, im.mode, im.size)
    print('Done executing Histograms.main')
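
The internals of equalize() are not shown; the textbook histogram-equalization mapping it presumably applies to a single 8-bit channel looks like this (a sketch, not the repo's code):

import numpy as np

def equalize_channel(values, levels=256):
    # Build the CDF of the intensity histogram and remap it onto 0..levels-1.
    hist = np.bincount(values.ravel(), minlength=levels)
    cdf = np.cumsum(hist).astype(np.float64)
    cdf = (cdf - cdf.min()) * (levels - 1) / (cdf.max() - cdf.min())
    return cdf.astype(np.uint8)[values]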
Example #33
def main():
    im = util.open('in/captcha.png')
    arr = util.toBytes(im)
    fileAverage = "out/a-" + datetime.now().strftime('%Y%m%d%H%M%S') + ".png"
    fileLuminance = "out/l-" + datetime.now().strftime('%Y%m%d%H%M%S') + ".png"
    fileLightness = "out/li-" + datetime.now().strftime('%Y%m%d%H%M%S') + ".png"
    print("Executing... ")
    averaged = averaging(arr)
    luminous = luminousity(arr)
    lightnessed = lightness(arr)
    util.save(fileAverage, averaged, im.mode, im.size)
    util.save(fileLuminance, luminous, im.mode, im.size)
    util.save(fileLightness, lightnessed, im.mode, im.size)
    print("done saving ")
Example #34
def median2(input, w, h, ww, wh):
    output = []
    window = [util.downSignal()] * ww * wh
    for row in range(h):
        for col in range(w):
            origin = row * w + col
            for windowY in range(wh):
                for windowX in range(ww):
                    y = row + windowY - (wh // 2)
                    x = col + windowX - (ww // 2)
                    # The following nested conditions makes sure that the
                    # kernel's origin doesn't go out of the image's boundaries.
                    if (x >= 0 and y >= 0):
                        if (x < w and y < h):
                            index = y * w + x
                            wIndex = windowY * ww + windowX
                            window[wIndex] = input[index]
            window = sorted(window, key=itemgetter(0, 1, 2))
            if (util.isEdge(origin, w, h)):
                window = handleEdge(window)
            output.append(window[len(window) // 2])  # middle element = median
    return output
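
A quick way to sanity-check a hand-rolled median filter like median2 is SciPy's reference implementation; this comparison snippet is not from the original repo and assumes a single-channel image:

import numpy as np
from scipy import ndimage

img = np.random.randint(0, 256, (64, 64), dtype=np.uint8)
# size=3 corresponds to a ww = wh = 3 window; note SciPy reflects pixels at
# the borders, so edge results may differ from the handleEdge strategy above.
reference = ndimage.median_filter(img, size=3)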
Example #36
def activateOnImage(fnn, layerpath, saveWrongChunks=False, breakOnError=True, saveWrongImages=False):
    try:
        chunks = getImageAndMaskDifferenceChunks(layerpath)
    except IOError as e:
        print(e)
        return
    except ValueError as e:
        print(e)
        return

    fileName = os.path.basename(layerpath)

    index=1
    needToSaveImage = False
    for c in chunks:
        cFlattened = np.asarray(c).flatten()
        estimate = fnn.activate(cFlattened)
        if estimate[0] < 0.7:
            needToSaveImage = True
            print "estimated error on %s. probability OK / KO: %s/%s" % (fileName, estimate[0], estimate[1])
            if saveWrongChunks:
                c.save(outputPath+"/chunk-%s.jpg" %(index))

            if breakOnError:
                break
            index+=1

    if needToSaveImage and saveWrongImages:
        srcImage, maskImage = getImageAndMask(layerpath)
        srcImage.save(outputPath+"/image-%s-original-image.jpg" %(index))
        maskImage.save(outputPath+"/image-%s-original-mask.png" %(index))

        srcImage = ImageUtils.preprocessImage(srcImage,240)
        maskImage = ImageUtils.preprocessMask(maskImage, 210)
        srcImage.save(outputPath+"/image-%s-elab-image.jpg" %(index))
        maskImage.save(outputPath+"/image-%s-elab-mask.png" %(index))

    return
Example #38
def SelectSameElementEx(origin, me, w, h, c, excludes, thresold = 0.5):
    if w <= WMargin or w >= (origin.width - WMargin):
        return 0

    if h < HMargin or h >= (origin.height - HMargin):
        return 0

    if not hasattr(me, 'left'):
        me.left = w
        me.right = w
        me.top = h
        me.bottom = h
    else:
        if w < me.left:
            me.left = w
        elif w > me.right:
            me.right = w

        if h < me.top:
            me.top = h
        elif h > me.bottom:
            me.bottom = h

    count = 1
    similars = ImageUtils.getSimilarNeighborsEx(origin, c, (w, h), excludes, thresold)
    s1 = []
    distMean = 0
    for (ww, hh, dist) in similars:
        cc = origin.getpixel((w + ww, h + hh))
        if color.isBackground(cc):
            continue

        distMean += dist
        s1.append((ww, hh, cc))

        origin.putpixel((w + ww, h + hh), color.BLACK_COLOR)
        me.putpixel((w + ww, h + hh), cc)

    if 0 == len(s1):
        return count
    distMean /= len(s1)
    thresold = thresold * SAME_ELEMENT_THRESOLD_ALPHA + (1 - SAME_ELEMENT_THRESOLD_ALPHA) * distMean
    #print('%s thresold=%lf, distMean=%lf' % (FILEANME, thresold, distMean))

    for (ww, hh, cc) in s1:
        count += SelectSameElementEx(origin, me, w + ww, h + hh, cc, [(0, 0), (-ww, -hh)], thresold)

    return count
Example #39
    def threshold(self, threshold, mode="binary"):

        gray = ImageUtils.rgb2grey_fixed(self.image_array)

        print(mode)

        if mode == "binary":

            for i in range(len(gray)):
                gray[i] = [(0 if x < threshold else 255) for x in gray[i]]

        elif mode == "binary_inverted":

            for i in range(len(gray)):
                gray[i] = [(255 if x < threshold else 0) for x in gray[i]]

        elif mode == "drop_smaller_to_zero":

            for i in range(len(gray)):
                gray[i] = [(0 if x < threshold else x) for x in gray[i]]

        elif mode == "raise_smaller_to_max":

            for i in range(len(gray)):
                gray[i] = [(255 if x < threshold else x) for x in gray[i]]

        elif mode == "drop_bigger_to_zero":

            for i in range(len(gray)):
                gray[i] = [(0 if x > threshold else x) for x in gray[i]]

        elif mode == "raise_bigger_to_max":

            for i in range(len(gray)):
                gray[i] = [(255 if x > threshold else x) for x in gray[i]]

        elif mode == "clip_bigger":

            for i in range(len(gray)):
                gray[i] = [(threshold if x > threshold else x)
                           for x in gray[i]]

        elif mode == "raise_smaller":

            for i in range(len(gray)):
                gray[i] = [(threshold if x < threshold else x)
                           for x in gray[i]]

        self._update_image_data(gray, "Threshold")
Example #40
def generateSamples(trainImages, modelParams):
    while True:
        # Choose an image
        idx = np.random.randint(0, len(trainImages))
        logging.info("Loading image: {0}".format(trainImages[idx]))
        (img, mask) = ImageUtils.loadImage(trainImages[idx])

        #img = cv2.GaussianBlur(img.reshape(img.shape[1:]), (5,5), 0).reshape(img.shape)

        # Make patches
        (imgs, classes, masks) = genPatches(img, mask, modelParams)

        nSamples = imgs.shape[0]
        idxs = np.random.permutation(np.arange(nSamples))[:modelParams.samples]
        logging.info("Subsamples: {0}".format(idxs))

        for i in range(modelParams.samples):
            yield (imgs[idxs[i]], masks[idxs[i]])
Example #41
def detectColor(color):
    return ImageUtils.detectColor(image, color, 15, 75)
Example #42
# extract peak frequencies
smallcm = IU.Circularmask(autoc,s/2,3)
pkfft = IU.PeakFilteredFFT(autoc,cxy,sigma=50)
pkfft_abs = np.abs(pkfft)*smallcm
#imshow_z(pkfft_abs)

# get reciprocal lattice vectors
pos = IU.FindPeakPos(pkfft_abs)
pos_zero = pos[:,3:5] - s/2
pos_zero = np.round(pos_zero,2)
bs = VF.find_basis_set(pos_zero.T)
r1,r2 = VF.get_reci_vecs2(bs[:,0],bs[:,1],1024)

fig1 = imshow_z(filt2s,vmin=0,vmax=350)
IU.draw_circles_along_vec(fig1,r1,[512,512],[1024,1024])
IU.draw_circles_along_vec(fig1,r2,[512,512],[1024,1024])

##########################################################################
#Extract peaks
##########################################################################
#sort the peaks by strength and recenter some of them
#thevecs1 = fill_space_with_basis2D(autoc,r1,r2,PY.array([512,512]))
thevecs1 = VF.fill_space_with_basis2D_2(autoc,r1,r2,np.array([512,512]))
#fig1 = imshow_z(filt2s,vmin=0,vmax=250)
#IU.draw_circles_at_vec(fig1,thevecs1)

# slightly shift the centroids and collect the intensities
cents,sums = IU.DiffPeakSums(filt2s,thevecs1)
fig1 = imshow_z(filt2s,vmin=0,vmax=250)
IU.draw_circles_at_vec(fig1,thevecs1,color='w')