Example No. 1
def transform(dir):
    # Collect all .png files in the given directory
    image_list = glob.glob(dir + '/*.png')

    # Iterate through the images and generate 289 (17 x 17) mutations of each.
    for i in image_list:
        filename, extension = os.path.splitext(i)
        image = Image.open(i)
        count = 0
        for j in range(-16, 17, 2):
            for k in range(-16, 17, 2):
                # invert so background values are 0
                new_image = invert(image)

                # Deform image based on k
                new_image = deform(new_image, k)

                # Rotate image based on j
                new_image = rotate(new_image, j)

                new_image = new_image.crop(new_image.getbbox())

                # invert back
                new_image = invert(new_image)

                # Save new mutation to a new file
                new_filename = filename + "_" + str(count) + extension
                new_image.save(new_filename)
                new_image.close()
                count += 1
        image.close()
        os.remove(filename + extension)
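The snippet above depends on names defined elsewhere in its module. A minimal sketch of the imports it assumes; note that deform and rotate here are project-specific helpers (each takes an image plus a parameter), not PIL functions, and are not shown:

import glob
import os
from PIL import Image
from PIL.ImageOps import invert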
Example No. 2
def corner_randomly_transform(image,
                              corner,
                              offset_x=0,
                              offset_y=0,
                              x_radius=CORNER_RADIUS,
                              y_radius=CORNER_RADIUS,
                              angle=0):
    draft_radius = int(np.sqrt(x_radius**2 + y_radius**2) + 2)
    # 1. offset
    real_corner = (corner[0] + offset_x, corner[1] + offset_y)
    draft_image = image.crop(
        (real_corner[0] - draft_radius, real_corner[1] - draft_radius,
         real_corner[0] + draft_radius, real_corner[1] + draft_radius))
    # 2. rotate. Rotation fills the borders with black, so invert, rotate, then invert back
    draft_image = invert(invert(draft_image).rotate(angle, expand=1))

    # 3. crop corner
    corner_image = draft_image.crop((draft_image.size[0] // 2 - x_radius,
                                     draft_image.size[1] // 2 - y_radius,
                                     draft_image.size[0] // 2 + x_radius,
                                     draft_image.size[1] // 2 + y_radius))

    # scale
    return corner_image.resize(
        (CORNER_FINAL_RADIUS * 2, CORNER_FINAL_RADIUS * 2))
Example No. 3
    def run_one(fname, outname):
        N = 1

        im = img.open(fname)
        #im = im.filter(ImageFilter.BLUR)
        im = im.resize((600, 600), img.ANTIALIAS)
        im = im.convert('L')
        im = invert(im)


        x = np.asarray(im)
        y = x 

        size = 3

        for i in range(N):
            y = morph.dilate(y, morph.sedisk(size))
            y = morph.close(y, morph.sedisk(size))

        jm = img.fromarray(y)
        jm = invert(jm)

        jm = jm.resize((400, 400), img.ANTIALIAS)

        jm.save(outname)
Example No. 4
def invert_image(image: Image) -> Image:
    if image.mode == 'RGBA':
        r, g, b, a = image.split()
        rgb_image = Image.merge('RGB', (r, g, b))
        inverted_image = invert(rgb_image)
        r2, g2, b2 = inverted_image.split()
        return Image.merge('RGBA', (r2, g2, b2, a))
    else:
        return invert(image)
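A short usage sketch for the helper above, assuming Pillow and a hypothetical local file icon.png in RGB or RGBA mode; invert here is PIL.ImageOps.invert:

from PIL import Image

icon = Image.open("icon.png")
negative = invert_image(icon)      # alpha channel is preserved for RGBA input
negative.save("icon_inverted.png")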
Example No. 5
def extract_images(img_id, img_dir, size, mode, debug=False):
    image_path = os.path.join(img_dir, img_id + '.tiff')
    image = openslide.OpenSlide(image_path)
    w0, h0 = image.level_dimensions[0]
    view = size
    thumbnail = invert(image.get_thumbnail((view, view)))
    img = np.array(thumbnail).mean(2)
    w1, h1 = thumbnail.size
    im = PIL.Image.new('RGB', (size, size))
    im.paste(
        thumbnail,
        (random.randrange(size + 1 - w1), random.randrange(size + 1 - h1)))
    num = {32: 24}
    images = []
    if debug:
        fig, ax = plt.subplots(1)
        ax.imshow(img)
    for level, n in num.items():
        r = view // level
        label = skimage.measure.block_reduce(img, (r, r), np.mean)
        xs, ys = topk(label, level)
        ll = list(range(level))
        if mode == 'train': random.shuffle(ll)
        ll = ll[:n]
        pts = [(x, y) for x, y in zip(xs[ll], ys[ll])]
        for x, y in pts:
            s0 = max(w0, h0)
            l = image.get_best_level_for_downsample(s0 // (level * size))
            s = max(image.level_dimensions[l])
            ix, iy = x * s0 // level, y * s0 // level
            crop_size = s // level
            if mode == 'train':
                t = s0 // level // 4
                ix += random.randrange(-t, t)
                iy += random.randrange(-t, t)
                crop_size = int(
                    random.uniform(0.8 * crop_size, 1.2 * crop_size))
            im = image.read_region((iy, ix), l, (crop_size, crop_size))

            if debug:
                rect = patches.Rectangle((y * r, x * r),
                                         r,
                                         r,
                                         linewidth=1,
                                         edgecolor='r',
                                         facecolor='none')
                ax.add_patch(rect)
            im = invert(
                im.resize((size, size), PIL.Image.BILINEAR).convert('RGB'))
            images += [im]

    if debug:
        for im in images:
            plt.figure()
            plt.imshow(np.array(im))

    return images
Example No. 6
    def packImages(self, red, green, blue, alpha):
        r = Image.open(red) if red != "" else Image.new(
            mode="RGB", size=(1, 1), color=self.redDef.get())
        g = Image.open(green) if green != "" else Image.new(
            mode="RGB", size=(1, 1), color=self.greenDef.get())
        b = Image.open(blue) if blue != "" else Image.new(
            mode="RGB", size=(1, 1), color=self.blueDef.get())

        if r.mode == "I;16":
            r.mode = "I"
            r = r.point(lambda i: i * (1. / 256))

        if g.mode == "I;16":
            g.mode = "I"
            g = g.point(lambda i: i * (1. / 256))

        if b.mode == "I;16":
            b.mode = "I"
            b = b.point(lambda i: i * (1. / 256))

        if alpha != "":
            a = Image.open(alpha)
            if a.mode == "I;16":
                a.mode = "I"
                a = a.point(lambda i: i * (1. / 256))

        if self.redInvert.get():
            r = invert(r.convert("RGB"))
        if self.greenInvert.get():
            g = invert(g.convert("RGB"))
        if self.blueInvert.get():
            b = invert(b.convert("RGB"))
        if self.alphaInvert.get() and alpha != "":
            a = invert(a.convert("RGB"))

        size = max(r.size, g.size, b.size) if alpha == "" else max(
            r.size, g.size, b.size, a.size)

        if alpha != "":
            return Image.merge("RGBA", [
                r.resize(size).convert("L"),
                g.resize(size).convert("L"),
                b.resize(size).convert("L"),
                a.resize(size).convert("L")
            ])
        else:
            return Image.merge("RGB", [
                r.resize(size).convert("L"),
                g.resize(size).convert("L"),
                b.resize(size).convert("L")
            ])
Example No. 7
    def invert_colors(image) -> Image:
        """
        Function to invert colors of an image

        Parameters:
        image (PIL.Image.Image): the image to invert

        Returns:
        Image object for further use
        """
        try:
            image = image.convert('RGBA')
            r, g, b, a = image.split()
            rgb_image = Image.merge('RGB', (r, g, b))

            inverted_image = invert(rgb_image)

            r2, g2, b2 = inverted_image.split()

            final_transparent_image = Image.merge('RGBA', (r2, g2, b2, a))
            image = final_transparent_image
            return image
        except Exception as exc:
            print("Error in invert_colors")
            print(exc)
            return None
Example No. 8
def put_grapheme(canvas, x, y, file_info, name, rotate=False):
    '''Paste a grapheme into a logogram canvas and return the corresponding
    darknet bounding-box annotation.'''

    canvas_w = canvas.width
    canvas_h = canvas.height

    sim = Image.open(file_info['filename']).convert("RGBA")
    w = sim.width
    h = sim.height
    if rotate:
        if random.random() > 0.5:
            sim = sim.transpose(Image.FLIP_LEFT_RIGHT)
        turns = random.randint(0, 4)
        sim = sim.rotate(turns * 90, resample=Image.BILINEAR, expand=True)
        if turns % 2 > 0:
            w, h = h, w

    if x + w > canvas_w:
        x -= x + w - canvas_w + 2
    if y + h > canvas_h:
        y -= y + h - canvas_h + 2

    # Convert white pixels to transparent
    sim.putalpha(invert(sim.convert("L")))

    canvas.alpha_composite(sim, (x, y))
    return {
        'tags':
        file_info['tags'],
        'box': [(x + w / 2) / canvas_w, (y + h / 2) / canvas_h, w / canvas_w,
                h / canvas_h]
    }
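The returned dictionary already carries the box in darknet's normalised (x_center, y_center, width, height) form. A hedged sketch of turning it into an annotation line, assuming a hypothetical tag_to_class_id mapping on the caller's side:

entry = put_grapheme(canvas, 40, 60, file_info, 'glyph', rotate=True)
class_id = tag_to_class_id[entry['tags'][0]]          # hypothetical mapping
with open('logogram.txt', 'a') as f:
    f.write('{} {:.6f} {:.6f} {:.6f} {:.6f}\n'.format(class_id, *entry['box']))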
Example No. 9
def do_convert(filename, img_out_path):
    ds = pydicom.dcmread(filename)
    img_an = ds.AccessionNumber

    # Debug Info
    ds_an = ds.AccessionNumber if 'AccessionNumber' in ds else None
    ds_pi = ds.PhotometricInterpretation if 'PhotometricInterpretation' in ds else None
    ds_ww = ds.WindowWidth if 'WindowWidth' in ds else None
    ds_wc = ds.WindowCenter if 'WindowCenter' in ds else None
    ds_ri = ds.RescaleIntercept if 'RescaleIntercept' in ds else None
    ds_rs = ds.RescaleSlope if 'RescaleSlope' in ds else None
    print(filename, "AccNo:", ds_an, "PI:", ds_pi, "WW:", ds_ww, "WC:", ds_wc, "RI:", ds_ri, "RS:", ds_rs)
    img_out_fullpath = join(img_out_path, img_an + '.png')
    if not exists(dirname(img_out_fullpath)):
        try:
            makedirs(dirname(img_out_fullpath))
        except OSError as exc: # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise

    """
    #writer.writerow({'Image Index': img_an + '.png', 'No Finding': '1'})
    """
    img = read_dcm_to_image(ds)

    small_img = img.resize((1024, 1024))
    small_img = small_img.convert('L')

    # MONOCHROME
    if ds.PhotometricInterpretation == 'MONOCHROME1':
        small_img = invert(small_img)

    small_img.save(img_out_fullpath)
Example No. 10
    def recover_rotate(self, image: Image, angle: int):
        """
        Rotates the input image back to fit the original image, \
            and fills in the parts that are missing due to rotation.
        Assumes that the angle of rotation is provided, and that the image \
            has not been expanded when rotated (i.e. corners have been cut off).
        Also assumes that no other modification has been made to the input.

        :param image: input image
        :type image: PIL.Image
        :param angle: the degree that the image was rotated (CCW)
        :type angle: int

        :return: the input image modified to match the original image
        :rtype: PIL.Image
        """
        out = image.rotate(-angle)
        '''
        for coord in product(range(self.original.size[1]), range(self.original.size[0])):
            if out.getpixel(coord) != 0:
                out.putpixel(coord, self.original.getpixel(coord))
        '''
        #mask = out.convert(mode="1",dither=None).convert('L')  # insufficient masking
        mask = out.point(lambda x: 255*int(x>0))
        mask = invert(mask).convert('1')
        out.paste(self.original,mask=mask)

        return out
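A hedged standalone sketch of the same idea, written as a free function so it does not need the class context (the method above reads the untouched image from self.original); it assumes single-band ('L') images:

from PIL import Image
from PIL.ImageOps import invert

def recover_rotate_standalone(original: Image.Image, rotated: Image.Image, angle: int) -> Image.Image:
    out = rotated.rotate(-angle)                   # undo the rotation; cut-off corners stay black
    mask = out.point(lambda x: 255 * int(x > 0))   # non-black pixels become white
    mask = invert(mask).convert('1')               # so the black gaps become the paste mask
    out.paste(original, mask=mask)                 # fill the gaps from the original image
    return out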
Example No. 11
def imagefile2array(filename):
    ''' Returns tuple of (width, height, string_definition). '''
    img = Image.open(filename)
    if img.mode == 'RGBA':
        img.load()
        bmp = Image.new("RGB", img.size,
                        (255, 255, 255))  # create new white image
        r, g, b, a = img.split()
        bmp = Image.composite(img, bmp, a)  # create a composite
    elif img.mode == 'LA':
        img.load()
        bmp = Image.new("RGB", img.size,
                        (255, 255, 255))  # create new white image
        l, a = img.split()
        bmp = Image.composite(img, bmp, a)  # create a composite
    else:
        bmp = img
    bmp = invert(bmp)
    bmp = bmp.rotate(-90).transpose(Image.FLIP_LEFT_RIGHT)
    s = bmp.convert('1').tostring()
    byte_height = bmp.size[0] / 8 + (bmp.size[0] % 8 > 0)
    matrix = group(list(s), byte_height)
    matrix = transpose(matrix)
    bmp_array = ''.join(['\\x{0:02x}'.format(ord(x)) for x in flatten(matrix)])
    return (bmp.size[1], bmp.size[0], bmp_array
            )  # bitmap was rotated, need to swap width and height
Example No. 12
    async def invert_user_avatar(self,
                                 ctx,
                                 *,
                                 member: Optional[Member] = None):
        """Get the inverted avatar of the member"""

        # Get member mentioned or set to author
        member = ctx.author if not member else member

        # Invert the image
        attach = await member.avatar_url.read()
        image = Image.open(io.BytesIO(attach)).convert('RGB')
        inverted = invert(image)

        # Save new inverted image as bytes
        file = io.BytesIO()
        inverted.save(file, format='PNG')
        file.seek(0)

        # Send image in an embed
        f = File(file, "inverted.png")
        embed = Embed(colour=self.bot.admin_colour,
                      timestamp=datetime.datetime.utcnow())
        embed.set_author(name=f"{member}'s Avatar | Inverted",
                         icon_url=member.avatar_url)
        embed.set_image(url="attachment://inverted.png")
        embed.set_footer(text=f"Requested by {ctx.author}",
                         icon_url=ctx.author.avatar_url)

        await ctx.send(file=f, embed=embed)
Example No. 13
def invert_image_path(path):
    image_file = Image.open(path)  # open colour image
    image_file = image_file.convert('L').resize([220, 155])
    image_file = invert(image_file)
    image_array = np.array(image_file)
    image_array[image_array >= 50] = 255
    image_array[image_array < 50] = 0
    return image_array
Example No. 14
 def bw_transform_special(img):
     # Special handling for images
     image = rgb2gray(img)
     image = invert(image)
     image = adjust_gamma(image, 2)
     image = adjust_sigmoid(image, .95)
     threshold_image = threshold_otsu(image)
     image = image > threshold_image
     return image
Example No. 15
def data_URL_to_number_array(data_URL):
    base64_img = data_URL.split(',', 1)[1]
    img = Image.open(BytesIO(base64.b64decode(base64_img))).convert('L')
    img = img.resize((20, 20))
    img = invert(img)
    img = ImageOps.autocontrast(img)
    img = ImageOps.expand(img, border=4)
    seq = list(img.getdata())
    return [[seq[i + 28 * j] for i in range(28)] for j in range(28)]
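A self-contained usage sketch: build a small data URL from a blank 20x20 image (purely illustrative) and feed it to the converter above, which needs the same imports it already uses (base64, BytesIO, PIL.Image, PIL.ImageOps):

import base64
from io import BytesIO
from PIL import Image

buf = BytesIO()
Image.new('L', (20, 20), 255).save(buf, format='PNG')
data_url = 'data:image/png;base64,' + base64.b64encode(buf.getvalue()).decode()

grid = data_URL_to_number_array(data_url)
print(len(grid), len(grid[0]))   # 28 28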
Example No. 16
File: rgb.py Project: imoea/puzzles
def generate(args):
    """ generate puzzle """
    def img_to_spr(img):
        """ split image into 10x10 sprites """

        return [
            img.crop((x, y, x + 10, y + 10))
            for y in range(0, img.size[1], 10)
            for x in range(0, img.size[0], 10)
        ]

    def color_spr(spr, color):
        """ replace black in sprite with another color """

        color = np.array(color)
        x, y, z = *spr.size, 3  # sprite dimensions
        spr = np.array(spr).reshape((x * y, z))  # reshape
        spr = np.hstack([color if sum(p) == 0 else p for p in spr]).reshape(
            (x, y, z)).astype(np.uint8)  # color
        return Image.fromarray(spr)

    # ascii characters corresponding to `tileset.png`
    ascii_chr = ' !"#$%&\'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~'
    # map A-Z -> 1-26
    chr_num = dict(zip(ascii_chr[33:59], range(1, 27)))
    chr_num[' '] = 27

    # black letters on white background (by inverting the colours)
    img = invert(Image.open('tileset.png').convert('1').convert('RGB'))
    ascii_spr = img_to_spr(img)[32:127]
    spr = dict(zip(ascii_chr, ascii_spr))

    # append padding so width % 3 == 0
    if len(args.code) % 3:
        code = args.code + ' ' * (3 - len(args.code) % 3)
    else:
        code = args.code

    # insert the clue
    puz = Image.new('RGB', (10 * 3, 10 * (len(code) // 3 + 1)))
    puz.paste(color_spr(spr['R'], (255, 0, 0)), (0, 0))  # R
    puz.paste(color_spr(spr['G'], (0, 255, 0)), (10, 0))  # G
    puz.paste(color_spr(spr['B'], (0, 0, 255)), (20, 0))  # B

    # create the puzzle
    for i, char in enumerate(code):
        x, y = i % 3, i // 3 + 1  # determine the letter position
        color = np.random.randint(256, size=3)  # randomise the RGB values
        color[x] = chr_num[char]  # set the channel to the alphabet number
        puz.paste(color_spr(spr['RGB'[x]], color),
                  (10 * x, 10 * y))  # insert the code

    puz.save(args.fname)
    print("Clue: proper channels")
    print("Puzzle saved:", args.fname)
Example No. 17
def invert_image(image_file):
    image_file = image_file.convert('L').resize([220, 155])
    image_file = invert(image_file)
    image_array = np.array(image_file)
    for i in range(image_array.shape[0]):
        for j in range(image_array.shape[1]):
            if image_array[i][j] <= 50:
                image_array[i][j] = 0
            else:
                image_array[i][j] = 255
    return image_array
Example No. 18
 def process_image(img, sig_param=.85):
     image = rgb2gray(img)
     image = pyramid_expand(image, order=3)
     theta = ImageSignalDigitalizer.skew_detect(image)
     image = rotate(image,
                    np.rad2deg(theta) + ImageSignalDigitalizer.CONST_ANGLE,
                    resize=True)
     image = invert(image)
     image = adjust_sigmoid(image, sig_param)
     image = ImageSignalDigitalizer.bw_transform(image)
     image = resize(image, (2048, 2048 * 2))
     return image
Example No. 19
def edge_extract(image: Image, point_from: tuple, point_to: tuple,
                 edge_width: int):
    edge = image.crop((
        min(point_from[0], point_to[0]) - 2 * edge_width,
        min(point_from[1], point_to[1]) - 2 * edge_width,
        max(point_from[0], point_to[0]) + 2 * edge_width,
        max(point_from[1], point_to[1]) + 2 * edge_width,
    ))
    vect_edge = np.array(point_from) - np.array(point_to)
    edge_len = np.linalg.norm(vect_edge)
    angle = np.arccos(np.dot(vect_edge, [0, 1]) / edge_len) * 180 / np.pi
    # We don't know whether to rotate by +angle or -angle:
    # if the angle between the edge vector and the OX axis is < 90 degrees,
    # rotate by +angle; otherwise by -angle.
    if np.dot(vect_edge, [1, 0]) < 0:
        angle = -angle
    edge = invert(edge)
    edge = edge.rotate(angle=angle, expand=True)
    edge = invert(edge)
    new_center = np.array(edge.size) // 2
    edge = edge.crop(
        (new_center[0] - edge_width / 2, new_center[1] - int(edge_len / 2),
         new_center[0] + edge_width / 2, new_center[1] + int(edge_len / 2)))
    return edge
Example No. 20
    async def _progress(self, percent):
        image = Image.new("RGB", (self.width, 12))

        draw = ImageDraw.Draw(image)

        draw.pieslice([(3, 3), (11, 11)],
                      start=90,
                      end=270,
                      fill="#fff",
                      outline="#fff")
        draw.rectangle((5, 3, int((percent * (self.width - 8)) / 100) + 5, 11),
                       outline="#fff",
                       fill="#fff")

        image = invert(image)
        return image
Example No. 21
    def latexToImage(self, formula):
        image = Image.open(self.ownpnglatex(r"$"+formula+r"$", 'tmpFormula.png'))

        image = invert(image)
        image = image.convert("RGBA")
        datas = image.getdata()

        newData = []
        for item in datas:
            if item[0] == 0 and item[1] == 0 and item[2] == 0:
                newData.append((255, 255, 255, 0))
            else:
                newData.append(item)

        image.putdata(newData)
        return image
Example No. 22
def find_bounds(im_names):
    print('finding max bounds in {}'.format(im_names))
    max_bounds = [
        0,
    ] * 4  # left, upper, right, lower
    min_bounds = [
        999999,
    ] * 4
    for im_name in im_names:
        with Image.open(im_name) as im:
            bounds = invert(im).getbbox()
            max_bounds = [max(bounds[i], max_bounds[i]) for i in range(4)]
            min_bounds = [min(bounds[i], min_bounds[i]) for i in range(4)]
    return (min(max_bounds[0], min_bounds[0]), min(max_bounds[1],
                                                   min_bounds[1]),
            max(max_bounds[2], min_bounds[2]), max(max_bounds[3],
                                                   min_bounds[3]))
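A usage sketch: the returned box (left, upper, right, lower) spans the non-white content of every listed image, so it can be used to crop them all to a consistent frame (file names are hypothetical):

pages = ['page1.png', 'page2.png']
box = find_bounds(pages)
for name in pages:
    with Image.open(name) as im:
        im.crop(box).save(name.replace('.png', '_cropped.png'))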
Example No. 23
def do_convert(filename, img_out_path, img_out_width=1024, img_out_square=True, use_ori_fname=False):
    (ori_fname, ori_fname_ext) = os.path.splitext(os.path.basename(filename))
    ori_fname = ori_fname + ori_fname_ext if ori_fname_ext.lower() != '.png' else ori_fname
    ds = pydicom.dcmread(filename)
    img_an = ds.AccessionNumber

    # Debug Info
    ds_an = ds.AccessionNumber if 'AccessionNumber' in ds else None
    ds_pi = ds.PhotometricInterpretation if 'PhotometricInterpretation' in ds else None
    ds_ww = ds.WindowWidth if 'WindowWidth' in ds else None
    ds_wc = ds.WindowCenter if 'WindowCenter' in ds else None
    ds_ri = ds.RescaleIntercept if 'RescaleIntercept' in ds else None
    ds_rs = ds.RescaleSlope if 'RescaleSlope' in ds else None
    ds_uid = ds.SOPInstanceUID if 'SOPInstanceUID' in ds else None
    #print(filename, "AccNo:", ds_an, "PI:", ds_pi, "WW:", ds_ww, "WC:", ds_wc, "RI:", ds_ri, "RS:", ds_rs, "ori_fname:", ori_fname, "use_ori_fname:", use_ori_fname)
    out_fname = ds_an if ds_an and not use_ori_fname else ori_fname
    img_out_fullpath = join(img_out_path, out_fname + '.png')
    if not exists(dirname(img_out_fullpath)):
        try:
            makedirs(dirname(img_out_fullpath))
        except OSError as exc: # Guard against race condition
            if exc.errno != errno.EEXIST:
                raise

    try:
        img = read_dcm_to_image(ds)
    except Exception:
        # skip files that cannot be decoded into an image
        return

    # calculate resize image width/height
    img_width, img_height = img.size
    img_out_width = int(img_out_width)
    if img_out_square:
        img_out_height = img_out_width
    else:
        img_out_height = int( img_out_width / float(img_width) * float(img_height) )

    small_img = img.resize((img_out_width, img_out_height))
    small_img = small_img.convert('L')

    # MONOCHROME
    if ds.PhotometricInterpretation == 'MONOCHROME1':
        small_img = invert(small_img)

    small_img.save(img_out_fullpath)
Example No. 24
def convert_img(imagename):
    image = PIL.Image.open('decos/' + imagename + '.png')
    image = invert(image)

    thresh = 200

    def fn(x):
        return 255 if x > thresh else 0

    image = image.convert('L').point(fn, mode='1')
    image = image.filter(ImageFilter.EDGE_ENHANCE_MORE)
    # inverted_convert.save('images/'+imagename+'-inv.png')

    pil_image = image.convert('RGB')
    open_cv_image = array(pil_image)
    open_cv_image = open_cv_image[:, :, ::-1].copy()

    return (open_cv_image)
Example No. 25
def style_image(image_path, model_path):

    image = Image.open(image_path)
    width, height = image.size
    alpha = image.convert('RGBA').split()[-1]

    # @TODO - import the mean color...
    mean_color = Image.new("RGB", image.size, (124, 116, 103))

    rgb_image = image.convert('RGB')
    rgb_image.paste(mean_color, mask=invert(alpha))

    cuda_available = torch.cuda and torch.cuda.is_available()
    device = torch.device("cuda" if cuda_available else "cpu")

    model = torch.load(model_path, map_location=device)

    image_filename = os.path.basename(image_path)
    model_filename = os.path.basename(model_path)
    model_name = os.path.splitext(model_filename)[0]

    os.makedirs(f"images/outputs/{model_name}", exist_ok=True)

    transform = style_transform()

    # Define model and load model checkpoint
    transformer = TransformerNet().to(device)
    transformer.load_state_dict(model)
    transformer.eval()

    # Prepare input
    image_tensor = Variable(transform(rgb_image)).to(device)
    image_tensor = image_tensor.unsqueeze(0)

    # Stylize image
    with torch.no_grad():
        output_tensor = depro(transformer(image_tensor))

    stylized_image = F.to_pil_image(output_tensor) \
        .convert('RGBA') \
        .crop((0, 0, width, height))

    stylized_image.putalpha(alpha)
    stylized_image.save(f"images/outputs/{model_name}/{image_filename}", 'PNG')
Example No. 26
def do_predict(models, path):
    results = []
    ds, img = read_dcm_to_image(path)
    acc_no = ds.get('AccessionNumber', None)

    for model in models:
        # check if exists
        model_name = model.model_name
        model_ver = model.model_ver
        weight_name = model.weight_name
        weight_ver = model.weight_ver
        category = model.category

        global _use_db
        if _use_db:
            is_exist = check_if_pred_exists(acc_no, model_name, model_ver,
                                            weight_name, weight_ver, category)
            if is_exist:
                print("{} {} {} exists, skip.".format(acc_no, model_name,
                                                      weight_name))
                continue

        small_img = img.resize((model.width, model.height))
        small_img = small_img.convert('L')

        # MONOCHROME
        if ds.PhotometricInterpretation == 'MONOCHROME1':
            small_img = invert(small_img)

        #exam_time = ds.get('AcquisitionDate', None)
        small_img_arr = np.array(small_img.convert('RGB'))
        prob = predict_image(small_img_arr, model.obj)
        result = {
            'acc_no': acc_no,
            'model_name': model_name,
            'model_ver': model_ver,
            'weight_name': weight_name,
            'weight_ver': weight_ver,
            'category': category,
            'probability': prob[0][0]
        }
        results.append(result)
    return results
Example No. 27
    async def invert(self, ctx):
        """Display inverted version of image uploaded"""

        if ctx.message.attachments:
            for attachments in ctx.message.attachments:
                attach = await attachments.read()
                image = Image.open(io.BytesIO(attach)).convert('RGB')
                inverted = invert(image)

                # Save the inverted image as bytes
                file = io.BytesIO()
                inverted.save(file, format='PNG')
                file.seek(0)

                await ctx.message.delete()
                # Send the inverted image
                await ctx.send(file=discord.File(file, "inverted.png"))

        else:
            await self.bot.generate_embed(ctx, desc="**Image Not Detected!**")
Example No. 28
def get_name(img):
    img_arr = np.asarray(img)
    h, w, ch = img_arr.shape
    img_arr = img_arr[:h // 2, w // 2:]
    
    blue_mask = infer_blue_mask(img_arr)
    uy, ly, lx, rx = max_length_rectangle(blue_mask)
    
    # TODO certain blue color
    
    img_arr = img_arr[uy:ly, lx:rx]
    white_mask = infer_white_mask(img_arr)
    uy, ly, lx, rx = max_length_rectangle(white_mask)
    name_img = Image.fromarray(img_arr[:, :lx])
    name_img = invert(name_img).convert('L')
    name = pytesseract.image_to_string(name_img,
                                       lang='eng',
                                       config='--psm 7')
    name = name.split('\n')[0]
    # TODO no name found
    return name
Example No. 29
def load_cropped_image(src_path, output_size, grey_scale, invert_color=False):
    def _crop_background(numpy_src):
        def _get_vertex(img):
            index = 0
            for i, items in enumerate(img):
                if items.max() != 0:  # activate where background is '0'
                    index = i
                    break

            return index

        numpy_src_y1 = _get_vertex(numpy_src)
        numpy_src_y2 = len(numpy_src) - _get_vertex(np.flip(numpy_src, 0))
        numpy_src_x1 = _get_vertex(np.transpose(numpy_src))
        numpy_src_x2 = len(numpy_src[0]) - _get_vertex(
            np.flip(np.transpose(numpy_src), 0))

        return numpy_src_x1, numpy_src_y1, numpy_src_x2, numpy_src_y2

    if grey_scale:
        src_image = Image.open(src_path, 'r').convert('L')
        if invert_color:
            src_image = invert(src_image)  # invert color

        numpy_image = np.asarray(src_image.getdata(),
                                 dtype=np.float64).reshape(
                                     (src_image.size[1], src_image.size[0]))
        numpy_image = np.asarray(
            numpy_image, dtype=np.uint8)  # if values still in range 0-255

        pil_image = Image.fromarray(numpy_image, mode='L')
        x1, y1, x2, y2 = _crop_background(numpy_image)
        pil_image = pil_image.crop((x1, y1, x2, y2))
        pil_image = pil_image.resize([output_size, output_size])

    else:
        pil_image = Image.open(src_path, 'r')

    return pil_image
Example No. 30
def recolor(o_path, t_path, r_path):
    path = Path(__file__).resolve().parent.joinpath(o_path)
    texpath = Path(__file__).resolve().parent.joinpath(t_path)
    savepath = Path(__file__).resolve().parent.joinpath(r_path)

    image = invert(Image.open(path)).convert('HSV')
    texture = Image.open(texpath).resize(image.size).convert('HSV')

    data = image.load()
    texdata = texture.load()

    for i in range(image.size[0]):
        for j in range(image.size[1]):
            xy = (i, j)
            pixel = (texdata[i, j][0], texdata[i, j][1], data[i, j][2])
            image.putpixel(xy, pixel)

    image.convert('RGB').save(savepath)

    original_image = mpimg.imread(path)
    texture_image = mpimg.imread(texpath)
    resulting_image = mpimg.imread(savepath)