Code example #1
def change_color(img, factor):
    image = Image.fromarray(img)
    data = np.array(ImageEnhance.Color(image).enhance(factor).getdata(),
                    dtype="uint8").reshape(img.shape)
    return data
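
This helper assumes numpy, PIL.Image, and PIL.ImageEnhance are imported elsewhere in its module. As a minimal self-contained sketch (the function name and type hints are illustrative, not from the original project), the same array-in/array-out saturation adjustment can be written with np.asarray instead of the slower getdata() round trip:

import numpy as np
from PIL import Image, ImageEnhance

def adjust_saturation(img: np.ndarray, factor: float) -> np.ndarray:
    """Return a copy of an RGB uint8 array with its saturation scaled by factor.

    Per the PIL docs for ImageEnhance.Color, a factor of 0.0 yields a
    black-and-white result, 1.0 returns the image unchanged, and values
    above 1.0 increase saturation.
    """
    image = Image.fromarray(img)
    enhanced = ImageEnhance.Color(image).enhance(factor)
    return np.asarray(enhanced, dtype=np.uint8)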
Code example #2
    def __init__(self,
                 p1,
                 operation1,
                 magnitude_idx1,
                 p2,
                 operation2,
                 magnitude_idx2,
                 fillcolor=(128, 128, 128)):
        ranges = {
            "shearX": np.linspace(0, 0.3, 10),
            "shearY": np.linspace(0, 0.3, 10),
            "translateX": np.linspace(0, 150 / 331, 10),
            "translateY": np.linspace(0, 150 / 331, 10),
            "rotate": np.linspace(0, 30, 10),
            "color": np.linspace(0.0, 0.9, 10),
            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
            "solarize": np.linspace(256, 0, 10),
            "contrast": np.linspace(0.0, 0.9, 10),
            "sharpness": np.linspace(0.0, 0.9, 10),
            "brightness": np.linspace(0.0, 0.9, 10),
            "autocontrast": [0] * 10,
            "equalize": [0] * 10,
            "invert": [0] * 10
        }

        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
        def rotate_with_fill(img, magnitude):
            rot = img.convert("RGBA").rotate(magnitude)
            rot = Image.composite(
                rot, Image.new("RGBA", rot.size, (fillcolor[0], ) * 4), rot)
            return rot.convert(img.mode)

        func = {
            "shearX":
            lambda img, magnitude: img.transform(img.size,
                                                 Image.AFFINE,
                                                 (1, magnitude * random.choice(
                                                     [-1, 1]), 0, 0, 1, 0),
                                                 Image.BICUBIC,
                                                 fillcolor=fillcolor),
            "shearY":
            lambda img, magnitude: img.transform(img.size,
                                                 Image.AFFINE,
                                                 (1, 0, 0, magnitude * random.
                                                  choice([-1, 1]), 1, 0),
                                                 Image.BICUBIC,
                                                 fillcolor=fillcolor),
            "translateX":
            lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice(
                    [-1, 1]), 0, 1, 0),
                fillcolor=fillcolor),
            "translateY":
            lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.
                               choice([-1, 1])),
                fillcolor=fillcolor),
            "rotate":
            lambda img, magnitude: rotate_with_fill(img, magnitude),
            "color":
            lambda img, magnitude: ImageEnhance.Color(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "posterize":
            lambda img, magnitude: ImageOps.posterize(img, magnitude),
            "solarize":
            lambda img, magnitude: ImageOps.solarize(img, magnitude),
            "contrast":
            lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "sharpness":
            lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "brightness":
            lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "autocontrast":
            lambda img, magnitude: ImageOps.autocontrast(img),
            "equalize":
            lambda img, magnitude: ImageOps.equalize(img),
            "invert":
            lambda img, magnitude: ImageOps.invert(img)
        }

        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_idx2]
Code example #3
def saturation(pic, amount):
    img = Image.open(pic)
    img = img.convert("RGBA")
    color = ImageEnhance.Color(img)
    img = color.enhance(amount)
    preview(img)
Code example #4
File: image_preprocess.py  Project: NavidMai/OCR
# -*- coding: UTF-8 -*-
from PIL import Image
from PIL import ImageEnhance
import cv2

image = Image.open('Image/t.jpg')

# Brightened
enh_bri = ImageEnhance.Brightness(image)
brightness = 1.5
image_brightened = enh_bri.enhance(brightness)
image_brightened.show()

# Colored
enh_col = ImageEnhance.Color(image_brightened)
color = 1.5
image_colored = enh_col.enhance(color)
image_colored.show()

# Contrasted
enh_con = ImageEnhance.Contrast(image_colored)
contrast = 1.5
image_contrasted = enh_con.enhance(contrast)
image_contrasted.show()

# Sharpened
enh_sha = ImageEnhance.Sharpness(image_contrasted)
sharpness = 3.0
image_sharped = enh_sha.enhance(sharpness)
image_sharped.show()
Code example #5
def imgEnhColorR(img, st=0.6, ed=1.4):
    c = random.uniform(st, ed)
    return ImageEnhance.Color(img).enhance(c)
Code example #6
File: Cubettize.py  Project: fabriziomonge/kubettize
    cube = st.sidebar.slider('definizione', 20, 60, 30)
    #     cube = int(cube)
    contrast = st.sidebar.slider('Contrasto', 1, 10, 10) / 10
    colore = st.sidebar.slider('Colore', 1, 10, 10) / 10
    luminosita = st.sidebar.slider('Luminosità', 1, 10, 10) / 10
    nitidezza = st.sidebar.slider('Nitidezza', 1, 10, 10) / 10

    imgSmall = img.resize((cube, cube), resample=Image.BILINEAR)

    # Scale back up using NEAREST to original size
    result = imgSmall.resize(img.size, Image.NEAREST)

    # Apply the enhancement sliders
    enhancer = ImageEnhance.Contrast(result)
    result = enhancer.enhance(contrast)

    enhancer2 = ImageEnhance.Color(result)
    result1 = enhancer2.enhance(colore)

    enhancer3 = ImageEnhance.Brightness(result1)
    result2 = enhancer3.enhance(luminosita)

    enhancer4 = ImageEnhance.Sharpness(result2)
    result3 = enhancer4.enhance(nitidezza)

    #     st.write(contrast, colore, luminosita )

    #     fig = plt.imshow(result)
    st.write('''### Immagine modificata''')
    st.image(result3, use_column_width=True)
Code example #7
File: functional.py  Project: nunenuh/craft.pytorch
def color(img: np.ndarray, value=1.0):
    img = Image.fromarray(img)
    img = ImageEnhance.Color(img).enhance(value)
    img = np.asarray(img)
    return img
Code example #8
def color(im, k=3):
    enhancer = ImageEnhance.Color(im)
    return enhancer.enhance(k)
Code example #9
import os
import numpy as np
from PIL import Image, ImageEnhance
import itertools

#read image
image = Image.open('test.jpg')
print(image.format, image.size, image.mode)

#parameter combination
green_p = np.arange(0.1, 1.0, 0.05)
blue_p = np.arange(0.1, 1.0, 0.05)
p_pair = list(itertools.product(green_p, blue_p))

#makedir

#test
for i in range(len(p_pair)):
    temp = np.array(image)
    temp[:, :, 0] = temp[:, :, 0] * 0.8  #Red
    temp[:, :, 1] = (temp[:, :, 1] +
                     (255 - np.amax(temp[:, :, 1]))) * p_pair[i][0]  #Green
    temp[:, :, 2] = (temp[:, :, 2] +
                     (255 - np.amax(temp[:, :, 2]))) * p_pair[i][1]  #Blue
    temp = Image.fromarray(temp)
    temp = ImageEnhance.Color(temp).enhance(0.8)
    temp = ImageEnhance.Brightness(temp).enhance(1.5)
    temp = ImageEnhance.Contrast(temp).enhance(0.8)
    temp.save(str(i) + '.bmp')
Code example #10
def desaturate(image, amount=.5):
    """Reduce vibrance."""
    enhanced = ImageEnhance.Color(image)
    return enhanced.enhance(amount)
Code example #11
def saturate(image, amount=1.5):
    """Increase vibrance."""
    enhanced = ImageEnhance.Color(image)
    return enhanced.enhance(amount)
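
Examples #10 and #11 illustrate the factor convention used throughout this page: ImageEnhance.Color(...).enhance(factor) returns a black-and-white-looking copy at 0.0, the original image at 1.0, and a more saturated copy above 1.0. A quick sketch, where "photo.jpg" is only a placeholder path:

from PIL import Image, ImageEnhance

img = Image.open("photo.jpg")  # placeholder path; any RGB image works

gray  = ImageEnhance.Color(img).enhance(0.0)  # fully desaturated
same  = ImageEnhance.Color(img).enhance(1.0)  # unchanged
vivid = ImageEnhance.Color(img).enhance(1.8)  # boosted saturation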
Code example #12
def colorize(img, colorize_percentage):
    return ImageEnhance.Color(img).enhance(colorize_percentage / 100)
Code example #13
def main():
    parser = argparse.ArgumentParser(
        description=textwrap.dedent(__doc__),
        formatter_class=argparse.RawDescriptionHelpFormatter,
        )
    parser.add_argument('-v', '--verbose', action='store_true',
        help='More verbose output')
    parser.add_argument('--version', action='store_true',
        #'Display the version of the tool/package and exit.'
        help=argparse.SUPPRESS)
    parser.add_argument('--debug', action='store_true',
        #help='Enable debugging output',
        help=argparse.SUPPRESS,
        )

    device_parser = argparse.ArgumentParser(add_help=False)
    device_parser.add_argument('device', nargs='?',
        help='The device string. Typically the IP address of the oscilloscope. '
             'Will try to discover a single (!) scope on the network if you leave it out.')

    subparsers = parser.add_subparsers(dest='action', metavar='<action>',
        help="Action to perform on the scope:")

    # ds1054z discover
    action_desc = 'Discover and list scopes on your network and exit'
    discover_parser = subparsers.add_parser('discover',
        description=action_desc, help=action_desc)
    # ds1054z info
    action_desc = 'Print information about your oscilloscope'
    cmd_parser = subparsers.add_parser('info', parents=[device_parser],
        description=action_desc, help=action_desc)
    # ds1054z cmd
    action_desc = 'Send an SCPI command to the oscilloscope'
    cmd_parser = subparsers.add_parser('cmd',
        description=action_desc, help=action_desc)
    cmd_parser.add_argument('command', metavar=':SCPI:CMD',
        help="The command to execute. If the command contains a '?' the answer "
             "will be read from the device and printed to stdout.")
    late_parents(cmd_parser, parents=[device_parser])
    # ds1054z save-screen
    action_desc = 'Save an image of the screen'
    save_screen_parser = subparsers.add_parser('save-screen', parents=[device_parser],
        description=action_desc, help=action_desc)
    save_screen_parser.add_argument('--filename', '-f', metavar='IMG_FILENAME',
        help='The filename template for the image')
    save_screen_parser.add_argument('--overlay', '-o', metavar='RATIO', type=float, default=0.5,
        help='Dim on-screen controls in --save-screen with a mask (default ratio: 0.5)')
    save_screen_parser.add_argument('--printable', '-p', action='store_true',
        help='Make the screenshot more printer-friendly')
    # ds1054z save-data
    action_desc = 'Save the waveform data to a file'
    save_data_parser = subparsers.add_parser('save-data', parents=[device_parser],
        description=action_desc, help=action_desc)
    save_data_parser.add_argument('--filename', '-f',
        metavar='FILENAME', default='ds1054z-scope-values_{ts}.csv',
        help='The filename template for the data file. '
             'The kind of file is determined by its filename extension. '
             'Defaults to: ds1054z-scope-values_{ts}.csv')
    save_data_parser.add_argument('--mode', default='NORMal', choices=('NORMal', 'MAXimum', 'RAW'),
        help='The mode determines whether you will be reading the 1200 displayed samples (NORMal) '
             'or stopping the scope and reading out the full memory (RAW). '
             'MAXimum either reads the full memory if the scope is already stopped '
             'or the 1200 displayed samples otherwise. '
             'Defaults to NORMal.')
    save_data_parser.add_argument('--without-time', action='store_false', dest='with_time',
        help="If specified, it will save the data without the extra column "
             "of time values that's being added by default")
    # ds1054z settings
    action_desc = 'View and change settings of the oscilloscope'
    settings_parser = subparsers.add_parser('settings', parents=[device_parser],
        description=action_desc, help=action_desc)
    settings_parser.add_argument('--timebase', type=float,
        help="Change the timebase of the oscilloscope to this value (in seconds/div).")
    settings_parser.add_argument('--timebase-offset', type=float,
        help="Change the timebase offset of the oscilloscope to this value (in seconds).")
    # ds1054z properties
    action_desc = 'Query properties of the DS1054Z instance'
    properties_parser = subparsers.add_parser('properties', description=action_desc, help=action_desc)
    properties_parser.add_argument('properties', metavar='PROPERTIES', type=comma_sep,
        help="The properties to query separated by a comma, like: 'idn,memory_depth_internal_total'. "
             "Asking for a single one will also work, off course.")
    late_parents(properties_parser, parents=[device_parser])
    # ds1054z run
    action_desc = 'Start the oscilloscope data acquisition'
    run_parser = subparsers.add_parser('run', parents=[device_parser],
        description=action_desc, help=action_desc)
    # ds1054z stop
    action_desc = 'Stop the oscilloscope data acquisition'
    stop_parser = subparsers.add_parser('stop', parents=[device_parser],
        description=action_desc, help=action_desc)
    # ds1054z single
    action_desc = 'Set the oscilloscope to the single trigger mode.'
    single_parser = subparsers.add_parser('single', parents=[device_parser],
        description=action_desc, help=action_desc)
    # ds1054z tforce
    action_desc = 'Generate a trigger signal forcefully.'
    tforce_parser = subparsers.add_parser('tforce', parents=[device_parser],
        description=action_desc, help=action_desc)
    # ds1054z shell
    action_desc = 'Start an interactive shell to control your scope.'
    tforce_parser = subparsers.add_parser('shell', parents=[device_parser],
        description=action_desc, help=action_desc)
    # ds1054z measure
    action_desc = 'Measure a value on a channel'
    measure_parser = subparsers.add_parser('measure', parents=[device_parser],
        description=action_desc, help=action_desc)
    measure_parser.add_argument('--channel', '-c', choices=(1, 2, 3, 4), type=int, required=True,
        help='Channel from which to take the measurement')
    measure_parser.add_argument('--type', '-t', choices=('CURRent', 'MAXimum', 'MINimum', 'AVERages', 'DEViation'), default='CURRent')
    measure_parser.add_argument('item', choices=('vmax', 'vmin', 'vpp', 'vtop', 'vbase', 'vamp', 'vavg', 'vrms', 'overshoot', 'preshoot', 'marea', 'mparea', 'period', 'frequency', 'rtime', 'ftime', 'pwidth', 'nwidth', 'pduty', 'nduty', 'rdelay', 'fdelay', 'rphase', 'fphase', 'tvmax', 'tvmin', 'pslewrate', 'nslewrate', 'vupper', 'vmid', 'vlower', 'variance', 'pvrms'),
        help='Value to measure')
    args = parser.parse_args()

    if args.version:
        print(pkg_resources.get_distribution("ds1054z").version)
        sys.exit(0)

    if args.debug:
        logging.basicConfig(level=logging.DEBUG)

    if not args.action:
        parser.print_help(sys.stderr)
        sys.stderr.write('\nERROR: Please choose an action.\n\n')
        sys.exit(2)

    if args.action == 'discover':
        try:
            from ds1054z.discovery import discover_devices
        except ImportError:
            print('Discovery depends on the zeroconf Python package which is missing.')
            sys.exit(1)
        devices = discover_devices()
        for device in devices:
            if args.verbose:
                print("Found a {model} with the IP Address {ip}.".format(**device))
            else:
                print("{ip}".format(**device))
        sys.exit(0)

    if not args.device:
        try:
            from ds1054z.discovery import discover_devices
        except ImportError:
            print("Please specify a device to connect to. Auto-discovery doesn't "
                  "work because the zeroconf Python package is missing.")
            sys.exit(1)
        devices = discover_devices()
        if len(devices) < 1:
            print("Couln't discover any device on the network. Exiting.")
            sys.exit(1)
        elif len(devices) > 1:
            print("Discovered multiple devices on the network:")
            print("\n".join("{model} {ip}".format(**dev) for dev in devices))
            print("Please specify the device you would like to connect to.")
            sys.exit(1)
        else: # len(devices) == 1
            if args.verbose: print("Found a scope: {model} @ {ip}".format(**devices[0]))
            args.device = devices[0]['ip']
    ds = DS1054Z(args.device)

    if args.action == 'info':
        fmt = "\nVendor:   {0}\nProduct:  {1}\nSerial:   {2}\nFirmware: {3}\n"
        print(fmt.format(ds.vendor, ds.product, ds.serial, ds.firmware))

    if args.action == 'cmd':
        if '?' in args.command:
            print(ds.query(args.command))
        else:
            ds.write(args.command)

    if args.action in ('run', 'stop', 'single', 'tforce'):
        getattr(ds, args.action)()

    if args.action == 'settings':
        if args.timebase:
            ds.timebase_scale = args.timebase
        if args.timebase_offset:
            ds.timebase_offset = args.timebase_offset
        wp = ds.waveform_preamble_dict
        if args.verbose:
            displayed_channels = ds.displayed_channels
            print("Sample Rate: {0}Sa/s".format(DS1054Z.format_si_prefix(ds.sample_rate)))
            print("Timebase: {0}s/div".format(DS1054Z.format_si_prefix(ds.timebase_scale)))
            print("Timebase Offset: {0}s".format(DS1054Z.format_si_prefix(ds.timebase_offset)))
            ds.set_waveform_mode('NORMal')
            tv = ds.waveform_time_values
            t_from = DS1054Z.format_si_prefix(tv[0],  unit='s')
            t_to =   DS1054Z.format_si_prefix(tv[-1], unit='s')
            print("The time axis goes from {0} to {1}".format(t_from, t_to))
            print("Displayed Channels: {0}".format(' '.join(displayed_channels)))
            for channel in displayed_channels:
                print("  Channel {0}:".format(channel))
                print("    Scale: {0}V/div".format(DS1054Z.format_si_prefix(ds.get_channel_scale(channel))))
                print("    Offset: {0}V".format(ds.get_channel_offset(channel)))
                print("    Probe Ratio: {}".format(ds.get_probe_ratio(channel)))
                print("    ---".format(DS1054Z.format_si_prefix(ds.get_channel_scale(channel))))
        else:
            print('sample_rate={}'.format(ds.sample_rate))
            print('timebase_scale={}'.format(ds.timebase_scale))
            print('timebase_offset={}'.format(ds.timebase_offset))
            print('displayed_channels={}'.format(','.join(ds.displayed_channels)))

    if args.action == 'properties':
        for prop in args.properties:
            val = getattr(ds, prop)
            if args.verbose:
                print('{0}: {1}'.format(prop, val))
            else:
                if type(val) in (list, tuple):
                    print(' '.join(str(v) for v in val))
                else:
                    print(val)

    if args.action == 'save-screen':
        try:
            from PIL import Image, ImageOps, ImageEnhance
        except ImportError:
            parser.error('Please install Pillow (or the older PIL) to use --save-screen')
        # formatting the filename
        if args.filename: fmt = args.filename
        else: fmt = 'ds1054z-scope-display_{ts}.png'
        ts = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
        filename = fmt.format(ts=ts)
        # need to find out file extension for Pillow on Windows...
        ext = os.path.splitext(filename)[1]
        if not ext: parser.error('could not detect the image file type extension from the filename')
        # getting and saving the image
        im = Image.open(io.BytesIO(ds.display_data))
        overlay_filename = pkg_resources.resource_filename("ds1054z","resources/overlay.png")
        overlay = Image.open(overlay_filename)
        alpha_100_percent =  Image.new(overlay.mode, overlay.size, color=(0,0,0,0))
        overlay = Image.blend(alpha_100_percent, overlay, args.overlay)
        im.putalpha(255)
        im = Image.alpha_composite(im, overlay)
        if args.printable:
            im = Image.merge("RGB", im.split()[0:3])
            im = ImageOps.invert(im)
            im = ImageEnhance.Color(im).enhance(0)
            im = ImageEnhance.Brightness(im).enhance(0.95)
            im = ImageEnhance.Contrast(im).enhance(2)
            im = im.convert('L')
            im = im.point(lambda x: x if x<252 else 255)
        else:
            im = im.convert('RGB')
        im.save(filename, format=ext[1:])
        if not args.verbose: print(filename)
        else: print("Saved file: " + filename)

    if args.action == 'save-data':
        ts = time.strftime("%Y-%m-%d_%H-%M-%S", time.localtime())
        filename = args.filename.format(ts=ts)
        ext = os.path.splitext(filename)[1]
        if not ext: parser.error('could not detect the file type extension from the filename')
        kind = ext[1:]
        if kind in ('csv', 'txt'):
            import csv
            data = []
            channels = ds.displayed_channels
            for channel in channels:
                data.append(ds.get_waveform_samples(channel, mode=args.mode))
            if args.with_time:
                data.insert(0, ds.waveform_time_values_decimal)
            lengths = [len(samples) for samples in data]
            if len(set(lengths)) != 1:
                logger.error('Different number of samples read for different channels!')
                sys.exit(1)
            from itertools import zip_longest  # used below to iterate over the per-channel samples
            def csv_open(filename):
                if sys.version_info >= (3, 0):
                    return open(filename, 'w', newline='')
                else:
                    return open(filename, 'wb')
            with csv_open(filename) as csv_file:
                delimiter = ',' if kind == 'csv' else '\t'
                csv_writer = csv.writer(csv_file, delimiter=delimiter)
                if args.with_time:
                    csv_writer.writerow(['TIME'] + channels)
                else:
                    csv_writer.writerow(channels)
                for vals in zip_longest(*data):
                    if args.with_time:
                        vals = [vals[0]] + ['{:.2e}'.format(val) for val in vals[1:]]
                    else:
                        vals = ['{:.2e}'.format(val) for val in vals]
                    csv_writer.writerow(vals)
        else:
            parser.error('This tool cannot handle the requested --type')
        if not args.verbose: print(filename)
        else: print("Saved file: " + filename)

    if args.action == 'shell':
        try:
            import atexit
            import readline
            histfile = os.path.join(os.path.expanduser("~"), ".ds1054z_history")
            try:
                readline.read_history_file(histfile)
            except IOError as e:
                if e.errno != errno.ENOENT:
                    raise e
            atexit.register(readline.write_history_file, histfile)
        except ImportError:
            pass
        run_shell(ds)

    if args.action == 'measure':
        v = ds.get_channel_measurement(args.channel, args.item, type=args.type)
        if v is not None:
            print(v)
Code example #14
def color(image, factor):
  """Equivalent of PIL Color."""
  factor = (factor/MAX_LEVEL) * 1.8 + 0.1
  image = Image.fromarray(image)
  image = ImageEnhance.Color(image).enhance(factor)
  return np.asarray(image)
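
The factor = (factor/MAX_LEVEL) * 1.8 + 0.1 line maps an integer augmentation level in [0, MAX_LEVEL] onto an enhancement factor in [0.1, 1.9], so the midpoint of the level range corresponds to a factor of 1.0 (no change). A small sketch of that mapping, assuming MAX_LEVEL = 10 as in typical AutoAugment-style code (the constant itself is defined elsewhere in the original module and is not part of this excerpt):

MAX_LEVEL = 10.  # assumed value for illustration

def level_to_color_factor(level):
    # level 0 -> 0.1 (nearly grayscale), MAX_LEVEL/2 -> 1.0 (unchanged), MAX_LEVEL -> 1.9
    return (level / MAX_LEVEL) * 1.8 + 0.1

print([round(level_to_color_factor(l), 3) for l in (0, 5, 10)])  # [0.1, 1.0, 1.9]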
Code example #15
    def __getitem__(self, idx):
        boxes = self.boxes[idx * BATCH_SIZE:(idx + 1) * BATCH_SIZE]

        batch_images = np.zeros((len(boxes), IMAGE_SIZE, IMAGE_SIZE, 3),
                                dtype=np.float32)
        batch_boxes = np.zeros((len(boxes), GRID_SIZE, GRID_SIZE, 5),
                               dtype=np.float32)
        for i, row in enumerate(boxes):
            path, x0, y0, x1, y1 = row

            with Image.open(path) as img:
                if self.rnd_rescale:
                    old_width = img.width
                    old_height = img.height

                    rescale = np.random.uniform(low=0.6, high=1.4)
                    new_width = int(old_width * rescale)
                    new_height = int(old_height * rescale)

                    img = img.resize((new_width, new_height))

                    x0 *= new_width / old_width
                    y0 *= new_height / old_height
                    x1 *= new_width / old_width
                    y1 *= new_height / old_height

                if self.rnd_crop:
                    start_x = np.random.randint(0,
                                                high=np.floor(0.15 *
                                                              img.width))
                    stop_x = img.width - np.random.randint(
                        0, high=np.floor(0.15 * img.width))
                    start_y = np.random.randint(0,
                                                high=np.floor(0.15 *
                                                              img.height))
                    stop_y = img.height - np.random.randint(
                        0, high=np.floor(0.15 * img.height))

                    img = img.crop((start_x, start_y, stop_x, stop_y))

                    x0 = max(x0 - start_x, 0)
                    y0 = max(y0 - start_y, 0)
                    x1 = min(x1 - start_x, img.width)
                    y1 = min(y1 - start_y, img.height)

                    if np.abs(x1 - x0) < 5 or np.abs(y1 - y0) < 5:
                        print(
                            "\nWarning: cropped too much (obj width {}, obj height {}, img width {}, img height {})\n"
                            .format(x1 - x0, y1 - y0, img.width, img.height))

                if self.rnd_flip:
                    elem = np.random.choice([0, 90, 180, 270, 1423, 1234])
                    if elem % 10 == 0:
                        x = x0 - img.width / 2
                        y = y0 - img.height / 2

                        x0 = img.width / 2 + x * np.cos(
                            np.deg2rad(elem)) - y * np.sin(np.deg2rad(elem))
                        y0 = img.height / 2 + x * np.sin(
                            np.deg2rad(elem)) + y * np.cos(np.deg2rad(elem))

                        x = x1 - img.width / 2
                        y = y1 - img.height / 2

                        x1 = img.width / 2 + x * np.cos(
                            np.deg2rad(elem)) - y * np.sin(np.deg2rad(elem))
                        y1 = img.height / 2 + x * np.sin(
                            np.deg2rad(elem)) + y * np.cos(np.deg2rad(elem))

                        img = img.rotate(-elem)
                    else:
                        if elem == 1423:
                            img = img.transpose(Image.FLIP_TOP_BOTTOM)
                            y0 = img.height - y0
                            y1 = img.height - y1

                        elif elem == 1234:
                            img = img.transpose(Image.FLIP_LEFT_RIGHT)
                            x0 = img.width - x0
                            x1 = img.width - x1

                image_width = img.width
                image_height = img.height

                tmp = x0
                x0 = min(x0, x1)
                x1 = max(tmp, x1)

                tmp = y0
                y0 = min(y0, y1)
                y1 = max(tmp, y1)

                x0 = max(x0, 0)
                y0 = max(y0, 0)

                y0 = min(y0, image_height)
                x0 = min(x0, image_width)
                y1 = min(y1, image_height)
                x1 = min(x1, image_width)

                if self.rnd_color:
                    enhancer = ImageEnhance.Color(img)
                    img = enhancer.enhance(np.random.uniform(low=0.5,
                                                             high=1.5))

                    enhancer2 = ImageEnhance.Brightness(img)
                    img = enhancer2.enhance(np.random.uniform(low=0.7, high=1.3))

                img = img.resize((IMAGE_SIZE, IMAGE_SIZE))
                img = img.convert('RGB')
                img = np.array(img, dtype=np.float32)

                if self.rnd_multiply:
                    img[..., 0] = np.floor(
                        np.clip(
                            img[..., 0] * np.random.uniform(low=0.8, high=1.2),
                            0.0, 255.0))
                    img[..., 1] = np.floor(
                        np.clip(
                            img[..., 1] * np.random.uniform(low=0.8, high=1.2),
                            0.0, 255.0))
                    img[..., 2] = np.floor(
                        np.clip(
                            img[..., 2] * np.random.uniform(low=0.8, high=1.2),
                            0.0, 255.0))

                batch_images[i] = preprocess_input(img.copy())

            x_c = (GRID_SIZE / image_width) * (x0 + (x1 - x0) / 2)
            y_c = (GRID_SIZE / image_height) * (y0 + (y1 - y0) / 2)

            floor_y = math.floor(y_c)
            floor_x = math.floor(x_c)

            batch_boxes[i, floor_y, floor_x, 0] = (y1 - y0) / image_height
            batch_boxes[i, floor_y, floor_x, 1] = (x1 - x0) / image_width
            batch_boxes[i, floor_y, floor_x, 2] = y_c - floor_y
            batch_boxes[i, floor_y, floor_x, 3] = x_c - floor_x
            batch_boxes[i, floor_y, floor_x, 4] = 1

            if self.debug:
                changed = img.astype(np.uint8)
                if not os.path.exists("__debug__"):
                    os.makedirs("__debug__")

                changed = Image.fromarray(changed)

                x_c = (floor_x +
                       batch_boxes[i, floor_y, floor_x, 3]) / GRID_SIZE
                y_c = (floor_y +
                       batch_boxes[i, floor_y, floor_x, 2]) / GRID_SIZE

                y0 = IMAGE_SIZE * (y_c -
                                   batch_boxes[i, floor_y, floor_x, 0] / 2)
                x0 = IMAGE_SIZE * (x_c -
                                   batch_boxes[i, floor_y, floor_x, 1] / 2)
                y1 = y0 + IMAGE_SIZE * batch_boxes[i, floor_y, floor_x, 0]
                x1 = x0 + IMAGE_SIZE * batch_boxes[i, floor_y, floor_x, 1]

                draw = ImageDraw.Draw(changed)
                draw.rectangle(((x0, y0), (x1, y1)), outline="green")

                changed.save(os.path.join("__debug__", os.path.basename(path)))

        return batch_images, batch_boxes
Code example #16
File: rdpscraper.py  Project: zxc2007/rdpscraper
     contrast = ImageEnhance.Contrast(img)
     img = contrast.enhance(0.9)
     #color = ImageEnhance.Color(img)
     #img = color.enhance(0.)
     string = pytesseract.image_to_string(img)
     #    output = "test.txt"
     #    with open(output, 'w+') as f:
     #        for line in string:
     #            f.write('\n'.join(line))
     #            f.write('\n')
     #print "found"
 else:
     img = img.resize([int(2.2 * s) for s in img.size])
     enhancer = ImageEnhance.Sharpness(img)
     img = enhancer.enhance(0.2)
     color = ImageEnhance.Color(img)
     img = color.enhance(0)
     #bright = ImageEnhance.Brightness(img)
     #img = bright.enhance(0.5)
     #contrast = ImageEnhance.Contrast(img)
     #img = contrast.enhance(0.2)
     string = pytesseract.image_to_string(img)
     #output = "test.txt"
     #with open(output, 'w+') as f:
     #    for line in string:
     #        f.write('\n'.join(line))
     #        f.write('\n')
     #print "found"
 #print(pytesseract.image_to_string(img))
 #print "------------------------------------------------------------------------------------------------------------\n"
 output = pytesseract.image_to_string(img)
Code example #17
File: auto_augment.py  Project: zjamy-hust/LaMCTS
def color(img, magnitude):
    magnitudes = np.linspace(0.1, 1.9, 11)
    img = ImageEnhance.Color(img).enhance(
        random.uniform(magnitudes[magnitude], magnitudes[magnitude + 1]))
    return img
Code example #18
File: base_aug.py  Project: xxxgp/deepvac
 def __call__(self, img):
     self.auditInput(img)
     img = self.cv2pillow(img)
     img = ImageEnhance.Color(img).enhance(np.random.uniform(0.8, 1.3))
     return self.pillow2cv(img)
Code example #19
 def __init__(self, Numbers=None, max_Magnitude=None):
     self.transforms = [
         'autocontrast', 'equalize', 'rotate', 'solarize', 'color',
         'posterize', 'contrast', 'brightness', 'sharpness', 'shearX',
         'shearY', 'translateX', 'translateY'
     ]
     if Numbers is None:
         self.Numbers = len(self.transforms) // 2
     else:
         self.Numbers = Numbers
     if max_Magnitude is None:
         self.max_Magnitude = 10
     else:
         self.max_Magnitude = max_Magnitude
     fillcolor = 128
     self.ranges = {
         # Test these magnitude ranges yourself and see what each operation does;
         # there is no need to follow the values in autoaugment.py.
         "shearX": np.linspace(0, 0.3, 10),
         "shearY": np.linspace(0, 0.3, 10),
         "translateX": np.linspace(0, 0.2, 10),
         "translateY": np.linspace(0, 0.2, 10),
         "rotate": np.linspace(0, 360, 10),
         "color": np.linspace(0.0, 0.9, 10),
         "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
         "solarize": np.linspace(256, 231, 10),
         "contrast": np.linspace(0.0, 0.5, 10),
         "sharpness": np.linspace(0.0, 0.9, 10),
         "brightness": np.linspace(0.0, 0.3, 10),
         "autocontrast": [0] * 10,
         "equalize": [0] * 10,
         "invert": [0] * 10
     }
     self.func = {
         "shearX":
         lambda img, magnitude: img.transform(img.size,
                                              Image.AFFINE,
                                              (1, magnitude * random.choice(
                                                  [-1, 1]), 0, 0, 1, 0),
                                              Image.BICUBIC,
                                               fillcolor=fillcolor),
         "shearY":
         lambda img, magnitude: img.transform(img.size,
                                              Image.AFFINE,
                                              (1, 0, 0, magnitude * random.
                                               choice([-1, 1]), 1, 0),
                                              Image.BICUBIC,
                                               fillcolor=fillcolor),
         "translateX":
         lambda img, magnitude: img.transform(
             img.size,
             Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice(
                 [-1, 1]), 0, 1, 0),
             fillcolor=fillcolor),
         "translateY":
         lambda img, magnitude: img.transform(
             img.size,
             Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.
                            choice([-1, 1])),
             fillcolor=fillcolor),
         "rotate":
         lambda img, magnitude: self.rotate_with_fill(img, magnitude),
         # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
         "color":
         lambda img, magnitude: ImageEnhance.Color(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "posterize":
         lambda img, magnitude: ImageOps.posterize(img, magnitude),
         "solarize":
         lambda img, magnitude: ImageOps.solarize(img, magnitude),
         "contrast":
         lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "sharpness":
         lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "brightness":
         lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
             1 + magnitude * random.choice([-1, 1])),
         "autocontrast":
         lambda img, magnitude: ImageOps.autocontrast(img),
         "equalize":
         lambda img, magnitude: img,
         "invert":
         lambda img, magnitude: ImageOps.invert(img)
     }
Code example #20
File: data_transforms.py  Project: wxian3/dla
 def __call__(self, image, *args):
     alpha = 1.0 + np.random.uniform(-self.var, self.var)
     image = ImageEnhance.Color(image).enhance(alpha)
     return (image, *args)
Code example #21
File: stoneStamp.py  Project: darrydai/stoneScanner
bg_Img=bg_Img.convert('RGBA')

#load stone image
stoneImg=Image.open("/Users/Darry/Desktop/Darrys_project/2021/04_文博花蓮館/code/stoneScanner/data/pic/stone_morph.png")
stoneImg=stoneImg.convert('RGBA')

#Enhance stone image sharpness
enh_sha = ImageEnhance.Sharpness(stoneImg)
stoneImg = enh_sha.enhance(sharpness)

#Enhance stone image contrast
enh_con = ImageEnhance.Contrast(stoneImg)
stoneImg = enh_con.enhance(contrast)

#Enhance stone image color
enh_col = ImageEnhance.Color(stoneImg)
stoneImg = enh_col.enhance(color)

#resize stone image
stoneImg.thumbnail((295,295),Image.BILINEAR)
stoneW,stoneH=stoneImg.size

stoneCopy=stoneImg

#convert image to gray then to binary
stoneCopy=stoneCopy.convert('L')
stoneCopy=stoneCopy.convert('1')

#shift the stone image position
bg_Img.paste(stoneCopy,(int((blackArea[0]-stoneW)/2),int((blackArea[1]-stoneH)/2)),stoneImg)
bg_Img.save("/Users/Darry/Desktop/Darrys_project/2021/04_文博花蓮館/code/stoneScanner/data/pic/stamp/01.png",dpi=(600,300))
Code example #22
File: autoaugment.py  Project: hzphzp/MetaBIN
def color(img, factor, **__):
    return ImageEnhance.Color(img).enhance(factor)
Code example #23
from PIL import Image
from PIL import ImageOps
from PIL import ImageEnhance

im = Image.open("../../_images/portrait.jpg").convert("L")

im = ImageOps.colorize(
    im,
    (255, 0, 0),
    (0, 255, 0),
)
im = ImageEnhance.Contrast(im).enhance(3)
im = ImageEnhance.Color(im).enhance(0.5)
im.show()
Code example #24
    def __init__(self,
                 p1,
                 operation1,
                 magnitude_idx1,
                 p2,
                 operation2,
                 magnitude_idx2,
                 fillcolor=(128, 128, 128)):
        ranges = {
            "shearX": np.linspace(0, 0.3, 10),
            "shearY": np.linspace(0, 0.3, 10),
            "translateX": np.linspace(0, 150 / 331, 10),
            "translateY": np.linspace(0, 150 / 331, 10),
            "rotate": np.linspace(0, 30, 10),
            "color": np.linspace(0.0, 0.9, 10),
            "posterize": np.round(np.linspace(8, 4, 10), 0).astype(np.int),
            "solarize": np.linspace(256, 0, 10),
            "contrast": np.linspace(0.0, 0.9, 10),
            "sharpness": np.linspace(0.0, 0.9, 10),
            "brightness": np.linspace(0.0, 0.9, 10),
            "autocontrast": [0] * 10,
            "equalize": [0] * 10,
            "invert": [0] * 10,
            "cutout": np.linspace(0.0, 0.2, 10),
        }

        def Cutout(img, v):  # [0, 60] => percentage: [0, 0.2]
            #assert 0.0 <= v <= 0.2
            if v <= 0.:
                return img

            v = v * img.size[0]

            return CutoutAbs(img, v)

            # x0 = np.random.uniform(w - v)
            # y0 = np.random.uniform(h - v)
            # xy = (x0, y0, x0 + v, y0 + v)
            # color = (127, 127, 127)
            # img = img.copy()
            # PIL.ImageDraw.Draw(img).rectangle(xy, color)
            # return img

        def CutoutAbs(img, v):  # [0, 60] => percentage: [0, 0.2]
            # assert 0 <= v <= 20
            if v < 0:
                return img
            w, h = img.size
            x0 = np.random.uniform(w)
            y0 = np.random.uniform(h)

            x0 = int(max(0, x0 - v / 2.))
            y0 = int(max(0, y0 - v / 2.))
            x1 = min(w, x0 + v)
            y1 = min(h, y0 + v)

            xy = (x0, y0, x1, y1)
            color = (125, 123, 114)
            # color = (0, 0, 0)
            img = img.copy()
            ImageDraw.Draw(img).rectangle(xy, color)
            return img

        # from https://stackoverflow.com/questions/5252170/specify-image-filling-color-when-rotating-in-python-with-pil-and-setting-expand
        def rotate_with_fill(img, magnitude):
            rot = img.convert("RGBA").rotate(magnitude)
            return Image.composite(rot, Image.new("RGBA", rot.size,
                                                  (128, ) * 4),
                                   rot).convert(img.mode)

        func = {
            "shearX":
            lambda img, magnitude: img.transform(img.size,
                                                 Image.AFFINE,
                                                 (1, magnitude * random.choice(
                                                     [-1, 1]), 0, 0, 1, 0),
                                                 Image.BICUBIC,
                                                 fillcolor=fillcolor),
            "shearY":
            lambda img, magnitude: img.transform(img.size,
                                                 Image.AFFINE,
                                                 (1, 0, 0, magnitude * random.
                                                  choice([-1, 1]), 1, 0),
                                                 Image.BICUBIC,
                                                 fillcolor=fillcolor),
            "translateX":
            lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE, (1, 0, magnitude * img.size[0] * random.choice(
                    [-1, 1]), 0, 1, 0),
                fillcolor=fillcolor),
            "translateY":
            lambda img, magnitude: img.transform(
                img.size,
                Image.AFFINE, (1, 0, 0, 0, 1, magnitude * img.size[1] * random.
                               choice([-1, 1])),
                fillcolor=fillcolor),
            "cutout":
            lambda img, magnitude: Cutout(img, magnitude),
            "rotate":
            lambda img, magnitude: rotate_with_fill(img, magnitude),
            # "rotate": lambda img, magnitude: img.rotate(magnitude * random.choice([-1, 1])),
            "color":
            lambda img, magnitude: ImageEnhance.Color(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "posterize":
            lambda img, magnitude: ImageOps.posterize(img, magnitude),
            "solarize":
            lambda img, magnitude: ImageOps.solarize(img, magnitude),
            "contrast":
            lambda img, magnitude: ImageEnhance.Contrast(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "sharpness":
            lambda img, magnitude: ImageEnhance.Sharpness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "brightness":
            lambda img, magnitude: ImageEnhance.Brightness(img).enhance(
                1 + magnitude * random.choice([-1, 1])),
            "autocontrast":
            lambda img, magnitude: ImageOps.autocontrast(img),
            "equalize":
            lambda img, magnitude: ImageOps.equalize(img),
            "invert":
            lambda img, magnitude: ImageOps.invert(img)
        }

        # self.name = "{}_{:.2f}_and_{}_{:.2f}".format(
        #     operation1, ranges[operation1][magnitude_idx1],
        #     operation2, ranges[operation2][magnitude_idx2])
        self.p1 = p1
        self.operation1 = func[operation1]
        self.magnitude1 = ranges[operation1][magnitude_idx1]
        self.p2 = p2
        self.operation2 = func[operation2]
        self.magnitude2 = ranges[operation2][magnitude_idx2]
Code example #25
 def forward(self, data, state):
     im = Image.fromarray(data)
     factor = 1.0 + self.diff * random.choice([1.0, -1.0])
     im = ImageEnhance.Color(im).enhance(factor)
     return np.copy(np.asarray(im))
Code example #26
File: util.py  Project: vilka-lab/EffcientNetV2
def random_color1(img, factor):
    factor = (factor / 10.) * .9
    factor = 1.0 + -factor if random.random() > 0.5 else factor
    return ImageEnhance.Color(img).enhance(factor)
Code example #27
File: reader.py  Project: will-jl944/PaddleSlim
 def random_color(img, lower=0.5, upper=1.5):
     e = np.random.uniform(lower, upper)
     return ImageEnhance.Color(img).enhance(e)
Code example #28
File: util.py  Project: vilka-lab/EffcientNetV2
def random_color2(img, factor):
    factor = (factor / 10.) * 1.8 + 0.1
    return ImageEnhance.Color(img).enhance(factor)
Code example #29
 def tranfun(self, image):
     image = getpilimage(image)
     col = ImageEnhance.Color(image)
     return col.enhance(random.uniform(self.lower, self.upper))
Code example #30
def color(pil_img, level):
    level = float_parameter(sample_level(level), 1.8) + 0.1
    return ImageEnhance.Color(pil_img).enhance(level)