Code Example #1
def space(image):
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 12)
    colours = util.order_colours_by_brightness(colours)
    indices = sorted(random.sample(range(len(colours)), 3))
    colours = [colours[i] for i in indices]
    light, bg, dark = map(tuple, colours)
    light = (200, 200, 100)
    dark = (100, 200, 100)
    bg = (0, 0, 50, 255)

    layer = Image.open(
        os.path.dirname(os.path.abspath(__file__)) + '/' + 'assets/space.jpg')
    layer = util.random_crop(layer, util.WIDTH, util.HEIGHT)

    colours = util.get_dominant_colours(image, 10)
    colours = util.order_colours_by_saturation(colours)[:-3]
    colours = random.sample(colours, 5)
    colours = util.order_colours_by_hue(colours)

    layer = layer.convert('RGB')
    gradient = util.create_gradient(layer.size, colours)
    im = Image.blend(layer, gradient, .4)

    return im
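
This example calls a project-specific util.random_crop rather than a shared library. As a rough illustration only, here is a minimal sketch of a PIL-based helper consistent with the (image, width, height) call above; the implementation is hypothetical and not taken from the jam-image-filter util module.

import random
from PIL import Image

def random_crop(image, width, height):
    # Hypothetical sketch: return a random width x height window from image.
    # Upscale first if the source is smaller than the requested crop.
    if image.width < width or image.height < height:
        scale = max(width / image.width, height / image.height)
        image = image.resize((int(image.width * scale) + 1,
                              int(image.height * scale) + 1), Image.BICUBIC)
    left = random.randint(0, image.width - width)
    top = random.randint(0, image.height - height)
    return image.crop((left, top, left + width, top + height))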
Code Example #2
    def __getitem__(self, item):
        img_name = self.imglst[item]
        prefix = ".".join(img_name.split('.')[:-1])
        label_name = prefix + '.txt'
        text_polys, text_tags = parse_lines(os.path.join(self.data_dir, label_name))
        im = cv2.imread(os.path.join(self.data_dir, img_name))
        # im = Image.open(os.path.join(self.data_dir, img_name)).convert('RGB')
        im = np.array(im)[:, :, :3]
        im, text_polys = random_scale(im, text_polys)
        score_maps, kernel_maps, training_mask = shrink_polys(im, polys=text_polys, tags=text_tags, mini_scale_ratio=0.5, num_kernels=6)
        imgs = [im, score_maps, kernel_maps, training_mask]

        # data augmentation: random flip, random rotate (currently disabled), random crop

        imgs = random_horizontal_flip(imgs)
        # imgs = random_rotate(imgs)
        imgs = random_crop(imgs, self.input_size)

        image, score_map, kernel_map, training_mask = imgs[0], imgs[1], imgs[2], imgs[3]
        if self.debug:
            im_show = np.concatenate([score_map, kernel_map[:, :, 0], kernel_map[:, :, 5]], axis=1)
            cv2.imshow('img', image)
            cv2.imshow('score_map', im_show)
            cv2.waitKey()
        image = mx.nd.array(image)
        score_map = mx.nd.array(score_map, dtype=np.float32)
        kernel_map = mx.nd.array(kernel_map, dtype=np.float32)
        training_mask = mx.nd.array(training_mask, dtype=np.float32)
        trans_image = self.trans(image)
        return trans_image, score_map, kernel_map, training_mask, transforms.ToTensor()(image)
Code Example #3
File: space.py   Project: fffunction/jam-image-filter
def space(image):
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 12)
    colours = util.order_colours_by_brightness(colours)
    indices = sorted(random.sample(range(len(colours)), 3))
    colours = [colours[i] for i in indices]
    light, bg, dark = map(tuple, colours)
    light = (200, 200, 100)
    dark = (100, 200, 100)
    bg = (0, 0, 50, 255)

    layer = Image.open(os.path.dirname(os.path.abspath(__file__)) + '/' +
                       'assets/space.jpg')
    layer = util.random_crop(layer, util.WIDTH, util.HEIGHT)

    colours = util.get_dominant_colours(image, 10)
    colours = util.order_colours_by_saturation(colours)[:-3]
    colours = random.sample(colours, 5)
    colours = util.order_colours_by_hue(colours)

    layer = layer.convert('RGB')
    gradient = util.create_gradient(layer.size, colours)
    im = Image.blend(layer, gradient, .4)

    return im
Code Example #4
File: bbqnet.py   Project: stmharry/BBQNet
def value_pipeline(value, phase):
    _MAX_DELTA = 63
    _CONTRAST_LOWER = 0.5
    _CONTRAST_UPPER = 1.5

    value = util.to_rgb(value)
    value = util.random_resize(value, size_range=_SIZE_RANGE)
    value = util.random_crop(value, size=_NET_SIZE)
    value = util.random_flip(value)
    if phase == data._TRAIN:
        value = util.random_adjust(value, max_delta=_MAX_DELTA, contrast_lower=_CONTRAST_LOWER, contrast_upper=_CONTRAST_UPPER)
    value = util.remove_mean(value, mean=_MEAN)
    return value
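
Here util.random_crop sits inside a TensorFlow preprocessing pipeline. Assuming a TensorFlow 1.x graph and a square _NET_SIZE, a thin wrapper along these lines would be consistent with the call above; this is a sketch, not the actual BBQNet util module.

import tensorflow as tf

def random_crop(value, size):
    # Take a random size x size x 3 window from an HWC image tensor.
    return tf.random_crop(value, [size, size, 3])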
Code Example #5
File: ombre.py   Project: fffunction/jam-image-filter
def ombre(image):
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 12)
    colours = util.order_colours_by_brightness(colours)
    light = random.choice(colours[:3])
    dark = random.choice(colours[-3:])

    layer = Image.open(os.path.dirname(os.path.abspath(__file__)) + '/' +
                       'assets/ombre.jpg')
    layer = util.random_crop(layer, util.WIDTH, util.HEIGHT)

    layer = layer.convert('RGB')
    layer = ImageOps.grayscale(layer)
    layer = ImageOps.colorize(layer, dark, light)
    return layer
Code Example #6
def ombre(image):
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 12)
    colours = util.order_colours_by_brightness(colours)
    light = random.choice(colours[:3])
    dark = random.choice(colours[-3:])

    layer = Image.open(
        os.path.dirname(os.path.abspath(__file__)) + '/' + 'assets/ombre.jpg')
    layer = util.random_crop(layer, util.WIDTH, util.HEIGHT)

    layer = layer.convert('RGB')
    layer = ImageOps.grayscale(layer)
    layer = ImageOps.colorize(layer, dark, light)
    return layer
Code Example #7
    def _random_crop_with_labels(self, image: np.ndarray,
                                 labels: dict) -> Tuple[np.ndarray, dict]:
        new_labels = copy.deepcopy(labels)
        scale = 0.1 + np.random.random() * 0.9
        image, (offset_y, offset_x) = util.random_crop(image, scale)
        height, width, _ = image.shape
        for i in range(len(labels["points"])):
            points = np.array(labels["points"][i])
            original_area = cv2.contourArea(points)
            new_points = np.array(new_labels["points"][i])
            new_points[:, 0] = np.clip(points[:, 0] - offset_x, 0, width)
            new_points[:, 1] = np.clip(points[:, 1] - offset_y, 0, height)
            new_labels["points"][i] = new_points.tolist()
            new_area = cv2.contourArea(new_points)
            if original_area * 0.2 > new_area:
                new_labels["ignored"][i] = True
        return image, new_labels
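
In this variant util.random_crop takes a scale factor and also returns the crop's top-left offset so the polygon labels can be shifted. A minimal NumPy sketch consistent with that signature follows; it is hypothetical and not the original project's code.

import numpy as np

def random_crop(image, scale):
    # Crop a window whose sides are `scale` times the original size and
    # return it together with the (offset_y, offset_x) of its top-left corner.
    height, width = image.shape[:2]
    crop_h = max(1, int(height * scale))
    crop_w = max(1, int(width * scale))
    offset_y = np.random.randint(0, height - crop_h + 1)
    offset_x = np.random.randint(0, width - crop_w + 1)
    crop = image[offset_y:offset_y + crop_h, offset_x:offset_x + crop_w]
    return crop, (offset_y, offset_x)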
Code Example #8
File: bokeh.py   Project: phuedx/jam-image-filter
def bokeh(image):
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 8)
    colours = util.order_colours_by_brightness(colours)[2:7]
    colour = random.choice(colours)
    colour = util.modify_hsv(colour, s=lambda s: 255, v=lambda v: 255)
    light = (255, 244, 180)

    layer = Image.open(
        os.path.dirname(os.path.abspath(__file__)) + '/' + 'assets/bokeh.png')
    layer = util.random_crop(layer, util.WIDTH, util.HEIGHT)
    r, g, b, a = layer.split()
    layer = layer.convert('RGB')
    layer = ImageOps.grayscale(layer)
    layer = ImageOps.colorize(layer, colour, light)
    layer.putalpha(a)
    im = Image.new('RGB', layer.size)
    im.paste(layer, (0, 0), layer)
    return im
Code Example #9
File: bokeh.py   Project: fffunction/jam-image-filter
def bokeh(image):
    image = image.convert('RGB')
    colours = util.get_dominant_colours(image, 8)
    colours = util.order_colours_by_brightness(colours)[2:7]
    colour = random.choice(colours)
    colour = util.modify_hsv(colour, s=lambda s: 255, v=lambda v: 255)
    light = (255, 244, 180)

    layer = Image.open(os.path.dirname(os.path.abspath(__file__)) + '/' +
                       'assets/bokeh.png')
    layer = util.random_crop(layer, util.WIDTH, util.HEIGHT)
    r, g, b, a = layer.split()
    layer = layer.convert('RGB')
    layer = ImageOps.grayscale(layer)
    layer = ImageOps.colorize(layer, colour, light)
    layer.putalpha(a)
    im = Image.new('RGB', layer.size)
    im.paste(layer, (0, 0), layer)
    return im
Code Example #10
            y_ = x_[:, :, :, 0:img_size]
            x_ = x_[:, :, :, img_size:]
        else:
            y_ = x_[:, :, :, img_size:]
            x_ = x_[:, :, :, 0:img_size]

        if img_size != opt.input_size:
            x_ = util.imgs_resize(x_, opt.input_size)
            y_ = util.imgs_resize(y_, opt.input_size)

        if opt.resize_scale:
            x_ = util.imgs_resize(x_, opt.resize_scale)
            y_ = util.imgs_resize(y_, opt.resize_scale)

        if opt.crop_size:
            x_, y_ = util.random_crop(x_, y_, opt.crop_size)

        if opt.fliplr:
            x_, y_ = util.random_fliplr(x_, y_)

        x_, y_ = Variable(x_), Variable(y_)

        D_result = D(x_, y_).squeeze()
        D_real_loss = BCE_loss(D_result, Variable(torch.ones(D_result.size())))

        G_result = G(x_)
        D_result = D(x_, G_result).squeeze()
        D_fake_loss = BCE_loss(D_result,
                               Variable(torch.zeros(D_result.size())))

        D_train_loss = (D_real_loss + D_fake_loss) * 0.5
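
In this pix2pix-style training loop, util.random_crop must cut the same window from the input batch x_ and the target batch y_ so the pair stays spatially aligned. A minimal sketch of such a paired crop for NCHW tensors (an assumed signature, not the original repository's helper):

import random

def random_crop(x, y, crop_size):
    # Pick one random window and apply it to both batches (NCHW layout).
    _, _, h, w = x.size()
    top = random.randint(0, h - crop_size)
    left = random.randint(0, w - crop_size)
    x = x[:, :, top:top + crop_size, left:left + crop_size]
    y = y[:, :, top:top + crop_size, left:left + crop_size]
    return x, y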
Code Example #11
        G_optimizer.param_groups[0]['lr'] -= opt.lrG / (opt.train_epoch -
                                                        opt.decay_epoch)
        #H_A_optimizer.param_groups[0]['lr']  = opt.lrH*(0.1**(epoch//opt.decay_epoch))
    start_batch_time = time.time()

    for iteration, batch in enumerate(train_loader, 0):
        realA = batch[0]
        realB = batch[1]
        batch_ind = batch[2]

        if opt.resize_scale:
            realA = util.imgs_resize(realA, opt.resize_scale)
            realB = util.imgs_resize(realB, opt.resize_scale)

        if opt.crop:
            realA = util.random_crop(realA, opt.input_size)
            realB = util.random_crop(realB, opt.input_size)

        if opt.fliplr:
            realA = util.random_fliplr(realA)
            realB = util.random_fliplr(realB)

        realA, realB = Variable(realA.cuda()), Variable(realB.cuda())

        # G STEP

        # train generator G
        G_optimizer.zero_grad()

        # generate real A to fake B; D_A(G_A(A))
        fakeB = G_A(realA)