def test_decode_rgba():
    """Decoding with RGBA mode must produce an image that carries an alpha band."""
    decoded = decode("LlMF%n00%#MwS|WCWEM{R*bbWBbH", 416, 416, mode=PixelMode.RGBA)
    assert ('R', 'G', 'B', 'A') == decoded.getbands()
def test_asymmetric():
    """Asymmetric component counts should skew variance toward the denser axis.

    More vertical components (2x8) should leave more variance across rows than
    columns; more horizontal components (8x2) should do the opposite.
    """
    source = PIL.Image.open(os.path.join(base_path, "cool_cat.jpg"))

    tall_hash = blurhash.encode(np.array(source.convert("RGB")), components_x=2, components_y=8)
    assert tall_hash == "%BMOZfK1BBNG2skqs9n4?HvgJ.Nav}J-$%sm"
    tall_pixels = blurhash.decode(tall_hash, 32, 32)
    column_variance = np.sum(np.var(tall_pixels, axis=0))
    row_variance = np.sum(np.var(tall_pixels, axis=1))
    assert column_variance > row_variance

    wide_hash = blurhash.encode(np.array(source.convert("RGB")), components_x=8, components_y=2)
    wide_pixels = blurhash.decode(wide_hash, 32, 32)
    assert np.sum(np.var(wide_pixels, axis=0)) < np.sum(np.var(wide_pixels, axis=1))
def test_invalid_parameters():
    """Malformed hashes and out-of-range component counts must raise ValueError."""
    image = np.array(
        PIL.Image.open(os.path.join(base_path, "cool_cat.jpg")).convert("RGB"))

    # Truncated / length-inconsistent blurhash strings.
    for bad_hash in ("UBMO", "UBMOZfK1GG%LBBNG"):
        with pytest.raises(ValueError):
            blurhash.decode(bad_hash, 32, 32)

    # Component counts outside the valid 1..9 range on either axis.
    for cx, cy in ((0, 1), (1, 0), (1, 10), (10, 1)):
        with pytest.raises(ValueError):
            blurhash.encode(image, components_x=cx, components_y=cy)
def test_linear_dc_only():
    """A 1x1-component linear blurhash should reproduce the image's mean color."""
    picture = PIL.Image.open(os.path.join(base_path, "cool_cat.jpg"))
    linear_pixels = np.array(picture.convert("RGB")) / 255.0
    dc_hash = blurhash.encode(linear_pixels, components_x=1, components_y=1, linear=True)
    decoded_avg = blurhash.decode(dc_hash, 1, 1, linear=True)
    # Flatten (H, W, C) -> (H*W, C) and average over all pixels.
    flattened = linear_pixels.reshape(
        linear_pixels.shape[0] * linear_pixels.shape[1], -1)
    expected_avg = np.mean(flattened, 0)
    assert np.sum(np.abs(decoded_avg - expected_avg)) < 0.01
def processImage():
    """Flask endpoint: blurhash round-trip an uploaded image, return it as PNG.

    Reads ``file`` from the multipart form plus optional ``width``/``height``
    form fields (defaulting to a 128x128 output), encodes the upload to a
    blurhash, decodes it back at the requested size, and streams the result.
    """
    import io  # stdlib; local import keeps this fix self-contained

    file = request.files['file']
    width = int(request.form.get('width', 128))
    height = int(request.form.get('height', 128))
    image = np.array(PIL.Image.open(file).convert("RGB"))
    # `image` is already an ndarray; the original wrapped it in np.array() again.
    blur_hash = blurhash.encode(image)
    img = PIL.Image.fromarray(
        np.array(blurhash.decode(blur_hash, width, height)).astype('uint8'))
    # Serve from memory. The original used NamedTemporaryFile(delete=False)
    # and never removed the file, leaking one temp file per request.
    buffer = io.BytesIO()
    img.save(buffer, format='PNG')
    buffer.seek(0)
    return send_file(buffer, mimetype='image/png')
def test_decode_blurhash():
    """decode() must hand back a PIL Image instance."""
    result = decode("LlMF%n00%#MwS|WCWEM{R*bbWBbH", 416, 416)
    assert type(result) is Image.Image
def test_decode_valid_width_height():
    """The decoded image must match the requested output dimensions."""
    result = decode("LlMF%n00%#MwS|WCWEM{R*bbWBbH", 640, 480)
    assert result.width == 640 and result.height == 480
def test_decode_invalid_punch():
    """A negative punch value is rejected with ValueError."""
    with pytest.raises(ValueError):
        decode("LlMF%n00%#MwS|WCWEM{R*bbWBbH", 416, 416, punch=-2)
def test_decode_invalid_height():
    """A zero output height is rejected with ValueError."""
    with pytest.raises(ValueError):
        decode("LlMF%n00%#MwS|WCWEM{R*bbWBbH", 416, 0)
def test_decode_invalid_width():
    """A negative output width is rejected with ValueError."""
    with pytest.raises(ValueError):
        decode("LlMF%n00%#MwS|WCWEM{R*bbWBbH", -416, 416)
def test_decode_invalid_mode():
    """An unknown pixel mode string is rejected with ValueError."""
    with pytest.raises(ValueError):
        decode("LlMF%n00%#MwS|WCWEM{R*bbWBbH", 416, 416, mode='XXX')
def test_decode_invalid_blurhash():
    """A structurally invalid hash string is rejected with ValueError."""
    with pytest.raises(ValueError):
        decode("#MwS|WCWEM{R", 416, 416)
def test_decode_punch():
    """Decoding with a punch factor still yields a PIL Image."""
    punched = decode("LlMF%n00%#MwS|WCWEM{R*bbWBbH", 416, 416, punch=2)
    assert type(punched) is Image.Image
def test_decode():
    """Decoded pixels must stay close to the stored reference array."""
    decoded = blurhash.decode("UBMOZfK1GG%LBBNG,;Rj2skq=eE1s9n4S5Na", 32, 32)
    expected = np.load(os.path.join(base_path, "blurhash_out.npy"))
    total_error = np.sum(np.abs(decoded - expected))
    assert total_error < 1.0
def processImage():
    """Round-trip one image through blurhash: encode, decode, scale, crop, save.

    Relies on module-level configuration not visible in this chunk
    (input_image, output_image, work_size, min_components,
    target_components, out_size) — presumably CLI-derived; verify against
    the surrounding script.
    """
    # Load the source image and remember its original dimensions.
    image = PIL.Image.open(input_image).convert("RGB")
    image_size = (image.width, image.height)
    print("Read image " + input_image + "({} x {})".format(image_size[0], image_size[1]))

    # Convert to linear and thumbnail
    # srgb_to_linear is applied per pixel via np.vectorize; each of the three
    # channels is thumbnailed separately as a 32-bit float ('F' mode) image.
    image_linear = np.vectorize(blurhash.srgb_to_linear)(np.array(image))
    image_linear_thumb = []
    for i in range(3):
        channel_linear = PIL.Image.fromarray(image_linear[:, :, i].astype("float32"), mode='F')
        channel_linear.thumbnail((work_size, work_size))
        image_linear_thumb.append(np.array(channel_linear))
    # Restack the per-channel list into (height, width, channel) order.
    image_linear_thumb = np.transpose(np.array(image_linear_thumb), (1, 2, 0))
    print("Encoder working at size: {} x {}".format(
        image_linear_thumb.shape[1], image_linear_thumb.shape[0]))

    # Figure out a good component count
    # Roughly one component per (work_size / target_components) pixels on each
    # axis, clamped into [min_components, target_components].
    components_x = int(
        max(
            min_components,
            min(
                target_components,
                round(image_linear_thumb.shape[1] / (work_size / target_components)))))
    components_y = int(
        max(
            min_components,
            min(
                target_components,
                round(image_linear_thumb.shape[0] / (work_size / target_components)))))
    print("Using component counts: {} x {}".format(components_x, components_y))

    # Create blurhash
    blur_hash = blurhash.encode(image_linear_thumb, components_x, components_y, linear=True)
    print("Blur hash of image: " + blur_hash)

    """ Part 2: Decode """

    # Figure out what size to decode to: a few pixels per stored component.
    decode_components_x, decode_components_y = blurhash.components(blur_hash)
    decode_size_x = decode_components_x * (work_size // target_components)
    decode_size_y = decode_components_y * (work_size // target_components)
    print("Decoder working at size {} x {}".format(decode_size_x, decode_size_y))

    # Decode
    decoded_image = np.array(
        blurhash.decode(blur_hash, decode_size_x, decode_size_y, linear=True))

    # Scale so that we have the right size to fill out_size without letter/pillarboxing
    # while matching original images aspect ratio.
    fill_x_size_y = out_size[0] * (image_size[0] / image_size[1])
    fill_y_size_x = out_size[1] * (image_size[1] / image_size[0])
    scale_target_size = list(out_size)
    if fill_x_size_y / out_size[1] < fill_y_size_x / out_size[0]:
        scale_target_size[0] = max(scale_target_size[0], int(fill_y_size_x))
    else:
        scale_target_size[1] = max(scale_target_size[1], int(fill_x_size_y))

    # Scale (ideally, your UI layer should take care of this in some kind of efficient way)
    print("Scaling to target size: {} x {}".format(scale_target_size[0], scale_target_size[1]))
    decoded_image_large = []
    for i in range(3):
        # Same per-channel float-image trick as the encode side, now upscaling.
        channel_linear = PIL.Image.fromarray(
            decoded_image[:, :, i].astype("float32"), mode='F')
        decoded_image_large.append(
            np.array(
                channel_linear.resize(scale_target_size, PIL.Image.BILINEAR)))
    decoded_image_large = np.transpose(np.array(decoded_image_large), (1, 2, 0))

    # Convert to srgb PIL image
    decoded_image_out = np.vectorize(blurhash.linear_to_srgb)(
        np.array(decoded_image_large))
    decoded_image_out = PIL.Image.fromarray(
        np.array(decoded_image_out).astype('uint8'))

    # Crop to final size and write
    # Center-crop to out_size; NOTE(review): offsets can be fractional here —
    # presumably PIL tolerates float box coordinates, confirm.
    decoded_image_out = decoded_image_out.crop((
        (decoded_image_out.width - out_size[0]) / 2,
        (decoded_image_out.height - out_size[1]) / 2,
        (decoded_image_out.width + out_size[0]) / 2,
        (decoded_image_out.height + out_size[1]) / 2,
    ))
    decoded_image_out.save(output_image)
    print("Wrote final result to " + str(output_image))
components_x, components_y, linear=True) print("Blur hash of image: " + blur_hash) """ Part 2: Decode """ # Figure out what size to decode to decode_components_x, decode_components_y = blurhash.components(blur_hash) decode_size_x = decode_components_x * (work_size // target_components) decode_size_y = decode_components_y * (work_size // target_components) print("Decoder working at size {} x {}".format(decode_size_x, decode_size_y)) # Decode decoded_image = np.array( blurhash.decode(blur_hash, decode_size_x, decode_size_y, linear=True)) # Scale so that we have the right size to fill out_size without letter/pillarboxing # while matching original images aspect ratio. fill_x_size_y = out_size[0] * (image_size[0] / image_size[1]) fill_y_size_x = out_size[1] * (image_size[1] / image_size[0]) scale_target_size = list(out_size) if fill_x_size_y / out_size[1] < fill_y_size_x / out_size[0]: scale_target_size[0] = max(scale_target_size[0], int(fill_y_size_x)) else: scale_target_size[1] = max(scale_target_size[1], int(fill_x_size_y)) # Scale (ideally, your UI layer should take care of this in some kind of efficient way) print("Scaling to target size: {} x {}".format(scale_target_size[0], scale_target_size[1])) decoded_image_large = []