Example 1
def test_invariant_denoise_color(dtype):
    denoised_img_color = _invariant_denoise(
        noisy_img_color.astype(dtype),
        _denoise_wavelet,
        denoiser_kwargs=dict(channel_axis=-1))
    denoised_mse = mse(denoised_img_color, test_img_color)
    original_mse = mse(noisy_img_color, test_img_color)
    assert denoised_mse < original_mse
    assert denoised_img_color.dtype == _supported_float_type(dtype)
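
For reference, `mse` in most of these snippets is `skimage.metrics.mean_squared_error`, which is just the mean of the squared per-element differences. A minimal sketch (assuming scikit-image is installed) checking that equivalence:

import numpy as np
from skimage.metrics import mean_squared_error as mse

a = np.random.rand(32, 32)
b = np.random.rand(32, 32)

# mean_squared_error reduces to the mean of squared differences
assert np.isclose(mse(a, b), np.mean((a - b) ** 2))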
Example 2
def test_invariant_denoise_color():
    denoised_img_color = _invariant_denoise(
        noisy_img_color,
        _denoise_wavelet,
        denoiser_kwargs=dict(multichannel=True))

    denoised_mse = mse(denoised_img_color, test_img_color)
    original_mse = mse(noisy_img_color, test_img_color)
    assert_(denoised_mse < original_mse)
Example 3
def test_calibrate_denoiser():
    parameter_ranges = {'sigma': np.linspace(0.1, 1, 5) / 2}

    denoiser = calibrate_denoiser(noisy_img,
                                  _denoise_wavelet,
                                  denoise_parameters=parameter_ranges)

    denoised_mse = mse(denoiser(noisy_img), test_img)
    original_mse = mse(noisy_img, test_img)
    assert_(denoised_mse < original_mse)
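
The leading-underscore helpers here (`_invariant_denoise`, `_denoise_wavelet`) appear to be internals from scikit-image's test suite; the public entry point is `skimage.restoration.calibrate_denoiser`. A minimal sketch of the same calibration, assuming a recent scikit-image:

import numpy as np
from skimage import data, img_as_float
from skimage.restoration import calibrate_denoiser, denoise_wavelet
from skimage.util import random_noise

image = img_as_float(data.camera())
noisy = random_noise(image, var=0.01)

# Grid of sigma values to try; calibrate_denoiser returns the denoiser
# whose parameters minimize the self-supervised (J-invariant) loss.
params = {'sigma': np.linspace(0.05, 0.3, 6)}
denoiser = calibrate_denoiser(noisy, denoise_wavelet,
                              denoise_parameters=params)
denoised = denoiser(noisy)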
Example 4
def test_invariant_denoise_color_deprecated():

    with expected_warnings(["`multichannel` is a deprecated argument"]):
        denoised_img_color = _invariant_denoise(
            noisy_img_color,
            _denoise_wavelet,
            denoiser_kwargs=dict(multichannel=True))

    denoised_mse = mse(denoised_img_color, test_img_color)
    original_mse = mse(noisy_img_color, test_img_color)
    assert_(denoised_mse < original_mse)
Example 5
def dist_metric(A, B, mode):
    if mode == 'mean_squared_error':
        d = mse(A, B)
    elif mode == 'adapted_rand_error':
        d, _, _ = arerr(A, B)
    elif mode == 'structural_similarity':
        d = 1.0 - ssim(A, B)
    else:
        raise ValueError(f"unknown metric mode: {mode}")
    return d
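
With `dist_metric` defined as above and the usual scikit-image aliases (`mse`, `arerr`, `ssim`), a hypothetical call looks like this; the inputs are made-up arrays:

import numpy as np
from skimage.metrics import mean_squared_error as mse
from skimage.metrics import adapted_rand_error as arerr
from skimage.metrics import structural_similarity as ssim

A = np.zeros((64, 64))
B = np.full((64, 64), 0.5)
print(dist_metric(A, B, 'mean_squared_error'))  # 0.25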
Example 6
def subtract(args):
    template_paths = glob.glob(args.template_dir + '/*')
    template_imgs = []
    for path in template_paths:
        template_img = cv.imread(path)
        template_imgs.append(template_img)

    base_img = cv.imread(args.base_img_path)
    target_img = cv.imread(args.target_img_path)

    base_w, base_h = data.get_wh(base_img)
    target_w, target_h = data.get_wh(target_img)

    max_w, max_h = max(base_w, target_w), max(base_h, target_h)
    base_img_frame = ImgFrame(base_img, (2 * max_w, 2 * max_h))
    target_img_frame = ImgFrame(target_img, (2 * max_w, 2 * max_h))

    offsets = get_offset(template_imgs, base_img_frame.img,
                         target_img_frame.img)
    print(f"offset: {offsets}")

    base_move = [offset if offset > 0 else 0 for offset in offsets]
    target_move = [-offset if offset <= 0 else 0 for offset in offsets]

    base_img_frame.move_img(*base_move)
    target_img_frame.move_img(*target_move)

    # Align the two images by searching over rotations
    lowest_error = math.inf
    best_angle = 0
    max_angle = 30
    step = 2
    # TODO: change to Parallel Processing.
    for angle in range(-max_angle, max_angle + 1, step):
        if angle == -max_angle:
            base_img_frame.rotate_image(-max_angle)
        else:
            base_img_frame.rotate_image(step)
        error = mse(base_img_frame.img_frame, target_img_frame.img_frame)
        if error < lowest_error:
            lowest_error = error
            best_angle = angle
    print(f"lowest:error is {lowest_error} in {best_angle} degree.")

    # return to best angle
    base_img_frame.rotate_image(best_angle - max_angle)

    threshold = 50
    sub_img_frame = (np.abs(target_img_frame.img_frame -
                            base_img_frame.img_frame) > threshold) * 255

    # inter_mask_frame = base_img_frame.img_frame * target_img_frame.img_frame > 0
    # sub_img_frame = sub_img_frame * inter_mask_frame

    # cv.imwrite("base_img.bmp", base_img_frame.img_frame.astype(np.float32))
    # cv.imwrite("target_img.bmp", target_img_frame.img_frame.astype(np.float32))
    cv.imwrite(args.sub_path, sub_img_frame.astype(np.float32))
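
One caveat in the final subtraction: if the frame arrays are uint8, the difference wraps around before `np.abs` is applied, so the thresholding sees garbage. Casting to a signed type first avoids this (a small self-contained check):

import numpy as np

a = np.array([10], dtype=np.uint8)
b = np.array([20], dtype=np.uint8)

print(np.abs(a - b))                                     # 246: uint8 wrap-around
print(np.abs(a.astype(np.int16) - b.astype(np.int16)))   # 10: correct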
Example 7
def compare_image(metric, ref, target):

    if metric == 'ssim':
        return ssim(ref, target, multichannel=True)

    if metric == 'psnr':
        return psnr(ref, target)

    if metric == 'mse':
        return mse(ref, target)

    return None
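
Note that `multichannel=True` was deprecated in scikit-image 0.19 in favor of `channel_axis`. On recent versions the SSIM branch would read as follows (a sketch with synthetic inputs):

import numpy as np
from skimage.metrics import structural_similarity as ssim

ref = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)
target = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)

# channel_axis replaces the deprecated multichannel flag
score = ssim(ref, target, channel_axis=-1)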
Example 8
def extract_features(ctx):
    # The parameters logged below (dataset, epochs, batch_size, ...) are
    # presumably unpacked from the click context `ctx`.

    logging.debug(f"Dataset: {dataset}")
    logging.debug(f"Debug: {debug}")
    logging.debug(f"Epochs: {epochs}")
    logging.debug(f"Batch Size: {batch_size}")
    logging.debug(f"Maximum batches per epoch: {max_batches}")
    logging.debug(f"Test-train split: {split*100}%")
    logging.debug(f"Base features: {n_base_features}")
    logging.debug(f"Latent features: {n_latent_features}")
    logging.debug(f"VAE Layers: {n_layers}")

    # TODO: define experiment name, make exp dir under predictions dir

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    data = HCSData.from_csv(dataset)  # Load dataset

    test_loader = torch.utils.data.DataLoader(  # Generate a testing loader
        data,
        batch_size=batch_size,
        shuffle=False)

    net = VAE_fm(lf=n_latent_features,
                 base=n_base_features)  # TODO: load from .pt
    logging.debug(net)

    if torch.cuda.device_count() > 1:  # If multiple gpu's
        net = torch.nn.DataParallel(net)  # Parallelize
    net.to(device)  # Move model to device

    try:
        for epoch in range(epochs):  # Iterate through epochs
            with torch.no_grad():
                for bn, (X, _) in tqdm(enumerate(test_loader),
                                       total=max_batches):

                    x = X.to(device)
                    o, u, logvar = net(x)

                    X = x.cpu().detach().numpy()
                    O = o.cpu().detach().numpy()
                    err = mse(X, O)
                    tqdm.write(str(err))  # TODO: Format nicely

                    # TODO: save feature maps (u, o) and predictions to exp dir
    except (KeyboardInterrupt, SystemExit):
        print("Session interrupted.")
Example 9
def compare_image(metric, ref, image):

    error = None
    if metric == 'ssim':
        error = ssim(image, ref, multichannel=True)

    if metric == 'mse':
        error = mse(image, ref)

    if metric == 'rmse':
        error = rmse(image, ref)

    if metric == 'nrmse':
        error = nrmse(image, ref)

    return error
Example 10
def test_calibrate_denoiser_extra_output():
    parameter_ranges = {'sigma': np.linspace(0.1, 1, 5) / 2}
    _, (parameters_tested,
        losses) = calibrate_denoiser(noisy_img,
                                     _denoise_wavelet,
                                     denoise_parameters=parameter_ranges,
                                     extra_output=True)

    all_denoised = [
        _invariant_denoise(noisy_img,
                           _denoise_wavelet,
                           denoiser_kwargs=denoiser_kwargs)
        for denoiser_kwargs in parameters_tested
    ]

    ground_truth_losses = [mse(img, test_img) for img in all_denoised]
    assert_(np.argmin(losses) == np.argmin(ground_truth_losses))
Example 11
    def run(self):
        """
        Method to execute when the thread has started
        """
        for (i, (pathA, imageA)) in enumerate(self.batch):
            for (y, (pathB, imageB)) in enumerate(images):

                # Skip the same images, those will always be equal
                if pathA == pathB:
                    continue

                # Skip if one of them has already been removed
                if pathA in removed or pathB in removed:
                    continue

                # Remove an image if it's too blurry or too similar
                if blur(imageA) < blur_threshold:
                    remove(pathA)
                elif blur(imageB) < blur_threshold:
                    remove(pathB)
                elif mse(imageA, imageB) < similarity_threshold:
                    remove(pathB)
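
`blur` is an external helper not shown in this snippet; a common implementation (an assumption here, not confirmed by the source) is the variance of the Laplacian:

import cv2 as cv

def blur(image):
    # Variance of the Laplacian: low values mean little high-frequency
    # detail, i.e. a blurry image.
    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
    return cv.Laplacian(gray, cv.CV_64F).var()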
Example 12
    def compare_full(self, img: Image, rect_color: RGBColor = (0, 0, 255), line_thickness=1, line_type=cv.LINE_8):
        cim1, cim2 = self.with_color_space('BGR'), img.with_color_space('BGR')
        gim1, gim2 = cim1.with_color_space('GRAY'), cim2.with_color_space('GRAY')
        score, diff = gim1.compare_ssim(gim2, full=True)
        diff = (diff * 255).astype("uint8")
        threshold = cv.threshold(diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1]
        contours = im.grab_contours(cv.findContours(threshold.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE))

        for contour in contours:
            x, y, w, h = cv.boundingRect(contour)

            cv.rectangle(cim1.array, (x, y), (x + w, y + h), rect_color, line_thickness, line_type)
            cv.rectangle(cim2.array, (x, y), (x + w, y + h), rect_color, line_thickness, line_type)

        mse_ = mse(gim1.array, gim2.array)

        return {
            "mse": mse_,
            "ssim": score,
            "image1": cim1,
            "image2": cim2,
            "diff": diff,
            "threshold": threshold
        }
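
The same diff-and-box pipeline can be written with plain OpenCV and scikit-image, without the custom Image wrapper (a sketch; the file paths are placeholders):

import cv2 as cv
from skimage.metrics import structural_similarity as ssim

img1 = cv.imread('a.png', cv.IMREAD_GRAYSCALE)
img2 = cv.imread('b.png', cv.IMREAD_GRAYSCALE)

score, diff = ssim(img1, img2, full=True)
diff = (diff * 255).astype('uint8')

# Otsu-threshold the inverted diff map, then box each changed region
thresh = cv.threshold(diff, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)[1]
contours, _ = cv.findContours(thresh, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
for c in contours:
    x, y, w, h = cv.boundingRect(c)
    cv.rectangle(img1, (x, y), (x + w, y + h), 255, 1)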
Example 13
def test_invariant_denoise():
    denoised_img = _invariant_denoise(noisy_img, _denoise_wavelet)

    denoised_mse = mse(denoised_img, test_img)
    original_mse = mse(noisy_img, test_img)
    assert_(denoised_mse < original_mse)
Example 14
def main():

    parser = argparse.ArgumentParser(
        description="Compare multiple images using a metric across estimators")

    parser.add_argument('--json',
                        type=str,
                        help="json with all build figure data",
                        required=True)

    args = parser.parse_args()

    p_json = args.json

    # extract data from json configuration
    json_data = None

    with open(p_json, 'r') as json_file:
        json_data = json.load(json_file)

    reference = json_data["reference"]
    estimators = json_data["estimators"]
    metric = json_data["metric"]

    p_folder = os.path.join(json_data['output'], json_data['nsamples'],
                            'processing')
    p_output = os.path.join(json_data['output'], json_data['nsamples'],
                            'metrics')

    print(f"Comparisons of {reference} with {estimators}")

    if not os.path.exists(p_output):
        os.makedirs(p_output)

    counter = 0

    for i, est in enumerate(estimators):

        # get current expected method for estimator
        method = json_data['methods'][i]

        default_estimator_path = os.path.join(p_folder, method, reference)

        # expected images path have same name
        images = os.listdir(default_estimator_path)

        # prepare output filename (zero-padded counter)
        counter_str = str(counter).zfill(3)

        output_filename = os.path.join(
            p_output, f"{counter_str}_{method}_{reference}_{est}_{metric}.csv")
        output_file = open(output_filename, 'w')

        for img in sorted(images):

            est1_path_image = os.path.join(p_folder, method, reference, img)
            est2_path_image = os.path.join(p_folder, method, est, img)

            img_rgb_1 = np.array(Image.open(est1_path_image))
            img_rgb_2 = np.array(Image.open(est2_path_image))

            scene_name = img.replace('.png', '')

            if metric == 'ssim':
                sentence = "{0};{1};{2};{3}\n".format(
                    scene_name, img, est,
                    ssim(img_rgb_1, img_rgb_2, multichannel=True))
                output_file.write(sentence)

            if metric == 'rmse':
                sentence = "{0};{1};{2};{3}\n".format(
                    scene_name, img, est, rmse(img_rgb_1, img_rgb_2))
                output_file.write(sentence)

            if metric == 'nrmse':
                sentence = "{0};{1};{2};{3}\n".format(
                    scene_name, img, est, nrmse(img_rgb_1, img_rgb_2))
                output_file.write(sentence)

            if metric == 'mse':
                sentence = "{0};{1};{2};{3}\n".format(
                    scene_name, img, est, mse(img_rgb_1, img_rgb_2))
                output_file.write(sentence)

            if metric == 'psnr':
                sentence = "{0};{1};{2};{3}\n".format(
                    scene_name, img, est, psnr(img_rgb_1, img_rgb_2))
                output_file.write(sentence)

            if metric == 'rmse_ssim':
                sentence = "{0};{1};{2};{3}\n".format(
                    scene_name, img, est,
                    rmse(img_rgb_1, img_rgb_2) /
                    ssim(img_rgb_1, img_rgb_2, multichannel=True))
                output_file.write(sentence)

            if metric == 'firefly':
                sentence = "{0};{1};{2};{3}\n".format(
                    scene_name, img, est, firefly_error(img_rgb_1, img_rgb_2))
                output_file.write(sentence)

        counter += 1
        output_file.close()
Example 15
def reconstructionAttack(model,
                         alpha=5000,
                         beta=100,
                         gamma=0.01,
                         delta=0.1,
                         save=True,
                         show=False):
    dae = False
    if (model.name == 'DAESoftMax'):
        dae = True

    # reload model
    model.load_state_dict(torch.load('models/' + model.name + '_model.pt'))

    # performance measures
    startTime = time.time()
    timeStr = time.strftime("%H:%M:%S", time.localtime(startTime))
    mse_all, nrmsev_all, ssmv_all, epochs = [], [], [], []

    # SGD
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(model.parameters(), lr=delta)

    test_x = get_orig()
    rec_x = np.zeros((40, 112, 92), dtype='float32')
    process = False

    print('DAE Flag is', dae)
    # if DAE images have different size and process is true
    encoder = Autoencoder(0)
    if (dae):
        test_x = encoder.encode(torch.Tensor(test_x))
        rec_x = np.zeros((40, 300), dtype='float32')

    for c in classes:
        print('\nReconstructing class', c)
        best_x, best_cost = '', 1
        if (dae):
            img = np.zeros_like(test_x[0].detach().numpy())
        else:
            img = np.zeros_like(test_x[0])
        ssmv, msev, nrmsev = 0, 0, 0
        rec, orig = '', ''

        if (dae):
            process = True
            best_x = img = np.zeros((1, 300), dtype='float32')

        b = beta

        for i in range(alpha):
            best_cost, best_x, b, img, stop = invert(model,
                                                     img,
                                                     criterion,
                                                     optimizer,
                                                     delta,
                                                     c_to_i(c),
                                                     best_cost,
                                                     best_x,
                                                     i,
                                                     b,
                                                     beta,
                                                     gamma,
                                                     processing=process)
            if stop:
                epochs.append(i)
                break

        if (dae):
            orig = test_x[c_to_i(c)].detach().numpy()
            rec = best_x.reshape(300)
            rec_x[c_to_i(c)] = rec

        else:
            orig = test_x[c_to_i(c)]
            rec = best_x.reshape(112, 92)
            rec_x[c_to_i(c)] = rec

        ssmv = ssm(rec, orig)
        msev = mse(rec, orig)
        nrmsev = nrmse(rec, orig)
        mse_all.append(msev)
        nrmsev_all.append(nrmsev)
        ssmv_all.append(ssmv)

        if (show or save):
            if (dae):
                rec = encoder.decode(torch.Tensor([rec])).view(
                    112, 92).detach().numpy()
                orig = encoder.decode(torch.Tensor([orig])).view(
                    112, 92).detach().numpy()

            fig = plt.figure(figsize=(10, 4))
            fig.suptitle("SSM: {:.1e}, NRMSE: {:.1f}".format(ssmv, nrmsev))
            ax1 = fig.add_subplot(1, 2, 1)
            ax1.imshow(rec, cmap='gray')
            ax2 = fig.add_subplot(1, 2, 2)
            ax2.imshow(orig, cmap='gray')
            plt.savefig('./data/results/' + model.name + '_class_' + c +
                        '.png')
            if show:
                plt.show()

    endTime = time.time()
    dur = endTime - startTime
    print("Duration in sec: " + str(int(dur)))

    # Calculating means performance values of all images
    print("\nAverage performance", model.name)
    print('MSE mean', np.mean(mse_all), 'with std of ', np.std(mse_all))
    print('NRMSE mean', np.mean(nrmsev_all), 'with std of +/-',
          np.std(nrmsev_all))
    print('SSM mean', np.mean(ssmv_all), 'with std of +/-', np.std(ssmv_all))
    print('Epochs mean', np.mean(epochs), 'with std of +/-', np.std(epochs))
Example 16
def get_mse(y_real, y_pred, is_prepare=True):
    if is_prepare:
        y_pred, y_real = prepare_data(y_pred, y_real)
    return mse(y_pred, y_real)
Example 17
def compute_mse(img1, img2):
    # auxiliary function: computes the MSE distance between images img1 and img2
    return mse(img1, img2)
Example 18
def train(name, csv_file, data_path, debug, epochs, batch_size, max_batches,
          split, parallel, n_base_features, n_latent_features, n_layers, beta):

    # If debug, set logger level and log parameters
    if debug:
        logging.getLogger().setLevel(logging.DEBUG)
        logging.debug(f"CSV File: {csv_file}")
        logging.debug(f"Data Path: {data_path}")
        logging.debug(f"Debug: {debug}")
        logging.debug(f"Epochs: {epochs}")
        logging.debug(f"Batch Size: {batch_size}")
        logging.debug(f"Maximum batches per epoch: {max_batches}")
        logging.debug(f"Test-train split: {split*100}%")
        logging.debug(f"Base features: {n_base_features}")
        logging.debug(f"Latent features: {n_latent_features}")
        logging.debug(f"VAE Layers: {n_layers}")

    # Set up gpu/cpu device
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Dataset
    data = HCSData.from_csv(csv_file, data_path)  # Load dataset
    logging.debug('Data loaded')
    train, test = data.split(split)  # Split data into train and test
    # data[0][0].shape
    train_loader = torch.utils.data.DataLoader(  # Generate a training loader
        train, batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(  # Generate a testing loader
        test, batch_size=batch_size, shuffle=True)

    net = VAE_fm(lf=n_latent_features, base=n_base_features)
    logging.debug(net)

    # Move Model to GPU
    if torch.cuda.device_count() > 1:  # If multiple gpu's
        net = torch.nn.DataParallel(net)  # Parallelize
    net.to(device)  # Move model to device

    tr_writer = SummaryWriter(
        f"{data_path}/runs/training_{time.strftime('%Y-%m-%d')}_{name}")
    vl_writer = SummaryWriter(
        f"{data_path}/runs/validation_{time.strftime('%Y-%m-%d')}_{name}")

    # Define loss and optimizer
    # criterion = torch.nn.MSELoss()
    vae_loss = Loss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0.01)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)

    print("Training...")

    try:
        # Training
        for epoch in range(epochs):  # Iterate through epochs
            cum_loss = 0
            msg = f"Training epoch {epoch+1}: "
            ttl = max_batches or len(train_loader)  # Iter through batches
            for batch_n, (X, _) in tqdm(enumerate(train_loader), msg, ttl):

                if batch_n > max_batches:
                    break
                x = X.to(device)  # Move batch samples to gpu

                o, u, logvar = net(x)  # Forward pass
                optimizer.zero_grad()  # Reset gradients

                loss = vae_loss(o, x, u, logvar, beta)  # Compute Loss
                loss.backward()  # Propagate loss, compute gradients
                optimizer.step()  # Update weights

                cum_loss += loss.item()

                in_grid = torchvision.utils.make_grid(
                    x.view(3 * batch_size, 1, x.shape[-2], x.shape[-1]),
                    nrow=3
                )
                out_grid = torchvision.utils.make_grid(
                    o.view(3 * batch_size, 1, o.shape[-2], o.shape[-1]),
                    nrow=3
                )

                tr_writer.add_image('Input', in_grid,
                                    epoch * max_batches + batch_n)
                tr_writer.add_image('Output', out_grid,
                                    epoch * max_batches + batch_n)
                # writer.add_image('Mean', u_grid, epoch *
                #                  max_batches + batch_n)
                # writer.add_image('Logvar', logvar_grid, epoch *
                #                  max_batches + batch_n)

                tr_writer.add_scalar(
                    'loss',
                    loss,
                    epoch * max_batches + batch_n
                )
                tr_writer.add_scalar(
                    'mse',
                    mse(x.cpu().detach().numpy(), o.cpu().detach().numpy()),
                    epoch * max_batches + batch_n
                )

            with torch.no_grad():

                val_loss = 0
                val_mse = []
                # Iter through batches
                msg = f"Testing epoch {epoch+1}: "
                ttl = max_batches or len(test_loader)
                for batch_n, (X, _) in tqdm(enumerate(test_loader), msg, ttl):

                    if batch_n > 16:
                        break

                    # Move batch samples to gpu
                    x = X.to(device)
                    o, u, logvar = net(x)  # Forward pass

                    loss = vae_loss(o, x, u, logvar)
                    val_loss += loss.item()

                    val_mse.append(mse(x.cpu().detach().numpy(),
                                       o.cpu().detach().numpy()))

                vl_writer.add_scalar(
                    'loss',
                    val_loss,
                    epoch * max_batches + batch_n
                )
                vl_writer.add_scalar(
                    'mse',
                    np.mean(val_mse),
                    epoch * max_batches + batch_n
                )

            scheduler.step(-cum_loss)

            torch.save(net.state_dict(),
                       f"{data_path}/models/{net.__class__.__name__}"
                       f"_base-{n_base_features}_latent-{n_latent_features}"
                       f"_{time.strftime('%Y-%m-%d_%H-%M')}_{name}.pt"
                       )

    except (KeyboardInterrupt, SystemExit):
        print("Saving model...")
        torch.save(net.state_dict(),
                   f"{data_path}/models/{net.__class__.__name__}"
                   f"_base-{n_base_features}_latent-{n_latent_features}"
                   f"_{time.strftime('%Y-%m-%d_%H-%M')}_{name}.pt"
                   )
        print("Model saved.")
Example 19
def get_rmse(y_real, y_pred, is_prepare=True):
    if is_prepare:
        y_pred, y_real = prepare_data(y_pred, y_real)
    return np.sqrt(mse(y_pred, y_real))
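
These two helpers encode the standard relation RMSE = sqrt(MSE). A tiny self-contained check (`prepare_data` is skipped here, since it is not shown in the source):

import numpy as np

y_real = np.array([1.0, 2.0, 3.0])
y_pred = np.array([1.1, 1.9, 3.2])

mse_val = np.mean((y_pred - y_real) ** 2)
rmse_val = np.sqrt(mse_val)  # RMSE is the square root of MSE
print(mse_val, rmse_val)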
Example 20
sigma_range = np.arange(sigma / 2, 1.5 * sigma, 0.025)

parameters_tested = [{
    'sigma': sigma,
    'convert2ycbcr': True,
    'wavelet': 'db2',
    'multichannel': True
} for sigma in sigma_range]

denoised_invariant = [
    _invariant_denoise(noisy, _denoise_wavelet, denoiser_kwargs=params)
    for params in parameters_tested
]

self_supervised_loss = [mse(img, noisy) for img in denoised_invariant]
ground_truth_loss = [mse(img, image) for img in denoised_invariant]

opt_idx = np.argmin(self_supervised_loss)
plot_idx = [0, opt_idx, len(sigma_range) - 1]

get_inset = lambda x: x[25:225, 100:300]

plt.figure(figsize=(10, 12))

gs = gridspec.GridSpec(3, 3)
ax1 = plt.subplot(gs[0, :])
ax2 = plt.subplot(gs[1, :])
ax_image = [plt.subplot(gs[2, i]) for i in range(3)]

ax1.plot(sigma_range, self_supervised_loss, label='Self-supervised loss')
Example 21
import argparse
import imutils
import random
import cv2
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import mean_squared_error as mse

# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True, help="first input image")
ap.add_argument("-s", "--second", required=True, help="second")
args = vars(ap.parse_args())

# load the two input images
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])

# convert the images to grayscale
grayA = cv2.cvtColor(imageA, cv2.COLOR_BGR2GRAY)
grayB = cv2.cvtColor(imageB, cv2.COLOR_BGR2GRAY)

# compute the Structural Similarity Index (SSIM) between the two
# images, ensuring that the difference image is returned
s = ssim(grayA, grayB, full=True)
m = mse(grayA, grayB)

m = 1 - (1 / m)

score = s[0] * 100 * m * random.random() * 100
s1 = 100000 * (s[0] + m - 1) + 1000 * random.random() + 11000
print("SSIM: {}".format(s[0]))
print("MSE: {}".format(m))
print(s1)
Example 22
orig_image = cv2.imread(filename, cv2.IMREAD_COLOR)

if args.downsample:
    orig_image = orig_image[::args.downsample, ::args.downsample]

sizes = []
mses = []
ssims = []
compression_times = []
decompression_times = []

for quality in tqdm.trange(101):
    data, image, time_c, time_d = check_compression(orig_image, quality)
    ssims.append(ssim(orig_image, image, multichannel=True))
    sizes.append(len(data))
    mses.append(mse(image, orig_image))
    compression_times.append(time_c)
    decompression_times.append(time_d)

size1 = len(check_lossless_compression(image, 1))
size9 = len(check_lossless_compression(image, 9))
print('PNG compression range:', size9, size1)

size = image.nbytes

fig = plt.figure(filename + ' JPEG performance')

ax = fig.add_subplot(2, 2, 1)
ax.set_title('Size ratio (blue) and Structural Similarity Index (orange)')
plt.plot(np.array(sizes) / size)
plt.axhline(size1 / size, color='red')
Example 23
from skimage.metrics import structural_similarity as ssim
from skimage.metrics import mean_squared_error as mse
from PIL import Image
import numpy as np
import logging

# Create and configure logger
logging.basicConfig(format='%(asctime)s %(message)s', filemode='w')
logger = logging.getLogger()
logger.setLevel(logging.INFO)

original = np.array(Image.open("ImagenesGen/original.jpg").convert("LA"))
restaurada = np.array(Image.open("ImagenesGen/Ivan2.jpg").convert("LA"))

logger.info("------ SIMILAR A BLANCO NEGRO ------")
logger.info("         ------ SSIM ------")
ssim_recuperado = ssim(original, restaurada, multichannel=True)
porcentaje_recuperado_ssim = (ssim_recuperado) * 100
logger.info("Las imagene se parece aproximadamente en un: " +
            str(porcentaje_recuperado_ssim) + "%")

logger.info("         ------ MSE ------")
mse_recuperado = mse(original, restaurada)
#porcentaje_recuperado_mse = 100 - ((100 * mse_recuperado) / mse_original)
logger.info("Se recuper aproximadamente un: " + str(mse_recuperado))
Example 24
def test_invariant_denoise_3d():
    denoised_img_3d = _invariant_denoise(noisy_img_3d, _denoise_wavelet)

    denoised_mse = mse(denoised_img_3d, test_img_3d)
    original_mse = mse(noisy_img_3d, test_img_3d)
    assert_(denoised_mse < original_mse)
Example 25
    def compare_mse(self, img: Image):
        return mse(self.with_color_space('GRAY').array,
                   img.with_color_space('GRAY').array)
Example 26
original_bw = np.array(Image.open("imagenes/original.jpg").convert("LA"))
danada_bw = np.array(Image.open("imagenes/danada.jpg").convert("LA"))
restaurada_bw = np.array(Image.open("imagenes/restaurada.jpg").convert("LA"))

original_color = np.array(Image.open("imagenes/original.jpg"))
danada_color = np.array(Image.open("imagenes/danada.jpg"))
restaurada_color = np.array(Image.open("imagenes/restaurada.jpg"))

logger.info("------ SIMILAR A BLANCO NEGRO ------")
logger.info("         ------ SSIM ------")
ssim_original_bw = ssim(original_bw, danada_bw, multichannel=True)
ssim_recuperado_bw = ssim(original_bw, restaurada_bw, multichannel=True)
porcentaje_recuperado_bw_ssim = (ssim_recuperado_bw - ssim_original_bw) * 100
logger.info("Se recupero aproximadamente un: " + str(porcentaje_recuperado_bw_ssim) + "%")

logger.info("         ------ MSE ------")
mse_original_bw = mse(original_bw, danada_bw)
mse_recuperado_bw = mse(original_bw, restaurada_bw)
porcentaje_recuperado_bw_mse = 100 - ((100 * mse_recuperado_bw) / mse_original_bw)
logger.info("Se recupero aproximadamente un: " + str(porcentaje_recuperado_bw_mse) + "%")

logger.info("------ SIMILAR A COLOR ------")
logger.info("     ------ SSIM ------")
ssim_original_color = ssim(original_color, danada_color, multichannel=True)
ssim_recuperado_color = ssim(original_color, restaurada_color, multichannel=True)
porcentaje_recuperado_bw_ssim = (ssim_recuperado_color - ssim_original_color) * 100
logger.info("Se recupero aproximadamente un: " + str(porcentaje_recuperado_bw_ssim) + "%")

logger.info("      ------ MSE ------")
mse_original_color = mse(original_color, danada_color)
mse_recuperado_color = mse(original_color, restaurada_color)
porcentaje_recuperado_bw_mse = 100 - ((100 * mse_recuperado_color) / mse_original_color)
Example 27
 def removeLoosers(self):
     rangSum = (len(self.oldIndivids) + 1) / 2 * len(self.oldIndivids)
     self.individs = []
     bord = self.size - self.startize // 2
     self.size = int(self.startize * 1.5)
     maxbord = len(self.oldIndivids)
     if self.oldIndivids:
         for i in self.oldIndivids:
             if i.rang > bord:
                 i.survival += 1 / ((maxbord - i.rang + 1)**2)
                 self.individs.append(i)
     rng = self.size - len(self.individs)
     halfrang = len(self.oldIndivids) / 2
     if rng > 0:
         metricsList = {}
         for i in range(len(self.newgeneratedIndivids)):
             dat1 = img_as_float(self.newgeneratedIndivids[i].img)
             metricsval = 0
             for j in self.oldIndivids:
                 dat2 = img_as_float(j.img)
                 countmetr = mse(dat1, dat2)
                 if countmetr <= 0.005:
                     self.newgeneratedIndivids[i].lookslike = True
                 metricsval += countmetr * (
                     (j.rang**2) / halfrang**2 - 1) / 3
             if metricsval != -1:
                 metricsval *= -1
                 self.newgeneratedIndivids[i].fit = metricsval
                 metricsList[i] = metricsval
         list_keys = sorted(metricsList, key=metricsList.get)
         leftToStay = min(
             rng - rng // 5, len(list_keys)
         )  # 80% elitism; for mse, increasing it turned out to be the better choice
         for i in range(len(list_keys)):
             self.newgeneratedIndivids[list_keys[i]].rang = i + 1
         list_keys = list(reversed(list_keys))
         for i in self.individs:
             i.rang = len(self.newgeneratedIndivids) + i.rang - bord
         alreadyin = []
         for i in range(self.newgenarateSize):
             if leftToStay > 0:
                 if self.newgeneratedIndivids[
                         list_keys[i]].fit not in alreadyin:
                     self.individs.append(
                         self.newgeneratedIndivids[list_keys[i]])
                     alreadyin.append(
                         self.newgeneratedIndivids[list_keys[i]].fit)
                     leftToStay -= 1
             else:
                 break
         newindlen = len(self.newgeneratedIndivids)
         allPie = (newindlen + 1) / 2 * newindlen - (
             2 * newindlen - leftToStay + 1) / 2 * leftToStay
         leftToStay = min(self.size - len(self.individs), len(list_keys))
         while leftToStay > 0:
             piePart = randint(1, allPie)
             countPieParts = 0
             for i in self.newgeneratedIndivids:
                 if (i not in self.individs) and (i.fit not in alreadyin):
                     countPieParts += i.rang
                     if piePart <= countPieParts:
                         self.individs.append(i)
                         allPie -= i.rang
                         leftToStay -= 1
                         alreadyin.append(i.fit)
                         break
         rangList = {}
         countSimilar = 0
         for i in range(len(self.individs)):
             rangList[self.individs[i].rang] = i
             if self.individs[i].lookslike:
                 countSimilar += 1
             self.individs[i].lookslike = False
             datcheck1 = img_as_float(self.individs[i].img)
             for j in range(len(self.individs)):
                 datcheck2 = img_as_float(self.individs[j].img)
                 countmetrch = mse(datcheck1, datcheck2)
                 if countmetrch != 0 and countmetrch <= 0.011:
                     self.individs[i].lookslike = True
                     break
         if countSimilar >= len(self.individs) * 2 / 3:
             self.cont = True
         else:
             self.cont = False
         countSimilar = 0
         for i in self.individs:
             if i.lookslike:
                 countSimilar += 1
         if countSimilar >= len(self.individs) / 2:
             self.cont = True
         list_keys = list(rangList.keys())
         list_keys.sort()
         for i in range(len(list_keys)):
             self.individs[rangList[list_keys[i]]].rang = i + 1
     self.oldIndivids = self.individs
Example 28
        temp_points = random.choices(range(pts1.shape[0]), k=3)

        a = pts1[temp_points]
        b = pts2[temp_points]

        M = cv.getAffineTransform(b, a)
        image_path = target_images_two[i]
        target_image = cv.imread(image_path)
        converted = cv.warpAffine(target_image, M, (image.shape[0], image.shape[1]))
        # despite its name, mse_value holds an SSIM score (higher is better)
        mse_value = ssim(main_images, converted, multichannel=True)
        if mse_value > best_value:
            best = converted
            best_value = mse_value
        k += 1

    temp_mse = mse(main_images, best)
    temp_ssim = ssim(main_images, best, multichannel=True)
    all_mse.append(temp_mse)
    all_ssim.append(temp_ssim)
    print("MSE = ", temp_mse)
    print("SSIM = ", temp_ssim)
    cv.imwrite("../output/converted_Attack_2_{}.jpg".format(i + 1), best)

all_mp = np.array(all_mp)
all_mse = np.array(all_mse)
all_ssim = np.array(all_ssim)
print("mean MP = ", all_mp.mean())
print("mean MSE = ", all_mse.mean())
print("mean SSIM = ", all_ssim.mean())
print("std MP = ", all_mp.std())
print("std MSE = ", all_mse.std())
Example 29
def main(dataset_name=None):
    cuda = True if torch.cuda.is_available() else False
    IMAGE_SIZE = np.array([256, 256])
    opt.dataset_name = dataset_name
    files = opt.dataroot + opt.dataset_name + '_' + opt.phase + '.txt'
    comp_paths = []
    harmonized_paths = []
    mask_paths = []
    real_paths = []
    with open(files, 'r') as f:
        for line in f.readlines():
            name_str = line.rstrip()
            if opt.evaluation_type == 'our':
                harmonized_path = os.path.join(
                    opt.result_root, name_str.replace(".jpg",
                                                      "_harmonized.jpg"))
                if os.path.exists(harmonized_path):
                    real_path = os.path.join(
                        opt.result_root, name_str.replace(".jpg", "_real.jpg"))
                    mask_path = os.path.join(
                        opt.result_root, name_str.replace(".jpg", "_mask.jpg"))
                    comp_path = os.path.join(
                        opt.result_root, name_str.replace(".jpg", "_comp.jpg"))
            elif opt.evaluation_type == 'ori':
                comp_path = os.path.join(opt.dataroot, 'composite_images',
                                         line.rstrip())
                harmonized_path = comp_path
                if os.path.exists(comp_path):
                    real_path = os.path.join(opt.dataroot, 'real_images',
                                             line.rstrip())
                    name_parts = real_path.split('_')
                    real_path = real_path.replace(
                        ('_' + name_parts[-2] + '_' + name_parts[-1]), '.jpg')
                    mask_path = os.path.join(opt.dataroot, 'masks',
                                             line.rstrip())
                    mask_path = mask_path.replace(('_' + name_parts[-1]),
                                                  '.png')

            real_paths.append(real_path)
            mask_paths.append(mask_path)
            comp_paths.append(comp_path)
            harmonized_paths.append(harmonized_path)
    count = 0

    mse_scores = 0
    sk_mse_scores = 0
    fmse_scores = 0
    psnr_scores = 0
    fpsnr_scores = 0
    ssim_scores = 0
    fssim_scores = 0
    fore_area_count = 0
    fmse_score_list = []
    image_size = 256

    for i, harmonized_path in enumerate(tqdm(harmonized_paths)):
        count += 1

        harmonized = Image.open(harmonized_path).convert('RGB')
        real = Image.open(real_paths[i]).convert('RGB')
        mask = Image.open(mask_paths[i]).convert('1')
        if mask.size[0] != image_size:
            harmonized = tf.resize(harmonized, [image_size, image_size],
                                   interpolation=Image.BICUBIC)
            mask = tf.resize(mask, [image_size, image_size],
                             interpolation=Image.BICUBIC)
            real = tf.resize(real, [image_size, image_size],
                             interpolation=Image.BICUBIC)

        harmonized_np = np.array(harmonized, dtype=np.float32)
        real_np = np.array(real, dtype=np.float32)

        harmonized = tf.to_tensor(harmonized_np).unsqueeze(0).cuda()
        real = tf.to_tensor(real_np).unsqueeze(0).cuda()
        mask = tf.to_tensor(mask).unsqueeze(0).cuda()

        mse_score = mse(harmonized_np, real_np)
        psnr_score = psnr(real_np, harmonized_np, data_range=255)

        fore_area = torch.sum(mask)
        fmse_score = torch.nn.functional.mse_loss(
            harmonized * mask, real * mask) * 256 * 256 / fore_area

        mse_score = mse_score.item()
        fmse_score = fmse_score.item()
        fore_area_count += fore_area.item()
        fpsnr_score = 10 * np.log10((255**2) / fmse_score)

        ssim_score, fssim_score = pytorch_ssim.ssim(
            harmonized, real, window_size=opt.ssim_window_size, mask=mask)

        psnr_scores += psnr_score
        mse_scores += mse_score
        fmse_scores += fmse_score
        fpsnr_scores += fpsnr_score
        ssim_scores += ssim_score
        fssim_scores += fssim_score

        image_name = harmonized_path.split("/")
        image_fmse_info = (image_name[-1], round(fmse_score, 2),
                           fore_area.item(), round(mse_score, 2),
                           round(psnr_score, 2), round(fpsnr_score, 4))
        fmse_score_list.append(image_fmse_info)

    mse_scores_mu = mse_scores / count
    psnr_scores_mu = psnr_scores / count
    fmse_scores_mu = fmse_scores / count
    fpsnr_scores_mu = fpsnr_scores / count
    ssim_scores_mu = ssim_scores / count
    fssim_score_mu = fssim_scores / count

    print(count)
    mean_score = "%s MSE %0.2f | PSNR %0.2f | SSIM %0.4f | fMSE %0.2f | fPSNR %0.2f | fSSIM %0.4f" % (
        opt.dataset_name, mse_scores_mu, psnr_scores_mu, ssim_scores_mu,
        fmse_scores_mu, fpsnr_scores_mu, fssim_score_mu)
    print(mean_score)

    return mse_scores_mu, fmse_scores_mu, psnr_scores_mu, fpsnr_scores_mu
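
The fPSNR value above uses the standard identity PSNR = 10 * log10(MAX^2 / MSE). A quick self-contained check against scikit-image, assuming 8-bit data:

import numpy as np
from skimage.metrics import mean_squared_error as mse
from skimage.metrics import peak_signal_noise_ratio as psnr

a = np.random.randint(0, 256, (32, 32), dtype=np.uint8)
b = np.random.randint(0, 256, (32, 32), dtype=np.uint8)

m = mse(a, b)
assert np.isclose(psnr(a, b, data_range=255), 10 * np.log10(255**2 / m))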