Example #1
0
    def detect_moon(self):
        """Measure per-channel moon signal (digital units) from a fixed-size
        window centered on the expected moon location.

        Returns a list of MoonMeasure, one per camera channel, each holding the
        background-subtracted DU count inside a spherical-kernel mask; the list
        is also stored in self.measures.
        """
        # assumes self.moon_loc is an (x, y) pixel coordinate -- TODO confirm
        cx, cy = int(self.moon_loc[0]), int(self.moon_loc[1])
        # 49 x 59 pixel window around the moon
        win = self.image[cy - 24:cy + 25, cx - 29:cx + 30, :]

        if 0:
            # TODO: what transformation would make this work?
            win = ImageProc.adjust_gamma(win / 662 * 1023, 2.2,
                                         inverse=1, max_val=1023)

        h, w = win.shape[0:2]
        s, c = 19, 18  # mask radius and number of top rows to blank
        mask = np.zeros((h, w), dtype='uint8')
        kern = ImageProc.bsphkern(s * 2 + 1).astype('uint8')
        mask[h // 2 - s:h // 2 + s + 1, w // 2 - s:w // 2 + s + 1] = kern
        mask[:c, :] = 0  # zero out top rows of the mask

        if SHOW_MEASURES:
            # overlay the mask in blue on top of the (clipped) window
            mask_img = mask.reshape((h, w, 1)) * np.array([255, 0, 0]).reshape((1, 1, 3))
            win_img = np.clip(win, 0, 255).astype('uint8')
            merged = ImageProc.merge((mask_img.astype('uint8'), win_img))
            plt.imshow(np.flip(merged, axis=2))
            plt.show()

        flat_mask = mask.flatten().astype('bool')
        bg_mask = np.logical_not(flat_mask)
        n = np.sum(flat_mask)

        measures = []
        for i, cam in enumerate(self.cam):
            channel = win[:, :, i].flatten()
            raw_du = np.sum(channel[flat_mask])
            bg_du = np.mean(channel[bg_mask])
            # subtract mean background level scaled by masked pixel count
            measures.append(MoonMeasure(self, i, raw_du - bg_du * n))
        self.measures = measures
        return measures
Example #2
0
    def detect_source(self, kernel, total_radiation=False):
        """Locate a light source in self.image by template matching with the
        given PSF kernel, then measure its per-channel signal statistics.

        Args:
            kernel: (h, w, 3) point-spread-function template; h and w must be odd.
            total_radiation: kept for interface compatibility (unused here).

        Returns:
            LabMeasure(self, mean, std, n) where mean/std are per-channel
            statistics over the high-response mask and n is the mask pixel count.
        """
        assert kernel.shape[0] % 2 and kernel.shape[
            1] % 2, 'kernel width and height must be odd numbers'
        # scale template to expected DU level for this gain/exposure
        kernel = self.gain * self.exposure * kernel
        fkernel = ImageProc.fuzzy_kernel(kernel, self.impulse_spread)
        method = [cv2.TM_CCORR_NORMED, cv2.TM_SQDIFF][0]
        corr = cv2.matchTemplate(self.image.astype(np.float32),
                                 fkernel.astype(np.float32), method)
        _, _, minloc, maxloc = cv2.minMaxLoc(
            corr)  # minval, maxval, minloc, maxloc
        # SQDIFF-style methods give the best match at the minimum, others at the maximum
        loc = minloc if method in (cv2.TM_SQDIFF,
                                   cv2.TM_SQDIFF_NORMED) else maxloc
        # np.int was removed in NumPy 1.24; use the builtin instead
        loc_i = tuple(np.round(np.array(loc)).astype(int))

        if SHOW_LAB_MEASURES:
            sc = 1024 / self.image.shape[1]
            # np.float was removed in NumPy 1.24; plain float is equivalent (float64)
            img = cv2.resize(self.image.astype(float), None, fx=sc,
                             fy=sc) / np.max(self.image)
            center = np.array(loc) + np.flip(fkernel.shape[:2]) / 2
            img = cv2.circle(
                img, tuple(np.round(center * sc).astype(int)),
                round((kernel.shape[0] - self.impulse_spread) / 2 * sc),
                [0, 0, 1.0])
            cv2.imshow('te', img)
            print('waiting...', end='', flush=True)
            cv2.waitKey()
            print('done')

        if True:
            # use the fuzzy kernel's high-response area as a mask to
            # calculate mean and deviation of the source window
            kh, kw = np.array(fkernel.shape[:2]) // 2
            win = self.image[loc_i[1]:loc_i[1] + kh * 2 + 1,
                             loc_i[0]:loc_i[0] + kw * 2 + 1, :].reshape(
                                 (-1, 3))
            kernel_max = np.max(np.sum(kernel, axis=2))
            # keep only pixels where the kernel response is > 95% of its peak
            mask = np.sum(fkernel, axis=2) / kernel_max > 0.95
            mean = np.median(win[mask.flatten(), :], axis=0)
            std = np.std(win[mask.flatten(), :], axis=0)
            n = np.sum(mask)

            tmp = np.zeros(self.image.shape[:2])
            tmp[loc_i[1]:loc_i[1] + kh * 2 + 1,
                loc_i[0]:loc_i[0] + kw * 2 + 1] = mask
            img_m = tmp

        else:
            # alternative (disabled): calculate a correlation channel over the
            # whole image and threshold it
            k = kernel.shape[0] // 2
            corr = cv2.matchTemplate(
                self.image.astype(np.float32),
                kernel[k:k + 1, k:k + 1, :].astype(np.float32), method)

            # calculate mean & variance of kernel area using corr channel
            win = corr[loc_i[1] - k:loc_i[1] + k + 1,
                       loc_i[0] - k:loc_i[0] + k + 1]
            corr_mean = np.mean(win)
            corr_std = np.std(win)

            # threshold using mean - sd
            _, mask = cv2.threshold(corr, corr_mean - corr_std, 1,
                                    cv2.THRESH_BINARY)

            # dilate & erode to remove inner spots
            krn1 = ImageProc.bsphkern(round(1.5 * corr.shape[0] / 512) * 2 + 1)
            krn2 = ImageProc.bsphkern(round(2 * corr.shape[0] / 512) * 2 + 1)
            mask = cv2.dilate(mask, krn1, iterations=1)  # remove holes
            mask = cv2.erode(mask, krn2, iterations=1)  # same size
            # np.bool was removed in NumPy 1.24
            mask = mask.astype(bool)

            # use result as a mask to calculate mean and deviation
            # BUG FIX: this branch used to assign `var`, but the shared return
            # statement reads `std`, which would raise NameError if enabled
            mean = np.mean(self.image.reshape((-1, 3))[mask.flatten()], axis=0)
            std = np.std(self.image.reshape((-1, 3))[mask.flatten()], axis=0)
            n = np.sum(mask)
            # NOTE(review): the debug section below uses kh, kw and img_m which
            # are only defined in the enabled branch above -- fix before
            # switching branches

        if self.debug:
            sc = 1024 / self.image.shape[1]
            img_m = np.repeat(np.atleast_3d(img_m.astype(np.uint8) * 127),
                              3,
                              axis=2)
            merged = ImageProc.merge(
                (self.image.astype(np.float32) / np.max(self.image),
                 img_m.astype(np.float32) / 255))
            img = cv2.resize(merged, None, fx=sc, fy=sc)
            cv2.imshow('te', img)
            arr_n, lims, _ = plt.hist(self.image[:, :, 1].flatten(),
                                      bins=np.max(self.image) + 1,
                                      log=True,
                                      histtype='step')
            plt.hist(win[mask.flatten(), 1].flatten(),
                     bins=np.max(self.image) + 1,
                     log=True,
                     histtype='step')
            x = np.linspace(0, np.max(win), np.max(win) + 1)
            # first histogram bin past the measured mean with non-zero count
            i = list(np.logical_and(lims[1:] > mean[1], arr_n > 0)).index(True)
            plt.plot(x, arr_n[i] * np.exp(-((x - mean[1]) / std[1])**2))
            plt.ylim(1e-1, 1e6)
            plt.figure()
            plt.imshow(self.image[loc_i[1]:loc_i[1] + kh * 2 + 1,
                                  loc_i[0]:loc_i[0] + kw * 2 + 1, :] /
                       np.max(self.image))
            print('waiting (%.1f, %.1f)...' % (mean[1], std[1]),
                  end='',
                  flush=True)
            cv2.waitKey(1)
            plt.show()
            print('done')

        return LabMeasure(self, mean, std, n)
Example #3
0
    # Encode the selected image files into a video, optionally blending frames
    # to simulate a long exposure time.
    writer = cv2.VideoWriter(target_file, cv2.VideoWriter_fourcc(*codecs[0]),
                             framerate, (dw, dh))
    imgs = []
    times = []
    try:
        for i, f in enumerate(img_files):
            if i % skip_mult != 0:
                continue
            tools.show_progress(
                len(img_files) // skip_mult, i // skip_mult)
            img = cv2.imread(os.path.join(folder, f), cv2.IMREAD_COLOR)
            if sw != dw or sh != dh:
                img = cv2.resize(img, (dw, dh),
                                 interpolation=cv2.INTER_AREA)
            if exposure:
                # blend images to simulate blur due to long exposure times
                # assumes filenames start with a '%Y-%m-%dT%H%M%S' timestamp
                timestr = f[0:17]
                time = datetime.datetime.strptime(timestr,
                                                  '%Y-%m-%dT%H%M%S')
                imgs.append(img)
                times.append(time)
                # BUG FIX: np.where returns a *tuple* of index arrays, so
                # len(idxs) was always 1 -- take element [0] so the count of
                # frames inside the exposure window is actually checked
                idxs = np.where(
                    np.array(times) > time -
                    datetime.timedelta(seconds=exposure))[0]
                if len(idxs) < np.ceil(exposure):
                    continue
                img = ImageProc.merge(np.array(imgs)[idxs])

            writer.write(img)
    finally:
        # release even on error so the container file is finalized
        writer.release()
Example #4
0
def texture_noise(model, support=None, L=None, noise_sd=SHAPE_MODEL_NOISE_LV['lo'],
                  len_sc=SHAPE_MODEL_NOISE_LEN_SC, max_rng=None, max_n=1e4, hf_noise=True):
    """Perturb the model's texture with spatially correlated Gaussian-process
    noise (plus optional high-frequency noise), sampled on a downscaled grid.

    Args:
        model: shape model; its load_texture() result is perturbed.
        support: model providing vertices/texcoords for GP distances
                 (defaults to `model`).
        L: optional precomputed Cholesky factor of the GP covariance;
           when given, kernel construction is skipped entirely.
        noise_sd: noise standard deviation scale.
        len_sc: GP length scale relative to the model extent.
        max_rng: model extent; derived from support vertices when None.
        max_n: approximate max number of GP sample points.
        hf_noise: add an extra high-frequency FFT-generated noise field.

    Returns:
        (noisy_tex, std of the combined error field, L), or [None] * 3 when
        the model has no texture.
    """
    tex = model.load_texture()
    if tex is None:
        print('tools.texture_noise: no texture loaded')
        return [None] * 3

    # downscale the sampling grid so that at most ~max_n GP points are used
    r = np.sqrt(max_n / np.prod(tex.shape[:2]))
    # np.int was removed in NumPy 1.24; builtin int is equivalent here
    ny, nx = (np.array(tex.shape[:2]) * r).astype(int)
    n = nx * ny
    tx_grid_xx, tx_grid_yy = np.meshgrid(np.linspace(0, 1, nx), np.linspace(0, 1, ny))
    tx_grid = np.hstack((tx_grid_xx.reshape((-1, 1)), tx_grid_yy.reshape((-1, 1))))

    support = support if support else model
    points = np.array(support.vertices)
    max_rng = np.max(np.ptp(points, axis=0)) if max_rng is None else max_rng

    # use vertices for distances, find corresponding vertex for each pixel
    y_cov = None
    if L is None:
        try:
            from sklearn.gaussian_process.kernels import Matern, WhiteKernel
        except ImportError:
            # narrow except: a bare `except:` would also swallow
            # KeyboardInterrupt/SystemExit
            print('Requires scikit-learn, install using "conda install scikit-learn"')
            sys.exit()

        kernel = 1.0 * noise_sd * Matern(length_scale=len_sc * max_rng, nu=1.5) \
                 + 0.5 * noise_sd * Matern(length_scale=0.1 * len_sc * max_rng, nu=1.5) \
                 + WhiteKernel(
            noise_level=1e-5 * noise_sd * max_rng)  # white noise for positive definite covariance matrix only

        # texture coordinates given so that x points left and *Y POINTS UP*
        tex_img_coords = np.array(support.texcoords)
        tex_img_coords[:, 1] = 1 - tex_img_coords[:, 1]
        _, idxs = find_nearest_each(haystack=tex_img_coords, needles=tx_grid)
        tx2vx = support.texture_to_vertex_map()
        y_cov = kernel(points[tx2vx[idxs], :] - np.mean(points, axis=0))

        if 0:
            # for debugging distances
            import matplotlib.pyplot as plt
            import cv2
            from visnav.algo.image import ImageProc

            orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE)
            gx, gy = np.gradient(points[tx2vx[idxs], :].reshape((ny, nx, 3)), axis=(1, 0))
            gxy = np.linalg.norm(gx, axis=2) + np.linalg.norm(gy, axis=2)
            gxy = (gxy - np.min(gxy)) / (np.max(gxy) - np.min(gxy))
            # NOTE(review): cv2.resize expects dsize as (width, height) but
            # .shape is (rows, cols) -- verify before re-enabling this block
            grad_img = cv2.resize((gxy * 255).astype('uint8'), orig_tx.shape)
            overlaid = ImageProc.merge((orig_tx, grad_img))

            plt.figure(1)
            plt.imshow(overlaid)
            plt.show()

    # sample the gaussian process (also returns the Cholesky factor for reuse)
    e0, L = mv_normal(np.zeros(n), cov=y_cov, L=L)
    e0 = e0.reshape((ny, nx))

    # interpolate the coarse GP sample up to full texture resolution
    x = np.linspace(np.min(tx_grid_xx), np.max(tx_grid_xx), tex.shape[1])
    y = np.linspace(np.min(tx_grid_yy), np.max(tx_grid_yy), tex.shape[0])
    interp0 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e0, kx=1, ky=1)
    err0 = interp0(x, y)

    if 0:
        import matplotlib.pyplot as plt
        import cv2
        from visnav.algo.image import ImageProc
        orig_tx = cv2.imread(os.path.join(DATA_DIR, '67p+tex.png'), cv2.IMREAD_GRAYSCALE)
        err_ = err0 if 1 else e0
        eimg = (err_ - np.min(err_)) / (np.max(err_) - np.min(err_))
        # NOTE(review): same (width, height) vs .shape order caveat as above
        eimg = cv2.resize((eimg * 255).astype('uint8'), orig_tx.shape)
        overlaid = ImageProc.merge((orig_tx, eimg))
        plt.figure(1)
        plt.imshow(overlaid)
        plt.show()

    err1 = 0
    if hf_noise:
        # reuse L to draw an uncorrelated-scale field that modulates the
        # amplitude of the high-frequency noise
        e1, L = mv_normal(np.zeros(n), L=L)
        e1 = e1.reshape((ny, nx))
        interp1 = RectBivariateSpline(tx_grid_xx[0, :], tx_grid_yy[:, 0], e1, kx=1, ky=1)
        err_coef = interp1(x, y)
        lo, hi = np.min(err_coef), np.max(err_coef)
        err_coef = (err_coef - lo) / (hi - lo)  # normalize to [0, 1]

        len_sc = 10
        err1 = generate_field_fft(tex.shape, (6 * noise_sd, 4 * noise_sd),
                                  (len_sc / 1000, len_sc / 4500)) if hf_noise else 0
        err1 *= err_coef

    noisy_tex = tex + err0 + err1

    # clip extreme quantiles and rescale to [0, 1]
    min_v, max_v = np.quantile(noisy_tex, (0.0001, 0.9999))
    min_v = min(0, min_v)
    noisy_tex = (np.clip(noisy_tex, min_v, max_v) - min_v) / (max_v - min_v)

    if 0:
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.imshow(noisy_tex)
        plt.figure(2)
        plt.imshow(err0)
        plt.figure(3)
        plt.imshow(err1)
        plt.show()

    return noisy_tex, np.std(err0 + err1), L