Esempio n. 1
0
def _sampleDistribution(params, numSamples, verbosity=0):
    """
  Given the parameters of a distribution, generate numSamples points from it.
  This routine is mostly for testing.

  :returns: A numpy array of samples.
  """
    if params.has_key("name"):
        if params["name"] == "normal":
            samples = numpy.random.normal(loc=params["mean"],
                                          scale=math.sqrt(params["variance"]),
                                          size=numSamples)
        elif params["name"] == "pareto":
            samples = numpy.random.pareto(params["alpha"], size=numSamples)
        elif params["name"] == "beta":
            samples = numpy.random.beta(a=params["alpha"],
                                        b=params["beta"],
                                        size=numSamples)
        else:
            raise ValueError("Undefined distribution: " + params["name"])
    else:
        raise ValueError("Bad distribution params: " + str(params))

    if verbosity > 0:
        print "\nSampling from distribution:", params
        print "After estimation, mean=", cupy.mean(samples), \
              "var=", cupy.var(samples), "stdev=", math.sqrt(cupy.var(samples))
    return samples
Esempio n. 2
0
def _sampleDistribution(params, numSamples, verbosity=0):
  """
  Given the parameters of a distribution, generate numSamples points from it.
  This routine is mostly for testing.

  :returns: A numpy array of samples.
  """
  if params.has_key("name"):
    if params["name"] == "normal":
      samples = numpy.random.normal(loc=params["mean"],
                                    scale=math.sqrt(params["variance"]),
                                    size=numSamples)
    elif params["name"] == "pareto":
      samples = numpy.random.pareto(params["alpha"], size=numSamples)
    elif params["name"] == "beta":
      samples = numpy.random.beta(a=params["alpha"], b=params["beta"],
                                  size=numSamples)
    else:
      raise ValueError("Undefined distribution: " + params["name"])
  else:
    raise ValueError("Bad distribution params: " + str(params))

  if verbosity > 0:
    print "\nSampling from distribution:", params
    print "After estimation, mean=", cupy.mean(samples), \
          "var=", cupy.var(samples), "stdev=", math.sqrt(cupy.var(samples))
  return samples
Esempio n. 3
0
 def _t_test(self, with_sample, without_sample):
     """
     compute one-tailed two-sample (Welch) t-test with a test statistic
     according to
         t_j: \frac{\mu_j - \bar{\mu_j}}{\sqrt{\frac{s^2_j}{\norm{I_j}} +
         \frac{\bar{s^2_j}}{\norm{\bar{I_j}}}}}

     Fix: the denominator uses the sample variances s^2 (cp.var) directly;
     the previous code squared the variance (yielding s^4), which disagreed
     with the formula above.
     """
     return (cp.mean(with_sample) - cp.mean(without_sample)) /\
         cp.sqrt(cp.var(with_sample) / len(with_sample) +
                 cp.var(without_sample) / len(without_sample))
    def save_mean_and_variance(self):
        """Fold this file's batch of observations into the running dataset
        mean/variance and persist the mean and std as ``mean.npy``/``std.npy``.
        """
        batch_count = self.num_observations_per_file
        combined_count = self.total_images + batch_count
        # Blending weights for the previous statistics vs. the new batch.
        w_old = self.total_images / combined_count
        w_new = batch_count / combined_count

        batch = cp.asarray(self.images)
        batch_mean = cp.mean(batch, axis=(0, 1))
        batch_var = cp.var(batch, axis=(0, 1))

        if self.dataset_mean is None:
            mean_updated = batch_mean
        else:
            mean_updated = w_old * self.dataset_mean + w_new * batch_mean

        if self.dataset_var is None:
            var_updated = batch_var
        else:
            # Var = E[x^2] - (E[x])^2 with the second moments blended
            # between the old statistics and the new batch.
            var_updated = (w_old * (self.dataset_var + self.dataset_mean**2)
                           + w_new * (batch_var + batch_mean**2)
                           - mean_updated**2)

        # Floating-point error can push the variance slightly below zero.
        var_updated[var_updated < 0] = 0

        self.dataset_var = var_updated
        self.dataset_mean = mean_updated
        self.dataset_std = cp.sqrt(self.dataset_var)

        cp.save(os.path.join(self.path, "mean.npy"), self.dataset_mean)
        cp.save(os.path.join(self.path, "std.npy"), self.dataset_std)
Esempio n. 5
0
    def forward(self, is_training=True):
        """Group-normalization forward pass: normalize each (sample, group)
        slice, then apply the learned scale (gamma) and shift (beta)."""
        x = self.input_tensor
        # self.input_shape = x.shape
        gamma, beta = self.variables
        N, C, H, W = x.shape
        # NOTE(review): built from the 4-D input (axes 2..3) but applied to
        # the 5-D grouped tensor below, so the last spatial axis is not
        # reduced — confirm this is intended.
        self.shape_field = tuple(range(2, x.ndim))

        grouped = cp.reshape(x, (N, self.G, C // self.G, H, W))
        mu = cp.mean(grouped, axis=self.shape_field, keepdims=True)
        sigma2 = cp.var(grouped, axis=self.shape_field, keepdims=True)
        centered = grouped - mu
        std = cp.sqrt(sigma2 + self.epsilon)
        grouped_norm = centered / std

        normed = cp.reshape(grouped_norm, (N, C, H, W))
        out = gamma.output_tensor * normed + beta.output_tensor

        # Intermediates needed by the backward pass.
        self.cache = (centered, std, normed)

        self.output_tensor = out

        if self.require_grads:
            self.grads = cp.zeros_like(self.output_tensor)
        super().forward(is_training)
Esempio n. 6
0
    def forward(self,is_training=True):
        """Batch-normalization forward pass, applied feature-by-feature.

        Training: normalize each feature column with its batch mean/variance,
        cache intermediates for backward, and update the moving averages.
        Inference: use the stored moving statistics instead.

        NOTE(review): inputs[:, k] assumes self.axis indexes the second
        dimension of a 2-D input — confirm against callers.
        """
        inputs=self.input_tensor
        gamma,beta=self.variables
        outputs = cp.zeros_like(inputs)
        if is_training:
            self.cache = []
            # Normalize one feature (column k) at a time.
            for k in range(inputs.shape[self.axis]):
                # Batch mean of feature k.
                mean=cp.mean(inputs[:,k])
                # Batch variance of feature k.
                var=cp.var(inputs[:,k])
                # Center the feature.
                xmu=inputs[:,k]-mean
                sqrtvar=cp.sqrt(var+self.epsilon)
                normalized_x=xmu/sqrtvar
                # Scale/shift with the learned per-feature parameters.
                outputs[:,k]=gamma.output_tensor[k]*normalized_x+beta.output_tensor[k]
                # Saved for the backward pass.
                self.cache.append((xmu,sqrtvar,normalized_x))
                # Exponential moving averages consumed at inference time.
                self.moving_mean[k]=self.momentum*self.moving_mean[k]+(1-self.momentum)*mean
                self.moving_variance[k] = self.momentum * self.moving_variance[k] + (1 - self.momentum) * var

        else:
            for k in range(inputs.shape[self.axis]):
                # Folded affine form of (x - mean) / std * gamma + beta
                # using the moving statistics.
                std=cp.sqrt(self.moving_variance[k]+self.epsilon)
                outputs[:,k]=(gamma.output_tensor[k]/std)*inputs[:,k]+(beta.output_tensor[k]-gamma.output_tensor[k]*self.moving_mean[k]/std)


        self.output_tensor=outputs
        if self.require_grads:
            self.grads = cp.zeros_like(self.output_tensor)
        super().forward(is_training)
Esempio n. 7
0
def var(inp) -> 'Tensor':
    """Reduce ``inp`` to its variance, returned as a new ``Tensor``.

    The result's data is ``engine.var(inp.data)`` (the backend's variance
    over all elements) and its backward function is ``var_backward`` bound
    to ``inp``, so gradients can propagate back through the reduction.
    """
    _check_tensors(inp)  # presumably validates the argument — see helper
    engine = _get_engine(inp)  # numpy/cupy-style backend for this tensor

    return _create_tensor(
        inp,
        data=engine.var(inp.data),
        func=wrapped_partial(var_backward, inp=inp)
    )
Esempio n. 8
0
def boxcox_llf(lmb, data):
    """The boxcox log-likelihood function.

    Parameters
    ----------
    lmb : scalar
        Parameter for Box-Cox transformation
    data : array-like
        Data to calculate Box-Cox log-likelihood for. If
        `data` is multi-dimensional, the log-likelihood
        is calculated along the first axis

    Returns
    -------
    llf : float or cupy.ndarray
        Box-Cox log-likelihood of `data` given `lmb`. A float
        for 1-D `data`, an array otherwise

    See Also
    --------
    scipy.stats.boxcox_llf

    """

    # Promote low-precision 1-D inputs so the log/power arithmetic below is
    # done in double precision (single guard replaces three repeated
    # `data.ndim == 1` checks).
    if data.ndim == 1:
        if data.dtype in (cupy.float16, cupy.float32):
            data = data.astype(cupy.float64)
        elif data.dtype == cupy.complex64:
            data = data.astype(cupy.complex128)

    N = data.shape[0]
    if N == 0:
        # Log-likelihood of an empty sample is undefined.
        return cupy.array(cupy.nan)

    logdata = cupy.log(data)

    # Compute the variance of the transformed data; lmb == 0 is the log
    # limit of the Box-Cox transform (x**lmb - 1) / lmb.
    if lmb == 0:
        variance = cupy.var(logdata, axis=0)
    else:
        # The -1/lmb shift drops out of the variance, so x**lmb / lmb
        # suffices.
        variance = cupy.var(data**lmb / lmb, axis=0)

    return (lmb - 1) * cupy.sum(logdata, axis=0) - N / 2 * cupy.log(variance)
Esempio n. 9
0
    def forward(self, x):
        """Normalize x over the batch axis, scale by gamma and shift by beta;
        stash the intermediates the backward pass needs."""
        gamma = self.load('gamma')
        beta = self.load('beta')

        batch_mean = cp.mean(x, axis=0)
        batch_var = cp.var(x, axis=0)
        normalized = (x - batch_mean) / cp.sqrt(batch_var + EPS)
        result = gamma * normalized + beta

        self.save('input', x)
        self.save('mu', batch_mean)
        self.save('var', batch_var)
        return result
Esempio n. 10
0
    def forward(self, is_training=True):
        """Batch-normalization forward pass.

        4-D inputs (N, C, H, W) are flattened to (N*H*W, C) so features are
        normalized per channel, then restored to the original layout at the
        end. Training mode uses batch statistics and updates the moving
        averages; inference mode uses the stored moving statistics.
        """
        inputs = self.input_tensor
        self.input_shape = inputs.shape
        if self.input_tensor.ndim == 4:
            N, C, H, W = self.input_shape
            # Move channels last, then collapse batch and spatial positions.
            inputs = inputs.transpose(0, 3, 2, 1).reshape(N * H * W, C)

        gamma, beta = self.variables
        if is_training:

            # Per-feature batch mean.
            mean = cp.mean(inputs, axis=0)
            # Per-feature batch variance.
            var = cp.var(inputs, axis=0)
            # Center the batch.
            xmu = inputs - mean
            sqrtvar = cp.sqrt(var + self.epsilon)
            normalized_x = xmu / sqrtvar
            outputs = gamma.output_tensor * normalized_x + beta.output_tensor
            # Saved for the backward pass.
            self.cache = (xmu, sqrtvar, normalized_x)
            # Exponential moving averages consumed at inference time.
            self.moving_mean = self.momentum * self.moving_mean + (
                1 - self.momentum) * mean
            self.moving_variance = self.momentum * self.moving_variance + (
                1 - self.momentum) * var

        else:
            # Folded affine form of (x - mean) / std * gamma + beta using
            # the moving statistics.
            scale = gamma.output_tensor / (cp.sqrt(self.moving_variance +
                                                   self.epsilon))
            outputs = inputs * scale + (beta.output_tensor -
                                        self.moving_mean * scale)
        if self.input_tensor.ndim == 4:
            N, C, H, W = self.input_shape
            # Invert the flattening above to recover (N, C, H, W).
            outputs = outputs.reshape(N, W, H, C).transpose(0, 3, 2, 1)
        self.output_tensor = outputs

        if self.require_grads:
            self.grads = cp.zeros_like(self.output_tensor)
        super().forward(is_training)
Esempio n. 11
0
def mdist(A, B):
    """Half the ratio of Var(A - B) to Var(A) + Var(B): a normalized
    dissimilarity measure between the two arrays."""
    numerator = cp.var(A - B)
    denominator = cp.var(A) + cp.var(B)
    return 0.5 * (numerator / denominator)
def chambolleProjectionGPU(f, f_ref, mi=100, tau=0.25, tol=1e-5):
    '''
    The 2D case of the Chambolle projection algorithm. This version uses a
    reference image to measure convergence.

    Source
    -------
    Cywińska, Maria, Maciej Trusiak, and Krzysztof Patorski. 
    "Automatized fringe pattern preprocessing using unsupervised variational image decomposition." Optics express 27.16 (2019): 22542-22562.

    Parameters
    ----------
    f : cupy.ndarray
        image which is input for Chambolle
    f_ref : cupy.ndarray
        reference version of the input image, ideally without the background
        function; used only to compute the RMS error
    mi : float
        regularization parameter that defines the separation of the energy between the fringes and noise components
    tau : float
        Chambolle projection step value
    tol : float
        error tolerance when algorithm should stop its work

    Returns
    -------
    x_best : numpy.ndarray
        image with filtered background function
    it_min : int
        number of iterations that was needed to reach result image
    rms_min : float
        error of the result image
    '''
    n = 1
    # Dual variable: a 2-component vector field (one per gradient direction).
    xi = cp.array([cp.zeros(f.shape), cp.zeros(f.shape)])
    x1 = cp.zeros(f.shape)
    x2 = cp.zeros(f.shape)
    x_best = cp.zeros(f.shape)

    # Sliding window (capped at 100 entries) of recent best-RMS values.
    rms_min_A = []
    rms_min = 1.0
    it_min = 0

    # iter(int, 1) never yields the sentinel 1 (int() == 0), so this is an
    # intentional infinite loop; the only exits are the two breaks below.
    for _ in iter(int, 1):

        # Chambolle dual update. gradient2DGPU/divergence2DGPU are project
        # helpers — presumably the discrete 2-D gradient/divergence; confirm
        # in their defining module.
        gdv = cp.array(gradient2DGPU(divergence2DGPU(xi) - f / mi))
        d = cp.sqrt(cp.power(gdv[0], 2) + cp.power(gdv[1], 2))
        d = cp.tile(d, [2, 1, 1])
        xi = cp.divide(xi + tau * gdv, 1 + tau * d)

        # Primal estimate for this iteration.
        x2 = mi * divergence2DGPU(xi)

        # RMS deviation of the estimate from the reference image.
        diff = x2 - f_ref
        rms_n = cp.sqrt(cp.var(diff.flatten()))

        # Maintain the bounded history of rms_min values.
        if len(rms_min_A) < 100:
            rms_min_A.append(rms_min)
        else:
            rms_min_A.pop(0)
            rms_min_A.append(rms_min)

        if rms_n < rms_min:
            # Improvement over the best so far: measure both the windowed
            # improvement and the local (last-step) improvement.
            rms_diff = rms_min_A[0] - rms_min_A[-1]
            rms_local_diff = rms_min - rms_n

            # Stop when both improvements have stagnated below tolerance.
            if (rms_diff < 10 * tol):
                if (rms_local_diff < tol):
                    rms_min = rms_n
                    it_min = n
                    break

            rms_min = rms_n
            it_min = n

        x1 = x2
        n = n + 1

        # Give up after 100 iterations without a new best.
        if n - it_min >= 100:
            break

        pass

    x_best = x2

    return [x_best, it_min, rms_min]
Esempio n. 13
0
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    dtype: Optional[Dtype] = None,
    keepdims: bool = False,
) -> Array:
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in sum")
    # Note: sum() and prod() always upcast integers to (u)int64 and float32 to
    # float64 for dtype=None. `np.sum` does that too for integers, but not for
    # float32, so we need to special-case it here
    if dtype is None and x.dtype == float32:
        dtype = float64
    return Array._new(
        np.sum(x._array, axis=axis, dtype=dtype, keepdims=keepdims))


def var(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    correction: Union[int, float] = 0.0,
    keepdims: bool = False,
) -> Array:
    """Array API ``var``: wraps ``np.var``, mapping the spec's ``correction``
    keyword onto NumPy's ``ddof`` parameter."""
    if x.dtype not in _floating_dtypes:
        raise TypeError("Only floating-point dtypes are allowed in var")
    raw = np.var(x._array, axis=axis, ddof=correction, keepdims=keepdims)
    return Array._new(raw)
Esempio n. 14
0
def var(x):
    """Sample variance (ddof=1) of ``x``; NaN for an empty array."""
    if x.size > 0:
        return cp.var(x, ddof=1)
    return cp.nan
Esempio n. 15
0
    def render(representation,
               camera_distance,
               start_t,
               end_t,
               gt_images,
               gt_viewpoints,
               animation_frame_array,
               rotate_camera=True):
        """Build animation frames comparing ground truth with model output.

        For each frame t in [start_t, end_t): rotate the camera, sample 100
        images from the model at the query viewpoint, and append matplotlib
        artists for the Otsu-binarized per-pixel variance, the sample mean,
        one raw sample, and the MSE against the matching ground-truth image.

        NOTE(review): relies on closure/global names defined outside this
        block (model, fps, make_uint8, threshold_otsu, get_mse_image,
        axis_* handles, axis_observations_image, ...).
        """

        gt_images = np.squeeze(gt_images)
        # NOTE(review): hard-coded (15, 1, 7) — assumes exactly 15 ground
        # truth viewpoints of 7 components each; confirm upstream.
        gt_viewpoints = cp.reshape(cp.asarray(gt_viewpoints), (15, 1, 7))
        idx = cp.argsort(cp.squeeze(gt_viewpoints)[:, 0])

        # Sort images and viewpoints consistently by the first viewpoint
        # coordinate.
        gt_images = [
            i
            for i, v in sorted(zip(gt_images, idx), key=operator.itemgetter(1))
        ]
        gt_viewpoints = [
            i for i, v in sorted(zip(gt_viewpoints, idx),
                                 key=operator.itemgetter(1))
        ]
        count = 0
        '''shows variance and mean images of 100 samples from the Gaussian.'''
        for t in range(start_t, end_t):
            artist_array = [
                axis_observations.imshow(make_uint8(axis_observations_image),
                                         interpolation="none",
                                         animated=True)
            ]

            horizontal_angle_rad, vertical_angle_rad = compute_camera_angle_at_frame(
                t)

            if rotate_camera == False:
                horizontal_angle_rad, vertical_angle_rad = compute_camera_angle_at_frame(
                    0)
            query_viewpoints = rotate_query_viewpoint(horizontal_angle_rad,
                                                      vertical_angle_rad,
                                                      camera_distance)

            # shape 100x1x3x64x64, when Model is from model_testing.py
            generated_images = model.generate_image(query_viewpoints,
                                                    representation, 100)

            # generate predicted from ground truth viewpoints
            predicted_images = model.generate_image(gt_viewpoints[count],
                                                    representation, 1)

            # predicted_images = model.generate_image(query_viewpoints, representation,1)
            predicted_images = np.squeeze(predicted_images)
            image_mse = get_mse_image(gt_images[count], predicted_images)

            # when sampling with 100
            cpu_generated_images = chainer.backends.cuda.to_cpu(
                generated_images)
            generated_images = np.squeeze(cpu_generated_images)

            # # cpu calculation
            # cpu_image_mean = np.mean(cpu_generated_images,axis=0)
            # cpu_image_std = np.std(cpu_generated_images,axis=0)
            # cpu_image_var = np.var(cpu_generated_images,axis=0)
            # image_mean = np.squeeze(chainer.backends.cuda.to_gpu(cpu_image_mean))
            # image_std = chainer.backends.cuda.to_gpu(cpu_image_std)
            # image_var = np.squeeze(chainer.backends.cuda.to_gpu(cpu_image_var))

            # Per-pixel mean and variance over the 100 samples.
            image_mean = cp.mean(cp.squeeze(generated_images), axis=0)
            image_var = cp.var(cp.squeeze(generated_images), axis=0)

            # convert to black and white.
            # grayscale
            r, g, b = image_var
            gray_image_var = 0.2989 * r + 0.5870 * g + 0.1140 * b
            # thresholding Otsu's method
            thresh = threshold_otsu(gray_image_var)
            var_binary = gray_image_var > thresh

            sample_image = np.squeeze(generated_images[0])

            # Cycle through the 15 ground-truth views, advancing every
            # 10th frame.
            if count == 14:
                count = 0
            elif (t - fps) % 10 == 0:
                count += 1

            print("computed an image. Count =", count)

            artist_array.append(
                axis_generation_variance.imshow(var_binary,
                                                cmap=plt.cm.gray,
                                                interpolation="none",
                                                animated=True))
            artist_array.append(
                axis_generation_mean.imshow(make_uint8(image_mean),
                                            interpolation="none",
                                            animated=True))
            artist_array.append(
                axis_generation_sample.imshow(make_uint8(sample_image),
                                              interpolation="none",
                                              animated=True))
            artist_array.append(
                axis_generation_mse.imshow(make_uint8(image_mse),
                                           cmap='gray',
                                           interpolation="none",
                                           animated=True))

            animation_frame_array.append(artist_array)
def eegstats(signals, samples, statistic):
    """Compute one named statistic for every signal.

    Replaces thirteen near-identical ``elif`` branches (each allocating,
    looping and assigning) with a single dispatch table.

    :param signals: sequence of 1-D signals (array-likes).
    :param samples: length of the output vector (expected >= len(signals)).
    :param statistic: one of 'mean', 'std', 'skewness', 'kurtosis',
        'maximum', 'minimum', 'n5', 'n25', 'n75', 'n95', 'median',
        'variance', 'rms'.
    :returns: cupy array of length ``samples`` with the statistic of
        ``signals[i]`` at position ``i``; ``None`` for an unknown statistic
        (behavior preserved from the original if/elif chain).
    """
    import cupy as cp
    from scipy.stats import skew, kurtosis

    # One scalar-valued function per statistic. The percentile/variance/rms
    # entries convert to a cupy array first, exactly as the original did.
    stat_funcs = {
        'mean': cp.mean,
        'std': cp.std,
        'skewness': skew,
        'kurtosis': kurtosis,
        'maximum': cp.amax,
        'minimum': cp.amin,
        'n5': lambda s: cp.percentile(cp.asarray(s), 5),
        'n25': lambda s: cp.percentile(cp.asarray(s), 25),
        'n75': lambda s: cp.percentile(cp.asarray(s), 75),
        'n95': lambda s: cp.percentile(cp.asarray(s), 95),
        'median': lambda s: cp.percentile(cp.asarray(s), 50),
        'variance': lambda s: cp.var(cp.asarray(s)),
        'rms': lambda s: cp.mean(cp.sqrt(cp.asarray(s)**2)),
    }

    func = stat_funcs.get(statistic)
    if func is None:
        # Unknown statistic: fall through to None, matching the original's
        # implicit return.
        return None

    result = cp.zeros(samples)
    for i in range(len(signals)):
        result[i] = func(signals[i])
    return result
    def render(representation,
               camera_distance,
               obs_viewpoint,
               start_t,
               end_t,
               animation_frame_array,
               savename=None,
               rotate_camera=True):
        """Sweep the camera around the observed viewpoint and record, per
        frame, the variance of 100 model samples plus intermediate-variable
        variances; write everything to an HDF5 file at ``savename``.

        NOTE(review): relies on closure/global names defined outside this
        block (model, start_time, make_uint8, axis_observations,
        axis_observations_image, compute_* helpers, ...).
        """

        # Per-frame accumulators, concatenated and saved at the end.
        all_var_bg = []
        all_var = []
        all_var_z = []
        all_q_view = []

        all_c = []
        all_h = []
        all_u = []
        for t in range(start_t, end_t):
            artist_array = [
                axis_observations.imshow(make_uint8(axis_observations_image),
                                         interpolation="none",
                                         animated=True)
            ]

            # convert x,y into radians??
            # try reversing the camera direction calculation in rotate query viewpoint (impossible to reverse the linalg norm...)

            # Recover the camera angles from the observed viewpoint position.
            horizontal_angle_rad = np.arctan2(obs_viewpoint[0],
                                              obs_viewpoint[2])
            vertical_angle_rad = np.arcsin(obs_viewpoint[1] / camera_distance)

            # xz_diagonal = np.sqrt(np.square(obs_viewpoint[0])+np.square(obs_viewpoint[2]))

            # vertical_angle_rad = np.arctan2(obs_viewpoint[1],xz_diagonal)
            # vertical_angle_rad = np.arcsin(obs_viewpoint[1]/camera_distance)

            # horizontal_angle_rad, vertical_angle_rad = 0,0
            # ipdb.set_trace()
            horizontal_angle_rad, vertical_angle_rad = compute_vertical_rotation_at_frame(
                horizontal_angle_rad, vertical_angle_rad, t)
            if rotate_camera == False:
                horizontal_angle_rad, vertical_angle_rad = compute_camera_angle_at_frame(
                    0)

            query_viewpoints = rotate_query_viewpoint(horizontal_angle_rad,
                                                      vertical_angle_rad,
                                                      camera_distance)

            # obtain generated images, as well as mean and variance before gaussian
            generated_images, var_bg, latent_z, ct = model.generate_multi_image(
                query_viewpoints, representation, 100)
            logging.info("retrieved variables, time elapsed: " +
                         str(time.time() - start_time))

            # cpu_generated_images = chainer.backends.cuda.to_cpu(generated_images)
            generated_images = np.squeeze(generated_images)

            latent_z = np.squeeze(latent_z)
            # ipdb.set_trace()
            ct = np.squeeze(ct)

            # ht = np.squeeze(np.asarray(ht))
            # ut = np.squeeze(np.asarray(ut))

            # obtain data from Chainer Variable and obtain mean
            var_bg = cp.mean(var_bg, axis=0)
            logging.info("variance of bg, time elapsed: " +
                         str(time.time() - start_time))
            # Variance across the 100 samples of the latent z.
            var_z = cp.var(latent_z, axis=0)
            logging.info("variance of z, time elapsed: " +
                         str(time.time() - start_time))
            # ipdb.set_trace()
            # print(ct.shape())
            var_c = cp.var(ct, axis=0)

            logging.info("variance of c, time elapsed: " +
                         str(time.time() - start_time))
            # var_h = cp.var(ht,axis=0)
            # var_u = cp.var(ut,axis=0)

            # write viewpoint and image variance to file
            gen_img_var = np.var(generated_images, axis=0)
            logging.info("calculated variance of gen images, time elapsed: " +
                         str(time.time() - start_time))

            # [None] adds a leading axis so the lists concatenate per-frame.
            all_var_bg.append((var_bg)[None])
            all_var.append((gen_img_var)[None])
            all_var_z.append((var_z)[None])
            all_q_view.append(
                chainer.backends.cuda.to_cpu(horizontal_angle_rad)[None] *
                180 / math.pi)

            all_c.append((var_c)[None])
            logging.info("appending, time elapsed: " +
                         str(time.time() - start_time))
            # all_h.append(chainer.backends.cuda.to_cpu(var_h)[None])
            # all_u.append(chainer.backends.cuda.to_cpu(var_u)[None])

            # sample = generated_images[0]
            pred_mean = cp.mean(generated_images, axis=0)

            # artist_array.append(
            #     axis_generation.imshow(
            #         make_uint8(pred_mean),
            #         interpolation="none",
            #         animated=True))

            # animation_frame_array.append(artist_array)

        all_var_bg = np.concatenate(chainer.backends.cuda.to_cpu(all_var_bg),
                                    axis=0)
        all_var = np.concatenate(chainer.backends.cuda.to_cpu(all_var), axis=0)
        all_var_z = np.concatenate(chainer.backends.cuda.to_cpu(all_var_z),
                                   axis=0)

        all_c = np.concatenate(chainer.backends.cuda.to_cpu(all_c), axis=0)
        # all_h = np.concatenate(all_h,axis=0)
        # all_u = np.concatenate(all_u,axis=0)
        logging.info("concatenating, time elapsed: " +
                     str(time.time() - start_time))

        # Append the per-frame variance datasets to the HDF5 output.
        with h5py.File(savename, "a") as f:
            f.create_dataset("variance_all_viewpoints", data=all_var)
            f.create_dataset("query_viewpoints",
                             data=np.squeeze(np.asarray(all_q_view)))
            f.create_dataset("variance_b4_gaussian", data=all_var_bg)
            f.create_dataset("variance_of_z", data=all_var_z)

            f.create_dataset("c", data=all_c)
            # f.create_dataset("h",data=all_h)
            # f.create_dataset("u",data=all_u)
        logging.info("saving, time elapsed: " + str(time.time() - start_time))
Esempio n. 18
0
    def render_wVar(representation,
                    camera_distance,
                    camera_position_y,
                    total_frames,
                    animation_frame_array,
                    no_of_samples,
                    rotate_camera=True,
                    wVariance=True):
        """For each frame, sample ``no_of_samples`` images from the model at
        the rotated viewpoint and append artists for the grayscale per-pixel
        variance and the mean image.

        NOTE(review): relies on closure/global names defined outside this
        block (model, make_uint8, axis_* handles, axis_observations_image,
        compute_camera_angle_at_frame, rotate_query_viewpoint, ...). The
        ``wVariance`` flag is only used by the commented-out hill-climb code.
        """

        # highest_var = 0.0
        # with open("queries.txt",'w') as file_wviews, open("variance.txt",'w') as file_wvar:
        for t in range(0, total_frames):
            artist_array = [
                axis_observations.imshow(cv2.cvtColor(
                    make_uint8(axis_observations_image), cv2.COLOR_BGR2RGB),
                                         interpolation="none",
                                         animated=True)
            ]

            horizontal_angle_rad = compute_camera_angle_at_frame(t)
            if rotate_camera == False:
                horizontal_angle_rad = compute_camera_angle_at_frame(0)

            query_viewpoints = rotate_query_viewpoint(horizontal_angle_rad,
                                                      camera_distance,
                                                      camera_position_y)

            # q_x, q_y, q_z, _, _, _, _ = query_viewpoints[0]

            # file_wviews.writelines("".join(str(q_x))+", "+
            #                         "".join(str(q_y))+", "+
            #                         "".join(str(q_z))+"\n")

            generated_images = cp.squeeze(
                cp.array(
                    model.generate_images(query_viewpoints, representation,
                                          no_of_samples)))
            # ipdb.set_trace()
            # Per-pixel variance and mean across the sampled images.
            var_image = cp.var(generated_images, axis=0)
            mean_image = cp.mean(generated_images, axis=0)
            mean_image = make_uint8(
                np.squeeze(chainer.backends.cuda.to_cpu(mean_image)))
            mean_image_rgb = cv2.cvtColor(mean_image, cv2.COLOR_BGR2RGB)

            var_image = chainer.backends.cuda.to_cpu(var_image)

            # grayscale
            r, g, b = var_image
            gray_var_image = 0.2989 * r + 0.5870 * g + 0.1140 * b
            # thresholding Otsu's method
            # thresh = threshold_otsu(gray_var_image)
            # var_binary = gray_var_image > thresh

            ## hill climb algorthm for searching highest variance
            # cur_var = np.mean(gray_var_image)
            # if cur_var>highest_var:
            #     highest_var = cur_var

            #     if wVariance==True:
            #         print('highest variance: '+str(highest_var)+', viewpoint: '+str(query_viewpoints[0]))
            #         highest_var_vp = query_viewpoints[0]
            #         file_wvar.writelines('highest variance: '+str(highest_var)+', viewpoint: '+str(highest_var_vp)+'\n')
            #     else:
            #         pass

            artist_array.append(
                axis_generation_var.imshow(gray_var_image,
                                           cmap=plt.cm.gray,
                                           interpolation="none",
                                           animated=True))

            artist_array.append(
                axis_generation_mean.imshow(mean_image_rgb,
                                            interpolation="none",
                                            animated=True))

            animation_frame_array.append(artist_array)
Esempio n. 19
0
def sum(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    dtype: Optional[Dtype] = None,
    keepdims: bool = False,
) -> Array:
    """Array API ``sum``: wraps ``np.sum`` with the spec's upcasting rules."""
    if x.dtype not in _numeric_dtypes:
        raise TypeError("Only numeric dtypes are allowed in sum")
    # The spec upcasts float32 to float64 when dtype is unspecified; np.sum
    # already handles the equivalent integer upcast but not float32.
    if dtype is None and x.dtype == float32:
        dtype = float64
    summed = np.sum(x._array, axis=axis, dtype=dtype, keepdims=keepdims)
    return Array._new(summed)


def var(
    x: Array,
    /,
    *,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    correction: Union[int, float] = 0.0,
    keepdims: bool = False,
) -> Array:
    """Array API ``var``: delegates to ``np.var``; the spec's ``correction``
    keyword is NumPy's ``ddof``."""
    if x.dtype not in _floating_dtypes:
        raise TypeError("Only floating-point dtypes are allowed in var")
    result = np.var(x._array, axis=axis, ddof=correction, keepdims=keepdims)
    return Array._new(result)