def _update_hist(self, new_input):
    range_ext = cp.around(new_input.min() - self.bin_size / 2, 1), \
                cp.around(new_input.max() + self.bin_size / 2, 1)
    bins_array = cp.arange(range_ext[0], range_ext[1] + self.bin_size,
                           self.bin_size)
    weights, bins = cp.histogram(new_input, bins_array)
    if self._empty:
        self.weights, self.bins = weights, bins[:-1]  # keep the left bin edges
        self._empty = False
    else:  # merge the new histogram into the running one
        self.weights, self.bins = concat_hists(self.weights, self.bins,
                                               weights, bins[:-1],
                                               self.bin_size, self._rd)
Example #2
def _fft_convolve(a1, a2, mode):

    if a1.size < a2.size:
        a1, a2 = a2, a1

    # if either input is complex, the result of the multiplication is too
    if a1.dtype.kind == 'c' or a2.dtype.kind == 'c':
        fft, ifft = cupy.fft.fft, cupy.fft.ifft
    else:
        fft, ifft = cupy.fft.rfft, cupy.fft.irfft

    dtype = cupy.result_type(a1, a2)
    n1, n2 = a1.size, a2.size
    out_size = n1 + n2 - 1
    fa1 = fft(a1, out_size)
    fa2 = fft(a2, out_size)
    out = ifft(fa1 * fa2, out_size)

    if mode == 'full':
        start, end = None, None
    elif mode == 'same':
        start = (n2 - 1) // 2
        end = start + n1
    elif mode == 'valid':
        start, end = n2 - 1, n1
    else:
        raise ValueError(
            'acceptable mode flags are `valid`, `same`, or `full`.')

    out = out[start:end]

    if dtype.kind in 'iu':
        out = cupy.around(out)

    return out.astype(dtype, copy=False)
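A quick sanity check for the helper above (a sketch; assumes CuPy is available and `_fft_convolve` is in scope):

import numpy as np
import cupy

a = cupy.arange(5, dtype=cupy.float64)
k = cupy.asarray([1.0, 2.0, 1.0])
got = cupy.asnumpy(_fft_convolve(a, k, 'full'))
# the FFT-based result should match NumPy's direct convolution up to float error
np.testing.assert_allclose(got, np.convolve(np.arange(5.0), [1.0, 2.0, 1.0]), atol=1e-9)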
Example #3
def initialize(m, n, d):
    W = cp.random.normal(size=(m, d))
    y = cp.random.normal(size=n)
    a = 2 * cp.around(cp.random.uniform(size=m)) - cp.ones(m)  # random signs in {-1, +1}
    x_tmp = cp.random.uniform(size=(n, d))
    x = cp.divide(x_tmp, cp.linalg.norm(x_tmp))  # scale inputs by the global (Frobenius) norm
    return x, W, a, y
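For reference, a call sketch showing the shapes produced (assuming ``import cupy as cp``):

x, W, a, y = initialize(m=16, n=100, d=8)
# x: (100, 8) inputs, W: (16, 8) weights, a: (16,) random signs, y: (100,) targets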
Example #4
def eval_emo_lex(derived_emo_lex, emo_lex, trans, induct_emos_file, induct_emos_eval_file, emotion):
    print("Number of derived emotion ratings:", len(derived_emo_lex))
    derived_emos = []
    real_emos = []
    words = []
    trans = {word_src: tgt_words for word_src, tgt_words in trans}
    for word, emo in derived_emo_lex.items():
        translations = ",".join([t[0] for t in trans[word]])
        induct_emos_file.write(f"{word}\t{translations}\t{emotion}\t{emo}\n")
        real_emo = emo_lex.get(word, None)
        if real_emo is not None:  # a plain truthiness test would drop valid 0.0 ratings
            induct_emos_eval_file.write(f"{word}\t{translations}\t{emotion}\t{emo}\t{real_emo}\n")
            derived_emos.append(emo)
            real_emos.append(real_emo)
            words.append(word)
    
    print("Coverage in test set:", len(derived_emos) / len(derived_emo_lex))

    derived_emos = np.array(derived_emos, dtype=float)
    real_emos = np.array(real_emos, dtype=float)
    corr_coeff = np.corrcoef(derived_emos, real_emos, rowvar=False)
    top_words = np.argsort(-derived_emos)[:10]
    print(derived_emos[top_words])
    top_words = [words[int(idx)] for idx in top_words]
    print(top_words)
    corr_coeff = np.around(corr_coeff[0, 1], 3)
    print("Correlation:", corr_coeff)
    return [corr_coeff, len(derived_emo_lex), derived_emos.shape[0]]
Example #5
def FSITM(HDR, LDR, alpha=None):

    NumPixels = LDR.size

    if alpha is None:
        r = cp.floor(NumPixels / (2.**18))
        if r > 1.:
            alpha = 1. - (1. / r)
        else:
            alpha = 0.

    minNonzero = cp.min(HDR[HDR > 0])
    LogH = cp.log(cp.maximum(HDR, minNonzero))

    # float is needed for further calculation
    LogH = cp.around((LogH - LogH.min()) * 255. /
                     (LogH.max() - LogH.min())).astype(cp.float64)

    if alpha > 0.:
        PhaseHDR_CH = phasecong100(HDR, 2, 2, 8, 8)
        PhaseLDR_CH8 = phasecong100(LDR, 2, 2, 8, 8)
    else:  # so, if image size is smaller than 512x512?
        PhaseHDR_CH = 0
        PhaseLDR_CH8 = 0

    PhaseLogH = phasecong100(LogH, 2, 2, 2, 2)
    PhaseH = alpha * PhaseHDR_CH + (1 - alpha) * PhaseLogH

    PhaseLDR_CH2 = phasecong100(LDR, 2, 2, 2, 2)
    PhaseL = alpha * PhaseLDR_CH8 + (1 - alpha) * PhaseLDR_CH2
    Q = cp.sum(
        cp.logical_or(cp.logical_and(PhaseL <= 0, PhaseH <= 0),
                      cp.logical_and(PhaseL > 0, PhaseH > 0))) / NumPixels
    return Q
Example #6
def gpd(points, fpoints=fpoints, cell=cell, radii=radii):
    points = cp.asarray(points)

    diff = cp.expand_dims(points, axis=1) - cp.expand_dims(fpoints, axis=0)
    # minimum-image convention: wrap fractional coordinates into [-0.5, 0.5)
    diff = (diff - cp.around(diff)).reshape(-1, 3)
    diff = cp.dot(cell, diff.T).T
    diff = cp.linalg.norm(diff, axis=1).reshape(-1, fpoints.shape[0]) - radii.T
    return cp.min(diff, axis=1)
Example #7
def test_probe_support(self):
    """Finite probe support penalty function is within expected bounds."""
    penalty = tike.ptycho.probe.finite_probe_support(
        probe=cp.zeros((101, 101)),  # must be odd shaped for min to be 0
        radius=0.5 * 0.4,
        degree=1.0,  # must have degree >= 1 for upper bound to be p
        p=2.345,
    )
    try:
        import tifffile
        os.makedirs(resultdir, exist_ok=True)
        tifffile.imsave(os.path.join(resultdir, 'penalty.tiff'),
                        penalty.astype('float32').get())
    except ImportError:
        pass
    assert cp.around(cp.min(penalty), 3) == 0.000
    assert cp.around(cp.max(penalty), 3) == 2.345
Example #8
def draw_beta(min_value, max_value, mean_value, n_values, round=False):
    """Draw `n_values` values between `min_value` and `max_value` having
    `mean_value` as (asymptotic) average."""
    # NOTE: the `round` parameter shadows the builtin of the same name here
    a, b = get_alpha_beta(min_value, max_value, mean_value)
    durations = cp.random.beta(
        a, b, n_values) * (max_value - min_value) + min_value
    if round:
        durations = cp.around(durations)
    return durations.reshape(-1, 1).astype(cp.float16)
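`get_alpha_beta` is not shown on this page; a hypothetical sketch consistent with the usage above (matching the first moment of a Beta distribution, with an assumed concentration parameter) could be:

def get_alpha_beta(min_value, max_value, mean_value, concentration=4.0):
    # a Beta(a, b) variable has mean a / (a + b); normalize the target mean to [0, 1]
    mean_norm = (mean_value - min_value) / (max_value - min_value)
    return mean_norm * concentration, (1.0 - mean_norm) * concentration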
Example #9
def counts(y):
    _, y_indices = cp.unique(y, return_inverse=True)
    class_counts = cp.bincount(y_indices)
    total = cp.sum(class_counts)
    percent_counts = []
    for count in class_counts:
        percent_counts.append(
            cp.around(float(count) / total.item(), decimals=2).item())
    return percent_counts
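Example call (a sketch, assuming ``import cupy as cp``):

y = cp.asarray([0, 0, 0, 1, 1, 2])
print(counts(y))  # [0.5, 0.33, 0.17] -- per-class proportions rounded to 2 decimals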
Example #10
def around(inp) -> 'Tensor':
    _check_tensors(inp)
    engine = _get_engine(inp)

    return _create_tensor(
        inp,
        data=engine.around(inp.data),
        func=wrapped_partial(around_backward, inp=inp)
    )
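`around_backward` is defined elsewhere in this library; since rounding has zero derivative almost everywhere, a straight-through estimator is a common choice. A hypothetical sketch:

def around_backward(grad, inp):
    # straight-through estimator: pass the incoming gradient unchanged, because
    # d/dx round(x) is 0 almost everywhere and undefined at half-integers
    return grad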
Example #11
def regression(self, test):
    mean = np.zeros(test.shape[0])
    for x in range(test.shape[0]):  # for each test example
        mean[x] = np.sum(self.n_weights[x] * self.n_labels[x])
        if self.weighted:  # divide by sum of the weights
            mean[x] = mean[x] / np.sum(self.n_weights[x])
        else:  # divide by k
            mean[x] = mean[x] / self.k
        # print("Prediction for Test=" + str(x) + " is " + str(mean[x]))
    mean = np.around(mean, 0)  # round for picking a class
    return mean
Example #12
def zero_one(y, target, cuda=False):
    if cuda:
        res = (target != cp.around(y)).mean()
        cp.cuda.Stream.null.synchronize()
        return res
    else:
        y_one_hot = np.zeros(y.shape)
        i = 0
        for col in y:
            y_one_hot[i, np.argmax(col)] = 1
            i += 1
        return sklearn.metrics.zero_one_loss(np.around(y_one_hot), target)
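The one-hot loop above can be vectorized; an equivalent sketch for 2-D `y` (assuming NumPy):

import numpy as np
y = np.random.rand(4, 3)                                  # hypothetical per-class scores
y_one_hot = np.zeros_like(y)
y_one_hot[np.arange(y.shape[0]), y.argmax(axis=1)] = 1    # one assignment instead of a loop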
Example #13
def test_silhouette_samples_batched(metric, chunk_divider, labeled_clusters):
    X, labels = labeled_clusters
    cuml_scores = cu_silhouette_samples(X, labels, metric=metric,
                                        chunksize=int(X.shape[0] /
                                                      chunk_divider))
    sk_scores = sk_silhouette_samples(X, labels, metric=metric)

    cu_trunc = cp.around(cuml_scores, decimals=3)
    sk_trunc = cp.around(sk_scores, decimals=3)

    diff = cp.absolute(cu_trunc - sk_trunc) > 0

    # 0.5% of elements are allowed to be different
    assert int(cp.sum(diff)) <= 0.005 * X.shape[0]

    # the elements that do differ should not differ by more than 1e-1
    assert not bool(cp.any(cp.absolute(cu_trunc[diff] - sk_trunc[diff]) > 1e-1))
Example #14
def __write_to_txtfileIDCT(self, y8x8, cb8x8, cr8x8):
    """Write the 8x8 blocks of each IDCT picture (y, cb, cr) to a text file."""
    y8x8 = np.around(y8x8, 3)
    cb8x8 = np.around(cb8x8, 3)
    cr8x8 = np.around(cr8x8, 3)
    if args.cupy:  # `np` is aliased to cupy in this mode, so convert for writing
        y8x8 = np.asnumpy(y8x8)
        cb8x8 = np.asnumpy(cb8x8)
        cr8x8 = np.asnumpy(cr8x8)
    with open(os.path.join(inputpath, args.output, "debug", infilename + "_IDCT_y.txt"), "w") as IDCT_y_txt:
        with open(os.path.join(inputpath, args.output, "debug", infilename + "_IDCT_cb.txt"), "w") as IDCT_cb_txt:
            with open(os.path.join(inputpath, args.output, "debug", infilename + "_IDCT_cr.txt"), "w") as IDCT_cr_txt:
                for i, block in enumerate(y8x8):
                    IDCT_y_txt.write("Block: " + str(i + 1) + "\n")
                    IDCT_cb_txt.write("Block: " + str(i + 1) + "\n")
                    IDCT_cr_txt.write("Block: " + str(i + 1) + "\n")
                    for j in range(64):
                        IDCT_y_txt.write(str(j + 1) + ": " + str(y8x8[i][j]) + "; ")
                        IDCT_cb_txt.write(str(j + 1) + ": " + str(cb8x8[i][j]) + "; ")
                        IDCT_cr_txt.write(str(j + 1) + ": " + str(cr8x8[i][j]) + "; ")
                    IDCT_y_txt.write("\n")
                    IDCT_cb_txt.write("\n")
                    IDCT_cr_txt.write("\n")
Example #15
def quantize_real(x,
                  target_mean=0,
                  target_std=32 / (2 * xp.sqrt(2 * xp.log(2))),
                  num_bits=8,
                  data_mean=None,
                  data_std=None,
                  stats_calc_num_samples=10000):
    """
    Quantize real voltage data to integers with specified number of bits
    and target statistics. 

    Parameters
    ----------
    x : array
        Array of voltages
    target_mean : float, optional
        Target mean for voltages
    target_std : float, optional
        Target standard deviation for voltages
    num_bits : int, optional
        Number of bits to quantize to. Quantized voltages will span -2**(num_bits - 1) 
        to 2**(num_bits - 1) - 1, inclusive.
    data_mean : float, optional
        Mean of input voltages, if already known
    data_std : float, optional
        Standard deviation of input voltages, if already known. If None, estimates mean and
        standard deviation automatically.
    stats_calc_num_samples : int, optional
        Maximum number of samples for use in estimating noise statistics
        
    Returns
    -------
    q_voltages : array
        Array of quantized voltages
    """
    if data_std is None:
        data_mean, data_std = data_stream.estimate_stats(
            x, stats_calc_num_samples)

    if data_std == 0:
        factor = 0
    else:
        factor = target_std / data_std

    q_voltages = xp.around(factor * (x - data_mean) + target_mean)
    q_voltages = xp.clip(q_voltages, -2**(num_bits - 1), 2**(num_bits - 1) - 1)
    q_voltages = q_voltages.astype(int)

    return q_voltages
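A quick worked example (a sketch assuming `xp` is CuPy and the statistics are already known, so `data_stream.estimate_stats` is skipped):

import cupy as xp
x = xp.random.normal(0.0, 5.0, size=10000)           # synthetic "voltages"
q = quantize_real(x, target_std=10.0, num_bits=8,
                  data_mean=0.0, data_std=5.0)       # rescale to std ~10, then round
assert q.min() >= -128 and q.max() <= 127            # clipped to the 8-bit signed range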
Example #16
def train_cv_split(x_array, y_array, cv_pct):
    '''
    Inputs - x and y datasets as cp arrays, plus the fraction of the dataset to hold out as a cross-validation set. \n
    Both arrays are sampled with the same indices, so x/y rows stay paired.
    Outputs - Training set and cross-validation set for each input, as cp arrays.
    '''
    # NOTE: randint samples with replacement, so the held-out set can contain
    # duplicate rows and its effective size can be slightly smaller than cv_pct.
    idx = cp.random.randint(x_array.shape[0], size=int(cp.around(x_array.shape[0] * cv_pct)))

    mask = cp.ones(x_array.shape[0], dtype=bool)
    mask[idx] = 0

    x_array_test = x_array[idx, :]
    x_array_train = x_array[mask, :]
    y_array_test = y_array[idx, :]
    y_array_train = y_array[mask, :]

    return x_array_train, x_array_test, y_array_train, y_array_test
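If sampling without replacement is preferred, a permutation-based variant (a sketch with the same interface) avoids the duplicate-row caveat noted above:

def train_cv_split_no_replacement(x_array, y_array, cv_pct):
    n_test = int(cp.around(x_array.shape[0] * cv_pct))
    perm = cp.random.permutation(x_array.shape[0])   # each index appears exactly once
    test_idx, train_idx = perm[:n_test], perm[n_test:]
    return (x_array[train_idx, :], x_array[test_idx, :],
            y_array[train_idx, :], y_array[test_idx, :])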
Example #17
    def fit(self,
            X,
            Y,
            learning_rate=10e-7,
            reg=0,
            epochs=120000,
            show_fig=False):
        X, Y = shuffle(X, Y)
        Xvalid, Yvalid = X[-1000:], Y[-1000:]
        X, Y = X[:-1000], Y[:-1000]

        N, D = X.shape
        self.w = cp.random.randn(D) / cp.sqrt(D)
        self.b = 0

        costs = []
        best_validation_error = 1

        for i in range(epochs):
            pY = self.forward(X)

            # gradient descent step
            self.w -= learning_rate * (X.T.dot(pY - Y) + reg * self.w)
            self.b -= learning_rate * ((pY - Y).sum() + reg * self.b)

            if i % 20 == 0:
                pYvalid = self.forward(Xvalid)
                c = sigmoid_cost(Yvalid, pYvalid)
                costs.append(c)
                e = error_rate(
                    Yvalid, cp.around(pYvalid)
                )  # cp.round just means threshold of 0.5 for classification
                print("i: ", i, " cost: ", c, " error: ", e)
                if e < best_validation_error:
                    best_validation_error = e

        print("best validation error: ", best_validation_error)
        if show_fig:
            plt.plot(costs)
            plt.show()
Example #18
def _fft_convolve(a1, a2, mode):

    offset = 0
    if a1.size < a2.size:
        a1, a2 = a2, a1
        offset = 1 - a2.size % 2

    # if either of them is complex, the dtype after multiplication will also
    # be complex
    if a1.dtype.kind == 'c' or a2.dtype.kind == 'c':
        fft, ifft = cupy.fft.fft, cupy.fft.ifft
    else:
        fft, ifft = cupy.fft.rfft, cupy.fft.irfft

    dtype = cupy.result_type(a1, a2)
    n1, n2 = a1.size, a2.size
    out_size = cupyx.scipy.fft.next_fast_len(n1 + n2 - 1)
    fa1 = fft(a1, out_size)
    fa2 = fft(a2, out_size)
    out = ifft(fa1 * fa2, out_size)

    if mode == 'full':
        start, end = 0, n1 + n2 - 1
    elif mode == 'same':
        start = (n2 - 1) // 2 + offset
        end = start + n1
    elif mode == 'valid':
        start, end = n2 - 1, n1
    else:
        raise ValueError(
            'acceptable mode flags are `valid`, `same`, or `full`.')

    out = out[start:end]

    if dtype.kind in 'iu':
        out = cupy.around(out)

    return out.astype(dtype, copy=False)
Example #19
def test_mark_boundaries_subpixel():
    # fmt: off
    labels = cp.array(
        [[0, 0, 0, 0], [0, 0, 5, 0], [0, 1, 5, 0], [0, 0, 5, 0], [0, 0, 0, 0]],
        dtype=np.uint8)
    np.random.seed(0)
    # fmt: on
    # Note: use np.random to have same seed as NumPy
    # Note: use np.round until cp.around is fixed upstream
    image = cp.asarray(np.round(np.random.rand(*labels.shape), 2))
    marked = mark_boundaries(image, labels, color=white, mode='subpixel')
    marked_proj = cp.asarray(cp.around(cp.mean(marked, axis=-1), 2))

    # fmt: off
    ref_result = cp.array([
        [0.55, 0.63, 0.72, 0.69, 0.6, 0.55, 0.54],  # noqa
        [0.45, 0.58, 0.72, 1., 1., 1., 0.69],  # noqa
        [0.42, 0.54, 0.65, 1., 0.44, 1., 0.89],  # noqa
        [0.69, 1., 1., 1., 0.69, 1., 0.83],  # noqa
        [0.96, 1., 0.38, 1., 0.79, 1., 0.53],  # noqa
        [0.89, 1., 1., 1., 0.38, 1., 0.16],  # noqa
        [0.57, 0.78, 0.93, 1., 0.07, 1., 0.09],  # noqa
        [0.2, 0.52, 0.92, 1., 1., 1., 0.54],  # noqa
        [0.02, 0.35, 0.83, 0.9, 0.78, 0.81, 0.87]
    ])  # noqa
    # fmt: on

    # TODO: get fully equivalent interpolation/boundary as skimage
    #       I think this requires fixing mode='reflect' upstream in SciPy
    if False:
        assert_allclose(marked_proj, ref_result, atol=0.01)
    else:
        # Note: grlee77: only test locations of ones, due to different default
        #                interpolation settings in CuPy version of mark
        #                 boundaries
        assert_allclose(marked_proj == 1, ref_result == 1, atol=0.01)
Example #20
def phase_cross_correlation(reference_image,
                            moving_image,
                            *,
                            upsample_factor=1,
                            space="real",
                            return_error=True,
                            reference_mask=None,
                            moving_mask=None,
                            overlap_ratio=0.3):
    """Efficient subpixel image translation registration by cross-correlation.

    This code gives the same precision as the FFT upsampled cross-correlation
    in a fraction of the computation time and with reduced memory requirements.
    It obtains an initial estimate of the cross-correlation peak by an FFT and
    then refines the shift estimation by upsampling the DFT only in a small
    neighborhood of that estimate by means of a matrix-multiply DFT.

    Parameters
    ----------
    reference_image : array
        Reference image.
    moving_image : array
        Image to register. Must be same dimensionality as
        ``reference_image``.
    upsample_factor : int, optional
        Upsampling factor. Images will be registered to within
        ``1 / upsample_factor`` of a pixel. For example
        ``upsample_factor == 20`` means the images will be registered
        within 1/20th of a pixel. Default is 1 (no upsampling).
        Not used if any of ``reference_mask`` or ``moving_mask`` is not None.
    space : string, one of "real" or "fourier", optional
        Defines how the algorithm interprets input data. "real" means
        data will be FFT'd to compute the correlation, while "fourier"
        data will bypass FFT of input data. Case insensitive. Not
        used if any of ``reference_mask`` or ``moving_mask`` is not
        None.
    return_error : bool, optional
        Returns error and phase difference if on, otherwise only
        shifts are returned. Has no effect if any of ``reference_mask`` or
        ``moving_mask`` is not None; in that case only shifts are returned.
    reference_mask : ndarray
        Boolean mask for ``reference_image``. The mask should evaluate
        to ``True`` (or 1) on valid pixels. ``reference_mask`` should
        have the same shape as ``reference_image``.
    moving_mask : ndarray or None, optional
        Boolean mask for ``moving_image``. The mask should evaluate to ``True``
        (or 1) on valid pixels. ``moving_mask`` should have the same shape
        as ``moving_image``. If ``None``, ``reference_mask`` will be used.
    overlap_ratio : float, optional
        Minimum allowed overlap ratio between images. The correlation for
        translations corresponding with an overlap ratio lower than this
        threshold will be ignored. A lower `overlap_ratio` leads to smaller
        maximum translation, while a higher `overlap_ratio` leads to greater
        robustness against spurious matches due to small overlap between
        masked images. Used only if one of ``reference_mask`` or
        ``moving_mask`` is None.

    Returns
    -------
    shifts : ndarray
        Shift vector (in pixels) required to register ``moving_image``
        with ``reference_image``. Axis ordering is consistent with
        numpy (e.g. Z, Y, X)
    error : float
        Translation invariant normalized RMS error between
        ``reference_image`` and ``moving_image``.
    phasediff : float
        Global phase difference between the two images (should be
        zero if images are non-negative).

    References
    ----------
    .. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
           "Efficient subpixel image registration algorithms,"
           Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
    .. [2] James R. Fienup, "Invariant error metrics for image reconstruction"
           Applied Optics 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352`
    .. [3] Dirk Padfield. Masked Object Registration in the Fourier Domain.
           IEEE Transactions on Image Processing, vol. 21(5),
           pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
    .. [4] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
           Pattern Recognition, pp. 2918-2925 (2010).
           :DOI:`10.1109/CVPR.2010.5540032`
    """
    if (reference_mask is not None) or (moving_mask is not None):
        return _masked_phase_cross_correlation(reference_image, moving_image,
                                               reference_mask, moving_mask,
                                               overlap_ratio)

    # images must be the same shape
    if reference_image.shape != moving_image.shape:
        raise ValueError("images must be same shape")

    # assume complex data is already in Fourier space
    if space.lower() == 'fourier':
        src_freq = reference_image
        target_freq = moving_image
    # real data needs to be fft'd.
    elif space.lower() == 'real':
        src_freq = fft.fftn(reference_image)
        target_freq = fft.fftn(moving_image)
    else:
        raise ValueError('space argument must be "real" or "fourier"')

    # Whole-pixel shift - Compute cross-correlation by an IFFT
    shape = src_freq.shape
    image_product = src_freq * target_freq.conj()
    cross_correlation = fft.ifftn(image_product)

    # Locate maximum
    maxima = cp.unravel_index(cp.argmax(cp.abs(cross_correlation)),
                              cross_correlation.shape)
    midpoints = cp.asarray([np.fix(axis_size / 2) for axis_size in shape])

    float_dtype = image_product.real.dtype
    shifts = cp.stack([m.astype(float_dtype, copy=False) for m in maxima])
    shifts[shifts > midpoints] -= cp.asarray(shape)[shifts > midpoints]

    if upsample_factor == 1:
        if return_error:
            sabs = cp.abs(src_freq)
            sabs *= sabs
            tabs = cp.abs(target_freq)
            tabs *= tabs
            src_amp = cp.sum(sabs) / src_freq.size
            target_amp = cp.sum(tabs) / target_freq.size
            CCmax = cross_correlation[maxima]
    # If upsampling > 1, then refine estimate with matrix multiply DFT
    else:
        # Initial shift estimate in upsampled grid
        shifts = cp.around(shifts * upsample_factor) / upsample_factor
        upsampled_region_size = math.ceil(upsample_factor * 1.5)
        # Center of output array at dftshift + 1
        dftshift = np.fix(upsampled_region_size / 2.0)
        upsample_factor = float(upsample_factor)
        # Matrix multiply DFT around the current shift estimate
        sample_region_offset = dftshift - shifts * upsample_factor
        cross_correlation = _upsampled_dft(image_product.conj(),
                                           upsampled_region_size,
                                           upsample_factor,
                                           sample_region_offset).conj()

        # Locate maximum and map back to original pixel grid
        maxima = cp.unravel_index(cp.argmax(cp.abs(cross_correlation)),
                                  cross_correlation.shape)
        CCmax = cross_correlation[maxima]

        maxima = (cp.stack([m.astype(float_dtype, copy=False)
                            for m in maxima]) - dftshift)

        shifts = shifts + maxima / upsample_factor

        if return_error:
            src_amp = cp.abs(src_freq)
            src_amp *= src_amp
            src_amp = cp.sum(src_amp)
            target_amp = cp.abs(target_freq)
            target_amp *= target_amp
            target_amp = cp.sum(target_amp)

    # If there is only one row or column, the shift along that dimension has
    # no effect: set it to zero.
    for dim in range(src_freq.ndim):
        if shape[dim] == 1:
            shifts[dim] = 0

    if return_error:
        # Redirect user to masked_phase_cross_correlation if NaNs are observed
        if cp.isnan(CCmax) or cp.isnan(src_amp) or cp.isnan(target_amp):
            raise ValueError(
                "NaN values found, please remove NaNs from your "
                "input data or use the `reference_mask`/`moving_mask` "
                "keywords, eg: "
                "phase_cross_correlation(reference_image, moving_image, "
                "reference_mask=~np.isnan(reference_image), "
                "moving_mask=~np.isnan(moving_image))")

        return shifts, _compute_error(CCmax, src_amp, target_amp),\
            _compute_phasediff(CCmax)
    else:
        return shifts
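A minimal registration sketch (assuming ``import cupy as cp`` and the function above in scope):

ref = cp.random.rand(64, 64)
moving = cp.roll(ref, shift=(3, -2), axis=(0, 1))
shifts = phase_cross_correlation(ref, moving, return_error=False)
# shifts should be approximately (-3., 2.): applying them to `moving` undoes the roll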
Example #21
# fY11=reduce_y(flag=2)
z=r+jay*rx
yy=1/z
cp.fill_diagonal(yyfull, yy)
Y_dummy=cp.matmul(yyfull,c_line.T)
Y=cp.matmul(c_line,Y_dummy)+Y_2

cp.fill_diagonal(Y,Y.diagonal()+yl+Gb+jay*Bb)
Y[g_bus[:,None]-1,g_bus-1]=Y[g_bus[:,None]-1,g_bus-1]+permmod[:]
posfrecV1=-cp.linalg.solve(Y,Y_c)
temp=cp.matmul(Y_b,posfrecV1)
posfY11=Y_a+temp.T
#print(posfY11)
#print("Finish reduced Y, start simulation!")
# Start of simulation
t_step=cp.around((s1[1:]-s1[:-1])/s7[:-1])
t_width=(s1[1:]-s1[:-1])/t_step[:]
sim_k=int(t_step.sum())
sim_k=sim_k+1
#print(sim_k)
mac_ang,mac_spd,dmac_ang,dmac_spd,pelect=cp.zeros((5,sim_k,ngen),dtype=cp.float64)
eprime=cp.zeros((sim_k,ngen),dtype=cp.complex128)

theta=cp.radians(b_ang)
bus_volt=V*cp.exp(jay*theta)
mva=basmva/g_m
tst1=bus_int[g_bus-1].astype(cp.int_)  # cp.int is a removed alias of the builtin
eterm=V[tst1-1] # terminal bus voltage
pelect[0]=b_pg[tst1-1]     # BUS_pg
qelect=b_qg[tst1-1]     # BUS_qg
#compute the initial values for generator dynamics
Example #22
def predict(self, X):
    pY = self.forward(X)
    return cp.around(pY)  # binary classification with threshold of 0.5
Example #23
def _fft_convolve(a1, a2, mode):

    offset = 0
    if a1.size < a2.size:
        a1, a2 = a2, a1
        offset = 1 - a2.size % 2

    # if either of them is complex, the dtype after multiplication will also
    # be complex
    if a1.dtype.kind == 'c' or a2.dtype.kind == 'c':
        fft, ifft = cupy.fft.fft, cupy.fft.ifft
        is_c2c = True
    else:
        fft, ifft = cupy.fft.rfft, cupy.fft.irfft
        is_c2c = False

    # hack to work around NumPy/CuPy FFT dtype incompatibility:
    # CuPy internally converts fp16 to fp32 before doing FFT (whereas Numpy
    # converts both fp16 and fp32 to fp64), so here we do the cast early and
    # explicitly, and make sure a correct cuFFT plan can be generated. After
    # the fft-ifft round trip, we cast the output dtype to the correct one.
    out_dtype = cupy.result_type(a1, a2)
    dtype = _output_dtype(out_dtype, 'C2C' if is_c2c else 'R2C')
    a1 = a1.astype(dtype, copy=False)
    a2 = a2.astype(dtype, copy=False)

    n1, n2 = a1.size, a2.size
    out_size = cupyx.scipy.fft.next_fast_len(n1 + n2 - 1)
    # skip calling get_fft_plan() as we know the args exactly
    if is_c2c:
        fft_t = cufft.CUFFT_C2C if dtype == cupy.complex64 else cufft.CUFFT_Z2Z
        fft_plan = cufft.Plan1d(out_size, fft_t, 1)
        ifft_plan = fft_plan
    else:
        fft_t = cufft.CUFFT_R2C if dtype == cupy.float32 else cufft.CUFFT_D2Z
        fft_plan = cufft.Plan1d(out_size, fft_t, 1)
        # this is a no-op context manager
        # TODO(leofang): use contextlib.nullcontext() for PY37+?
        ifft_plan = contextlib.suppress()
    with fft_plan:
        fa1 = fft(a1, out_size)
        fa2 = fft(a2, out_size)
    with ifft_plan:
        out = ifft(fa1 * fa2, out_size)

    if mode == 'full':
        start, end = 0, n1 + n2 - 1
    elif mode == 'same':
        start = (n2 - 1) // 2 + offset
        end = start + n1
    elif mode == 'valid':
        start, end = n2 - 1, n1
    else:
        raise ValueError(
            'acceptable mode flags are `valid`, `same`, or `full`.')

    out = out[start:end]

    if out.dtype.kind in 'iu':
        out = cupy.around(out)

    return out.astype(out_dtype, copy=False)
Example #24
def equalize_adapthist(image, kernel_size=None, clip_limit=0.01, nbins=256):
    """Contrast Limited Adaptive Histogram Equalization (CLAHE).

    An algorithm for local contrast enhancement, that uses histograms computed
    over different tile regions of the image. Local details can therefore be
    enhanced even in regions that are darker or lighter than most of the image.

    Parameters
    ----------
    image : (N1, ...,NN[, C]) ndarray
        Input image.
    kernel_size: int or array_like, optional
        Defines the shape of contextual regions used in the algorithm. If
        iterable is passed, it must have the same number of elements as
        ``image.ndim`` (without color channel). If integer, it is broadcasted
        to each `image` dimension. By default, ``kernel_size`` is 1/8 of
        ``image`` height by 1/8 of its width.
    clip_limit : float, optional
        Clipping limit, normalized between 0 and 1 (higher values give more
        contrast).
    nbins : int, optional
        Number of gray bins for histogram ("data range").

    Returns
    -------
    out : (N1, ...,NN[, C]) ndarray
        Equalized image with float64 dtype.

    See Also
    --------
    equalize_hist, rescale_intensity

    Notes
    -----
    * For color images, the following steps are performed:
       - The image is converted to HSV color space
       - The CLAHE algorithm is run on the V (Value) channel
       - The image is converted back to RGB space and returned
    * For RGBA images, the original alpha channel is removed.

    .. versionchanged:: 0.17
        The values returned by this function are slightly shifted upwards
        because of an internal change in rounding behavior.

    References
    ----------
    .. [1] http://tog.acm.org/resources/GraphicsGems/
    .. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
    """

    image = img_as_uint(image)
    image = cp.around(
        rescale_intensity(image, out_range=(0, NR_OF_GRAY - 1))
    ).astype(cp.uint16)

    if kernel_size is None:
        kernel_size = tuple(
            [image.shape[dim] // 8 for dim in range(image.ndim)]
        )
    elif isinstance(kernel_size, numbers.Number):
        kernel_size = (kernel_size,) * image.ndim
    elif len(kernel_size) != image.ndim:
        raise ValueError(
            "Incorrect value of `kernel_size`: {}".format(kernel_size))

    kernel_size = [int(k) for k in kernel_size]

    image = _clahe(image, kernel_size, clip_limit, nbins)
    image = img_as_float(image)
    return rescale_intensity(image)
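A minimal usage sketch (assuming this CuPy port of ``skimage.exposure.equalize_adapthist`` and ``import cupy as cp``):

img = cp.random.rand(256, 256)                  # synthetic grayscale image in [0, 1]
out = equalize_adapthist(img, kernel_size=32, clip_limit=0.02)
assert out.shape == img.shape and float(out.max()) <= 1.0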
Example #25
def cross_correlate_masked(arr1,
                           arr2,
                           m1,
                           m2,
                           mode="full",
                           axes=(-2, -1),
                           overlap_ratio=0.3):
    """
    Masked normalized cross-correlation between arrays.

    Parameters
    ----------
    arr1 : ndarray
        First array.
    arr2 : ndarray
        Seconds array. The dimensions of `arr2` along axes that are not
        transformed should be equal to that of `arr1`.
    m1 : ndarray
        Mask of `arr1`. The mask should evaluate to `True`
        (or 1) on valid pixels. `m1` should have the same shape as `arr1`.
    m2 : ndarray
        Mask of `arr2`. The mask should evaluate to `True`
        (or 1) on valid pixels. `m2` should have the same shape as `arr2`.
    mode : {'full', 'same'}, optional
        'full':
            This returns the convolution at each point of overlap. At
            the end-points of the convolution, the signals do not overlap
            completely, and boundary effects may be seen.
        'same':
            The output is the same size as `arr1`, centered with respect
            to the 'full' output. Boundary effects are less prominent.
    axes : tuple of ints, optional
        Axes along which to compute the cross-correlation.
    overlap_ratio : float, optional
        Minimum allowed overlap ratio between images. The correlation for
        translations corresponding with an overlap ratio lower than this
        threshold will be ignored. A lower `overlap_ratio` leads to smaller
        maximum translation, while a higher `overlap_ratio` leads to greater
        robustness against spurious matches due to small overlap between
        masked images.

    Returns
    -------
    out : ndarray
        Masked normalized cross-correlation.

    Raises
    ------
    ValueError : if correlation `mode` is not valid, or array dimensions along
        non-transformation axes are not equal.

    References
    ----------
    .. [1] Dirk Padfield. Masked Object Registration in the Fourier Domain.
           IEEE Transactions on Image Processing, vol. 21(5),
           pp. 2706-2718 (2012). :DOI:`10.1109/TIP.2011.2181402`
    .. [2] D. Padfield. "Masked FFT registration". In Proc. Computer Vision and
           Pattern Recognition, pp. 2918-2925 (2010).
           :DOI:`10.1109/CVPR.2010.5540032`
    """
    if mode not in {"full", "same"}:
        raise ValueError("Correlation mode {} is not valid.".format(mode))

    if arr1.dtype.kind == "c" or arr2.dtype.kind == "c":
        raise ValueError("complex-valued arr1, arr2 are not supported")
    fixed_image = cp.asarray(arr1, dtype=np.float64)
    fixed_mask = cp.asarray(m1, dtype=np.bool_)
    moving_image = cp.asarray(arr2, dtype=np.float64)
    moving_mask = cp.asarray(m2, dtype=np.bool_)
    eps = np.finfo(np.float64).eps

    # Array dimensions along non-transformation axes should be equal.
    all_axes = set(range(fixed_image.ndim))
    for axis in all_axes - set(axes):
        if fixed_image.shape[axis] != moving_image.shape[axis]:
            raise ValueError(
                "Array shapes along non-transformation axes should be "
                "equal, but dimensions along axis {a} are not".format(a=axis))

    # Determine final size along transformation axes
    # Note that it might be faster to compute the Fourier transform in a
    # slightly larger shape (`fast_shape`). Then, after all Fourier transforms
    # are done, we slice back to `final_shape` using `final_slice`.
    final_shape = list(arr1.shape)
    for axis in axes:
        final_shape[axis] = (fixed_image.shape[axis] +
                             moving_image.shape[axis] - 1)
    final_shape = tuple(final_shape)
    final_slice = tuple([slice(0, int(sz)) for sz in final_shape])

    # Extend transform axes to the next fast length (i.e. a composite of the
    # small primes 2, 3, 5, and 7)
    fast_shape = tuple([next_fast_len(final_shape[ax]) for ax in axes])

    # We use numpy.fft or the new scipy.fft because they allow leaving the
    # transform axes unchanged which was not possible with scipy.fftpack's
    # fftn/ifftn in older versions of SciPy.
    # E.g. arr shape (2, 3, 7), transform along axes (0, 1) with shape (4, 4)
    # results in arr_fft shape (4, 4, 7)
    fft = partial(fftmodule.fftn, s=fast_shape, axes=axes)
    ifft = partial(fftmodule.ifftn, s=fast_shape, axes=axes)

    fixed_image[cp.logical_not(fixed_mask)] = 0.0
    moving_image[cp.logical_not(moving_mask)] = 0.0

    # N-dimensional analog to rotation by 180deg is flip over all relevant axes.
    # See [1] for discussion.
    rotated_moving_image = _flip(moving_image, axes=axes)
    rotated_moving_mask = _flip(moving_mask, axes=axes)

    fixed_fft = fft(fixed_image)
    rotated_moving_fft = fft(rotated_moving_image)
    fixed_mask_fft = fft(fixed_mask)
    rotated_moving_mask_fft = fft(rotated_moving_mask)

    # Calculate overlap of masks at every point in the convolution.
    # Locations with high overlap should not be taken into account.
    number_overlap_masked_px = cp.real(
        ifft(rotated_moving_mask_fft * fixed_mask_fft))
    number_overlap_masked_px[:] = cp.around(number_overlap_masked_px)
    number_overlap_masked_px[:] = cp.fmax(number_overlap_masked_px, eps)
    masked_correlated_fixed_fft = ifft(rotated_moving_mask_fft * fixed_fft)
    masked_correlated_rotated_moving_fft = ifft(fixed_mask_fft *
                                                rotated_moving_fft)

    numerator = ifft(rotated_moving_fft * fixed_fft)
    numerator -= (masked_correlated_fixed_fft *
                  masked_correlated_rotated_moving_fft /
                  number_overlap_masked_px)

    fixed_squared_fft = fft(cp.square(fixed_image))
    fixed_denom = ifft(rotated_moving_mask_fft * fixed_squared_fft)
    fixed_denom -= (cp.square(masked_correlated_fixed_fft) /
                    number_overlap_masked_px)
    fixed_denom[:] = cp.fmax(fixed_denom, 0.0)

    rotated_moving_squared_fft = fft(cp.square(rotated_moving_image))
    moving_denom = ifft(fixed_mask_fft * rotated_moving_squared_fft)
    moving_denom -= (cp.square(masked_correlated_rotated_moving_fft) /
                     number_overlap_masked_px)
    moving_denom[:] = cp.fmax(moving_denom, 0.0)

    denom = cp.sqrt(fixed_denom * moving_denom)

    # Slice back to expected convolution shape.
    numerator = numerator[final_slice]
    denom = denom[final_slice]
    number_overlap_masked_px = number_overlap_masked_px[final_slice]

    if mode == "same":
        _centering = partial(_centered, newshape=fixed_image.shape, axes=axes)
        denom = _centering(denom)
        numerator = _centering(numerator)
        number_overlap_masked_px = _centering(number_overlap_masked_px)

    # Pixels where `denom` is very small will introduce large
    # numbers after division. To get around this problem,
    # we zero-out problematic pixels.
    tol = 1e3 * eps * cp.max(cp.abs(denom), axis=axes, keepdims=True)
    nonzero_indices = denom > tol

    # TODO: grlee77: Added a cast to real here.
    #                probably it should be real earlier?
    numerator = numerator.real
    denom = denom.real
    out = cp.zeros_like(denom)
    out[nonzero_indices] = numerator[nonzero_indices] / denom[nonzero_indices]
    cp.clip(out, a_min=-1, a_max=1, out=out)

    # Apply overlap ratio threshold
    number_px_threshold = overlap_ratio * cp.max(
        number_overlap_masked_px, axis=axes, keepdims=True)
    out[number_overlap_masked_px < number_px_threshold] = 0.0

    return out
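A template-matching sketch (assuming ``import cupy as cp`` and the function above in scope): correlate a patch against a partially masked image and read off the peak location.

img = cp.random.rand(32, 32)
tmpl = img[8:24, 8:24]
m_img = cp.ones_like(img, dtype=bool)
m_img[:, :4] = False                            # pretend the left strip is invalid
m_tmpl = cp.ones_like(tmpl, dtype=bool)
xcorr = cross_correlate_masked(img, tmpl, m_img, m_tmpl, mode="full")
peak = cp.unravel_index(cp.argmax(xcorr), xcorr.shape)  # peak index encodes the offset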
Example #26
def convolve(
    in1,
    in2,
    mode="full",
    method="auto",
):
    """
    Convolve two N-dimensional arrays.

    Convolve `in1` and `in2`, with the output size determined by the
    `mode` argument.

    Parameters
    ----------
    in1 : array_like
        First input.
    in2 : array_like
        Second input. Should have the same number of dimensions as `in1`.
    mode : str {'full', 'valid', 'same'}, optional
        A string indicating the size of the output:

        ``full``
           The output is the full discrete linear convolution
           of the inputs. (Default)
        ``valid``
           The output consists only of those elements that do not
           rely on the zero-padding. In 'valid' mode, either `in1` or `in2`
           must be at least as large as the other in every dimension.
        ``same``
           The output is the same size as `in1`, centered
           with respect to the 'full' output.
    method : str {'auto', 'direct', 'fft'}, optional
        A string indicating which method to use to calculate the convolution.

        ``direct``
           The convolution is determined directly from sums, the definition of
           convolution.
        ``fft``
           The Fourier Transform is used to perform the convolution by calling
           `fftconvolve`.
        ``auto``
           Automatically chooses direct or Fourier method based on an estimate
           of which is faster (default).

    Returns
    -------
    convolve : array
        An N-dimensional array containing a subset of the discrete linear
        convolution of `in1` with `in2`.

    See Also
    --------
    choose_conv_method : chooses the fastest appropriate convolution method
    fftconvolve

    Notes
    -----
    By default, `convolve` and `correlate` use ``method='auto'``, which calls
    `choose_conv_method` to choose the fastest method using pre-computed
    values (`choose_conv_method` can also measure real-world timing with a
    keyword argument). Because `fftconvolve` relies on floating point numbers,
    there are certain constraints that may force `method=direct` (more detail
    in `choose_conv_method` docstring).

    Examples
    --------
    Smooth a square pulse using a Hann window:

    >>> import cusignal
    >>> import cupy as cp
    >>> sig = cp.repeat(cp.asarray([0., 1., 0.]), 100)
    >>> win = cusignal.hann(50)
    >>> filtered = cusignal.convolve(sig, win, mode='same') / cp.sum(win)

    >>> import matplotlib.pyplot as plt
    >>> fig, (ax_orig, ax_win, ax_filt) = plt.subplots(3, 1, sharex=True)
    >>> ax_orig.plot(cp.asnumpy(sig))
    >>> ax_orig.set_title('Original pulse')
    >>> ax_orig.margins(0, 0.1)
    >>> ax_win.plot(cp.asnumpy(win))
    >>> ax_win.set_title('Filter impulse response')
    >>> ax_win.margins(0, 0.1)
    >>> ax_filt.plot(cp.asnumpy(filtered))
    >>> ax_filt.set_title('Filtered signal')
    >>> ax_filt.margins(0, 0.1)
    >>> fig.tight_layout()
    >>> fig.show()

    """

    volume = cp.asarray(in1)
    kernel = cp.asarray(in2)

    if volume.ndim == kernel.ndim == 0:
        return volume * kernel
    elif volume.ndim != kernel.ndim:
        raise ValueError("in1 and in2 should have the same dimensionality")

    if _inputs_swap_needed(mode, volume.shape, kernel.shape):
        # Convolution is commutative
        # order doesn't have any effect on output
        volume, kernel = kernel, volume

    if method == "auto":
        method = choose_conv_method(volume, kernel, mode=mode)

    if method == "fft":
        out = fftconvolve(volume, kernel, mode=mode)
        result_type = cp.result_type(volume, kernel)
        if result_type.kind in {"u", "i"}:
            out = cp.around(out)
        return out.astype(result_type)
    elif method == "direct":
        if volume.ndim > 1:
            raise ValueError("Direct method is only implemented for 1D")

        swapped_inputs = (mode != "valid") and (kernel.size > volume.size)

        if swapped_inputs:
            volume, kernel = kernel, volume

        return _convolution_cuda._convolve(volume, kernel, True,
                                           swapped_inputs, mode)

    else:
        raise ValueError("Acceptable method flags are 'auto',"
                         " 'direct', or 'fft'.")
def concat_hists(weights1, bins1, weights2, bins2, bin_size, rd):
    min1, max1 = cp.around(bins1[0], rd), cp.around(bins1[-1], rd)
    min2, max2 = cp.around(bins2[0], rd), cp.around(bins2[-1], rd)
    mini, maxi = min(min1, min2), max(max1, max2)
    new_bins = cp.arange(
        mini, maxi + bin_size * 0.9,
        bin_size)  # * 0.9 to avoid unexpected random inclusion of last element

    def _pad_to_range(weights, lo, hi):
        # zero-pad a weight vector so that it spans the combined bin range
        left = int(cp.around((lo - mini) / bin_size))
        right = int(cp.around((maxi - hi) / bin_size))
        if left or right:
            return cp.pad(weights, (left, right), 'constant', constant_values=0)
        return weights

    ext1 = _pad_to_range(weights1, min1, max1)
    ext2 = _pad_to_range(weights2, min2, max2)
    return ext1 + ext2, new_bins
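To see how the histogram helpers compose, here is a minimal merging sketch (assuming ``import cupy as cp`` and `concat_hists` in scope): two histograms with 0.1-wide bins over different ranges are padded onto a common grid and summed.

w1, e1 = cp.histogram(cp.random.rand(100), cp.arange(0.0, 0.6, 0.1))
w2, e2 = cp.histogram(cp.random.rand(100) * 0.9 + 0.3, cp.arange(0.3, 1.4, 0.1))
# pass left bin edges, as _update_hist does with bins[:-1]
merged_w, merged_edges = concat_hists(w1, e1[:-1], w2, e2[:-1], bin_size=0.1, rd=1)
assert int(merged_w.sum()) == int(w1.sum() + w2.sum())  # no counts lost in the merge
Example #28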
def Newtons_method_feasible_init_point(f,
                                       A,
                                       x_0,
                                       tol,
                                       tol_backtracking,
                                       x_ast=None,
                                       p_ast=None,
                                       maxiter=30,
                                       gf_symbolic=None,
                                       Hf_symbolic=None,
                                       Sigma=None):
    '''
    Newton's method to numerically approximate solution of min f subject to Ax = b.
    IMPORTANT: this implementation requires that initial point x_0, satisfies: Ax_0 = b
    Args:
        f (fun): definition of function f as lambda expression or function definition.
        A (numpy ndarray): 2d numpy array of shape (m,n) defines system of constraints Ax=b.
        x_0 (numpy ndarray): initial point for Newton's method. Must satisfy: Ax_0 = b
        tol (float): tolerance that will halt method. Controls stopping criteria.
        tol_backtracking (float): tolerance that will halt method. Controls value of line search by backtracking.
        x_ast (numpy ndarray): solution of min f, now it's required that user knows the solution...
        p_ast (float): value of f(x_ast), now it's required that user knows the solution...
        maxiter (int): maximum number of iterations
        gf_symbolic (fun): definition of gradient of f. If given, no approximation is
                                     performed via finite differences.
        Hf_symbolic (fun): definition of Hessian of f. If given, no approximation is
                                     performed via finite differences.
    Returns:
        x (numpy ndarray): numpy array, approximation of x_ast.
        iteration (int): number of iterations.
        Err_plot (numpy ndarray): numpy array of absolute error between p_ast and f(x) with x approximation
                          of x_ast. Useful for plotting.
        x_plot (numpy ndarray): numpy array that contains vectors of approximations in its columns. Last column
                        contains x, approximation of solution. Useful for plotting.
    '''
    iteration = 0

    x = x_0

    feval = f(x)

    if gf_symbolic:
        gfeval = gf_symbolic(x, Sigma)
    else:
        gfeval = gradient_approximation(f, x)

    if Hf_symbolic:
        Hfeval = Hf_symbolic(x, Sigma)
    else:
        Hfeval = Hessian_approximation(f, x)

    normgf = np.linalg.norm(gfeval)
    condHf = solver.utils.condicion_cupy(Hfeval)

    Err_plot_aux = np.zeros(maxiter)
    Err_plot_aux[iteration] = solver.utils.compute_error(p_ast, feval)

    Err = solver.utils.compute_error(x_ast, x)

    if (A.ndim == 1):
        p = 1
        n = x.size
        zero_matrix = cp.zeros(p)
        first_stack = cp.column_stack((Hfeval, A.T))
        second_stack = cp.vstack(
            (A.reshape(1, n).T, zero_matrix)).reshape(1, n + 1)[0]
    else:
        p, n = A.shape
        zero_matrix = cp.zeros((p, p))
        first_stack = cp.column_stack((Hfeval, A.T))
        second_stack = cp.column_stack((A, zero_matrix))

    x_plot = cp.zeros((n, maxiter))
    x_plot[:, iteration] = x

    system_matrix = cp.vstack((first_stack, second_stack))
    zero_vector = cp.zeros(p)
    rhs = cp.vstack((gfeval.reshape(n, 1), zero_vector.reshape(p, 1))).T[0]

    #Newton's direction and Newton's decrement
    dir_desc = cp.linalg.solve(system_matrix, -rhs)
    dir_Newton = dir_desc[0:n]
    dec_Newton = -gfeval.dot(dir_Newton)
    w_dual_variable_estimation = dir_desc[n:(n + p)]

    print(
        'I\tNormgf \tNewton Decrement\tError x_ast\tError p_ast\tline search\tCondHf'
    )
    print('{}\t{}\t{}\t{}\t{}\t{}\t\t{}'.format(
        iteration, cp.around(normgf, 4), cp.around(dec_Newton, 4),
        cp.around(Err, 4), cp.around(Err_plot_aux[iteration], 4), "---",
        cp.around(condHf, 4)))
    stopping_criteria = dec_Newton / 2
    iteration += 1
    while (stopping_criteria > tol and iteration < maxiter):
        der_direct = -dec_Newton
        t = solver.line_search.line_search_by_backtracking(
            f, dir_Newton, x, der_direct)
        x = x + t * dir_Newton
        feval = f(x)

        if gf_symbolic:
            gfeval = gf_symbolic(x, Sigma)
        else:
            gfeval = gradient_approximation(f, x)

        if Hf_symbolic:
            Hfeval = Hf_symbolic(x, Sigma)
        else:
            Hfeval = Hessian_approximation(f, x)
        first_stack = cp.column_stack((Hfeval, A.T))  # same for 1-D and 2-D A

        system_matrix = cp.vstack((first_stack, second_stack))
        rhs = cp.vstack((gfeval.reshape(n, 1), zero_vector.reshape(p, 1))).T[0]
        #Newton's direction and Newton's decrement
        dir_desc = cp.linalg.solve(system_matrix, -rhs)
        dir_Newton = dir_desc[0:n]
        dec_Newton = -gfeval.dot(dir_Newton)
        w_dual_variable_estimation = dir_desc[n:(n + p)]

        Err_plot_aux[iteration] = solver.utils.compute_error(p_ast, feval)
        x_plot[:, iteration] = x
        Err = solver.utils.compute_error(x_ast, x)
        print('{}\t{}\t{}\t{}\t{}\t{}\t{}'.format(
            iteration, cp.around(normgf, 4), cp.around(dec_Newton, 4),
            cp.around(Err, 4), cp.around(Err_plot_aux[iteration], 4),
            cp.around(t, 4), cp.around(condHf, 4)))
        stopping_criteria = dec_Newton / 2
        if t < tol_backtracking:  #if t is less than tol_backtracking then we need to check the reason
            iter_salida = iteration
            iteration = maxiter - 1
        iteration += 1
    print('{} {}'.format("Error of x with respect to x_ast:", Err))
    print('{} {}'.format("Approximate solution:", x))
    cond = Err_plot_aux > np.finfo(float).eps * 10**(-2)
    Err_plot = Err_plot_aux[cond]

    if iteration == maxiter and t < tol_backtracking:
        print(
            "Backtracking value less than tol_backtracking, check approximation"
        )
        iteration = iter_salida
    else:
        if iteration == maxiter:
            print("Reached maximum of iterations, check approximation")
    x_plot = x_plot[:, :iteration]
    return [x, iteration, Err_plot, x_plot]
Example #29
def turtle():
    # make a copy for later comparisons
    global count_levels_combined
    count_levels_combined_copy = count_levels_combined.copy()


    level_target_arr = cp.random.randint(0, num_levels - 1, size=num_agents)
    level_self_arr = cp.around(agent_levels_list).astype(int)
    c_arr = cp.around(agent_classes_list).astype(int)

    level_target_arr_plus_one = level_target_arr + 1
    level_self_arr_plus_one = level_self_arr + 1


    s_target_arr = level_to_salary(level_target_arr_plus_one)
    s_self_arr = level_to_salary(level_self_arr_plus_one)

    # log utility functions without the third (competition) term
    # calculate after-tax salaries
    length_of_range = (num_levels + 1) / len(tax_rate)

    target_tax_bracket_arr = cp.floor(level_target_arr / length_of_range)
    self_tax_bracket_arr = cp.floor(level_self_arr / length_of_range)


    s_target_tax_rate_arr = cp.array(tax_rate)[target_tax_bracket_arr.astype(int)]
    s_self_tax_rate_arr = cp.array(tax_rate)[self_tax_bracket_arr.astype(int)]


    s_target_after_tax_arr = cp.multiply(s_target_arr, s_target_tax_rate_arr)
    s_self_after_tax_arr = cp.multiply(s_self_arr, s_self_tax_rate_arr)
    s_target_arr = s_target_after_tax_arr
    s_self_arr = s_self_after_tax_arr

    log_utility_payoff_target_alpha = cp.multiply(cp.asarray(agent_alpha_list), cp.log(s_target_arr))
    log_utility_payoff_target_beta = cp.multiply(cp.asarray(agent_beta_list), cp.power(cp.log(s_target_arr), 2))
    log_utility_payoff_target_a_b = cp.subtract(log_utility_payoff_target_alpha, log_utility_payoff_target_beta)

    log_utility_payoff_self_alpha = cp.multiply(cp.asarray(agent_alpha_list), cp.log(s_self_arr))
    log_utility_payoff_self_beta = cp.multiply(cp.asarray(agent_beta_list), cp.power(cp.log(s_self_arr), 2))
    log_utility_payoff_self_a_b = cp.subtract(log_utility_payoff_self_alpha, log_utility_payoff_self_beta)


    level_target_arr = cp.asnumpy(level_target_arr)
    level_self_arr = cp.asnumpy(level_self_arr)
    s_target_arr = cp.asnumpy(s_target_arr)
    s_self_arr = cp.asnumpy(s_self_arr)
    c_arr = cp.asnumpy(c_arr)
    log_utility_payoff_target_a_b = cp.asnumpy(log_utility_payoff_target_a_b)
    log_utility_payoff_self_a_b = cp.asnumpy(log_utility_payoff_self_a_b)

    for i in range(num_agents):
        #Tax
        # # pick a random level as target
        level_target = level_target_arr[i]
        # # find levels
        level_self = level_self_arr[i]

        # # calculate salaries
        s_target = s_target_arr[i]
        s_self = s_self_arr[i]


        c = c_arr[i]

        # Note: agents should make decisions one by one, not all at once as
        # previously programmed. Otherwise, the program will produce wrong
        # results. Thus, we use count_levels_combined_copy instead of
        # count_levels_combined.

        num_target = count_levels_combined_copy[level_target]
        num_self = count_levels_combined_copy[level_self]

        alpha, beta, gamma = agent_alpha_list[i], agent_beta_list[i], agent_gamma_list[i]

        # target utility & current utility
        if utility_function == 'log':
            payoff_target = log_utility_payoff_target_a_b[i]
            payoff_target -= gamma * math.log(num_target + 1.0 / num_agents)

            payoff_self = log_utility_payoff_self_a_b[i]
            payoff_self -= gamma * math.log(num_self + 1.0 / num_agents)
        else:
            payoff_target = alpha * math.sqrt(s_target)
            payoff_target -= beta * math.sqrt(s_target) ** 2
            payoff_target -= gamma * math.log(num_target + 1.0 / num_agents)

            payoff_self = alpha * math.sqrt(s_self)
            payoff_self -= beta * math.sqrt(s_self) ** 2
            payoff_self -= gamma * math.log(num_self + 1.0 / num_agents)

        if payoff_target > payoff_self:
            # move agent from self to target
            count_levels_list[level_self, c] -= 1
            count_levels_combined_copy[level_self] -= 1
            count_levels_list[level_target, c] += 1
            count_levels_combined_copy[level_target] += 1
            agent_levels_list[i] = level_target

    # calculate the least square difference of count change
    loss = np.sum((count_levels_combined_copy - count_levels_combined) ** 2)

    # update state variable(s)
    count_levels_combined = count_levels_combined_copy
    return loss