Example 1
    def explain(self, x, scaled=True):
        """
        Return explanation of the anomalies based on t-scores.
        """
        if cp.ndim(x) < 2:
            x = x.reshape(1, -1)
        ranked_feature_importance = cp.zeros([x.shape[1], 1])

        for feature in range(x.shape[1]):
            # find the projections that include feature j and those that do not
            index_selected_feature = cp.where(
                self.projections[:, feature] != 0)[0]
            index_not_selected_feature = cp.where(
                self.projections[:, feature] == 0)[0]
            scores_with_feature = self.instance_score(x,
                                                      index_selected_feature)
            scores_without_feature = self.instance_score(
                x, index_not_selected_feature)
            ranked_feature_importance[feature, 0] = self.t_test(
                scores_with_feature, scores_without_feature)

        if scaled:
            assert cp.max(ranked_feature_importance) != cp.min(
                ranked_feature_importance)
            normalized_score = (ranked_feature_importance - cp.min(
                ranked_feature_importance)) / (
                cp.max(ranked_feature_importance) - cp.min(
                    ranked_feature_importance))
            return normalized_score
        else:
            return ranked_feature_importance
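The assert above fails when every feature receives the same t-score. A minimal sketch of a guarded min-max scaling that degrades gracefully in that case (a hypothetical helper, not part of the original class):

import cupy as cp

def minmax_scale_sketch(scores):
    lo, hi = cp.min(scores), cp.max(scores)
    span = hi - lo
    # fall back to zeros when all t-scores are identical
    return (scores - lo) / span if span != 0 else cp.zeros_like(scores)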
Example 2
def optimize(formula: cupy.ndarray, maximum_number_of_generations: int,
             population_size: int, selection_ratio: float,
             mutation_probability: float) -> cupy.ndarray:
    """
    Finds the best solution to the input formula within the given number of generations.

    The optimizer stops either when it finds a perfect individual or when the number of
    generations reaches the maximum.

    :param formula: The formula to solve
    :param maximum_number_of_generations: The maximum number of generations before the optimizer stops
    :param population_size: The number of individuals in each generation
    :param selection_ratio: Between 0 and 1. The ratio of a generation that survives and is then bred
    :param mutation_probability: Between 0 and 1. The probability that an individual mutates
    :return: The best individual found
    """
    population = generate_random_first_generation(population_size,
                                                  formula.shape[1])
    population_fitness = evaluate_population(population, formula)
    best_fitness = cupy.max(population_fitness)
    best_individual = population[cupy.argmax(population_fitness)]
    logging.info(
        f'In the first generation the best fitness was {best_fitness * 100}%')

    if best_fitness == 1.:
        logging.info('Lucky! The first generation contains the solution')
        return best_individual

    generation = 0
    for generation in range(maximum_number_of_generations):
        breeding_population = select_individuals(population,
                                                 population_fitness,
                                                 selection_ratio)
        population = population_mutation(
            population_binary_crossover(breeding_population, population_size),
            mutation_probability)
        population_fitness = evaluate_population(population, formula)

        if cupy.max(population_fitness) > best_fitness:
            best_fitness = cupy.max(population_fitness)
            best_individual = population[cupy.argmax(population_fitness)]
            logging.info(
                f'In generation {generation + 1} a new best individual has been found, '
                f'and it satisfies {best_fitness * 100}% of the formula')

            if best_fitness == 1.:
                break
        else:
            if generation % 1000 == 0:
                logging.info(
                    f'After {generation + 1} generations, the best individual satisfies {best_fitness * 100}% '
                    f'of the formula')

    logging.info('Optimization ended.')
    logging.info(
        f'The best individual was found after {generation + 1} generations, '
        f'and it satisfies {best_fitness * 100}% of the formula')

    return best_individual
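The helpers used by optimize (generate_random_first_generation, evaluate_population, and so on) are not shown. A hedged sketch of one plausible evaluate_population, assuming each row of formula is a CNF clause with +1 for a variable, -1 for its negation, and 0 for absence; fitness is the fraction of satisfied clauses:

import cupy

def evaluate_population_sketch(population, formula):
    # population: (n_individuals, n_variables) array of 0/1 assignments
    signed = 2 * population.astype(cupy.int8) - 1  # True -> +1, False -> -1
    # a clause is satisfied when any of its literals matches the assignment
    satisfied = (formula[None, :, :] * signed[:, None, :] > 0).any(axis=2)
    return satisfied.astype(cupy.float32).mean(axis=1)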
Example 3
def mix(sound1, sound2, r, fs):
    gain1 = cupy.max(compute_gain(sound1.data, fs), axis=1)  # Decibel
    gain2 = cupy.max(compute_gain(sound2.data, fs), axis=1)
    t = 1.0 / (1 + cupy.power(10, (gain1 - gain2) / 20.) * (1 - r) / r)
    sound = ((sound1 * t[:, None] + sound2 * (1 - t[:, None])) /
             cupy.sqrt(t[:, None]**2 + (1 - t[:, None])**2))

    return sound
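A quick sanity check of the mixing weight (a sketch; assumes compute_gain returns per-example gains in decibels, as the comment above suggests): when the two gains are equal, the 10^((gain1-gain2)/20) factor is 1 and t reduces to r, so r directly sets the mixing ratio.

import cupy
r = cupy.asarray(0.3)
t = 1.0 / (1 + cupy.power(10, 0.0 / 20.0) * (1 - r) / r)  # gain1 == gain2
assert cupy.allclose(t, r)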
Example 4
    def get_evidmap(self):
        evidmap = cp.zeros(shape=(self.xPixelResol, self.yPixelResol))
        if cp.max(self.evidTime) - cp.min(self.evidTime) == 0:
            div = 1
        else:
            div = cp.max(self.evidTime) - cp.min(self.evidTime)
        evidmap = evidmap + (self.evidTime - cp.mean(self.evidTime)) / div * 255
        return evidmap
Example 5
def mmr(
    doc_embedding,
    word_embeddings,
    words,
    top_n=5,
    diversity=0.8,
):
    """
    Calculate Maximal Marginal Relevance (MMR)
    between candidate keywords and the document.
    MMR considers the similarity of keywords/keyphrases with the
    document, along with the similarity of already selected
    keywords and keyphrases. This yields keywords that are relevant to
    the document while remaining diverse among themselves.
    Arguments:
        doc_embedding: The document embeddings
        word_embeddings: The embeddings of the selected candidate keywords/phrases
        words: The selected candidate keywords/keyphrases
        top_n: The number of keywords/keyphrases to return
        diversity: How diverse the selected keywords/keyphrases are.
                   Values between 0 and 1 with 0 being not diverse at all
                   and 1 being most diverse.
    Returns:
         List[str]: The selected keywords/keyphrases
    """

    # Extract similarity within words, and between words and the document
    word_doc_similarity = 1 - pairwise_distances(
        word_embeddings, doc_embedding, metric="cosine")
    word_similarity = 1 - pairwise_distances(word_embeddings, metric="cosine")

    # Initialize candidates and choose the best keyword/keyphrase first
    keywords_idx = cp.argmax(word_doc_similarity)
    target = cp.take(keywords_idx, 0)
    candidates_idx = [i for i in range(len(words)) if i != target]
    for i in range(top_n - 1):
        candidate_similarities = word_doc_similarity[candidates_idx, :]
        if i == 0:
            first_row = cp.reshape(
                word_similarity[candidates_idx][:, keywords_idx],
                (word_similarity[candidates_idx][:, keywords_idx].shape[0], 1))
            target_similarities = cp.max(first_row, axis=1)
        else:
            target_similarities = cp.max(
                word_similarity[candidates_idx][:, keywords_idx], axis=1)
        # Calculate MMR
        mmr = (
            1 - diversity
        ) * candidate_similarities - diversity * target_similarities.reshape(
            -1, 1)

        mmr_idx = cp.take(cp.array(candidates_idx), cp.argmax(mmr))

        # Update keywords & candidates
        keywords_idx = cp.append(keywords_idx, mmr_idx)
        candidates_idx.remove(mmr_idx)

    return [words[idx] for idx in keywords_idx.get()]
Example 6
def softmax(x):
    if x.ndim == 2:
        x = x.T
        x = x - cp.max(x, axis=0)
        y = cp.exp(x) / cp.sum(cp.exp(x), axis=0)
        return y.T

    x = x - cp.max(x)
    return cp.exp(x) / cp.sum(cp.exp(x))
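A short usage sketch: subtracting the per-column maximum keeps exp() from overflowing on large logits without changing the result.

import cupy as cp
logits = cp.asarray([1000.0, 1001.0, 1002.0])  # a naive exp() would overflow
p = softmax(logits)
assert cp.allclose(cp.sum(p), 1.0)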
Example 7
def softmax(x):
    if x.ndim == 2:
        x = x.T
        x = x - np.max(x, axis=0)
        y = np.exp(x) / np.sum(np.exp(x), axis=0)
        return y.T

    x = x - np.max(x)  # guard against overflow
    return np.exp(x) / np.sum(np.exp(x))
Example 8
def softmax(x):
    if x.ndim == 2:
        x = x.T
        x = x - cp.max(x, axis=0)
        y = cp.exp(x, dtype=np.float32) / cp.sum(cp.exp(x, dtype=np.float32), axis=0, dtype=np.float32)
        return y.T 

    x = x - cp.max(x)  # guard against overflow
    return cp.exp(x) / cp.sum(cp.exp(x))
Example 9
def test_denoise_tv_chambolle_float_result_range():
    # astronaut image
    img = astro_gray
    int_astro = cp.multiply(img, 255).astype(np.uint8)
    assert cp.max(int_astro) > 1
    denoised_int_astro = restoration.denoise_tv_chambolle(int_astro, weight=0.1)
    # test if the value range of output float data is within [0.0:1.0]
    assert denoised_int_astro.dtype == np.float64
    assert cp.max(denoised_int_astro) <= 1.0
    assert cp.min(denoised_int_astro) >= 0.0
Example 10
def test_adapthist_constant():
    """Test constant image, float and uint"""
    img = cp.zeros((8, 8))
    img += 2
    img = img.astype(np.uint16)
    adapted = exposure.equalize_adapthist(img, 3)
    assert cp.min(adapted) == cp.max(adapted)

    img = cp.zeros((8, 8))
    img += 0.1
    img = img.astype(np.float64)
    adapted = exposure.equalize_adapthist(img, 3)
    assert cp.min(adapted) == cp.max(adapted)
Example 11
def logsumexp_cupy(value, axis=None, keepdims=False, gpu=0):
    import math
    import cupy as cp
    with cp.cuda.Device(gpu):
        if axis is not None:
            max_val = cp.max(value, axis=axis, keepdims=True)
            value0 = value - max_val
            if not keepdims:
                max_val = cp.squeeze(max_val, axis)
            return max_val + cp.log(cp.sum(cp.exp(value0), axis=axis, keepdims=keepdims))
        else:
            max_val = cp.max(value)
            sum_exp = cp.sum(cp.exp(value - max_val))
            return max_val + math.log(sum_exp)
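Usage sketch: the max-shift makes the reduction safe for large inputs, where the naive cp.log(cp.sum(cp.exp(x))) would overflow to inf.

import cupy as cp
x = cp.asarray([1000.0, 1000.0])
print(logsumexp_cupy(x))  # 1000 + log(2), computed without overflow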
Example 12
def delight_simple(image, dd, iterations=500):
    A = image[..., 3]
    u = cup.ones_like(image[..., 0])

    grads = cup.zeros((image.shape[0], image.shape[1], 2), dtype=cup.float32)
    grads[..., 0] = (cup.roll(image[..., 0], 1, axis=0) - image[..., 0]) * dd
    grads[..., 1] = (image[..., 0] - cup.roll(image[..., 0], 1, axis=1)) * dd
    # grads[..., 0] = (image[..., 0] - 0.5) * (dd)
    # grads[..., 1] = (image[..., 0] - 0.5) * (dd)
    for k in range(5, -1, -1):
        # multigrid
        k = 2**k
        print("grid step:", k)

        n = cup.roll(grads[..., 0], k, axis=1)
        n -= cup.roll(grads[..., 0], -k, axis=1)
        n += cup.roll(grads[..., 1], k, axis=0)
        n -= cup.roll(grads[..., 1], -k, axis=0)
        n *= 0.125 * image[..., 3]

        for ic in range(iterations):
            if ic % 100 == 0:
                print(ic)
            t = cup.roll(u, -k, axis=0)
            t += cup.roll(u, k, axis=0)
            t += cup.roll(u, -k, axis=1)
            t += cup.roll(u, k, axis=1)
            t *= 0.25

            # zero alpha = zero height
            u = t + n
            u = u * A + cup.max(u) * (1 - A)

    u = -u
    u -= cup.min(u)
    u /= cup.max(u)

    # u *= image[..., 3]

    # u -= cup.mean(u)
    # u /= max(abs(cup.min(u)), abs(cup.max(u)))
    # u *= 0.5
    # u += 0.5
    # u = 1.0 - u

    # return cup.dstack([(u - image[..., 0]) * 0.5 + 0.5, u, u, image[..., 3]])
    u = (image[..., 0] - u) * 0.5 + 0.5
    return cup.dstack([u, u, u, image[..., 3]])
Example 13
def clipped_eigh(a, clip_scale=1e-14):
    assert clip_scale >= 0
    w, v = cupy.linalg.eigh(a)
    #- clip eigenvalues relative to maximum eigenvalue
    #- TODO: assuming w is sorted, can skip cupy.max and use the appropriate index
    w = cupy.clip(w, a_min=clip_scale * cupy.max(w), a_max=None)
    return w, v
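Usage sketch: eigenvalues of a nearly singular symmetric matrix are floored at clip_scale times the largest eigenvalue, which keeps a later inversion well conditioned.

import cupy
a = cupy.eye(3)
a[2, 2] = 1e-20  # nearly singular
w, v = clipped_eigh(a)
assert float(cupy.min(w)) >= 1e-14 * float(cupy.max(w))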
Example 14
	def forward(self, bottom, top):
		self.label = cp.asarray(copy.deepcopy(bottom[1].data),cp.uint8)
		prob = cp.asarray(copy.deepcopy(bottom[0].data),cp.float64)
		prob = cp.subtract(prob,cp.max(prob,axis=1)[:,cp.newaxis,...])
		prob = cp.exp(prob)
		self.softmax = cp.divide(prob,cp.sum(prob,axis=1)[:,cp.newaxis,...])

		## mask
		self.weight_mask = cp.ones_like(self.label, cp.float64)
		for weight_id in self.weight_dic:
			self.weight_mask[self.label == weight_id] = self.weight_dic[weight_id]

		if self.has_ignore_label:
			self.weight_mask[self.label == self.ignore_label] = 0
		# self.weight_mask[self.label == 0] = 0.3
		# self.weight_mask[self.label == 1] = 0.25
		# self.weight_mask[self.label == 2] = 5
		# self.weight_mask[self.label == 4] = 2
		self.label[self.label == 3] = 2


		compute_count = self.weight_mask[self.weight_mask != 0].size

		## normalize mask
		self.weight_mask = cp.divide(self.weight_mask, cp.divide(cp.sum(self.weight_mask), compute_count))


		## compute loss
		prob_compute_matrix = copy.deepcopy(self.softmax[self.index_0,self.label,self.index_2,self.index_3])
		prob_compute_matrix[prob_compute_matrix < (1e-10)] = 1e-10

		loss = - cp.divide(cp.sum(cp.multiply(cp.log(prob_compute_matrix),self.weight_mask)),compute_count)

		loss = cp.asnumpy(loss)
		top[0].data[...] = loss
Example 15
    def test_forward4(self):
        shape = (10, 20, 30)
        axis = (0, 1)
        x = Variable(np.random.rand(*shape))
        y = F.max(x, axis=axis, keepdims=True)
        expected = np.max(x.data, axis=axis, keepdims=True)
        self.assertTrue(array_allclose(y.data, expected))
Example 16
def getMeWtW(W, U0, Nnearest=None):
    # this function computes correlation between templates at ALL timelags from each other
    # takes the max over timelags to obtain a similarity score
    # also returns lists of most similar templates to each template
    # takes as input the low-rank factorization of templates (W for time and U0
    # for space)

    # W is timesamples (default = 61 ), by number of templates, by rank (default = 3)
    nt0, Nfilt, Nrank = W.shape

    Params = [1, Nfilt, 0, 0, 0, 0, 0, 0, 0, nt0]

    # initialize correlation matrix for all timelags
    WtW = cp.zeros((Nfilt, Nfilt, 2 * nt0 - 1), dtype=np.float32, order='F')
    for i in range(Nrank):
        for j in range(Nrank):
            # the dot product factorizes into separable products for each spatio-temporal component
            utu0 = cp.dot(U0[:, :, i].T, U0[:, :, j])  # spatial products
            # temporal convolutions get multiplied with the spatial products
            wtw0 = mexWtW2(Params, W[:, :, i], W[:, :, j], utu0)
            # add it to the full correlation array
            WtW = WtW + wtw0

    # the maximum across timelags accounts for sample alignment mismatch
    cc = cp.max(WtW, axis=2)

    if Nnearest:
        isort = cp.argsort(cc, axis=0)[::-1]
        # if we don't have enough templates yet, just wrap the indices around the range 1:Nfilt
        iNear = cp.mod(cp.arange(Nnearest), Nfilt)
        iList = isort[iNear, :]  # return the list of pairs for each template
        return WtW, iList
    else:
        return WtW
Example 17
    def pool(self, x, filter_size, strides=2, type='MAX'):

        batches = x.shape[0]
        depth_i = x.shape[1]
        input_size = x.shape[2]  
        x_per_filter = filter_size * filter_size
        output_size = math.ceil((input_size - filter_size) / strides) + 1
        y_per_o_layer = output_size * output_size  
        x_vec = cp.zeros((batches, depth_i, y_per_o_layer, x_per_filter))
        padding_size = (output_size - 1) * strides + filter_size
        if padding_size == input_size:
            padding_x = x
        else:
            # pad with -inf so padded cells never win the max
            padding_x = cp.zeros((batches, depth_i, padding_size, padding_size)) - cp.inf
            padding_x[:, :, 0:input_size, 0:input_size] = x
        for j in range(y_per_o_layer):
            b = int(j / output_size) * strides
            c = (j % output_size) * strides
            # each window spans filter_size (not strides) samples per axis
            x_vec[:, :, j, 0:x_per_filter] = padding_x[
                :, :, b:b + filter_size, c:c + filter_size].reshape(
                batches, depth_i, x_per_filter)

        pooling = cp.max(x_vec, axis=3).reshape(batches, depth_i, output_size, output_size)
        pooling_idx = cp.eye(x_vec.shape[3], dtype=int)[x_vec.argmax(3)]


        return cp.asnumpy(pooling), cp.asnumpy(pooling_idx)
Example 18
def _regularize(ATNinv, regularize, weight_scale):
    fluxweight = ATNinv.sum(axis=-1)
    minweight = weight_scale * cp.max(fluxweight)
    ibad = fluxweight <= minweight
    lambda_squared = ibad * (minweight -
                             fluxweight) + ~ibad * regularize * regularize
    return lambda_squared
Example 19
def weight_sigmod(xy, xyi, ratio=0.05, s=20.0):
    """
    Description
    -----------
    (2D only)
    local weight/interpolate by sigmoid function (GREIT3D)

    Parameters
    ----------
    xy : NDArray
        (x, y) of values
    xyi : NDArray
        (xi, yi) of interpolated locations
    ratio : float
        R0 = d_max * ratio
    s : float
        control the decay ratio

    Returns
    -------
    w_mat : NDArray
        weighting matrix mapping from xy to xyi (xy meshgrid)
    """
    d_mat = _distance_matrix2d(cp.array(xy), cp.array(xyi))
    # normalize distance
    d_max = cp.max(cp.array(d_mat))
    d_mat = 5.0 * d_mat / d_max
    # desired radius (a ratio of max pairwise distance)
    r0 = 5.0 * ratio
    # weights from the sigmoid function
    weight = 1./(1 + cp.exp(s*(d_mat - r0)))
    # normalized
    w_mat = weight / cp.sum(weight, axis=0)
    return cp.asnumpy(w_mat)
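_distance_matrix2d is not shown; a minimal sketch of the pairwise-distance helper this function appears to assume (hypothetical, matching the axis=0 normalization above):

import cupy as cp

def _distance_matrix2d_sketch(xy, xyi):
    # pairwise Euclidean distances between rows of xy (N, 2) and xyi (M, 2)
    diff = xy[:, None, :] - xyi[None, :, :]
    return cp.sqrt(cp.sum(diff * diff, axis=2))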
Example 20
def train(X, W1, W2, W3, pad=0):
    # NOTE: P1, probs, sample, Ytr and depth2 are assumed to be defined
    # at module level in the original script.
    # Conv 1
    H1, cache1 = conv.conv_forward(X, W1, pad)

    # ReLu 1
    H1_relu = np.copy(H1)
    H1_relu[H1 < 0] = 0
    
    # cifar10.plotH(H1_relu[:,:,:4])

    # Pool
    for m in range(15):
        for n in range(15):
            x_slice = H1_relu[2*m:2*m+2, 2*n:2*n+2]
            P1[m, n] = np.max(x_slice, axis=(0, 1))

    # Conv 2
    H2, cache2 = conv.conv_forward(P1, W2, pad)

    # ReLu 2
    H2_relu = np.copy(H2)
    H2_relu[H2 < 0] = 0

    # cifar10.plotH(H2_relu[:,:,:4])

    # FC 1
    x = H2_relu.flatten()
    scores = x.dot(W3)

    # Softmax
    ex = np.exp(scores)
    probs[sample] = ex/np.sum(ex, keepdims=True)
    loss = -np.log(probs[sample, Ytr[sample]])
    dscores = np.copy(probs)
    dscores[sample, Ytr[sample]] -= 1

    # Backprop FC 1
    dW3 = np.dot(H2_relu.reshape(3042, 1), dscores[sample].reshape(1, 10))
    dH2 = np.dot(dscores[sample], W3.T).reshape(13, 13, depth2)

    # Backprop ReLu 2
    dH2[H2 <= 0] = 0

    # Backprop Conv 2
    dP1, dW2 = conv.conv_backward(dH2, cache2)

    # Backprop Pool
    dH1 = np.zeros(H1.shape)
    for m in range(15):
        for n in range(15):
            dH1[2*m:2*m+2, 2*n:2*n+2] = dP1[m, n]

    # Backprop ReLu 1
    dH1[H1 <= 0] = 0

    # Backprop Conv 1
    dX, dW1 = conv.conv_backward(dH1, cache1)

    return loss, dW1, dW2, dW3
Example 21
    def transform(self, X):
        """[summary].

        Args:
            X (cupy.ndarray): [description].
        Returns:
            cupy.ndarray: [description].
        """
        check_is_fitted(self, "class_means_")
        # TODO(smly):
        # X = column_or_1d(X, warn=True)

        # Label encoding if necessary
        if self._label_encoding_uniques is not None:
            X = self._label_encoding_uniques.get_indexer(X.to_pandas())
        X = cupy.asarray(X)

        missing_mask = cupy.isnan(X)
        encode_mask = cupy.invert(missing_mask)
        unseen_mask = cupy.bitwise_xor(
            cupy.isin(X, self.classes_, invert=True), missing_mask)

        X = X.copy()
        X[unseen_mask] = cupy.max(self.classes_)

        indices = _get_index_cupy(self.classes_, X[encode_mask])

        _classes_index_list = cupy.searchsorted(self.lut_[:, 0], self.classes_)
        encoded_values = cupy.zeros(X.shape[0], dtype=cupy.float32)
        encoded_values[encode_mask] = cupy.take(
            self.lut_[:, 1], cupy.take(_classes_index_list, indices))

        encoded_values[unseen_mask] = self.default_unseen_
        return encoded_values
Example 22
    def __init__(self, in_values, out_values):
        self.in_values = in_values
        self.out_values = out_values
        self._max_str_lines = 4
        self._array = None
        # cache max value to avoid repeated device->host transfer
        self._max_label = int(cp.max(self.in_values))
Example 23
def test_multichannel():
    """Test that computing multichannel properties works."""
    astro = data.astronaut()[::4, ::4]
    labels = slic(astro.astype(float), start_label=1)

    astro = cp.asarray(astro)
    astro_green = astro[..., 1]
    labels = cp.asarray(labels)

    segment_idx = int(cp.max(labels) // 2)
    region = regionprops(labels, astro_green)[segment_idx]
    region_multi = regionprops(labels, astro)[segment_idx]
    for prop in PROPS:
        p = region[prop]
        p_multi = region_multi[prop]
        if isinstance(p, (list, tuple)):
            p = tuple([cp.asnumpy(p_) for p_ in p])
            p = np.stack(p)
        if isinstance(p_multi, (list, tuple)):
            p_multi = tuple([cp.asnumpy(p_) for p_ in p_multi])
            p_multi = np.stack(p_multi)
        if np.shape(p) == np.shape(p_multi):
            # property does not depend on multiple channels
            assert_array_equal(p, p_multi)
        else:
            # property uses multiple channels, returns props stacked along
            # final axis
            assert_array_equal(p, p_multi[..., 1])
Example 24
def curvature_to_height(image, h2, iterations=2000):
    f = image[..., 0]
    A = image[..., 3]
    u = cup.ones_like(f) * 0.5

    k = 1
    t = np.empty_like(u, dtype=np.float32)

    # periodic Gauss-Seidel iteration
    for ic in range(iterations):
        if ic % 100 == 0:
            print(ic)

        # roll k, axis=0
        t[:-k, :] = u[k:, :]
        t[-k:, :] = u[:k, :]
        # roll -k, axis=0
        t[k:, :] += u[:-k, :]
        t[:k, :] += u[-k:, :]
        # roll k, axis=1
        t[:, :-k] += u[:, k:]
        t[:, -k:] += u[:, :k]
        # roll -k, axis=1
        t[:, k:] += u[:, :-k]
        t[:, :k] += u[:, -k:]

        t -= h2 * f
        t *= 0.25
        u = t * A

    u = -u
    u -= cup.min(u)
    u /= cup.max(u)

    return cup.dstack([u, u, u, image[..., 3]])
Example 25
 def forward(self, x):
     """Foward implementation of pooling using stride tricks
     Args:
         x (np.array): input values (m, n_x, n_y, n_c)
     Returns:
         np.array: output_values
     """
     if self.dim_in != x.shape:
         self.dim_in = x.shape
         self.dX = np.zeros(self.dim_in)
     self.X = x
     n_h = self.dim_out[1]
     n_w = self.dim_out[2]
     shape = (
         self.X.shape[0],  # m
         n_h,
         n_w,
         self.f,
         self.f,
         self.X.shape[-1])  # n_c
     strides = (self.X.strides[0], self.X.strides[1] * self.stride,
                self.X.strides[2] * self.stride, self.X.strides[1],
                self.X.strides[2], self.X.strides[3])
     M = np.lib.stride_tricks.as_strided(self.X,
                                         shape=shape,
                                         strides=strides)
     Z = np.max(M, axis=(-3, -2))
     return Z
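A standalone sketch of the same stride trick outside the class (hypothetical helper; shapes follow the (m, n_h, n_w, n_c) layout documented above):

import numpy as np

def max_pool_sketch(x, f=2, stride=2):
    m, h, w, c = x.shape
    out_h, out_w = (h - f) // stride + 1, (w - f) // stride + 1
    s0, s1, s2, s3 = x.strides
    # view every f-by-f window without copying
    windows = np.lib.stride_tricks.as_strided(
        x, shape=(m, out_h, out_w, f, f, c),
        strides=(s0, s1 * stride, s2 * stride, s1, s2, s3))
    return windows.max(axis=(3, 4))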
Example 26
 def backward_naive(self, dA):
     """Implementation of backward pooling.
     Args:
         dA (np.array): derivative of output values
     Returns:
         np.array: derivative of input values
     """
     if len(dA.shape) == 2:
         dA = dA.reshape(self.dim_out)
     self.dX[:, :, :, :] = 0
     m, n_h, n_w, n_c = self.dim_out
     for i in range(m):
         for h in range(n_h):
             v_s = h * self.stride
             v_e = h * self.stride + self.f
             for w in range(n_w):
                 h_s = w * self.stride
                 h_e = w * self.stride + self.f
                 for c in range(n_c):
                     if self.mode == "max":
                         mask = np.max(self.X[i, v_s:v_e, h_s:h_e,
                                              c]) == self.X[i, v_s:v_e,
                                                            h_s:h_e, c]
                         self.dX[i, v_s:v_e, h_s:h_e, c] += mask * \
                             dA[i, h, w, c]
                     elif self.mode == "average":
                         da = dA[i, h, w, c]
                         self.dX[i, v_s:v_e, h_s:h_e, c] += np.ones(
                             (self.f, self.f)) * da / self.f**2
     return self.dX
Example 27
 def backward(self, dA):
     """Implementation of backward pooling using stride tricks.
     Args:
         dA (np.array): derivative of output values
     Returns:
         np.array: derivative of input values
     """
     if len(dA.shape) == 2:
         dA = dA.reshape(dA.shape[1], *self.dim_out[1:])
     self.dX[:, :, :, :] = 0
     n_h = self.dim_out[1]
     n_w = self.dim_out[2]
     shape = (
         self.X.shape[0],  # m
         n_h,
         n_w,
         self.f,
         self.f,
         self.X.shape[-1])  # n_c
     strides = (self.X.strides[0], self.X.strides[1] * self.stride,
                self.X.strides[2] * self.stride, self.X.strides[1],
                self.X.strides[2], self.X.strides[3])
     M = np.lib.stride_tricks.as_strided(
         self.X, shape=shape, strides=strides)  # , writeable=False)
     # dangerous: this view writes into self.dX's memory; don't mess up the strides!
     M_dX = np.lib.stride_tricks.as_strided(
         self.dX, shape=shape, strides=strides)  # , writeable=True)
     mask = np.max(M, axis=(-3, -2), keepdims=True) == M
     M_dX += np.multiply(mask, dA[:, :, :, None, None])
     return self.dX
Example 28
    def __setitem__(self, indices, values):
        if self._array is None:
            self._array = self._ascupy()
        self._array[indices] = values
        self.in_values = cp.flatnonzero(self._array)
        self._max_label = int(cp.max(self.in_values))
        self.out_values = self._array[self.in_values]
Example 29
def show_qq_plot(df, x_axis, y_axis):

    x_values = cupy.fromDlpack(df[x_axis].to_dlpack())
    y_values = cupy.fromDlpack(df[y_axis].to_dlpack())

    x_max = cupy.max(x_values).tolist()
    y_max = cupy.max(y_values).tolist()

    qq_fig = figure(x_range=(0, x_max), y_range=(0, y_max))
    qq_fig.circle(-cupy.log10(x_values + 1e-10).get(),
                  -cupy.log10(y_values).get())
    qq_fig.line([0, x_max], [0, y_max])

    qq_handle = show(qq_fig, notebook_handle=True)
    push_notebook(handle=qq_handle)
    return qq_fig
Example 30
def norm(tensor, order=2, axis=None):
    """Computes the l-`order` norm of tensor

    Parameters
    ----------
    tensor : ndarray
    order : int
    axis : int or tuple

    Returns
    -------
    float or tensor
        If `axis` is provided returns a tensor.
    """
    # handle difference in default axis notation
    if axis == ():
        axis = None

    if order == 'inf':
        res = cp.max(cp.abs(tensor), axis=axis)
    elif order == 1:
        res = cp.sum(cp.abs(tensor), axis=axis)
    elif order == 2:
        res = cp.sqrt(cp.sum(tensor**2, axis=axis))
    else:
        res = cp.sum(cp.abs(tensor)**order, axis=axis)**(1/order)

    if res.shape == ():
        return to_numpy(res)
    return res
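Usage sketch (per-axis calls, so the module's to_numpy helper for 0-d results is not exercised):

import cupy as cp
t = cp.asarray([[3.0, -4.0], [0.0, 12.0]])
print(norm(t, axis=1))               # l2 per row: [ 5. 12.]
print(norm(t, order=1, axis=1))      # l1 per row: [ 7. 12.]
print(norm(t, order='inf', axis=1))  # max |t| per row: [ 4. 12.]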
Example 31
def rand_noise_write(_filename: str, _seconds: int, _framerate: int = 44100) -> None:
    """Generate a 16-bit mono WAV file containing random noise & write it to a file"""
    _data = xp.random.uniform(-1, 1, _framerate * _seconds)
    _scaled = xp.int16(_data / xp.max(xp.abs(_data)) * 32767)
    _wav_data: bytes = bytes(_scaled)
    with wave.open(_filename, mode=r'wb') as _file:
        _file.setparams((1, 2, _framerate, _scaled.shape[0], r'NONE', r'not compressed'))  # pylint: disable=E1101,E1136
        _file.writeframes(_wav_data)  # pylint: disable=E1101
Example 32
def bincount(x, weights=None, minlength=None):
    """Count number of occurrences of each value in array of non-negative ints.

    Args:
        x (cupy.ndarray): Input array.
        weights (cupy.ndarray): Weights array which has the same shape as
            ``x``.
        minlength (int): A minimum number of bins for the output array.

    Returns:
        cupy.ndarray: The result of binning the input array. The length of
            output is equal to ``max(cupy.max(x) + 1, minlength)``.

    .. seealso:: :func:`numpy.bincount`

    """
    if x.ndim > 1:
        raise ValueError('object too deep for desired array')
    if x.ndim < 1:
        raise ValueError('object of too small depth for desired array')
    if x.dtype.kind == 'f':
        raise TypeError('x must be int array')
    if (x < 0).any():
        raise ValueError('The first argument of bincount must be non-negative')
    if weights is not None and x.shape != weights.shape:
        raise ValueError('The weights and list don\'t have the same length.')
    if minlength is not None:
        minlength = int(minlength)
        if minlength <= 0:
            raise ValueError('minlength must be positive')

    size = int(cupy.max(x)) + 1
    if minlength is not None:
        size = max(size, minlength)

    if weights is None:
        # atomicAdd for int64 is not provided
        b = cupy.zeros((size,), dtype=cupy.int32)
        cupy.ElementwiseKernel(
            'S x', 'raw U bin',
            'atomicAdd(&bin[x], 1)',
            'bincount_kernel'
        )(x, b)
        b = b.astype(numpy.intp)
    else:
        # atomicAdd for float64 is not provided
        b = cupy.zeros((size,), dtype=cupy.float32)
        cupy.ElementwiseKernel(
            'S x, T w', 'raw U bin',
            'atomicAdd(&bin[x], w)',
            'bincount_with_weight_kernel'
        )(x, weights, b)
        b = b.astype(cupy.float64)

    return b
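Usage sketch mirroring numpy.bincount semantics:

import cupy
x = cupy.asarray([0, 1, 1, 3])
print(bincount(x))               # [1 2 0 1]
print(bincount(x, minlength=6))  # [1 2 0 1 0 0]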
Example 33
def rand_noise_wav(_seconds: int, _framerate: int = 44100) -> dict:
    """Generate a 16-bit mono WAV file containing random noise & return the data"""
    _data = xp.random.uniform(-1, 1, _framerate * _seconds)
    _scaled = xp.int16(_data / xp.max(xp.abs(_data)) * 32767)
    _out: dict = {
        r'num_frames': _scaled.shape[0],  # pylint: disable=E1136
        r'frame_rate': _framerate,
        r'num_channels': 1,
        r'sample_width': 2,
        r'data': bytes(_scaled)
    }
    return _out
Example 34
    def forward(self, inputs):
        e1 = array.as_mat(inputs[0])
        #print 'e1.shape',
        #print e1.shape
        e2 = array.as_mat(inputs[1])
        #print 'e2.shape',
        #print e2.shape
        W = inputs[2]
        #print 'W.shape',
        #print W.shape

        #modified algorithm
        y = e1 + e2 - e2.sum(1).reshape(len(e2), 1) / len(e2[0])
        #print 'y.dtype',
        #print y.dtype
        print('cupy.max(e1) =', cupy.max(e1))
        print('cupy.min(e1) =', cupy.min(e1))
        print('cupy.max(e2) =', cupy.max(e2))
        print('cupy.min(e2) =', cupy.min(e2))
        #sum_e1e2.astype(dtype=e1.dtype, copy=False)
        
        #print 'y.shape',  
        #print y.shape
        #print 'e2.sum(1).reshape(len(e2), 1).shape',
        #print e2.sum(1).reshape(len(e2), 1).shape

        
        '''
        xp = cuda.get_array_module(*inputs)
        if xp is numpy:
            y = numpy.einsum('ij,ik,jkl->il', e1, e2, W)
        else:
            i_len, j_len = e1.shape
            k_len = e2.shape[1]
            # 'ij,ik->ijk'
            e1e2 = e1[:, :, None] * e2[:, None, :]
            # ijk->i[jk]
            e1e2 = e1e2.reshape(i_len, j_len * k_len)
            # jkl->[jk]l
            W_mat = W.reshape(-1, W.shape[2])
            # 'i[jk],[jk]l->il'
            y = e1e2.dot(W_mat)
       
        if len(inputs) == 6:
            V1, V2, b = inputs[3:]
            y += e1.dot(V1)
            y += e2.dot(V2)
            y += b
        '''
        #print 'y.shape',
        #print y.shape
        return y,
Example 35
	def forward(self, state, action, reward, new_state, is_terminal):
		q = self.get_q(Variable(state))
		q_target = self.get_target_q(Variable(new_state))

		max_target_q = cp.max(q_target.data, axis=1)

		target = cp.copy(q.data)

		for i in range(target.shape[0]):
			curr_action = int(action[i])
			if is_terminal[i]:
				target[i, curr_action] = reward[i]
			else:
				target[i, curr_action] = reward[i] + self.gamma * max_target_q[i]
		
		loss = F.mean_squared_error(Variable(target), q)
		return loss, 0.0 #cp.mean(q.data[:, action[i]])
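The per-row loop above can be vectorized with fancy indexing; a sketch with stand-in arrays (hypothetical shapes, assuming action, reward and is_terminal are already cupy arrays):

import cupy as cp
gamma = 0.99
q = cp.zeros((4, 3))  # stand-in for q.data
max_target_q = cp.asarray([1.0, 2.0, 3.0, 4.0])
action = cp.asarray([0, 2, 1, 0])
reward = cp.asarray([0.0, 1.0, 0.0, 1.0])
is_terminal = cp.asarray([0.0, 0.0, 1.0, 0.0])
target = q.copy()
rows = cp.arange(target.shape[0])
target[rows, action] = reward + gamma * max_target_q * (1 - is_terminal)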
Example 36
    def backward(self, inputs, grad_outputs):
        e1 = array.as_mat(inputs[0])
        e2 = array.as_mat(inputs[1])
        W = inputs[2]
        gy = grad_outputs[0]
        print('cupy.max(gy) =', cupy.max(gy))
        print('cupy.min(gy) =', cupy.min(gy))
        #print 'backward'
        #print 'gy.shape',
        #print gy.shape
        '''
        xp = cuda.get_array_module(*inputs)
        if xp is numpy:
            gW = numpy.einsum('ij,ik,il->jkl', e1, e2, gy)
            ge1 = numpy.einsum('ik,jkl,il->ij', e2, W, gy)
            ge2 = numpy.einsum('ij,jkl,il->ik', e1, W, gy)
        else:
            kern = cuda.reduce('T in0, T in1, T in2', 'T out',
                               'in0 * in1 * in2', 'a + b', 'out = a', 0,
                               'bilinear_product')

            e1_b = e1[:, :, None, None]  # ij
            e2_b = e2[:, None, :, None]  # ik
            gy_b = gy[:, None, None, :]  # il
            W_b = W[None, :, :, :]  # jkl

            gW = kern(e1_b, e2_b, gy_b, axis=0)  # 'ij,ik,il->jkl'
            ge1 = kern(e2_b, W_b, gy_b, axis=(2, 3))  # 'ik,jkl,il->ij'
            ge2 = kern(e1_b, W_b, gy_b, axis=(1, 3))  # 'ij,jkl,il->ik'
        '''
        #ge1_ext = e1*gy.astype(dtype=gy.dtype, copy=False) #Hadamard product
        #print 'ge1_ext.shape',
        #print ge1_ext.shape
        #ge1 = cupy.sum(ge1_ext, axis=1).astype(dtype=gy.dtype, copy=False)
        #print 'ge1.shape',
        #print ge1.shape

        ge1 = cupy.sum(gy, axis=1).reshape(len(gy), 1).astype(dtype=gy.dtype, copy=False)
        print('cupy.max(ge1) =', cupy.max(ge1))
        print('cupy.min(ge1) =', cupy.min(ge1))

        gy_sum = cupy.sum(gy, axis=1).reshape(len(gy), 1).astype(dtype=gy.dtype, copy=False)
        #print 'gy_sum.shape',
        #print gy_sum.shape
        gy_tile = cupy.tile(gy_sum, len(gy[0])).astype(dtype=gy.dtype, copy=False)
        #print 'gy_tile.shape',
        #print gy_tile.shape
        #print 'gy.shape',
        #print gy.shape
        #print 'gy_tile.shape',
        #print gy_tile.shape
        #print 'gy_tile / len(gy[0]).dtype',
        #print (gy_tile / len(gy[0])).dtype
        #ge_tmp1 = gy_tile / len(gy[0])
        #ge_tmp2 = gy - gy_tile

        ge2 = (gy - gy_tile / len(gy[0])).astype(dtype=gy.dtype, copy=False)
        #print 'ge2.shape',
        #print ge2.shape
        print('cupy.max(ge2) =', cupy.max(ge2))
        print('cupy.min(ge2) =', cupy.min(ge2))
  
        gW = cupy.zeros(len(e1[0])*len(e2[0])*len(e2[0])).reshape(len(e1[0]), len(e2[0]), len(e2[0])).astype(dtype=gy.dtype, copy=False)
        #print 'gW.shape',
        #print gW.shape

        ret = ge1.reshape(inputs[0].shape), ge2.reshape(inputs[1].shape), gW
        if len(inputs) == 6:
            V1, V2, b = inputs[3:]
            gV1 = e1.T.dot(gy)
            gV2 = e2.T.dot(gy)
            gb = gy.sum(0)
            ge1 += gy.dot(V1.T)
            ge2 += gy.dot(V2.T)
            ret += gV1, gV2, gb
        #print 'len(ret)',
        #print len(ret)
        #print 'ret[0].shape',
        #print ret[0].shape
        #print 'ret[1].shape',
        #print ret[1].shape
        #print 'ret[2].shape',
        #print ret[2].shape

        return ret
Example 37
import cProfile
import io
import pstats
import skimage.io
import skimage.color
import cupy
import numpy

if __name__ == '__main__':
    pr = cProfile.Profile()
    filename = '/home/dzhoshkun/data/sample-images/1920x1080.jpg'
    img_orig = skimage.io.imread(filename)
    img_lab = skimage.color.rgb2lab(img_orig)
    d_img_a = cupy.array(img_lab[:, :, 1])
    pr.enable()
    img_a_idx = numpy.argsort(img_lab[:, :, 1].flatten())
    d_img_a_idx = cupy.argsort(d_img_a.flatten())
    pr.disable()
    s = io.StringIO()
    sortby = 'tottime'
    ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
    ps.print_stats()
    print(s.getvalue())
    print(cupy.max(d_img_a_idx),
          img_orig.shape[0] * img_orig.shape[1])
    print(d_img_a_idx.shape)