Example #1
def softmax_loss_vectorized(theta, X, y, reg):
    """
  Softmax loss function, vectorized version.
  Inputs and outputs are the same as softmax_loss_naive.
  """
  # Initialize the loss and gradient to zero.

    J = 0.0
    grad = np.zeros_like(theta)
    m, dim = X.shape

    #############################################################################
    # TODO: Compute the softmax loss and its gradient using no explicit loops.  #
    # Store the loss in J and the gradient in grad. If you are not careful      #
    # here, it is easy to run into numeric instability. Don't forget the        #
    # regularization term!                                                      #
    #############################################################################
    
    preds_col = (X@theta).T
    exps_adj = np.exp(preds_col - np.amax(preds_col, axis=0))

    ind = np.zeros_like(preds_col)
    np.put_along_axis(ind, np.atleast_2d(y), 1, axis=0)

    J = -1/m * np.sum(np.log(np.take_along_axis(exps_adj, np.atleast_2d(y), axis=0)/np.sum(exps_adj, axis=0)))
    J += reg / (2 * m) * np.sum(theta ** 2)  # regularization term, matching the reg/m * theta gradient below

    grad = -1/m * ((ind - exps_adj/np.sum(exps_adj, axis=0)) @ X).T
    grad += reg/m * theta

    #############################################################################
    #                          END OF YOUR CODE                                 #
    #############################################################################

    return J, grad
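A quick smoke test for the function above (a minimal sketch; the array shapes are illustrative and softmax_loss_vectorized is assumed to be in scope):

import numpy as np

m, dim, k = 5, 4, 3
rng = np.random.default_rng(0)
X = rng.standard_normal((m, dim))
y = rng.integers(0, k, size=m)
theta = 0.01 * rng.standard_normal((dim, k))
J, grad = softmax_loss_vectorized(theta, X, y, reg=0.1)
assert grad.shape == theta.shape  # the gradient matches the parameter shape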
Example #2
def optimize_T_fast(transition_matrix, values, eps_T, num_states, num_actions,
                    xv, yv):
    T = transition_matrix.copy()
    T_prev = np.ones_like(T)
    diff = np.zeros([num_states, num_actions])

    iters = 0
    opt = np.argmax(values)

    while np.any(np.abs(T - T_prev) > FLOAT_TOLERANCE):
        T_prev = T.copy()

        worst = np.argmin(np.where(T > FLOAT_TOLERANCE,
                                   np.reshape(values, [1, 1, -1]), np.inf),
                          axis=2)
        worst_i = (yv, xv, worst)

        change = np.where(T[worst_i] < (eps_T - diff) / 2, T[worst_i],
                          (eps_T - diff) / 2)
        T[:, :, opt] += change
        # np.put(T, worst_i, T[worst_i] - change)
        np.put_along_axis(T,
                          worst.reshape([num_states, num_actions, 1]),
                          (T[worst_i] - change).reshape(
                              [num_states, num_actions, 1]),
                          axis=2)

        diff = np.linalg.norm(T - transition_matrix, 1, axis=2)

        iters += 1

    return T
Example #3
def error_ambiguity(i, ensemble_proba, target):
    '''
    Compute the error for the individual classifier according to the ambiguity decomposition. I am fairly sure that this implementation is correct; however, the paper is not super clear on what they do from an algorithmic point of view. From what I can tell, the authors compute the ambiguity scores for each classifier only once and then "greedily" pick the best K models.

    Note: The paper only considers binary classification problems and specifically focuses on the logistic loss function. Luckily, Hastie et al. proposed a multi-class boosting algorithm which uses a multi-class variation of the (binary) logistic loss. Both loss functions are equal for 2 classes, and thus we implement the multi-class version here. For more details see the references.

    Reference:
        Jiang, Z., Liu, H., Fu, B., & Wu, Z. (2017). Generalized ambiguity decompositions for classification with applications in active learning and unsupervised ensemble pruning. 31st AAAI Conference on Artificial Intelligence, AAAI 2017, 2073–2079.
        
        Hastie, T., Rosset, S., Zhu, J., & Zou, H. (2009). Multi-class AdaBoost. Statistics and Its Interface, 2(3), 349–360. https://doi.org/10.4310/sii.2009.v2.n3.a8
    '''
    iproba = ensemble_proba[i, :, :]
    all_proba = ensemble_proba.mean(axis=0)
    sqdiff = (iproba - all_proba)**2

    C = iproba.shape[1]

    A = 1.0 / C**2 * np.exp(-1.0 / C * iproba)
    B = 1.0 / C**2 * (1.0 / (C - 1))**2 * np.exp(1.0 / C * 1.0 /
                                                 (C - 1) * iproba)

    bitmask = np.zeros(A.shape)
    # bitmask[:,target] = 1.0
    np.put_along_axis(bitmask, target[:, None], 1.0, 1)
    return (bitmask * A + (1.0 - bitmask) * B).sum() + sqdiff.sum()
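A hypothetical call, assuming ensemble_proba is stacked as (n_estimators, n_samples, n_classes) and target holds integer class labels (the shapes and names here are illustrative, not from the paper):

import numpy as np

n_est, n_samples, n_classes = 4, 8, 3
rng = np.random.default_rng(0)
ensemble_proba = rng.dirichlet(np.ones(n_classes), size=(n_est, n_samples))
target = rng.integers(0, n_classes, size=n_samples)
# score every ensemble member with the ambiguity criterion
scores = [error_ambiguity(i, ensemble_proba, target) for i in range(n_est)]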
Example #4
    def _CMI(self, x, y, i, j, cond):
        '''
        Compute CMI for a list of conditions.
        '''
        N = len(cond)
        d = x.shape[1]
        Nt = y.shape[1]
        b2 = np.zeros([N, d + Nt], dtype=np.float32)
        for n, c in enumerate(cond):
            if c:
                np.put_along_axis(b2[n], np.array(c), 1., 0)
        m2 = b2.copy()
        m2[:, i] = 1.
        b1 = b2.copy()
        b1[:, j] = 1.
        m1 = b1.copy()
        m1[:, i] = 1.
        m = np.concatenate([m1, m2], axis=0)  # [N*2, d+Nt]
        b = np.concatenate([b1, b2], axis=0)  # [N*2, d+Nt]
        # run
        B = self.hps.batch_size
        num_batches = x.shape[0] // B
        cmi_est = []
        for n in range(num_batches):
            xx = x[n * B:(n + 1) * B]
            yy = y[n * B:(n + 1) * B]
            cmi = self._batch_CMI(xx, yy, b, m)
            cmi_est.append(cmi)
        cmi_est = np.concatenate(cmi_est, axis=0)
        cmi = np.mean(cmi_est, axis=0)

        return cmi
Example #5
def mutual_selection(score_mat):
    """
    Return a {0,1} matrix, the element is 1 if and only if it's maximum along both row and column
    
    Args: np.array()
        score_mat:  [B,N,N]
    Return:
        mutuals:    [B,N,N] 
    """
    score_mat = to_array(score_mat)
    if (score_mat.ndim == 2):
        score_mat = score_mat[None, :, :]

    mutuals = np.zeros_like(score_mat)
    for i in range(score_mat.shape[0]):  # loop through the batch
        c_mat = score_mat[i]
        flag_row = np.zeros_like(c_mat)
        flag_column = np.zeros_like(c_mat)

        max_along_row = np.argmax(c_mat, 1)[:, None]
        max_along_column = np.argmax(c_mat, 0)[None, :]
        np.put_along_axis(flag_row, max_along_row, 1, 1)
        np.put_along_axis(flag_column, max_along_column, 1, 0)
        mutuals[i] = flag_row.astype(bool) & flag_column.astype(bool)
    return mutuals.astype(bool)
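Example usage (a sketch; to_array is not shown in the snippet and is assumed to simply coerce its input to a NumPy array):

import numpy as np

def to_array(x):  # assumed helper
    return np.asarray(x)

scores = np.random.rand(2, 5, 5)
mutuals = mutual_selection(scores)  # (2, 5, 5) boolean mask of mutual row/column maxima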
Example #6
def softmax_with_cross_entropy(predictions, target_index):
    """
    Computes softmax and cross-entropy loss for model predictions,
    including the gradient

    Arguments:
      predictions: np array, shape is either (N) or (batch_size, N) -
        classifier output
      target_index: np array of int, shape is (1) or (batch_size) -
        index of the true class for given sample(s)

    Returns:
      loss, single value - cross-entropy loss
      dprediction, np array same shape as predictions - gradient of loss by predictions
    """
    y_true = np.zeros(predictions.shape)
    if predictions.ndim == 1:
        y_true[target_index] = 1
    else:
        target_index_vect = target_index.reshape(-1, 1)
        np.put_along_axis(y_true, target_index_vect, 1, axis=1)

    probs = softmax(predictions)
    loss = cross_entropy_loss(probs, target_index)

    dprediction = (probs - y_true)
    if predictions.ndim != 1:
        dprediction /= predictions.shape[0]

    return loss, dprediction
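The softmax and cross_entropy_loss helpers are not included in this example; a minimal pair consistent with the shapes used above (an assumption, not the original code):

import numpy as np

def softmax(predictions):
    # numerically stabilized softmax over the last axis
    shifted = predictions - np.max(predictions, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=-1, keepdims=True)

def cross_entropy_loss(probs, target_index):
    # -log p(true class), averaged over the batch in the 2-D case
    if probs.ndim == 1:
        return -np.log(probs[int(target_index)])
    rows = np.arange(probs.shape[0])
    return -np.mean(np.log(probs[rows, np.asarray(target_index).reshape(-1)]))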
Example #7
    def replay(self, batch_size):
        minibatch = random.sample(self.memory, min(len(self.memory),
                                                   batch_size))

        batch_states = np.array([i[0] for i in minibatch]).reshape(
            (len(minibatch), 84, 84, 4))
        batch_actions = np.array([i[1] for i in minibatch]).reshape(
            (len(minibatch), 1, 1))
        batch_rewards = np.array([i[2] for i in minibatch]).reshape(
            (len(minibatch), 1, 1))
        batch_next_states = np.array([i[3] for i in minibatch]).reshape(
            (len(minibatch), 84, 84, 4))
        batch_dones = np.array([i[4] for i in minibatch]).reshape(
            (len(minibatch), 1, 1))
        random_states = self.model.get_nprandomstates()
        y_target = self.model.feedforward(batch_states)
        target = (
            batch_rewards + (np.invert(batch_dones) * (self.gamma * np.amax(
                self.model.feedforward(batch_next_states), 1, keepdims=True)))
        ).reshape((len(minibatch), 1, 1))
        np.put_along_axis(y_target, batch_actions, target, 1)

        self.model.sgd_fit(batch_states,
                           y_target,
                           batch_size=batch_size,
                           epochs=1,
                           train_pct=1.0,
                           shuffle_inputs=False,
                           random_states=random_states)

        if self.epsilon > self.epsilon_min:
            self.epsilon *= self.epsilon_decay
Example #8
    def evaluate_array(self, us, vs):
        result = np.empty((len(us), 3))
        v_to_u = defaultdict(list)
        v_to_i = defaultdict(list)
        for i, (u, v) in enumerate(zip(us, vs)):
            v_to_u[v].append(u)
            v_to_i[v].append(i)

        # here we rely on fact that in Python 3.7+ dicts are ordered.
        all_vs = np.array(list(v_to_u.keys()))
        v_spline_points = np.array(
            [spline.evaluate_array(all_vs) for spline in self.v_splines])

        for v_idx, (v, us_by_v) in enumerate(v_to_u.items()):
            is_by_v = v_to_i[v]
            spline_vertices = []
            for spline_idx, spline in enumerate(self.v_splines):
                point = v_spline_points[spline_idx, v_idx]
                #point = spline.evaluate(v)
                spline_vertices.append(point)
            u_spline = self.get_u_spline(v, spline_vertices)
            points = u_spline.evaluate_array(np.array(us_by_v))
            idxs = np.array(is_by_v)[np.newaxis].T
            np.put_along_axis(result, idxs, points, axis=0)
        return result
Example #9
    def analyse(self, a):

        g = np.copy(a)

        # set blue and red channels to 0
        g[:, :, 0] = 0
        g[:, :, 2] = 0

        GBthreshold = 50

        GB = np.where(g > GBthreshold, 255, 0)

        GBB = np.where(g > GBthreshold, 1, 0)

        GBB = GBB[:,:,1]
        GB2 = GB[:,:,1]


        # GREEN
        sumG2 = np.sum(GBB, axis=0)
        sumsizeG = sumG2.size
        xaxis = np.argmax(sumG2, axis=0)
        print("x: ")
        print(xaxis)


        sumG1 = np.sum(GBB, axis=1)
        sumsizeG1 = sumG1.size
        yaxis = np.argmax(sumG1, axis=0)
        print("y: ")
        print(yaxis)


        GreenFound = GB2
        dimG1 = 320
        dimG2 = 192
        indices1 = np.ones((dimG2, dimG1), dtype=int) * yaxis
        indices2 = np.ones((dimG2, dimG1), dtype=int) * xaxis
        np.put_along_axis(GreenFound, indices1, 255, axis=0)
        np.put_along_axis(GreenFound, indices2, 255, axis=1)
        
        
        GreenFound = GreenFound.astype(np.uint8)
        cv2.imshow('image',GreenFound)
        cv2.waitKey(1)
        cv2.imwrite('GreenFound.png',GreenFound)
        
        
        anglepwm = translate(xaxis, 0, 320, 65, 0)

        # if the dot is gone, steer to the middle
        if anglepwm == 65:
            anglepwm = 33

        SetAngle(anglepwm)
        
        print("Servo value: ")
        print(anglepwm)
        print("")
Example #10
File: move.py Project: beckerrh/simfempy
def move_nodes(mesh, beta, second=False):
    d, nn, nc = mesh.dimension, mesh.nnodes, mesh.ncells
    assert beta.shape == (nc, d)
    lambdas = np.eye(d + 1)
    md = MoveData(nn, d, second=second)
    for i in range(mesh.ncells):
        betacoef = _coef_beta_in_simplex(i, mesh, beta[i])
        for ipl in range(d + 1):
            delta, mu = _move_in_simplex_opt(lambdas[ipl], betacoef)
            if delta > 0:
                ip = mesh.simplices[i, ipl]
                md.cells[ip] = i
                md.deltas[ip] = delta
                md.mus[ip] = mu
    ind = np.argmin(md.mus, axis=1)
    # print(f"{md.mus.shape=} {ind.shape=}")
    np.put_along_axis(md.mus, ind[:, np.newaxis], 0, axis=1)
    if second:
        for ip in range(nn):
            ic = md.cells[ip]
            if ic == md.imax: continue
            indf = mesh.facesOfCells[ic, md.mus[ip] == 0]
            ic2 = mesh.cellsOfFaces[indf, 0][0]
            if ic2 == ic: ic2 = mesh.cellsOfFaces[indf, 1][0]
            if ic2 < 0: continue
            # print(f"{ic2=}")
            mu2 = _coef_mu_in_neighbor(mesh, ic2, ic, md.mus[ip])
            betacoef = _coef_beta_in_simplex(ic2, mesh, beta[ic])
            delta, mu = _move_in_simplex_opt(mu2, betacoef)
            # print(f"{delta=} {betacoef=} {md.mus[ip]} {mu=}")
            if delta > 0.1 * md.deltas[ip] / np.sqrt(2):
                md.cells2[ip] = ic2
                md.deltas2[ip] = delta
                md.mus2[ip] = mu
    return md
Example #11
def normalize_activation(arr, threshold=0.5, mono=False):
    """

    :param arr: (nb_instruments, (batch=1), nb_steps=1, length, 88, 2)
    :param threshold: activation threshold
    :param mono: if True, one-hot the argmax along the pitch axis instead of thresholding
    :return: the same array, but with only ones and zeros in the activation part ([..., 0])
    """
    if mono:
        axis = len(arr.shape) - 2
        argmax = np.argmax(arr, axis=axis)
        new_arr = np.zeros(arr.shape)
        idx = list(np.ogrid[[
            slice(arr.shape[ax]) for ax in range(arr.ndim) if ax != axis
        ]])
        idx.insert(axis, argmax)
        new_arr[tuple(idx)] = 1
    else:
        activations = np.take(arr, axis=-1, indices=0)
        np.place(activations, threshold <= activations, 1)
        np.place(activations, activations < threshold, 0)
        np.put_along_axis(arr=arr,
                          indices=np.zeros(tuple(1 for i in arr.shape),
                                           dtype=int),
                          values=np.expand_dims(activations, axis=-1),
                          axis=-1)
        new_arr = arr
    return new_arr
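Illustrative shapes for both branches (a sketch; the 6-D layout follows the docstring, and the non-mono branch mutates its input, hence the copy):

import numpy as np

arr = np.random.rand(2, 1, 1, 16, 88, 2)
out = normalize_activation(arr.copy(), threshold=0.5)  # channel 0 thresholded to {0, 1}
out_mono = normalize_activation(arr, mono=True)        # one-hot along the 88-pitch axis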
Example #12
File: base.py Project: srsohn/mtsgi
    def observe(
        self,
        action: types.NestedArray,
        next_timestep: dm_env.TimeStep,
    ):
        action = np.expand_dims(action, axis=-1)
        next_rewards = np.expand_dims(next_timestep.reward, axis=-1)
        is_first = np.expand_dims(next_timestep.first(), axis=-1)  # for mask
        avg_rewards = np.take_along_axis(self._avg_rewards, action, axis=-1)
        counts = np.take_along_axis(self._counts, action, axis=-1)

        # Compute & update avg rewards.
        update_values = 1 / counts * (next_rewards - avg_rewards)
        next_avg_rewards = avg_rewards + np.where(
            is_first, 0, update_values)  # skip first timestep.
        np.put_along_axis(self._avg_rewards,
                          action,
                          values=next_avg_rewards,
                          axis=-1)

        # Update counts.
        np.put_along_axis(self._counts,
                          action,
                          values=counts + (1 - is_first),
                          axis=-1)
        self._total_counts += (1 - is_first).squeeze()
Example #13
def train_eval_bert(params, df, train, test, evaluate=True):
    train_dataset, val_dataset, MAX_LEN = create_train_val(
        df['content'], df['labels'], train, test)

    print("training bert with these params")
    print(params)
    model = init_model('distilbert-base-uncased', len(targets), params)
    model.fit(train_dataset.shuffle(100).batch(params['batch_size']),
              epochs=params['num_epochs'],
              batch_size=params['batch_size'],
              class_weight=params['class_weight'])

    preds = model.predict(val_dataset.batch(1)).logits
    y_pred = tf.keras.activations.sigmoid(tf.convert_to_tensor(preds)).numpy()
    ai = np.expand_dims(np.argmax(y_pred, axis=1), axis=1)
    maximums = np.maximum(y_pred.max(1), 0.51)
    np.put_along_axis(y_pred, ai, maximums.reshape(ai.shape), axis=1)

    if evaluate:
        eps = evaluate_preds(df['relevant'][test], y_pred[:, 0])
        for key, value in params.items():
            eps[key] = value
        return eps, y_pred
    else:
        return y_pred
Example #14
 def fit(self, X, y: np.uint8):
     # label validation
     y = check_array(y, dtype='uint8', ensure_2d=False)
     if len(y.shape) == 1:
         y = y.reshape(-1, 1)
     # get classes
     self.classes_ = list(range(0, y.max() + 1))
     # Check that X and y have correct shape
     X, y = check_X_y(X, y, y_numeric=True, multi_output=True)
     # create one hot encoded matrix
     y_encoded = np.zeros((len(y), len(self.classes_)))
     # put on correct columns 1 value
     np.put_along_axis(y_encoded, y, 1, axis=1)
     # add bias to X
     X = np.hstack((np.ones((X.shape[0], 1)), X))
     # generate random matrix if it does not exist or it is a different size
     rng = np.random.default_rng(seed=self.seed)
     self.V_ = rng.normal(scale=0.2, size=(X.shape[1], self.neurons))
     # calculate activator for mid-layer
     #H = np.tanh(X.dot(self.V_))
     # add bias to H
     H = np.append(np.ones((X.shape[0], 1)), np.tanh(X.dot(self.V_)), axis=1)
     # if n <= N
     if self.neurons <= X.shape[0]:
         #  w = (h_t*h + gamma*I)^-1 * h_t*y
         self.weights_ = np.linalg.inv(H.transpose().dot(H) + self.gamma * np.identity(H.shape[1])).dot(
             H.transpose().dot(y_encoded))
     else:
         # n > N
         #  w = h_t(h*h_t + gamma*I)^-1 * y
         self.weights_ = H.transpose().dot(
             np.linalg.inv(H.dot(H.transpose()) + self.gamma * np.identity(H.shape[0]))).dot(y_encoded)
     return self
Example #15
 def cost(outputs: np.ndarray, label: np.ndarray) -> np.ndarray:
     """
     Cross entropy cost function, consumes 2d array of outputs and 1d array of expected values.
     """
     y = np.zeros(outputs.shape).reshape(-1, 10)
     np.put_along_axis(y, label.reshape(-1, 1), 1, axis=1)
     return -np.nan_to_num(np.sum(y * np.log(outputs) + (1 - y) * np.log(1 - outputs), axis=1))
Example #16
 def prime(outputs: np.ndarray, label: np.ndarray) -> np.ndarray:
     """
     Derivative of cross entropy cost function, consumes 2d array of outputs and 1d array of expected values.
     """
     y = np.zeros(outputs.shape).reshape(-1, 10)
     np.put_along_axis(y, label.reshape(-1, 1), 1, axis=1)
     return -(y - outputs) / (outputs * (1 - outputs))
Example #17
 def cost(outputs: np.ndarray, label: np.ndarray) -> np.ndarray:
     """
     Quadratic cost function, consumes 2d array of outputs and 1d array of expected values
     """
     y = np.zeros(outputs.shape).reshape(-1, 10)  # Generate zeros array of same shape as output
     np.put_along_axis(y, label.reshape(-1, 1), 1, axis=1)  # Place a 1 at the label index of each array of y
     return np.mean((outputs - y) ** 2, axis=1) / 2
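The one-hot construction shared by the last three snippets is a recurring np.put_along_axis idiom; in isolation:

import numpy as np

labels = np.array([2, 0, 1])
onehot = np.zeros((labels.size, 10))
np.put_along_axis(onehot, labels.reshape(-1, 1), 1, axis=1)
# row i now has a 1 at column labels[i]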
Example #18
def train_age_regressor():

    # Load data
    X_tr_raw = np.load("fashion_mnist_train_images.npy")
    y_tr_raw = np.load("fashion_mnist_train_labels.npy")
    X_te_raw = np.load("fashion_mnist_test_images.npy")
    y_te_raw = np.load("fashion_mnist_test_labels.npy")

    no_data = X_tr_raw.shape[0]

    brightness_value = 256
    X_te = X_te_raw/brightness_value
    X_tr = X_tr_raw/brightness_value
    
    y_tr = np.zeros([X_tr_raw.shape[0], no_of_classes])
    y_tr_raw = np.atleast_2d(y_tr_raw).T
    np.put_along_axis(y_tr, y_tr_raw, 1, axis=1)

    y_te = np.zeros([X_te_raw.shape[0], no_of_classes])
    y_te_raw = np.atleast_2d(y_te_raw).T
    np.put_along_axis(y_te, y_te_raw, 1, axis=1)

    # X_show = (X_tr_raw[40].reshape((28, 28)))
    # plt.imshow(X_show, interpolation='nearest', cmap='gray', vmin=0, vmax=255)
    # plt.show()

    w,b = stoch_grad_regression(X_tr, y_tr)
    testing_age = test_data(X_te, y_te, w, b)
    print("################################")
    print("FINAL TESTING ERROR:", testing_age)
    print("################################")
Example #19
File: dcgan.py Project: kennuu/GAN
    def generate_sample(self, epoch):
        c = 7
        r = 7
        z = np.random.uniform(-1, 1, (self.batch_size, self.z_shape))
        z[:, :10] = 0.
        y = np.random.randint(10, size=self.batch_size)
        np.put_along_axis(z, y[..., np.newaxis], 1, axis=1)
        imgs = self.sess.run(self.gen_out, feed_dict={self.phZ:z})
        imgs = imgs*0.5 + 0.5
        result = np.argmax(imgs[:, :, -1, 0], axis=1)
        self.matching = np.sum(y == result)

        # scale between 0, 1
        fig, axs = plt.subplots(c, r)
        fig.suptitle(f"Matching indices: {self.matching}")
        cnt = 0
        for i in range(c):
            for j in range(r):
                axs[i, j].imshow(imgs[cnt, :, :, 0], cmap="gray")
                axs[i, j].axis('off')
                # if discs[cnt]:
                #     col = 'g'
                # else:
                #     col = 'r'
                axs[i, j].set_title(str(y[cnt]), size=7, pad=0.5) #, color = col)
                axs[i, j].text(30, 13.5, str(result[cnt]), size=7,
                               verticalalignment='center')
                cnt += 1
        fig.savefig("samples/targets_swapped_" + str(epoch).zfill(len(str(self.epochs))) + ".png")
        plt.close()
Example #20
    def normal_array(self, us, vs):
        h = 0.001
        result = np.empty((len(us), 3))
        v_to_u = defaultdict(list)
        v_to_i = defaultdict(list)
        for i, (u, v) in enumerate(zip(us, vs)):
            v_to_u[v].append(u)
            v_to_i[v].append(i)
        for v, us_by_v in v_to_u.items():
            us_by_v = np.array(us_by_v)
            is_by_v = v_to_i[v]
            spline_vertices = []
            spline_vertices_h = []
            for v_spline in self.v_splines:
                v_min, v_max = v_spline.get_u_bounds()
                vx = (v_max - v_min) * v + v_min
                point = v_spline.evaluate(vx)
                point_h = v_spline.evaluate(vx + h)
                spline_vertices.append(point)
                spline_vertices_h.append(point_h)
            u_spline = self.get_u_spline(v, spline_vertices)
            u_spline_h = self.get_u_spline(v+h, spline_vertices_h)
            points = u_spline.evaluate_array(us_by_v)
            points_v_h = u_spline_h.evaluate_array(us_by_v)
            points_u_h = u_spline.evaluate_array(us_by_v + h)
            dvs = (points_v_h - points) / h
            dus = (points_u_h - points) / h
            normals = np.cross(dus, dvs)
            norms = np.linalg.norm(normals, axis=1, keepdims=True)
            normals = normals / norms

            idxs = np.array(is_by_v)[np.newaxis].T
            np.put_along_axis(result, idxs, normals, axis=0)
        return result
Example #21
File: nddops.py Project: b1quint/DRAGONS
    def minmax(data, mask=None, variance=None, nlow=0, nhigh=0):
        # minmax rejection, following IRAF rules when pixels are rejected
        # We flag the pixels to be rejected as DQ.bad_pixel. For any pixels
        # to be flagged this way, there have to be good (or nonlin/saturated)
        # pixels around so they will get combined before the DQhierarchy
        # looks at DQ.bad_pixel
        num_img = data.shape[0]
        if nlow + nhigh >= num_img:
            raise ValueError("Only {} images but nlow={} and nhigh={}".format(
                num_img, nlow, nhigh))
        if mask is None:
            nlo = int(nlow + 0.001)
            nhi = data.shape[0] - int(nhigh + 0.001)
            # Sorts data and apply this to the mask
            arg = np.argsort(data, axis=0)
            mask = np.zeros_like(data, dtype=bool)
            np.put_along_axis(mask, arg[:nlo], True, axis=0)
            np.put_along_axis(mask, arg[nhi:], True, axis=0)
        else:
            # Because I'm sorting, I'll put large dummy values in a numpy array
            # Have to keep all values if all values are masked!
            # Sorts variance and mask with data
            arg = np.argsort(np.where(mask == DQ.max, np.inf, data), axis=0)

            # IRAF imcombine maths
            num_good = NDStacker._num_good(mask == DQ.max)
            nlo = (num_good * nlow / num_img + 0.001).astype(int)
            nhi = num_good - (num_good * nhigh / num_img +
                              0.001).astype(int) - 1

            arg2 = np.argsort(arg, axis=0)
            mask[arg2 < nlo] = DQ.max
            mask[(arg2 > nhi) & (arg2 < num_good)] = DQ.max

        return data, mask, variance
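The unmasked branch reduces to flagging the nlow smallest and nhigh largest values along the stacking axis; a standalone sketch of that trick with nlow = nhigh = 1:

import numpy as np

data = np.random.rand(5, 3, 3)  # stack of 5 images
arg = np.argsort(data, axis=0)
mask = np.zeros_like(data, dtype=bool)
np.put_along_axis(mask, arg[:1], True, axis=0)   # reject the lowest value per pixel
np.put_along_axis(mask, arg[-1:], True, axis=0)  # reject the highest value per pixel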
Example #22
def change_channel(image, channel_id, percentage):
    """Modifies an output channel by multiplying it by a percentage."""
    mask = np.ones(image.shape, dtype=float)
    indices = channel_id * np.ones((image.shape[0], image.shape[1], 1),
                                   dtype=int)
    np.put_along_axis(mask, indices, percentage, axis=2)
    return np.clip((image * mask).astype(int), 0, 255)
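For example (a sketch; channel 2 is blue in an RGB layout):

import numpy as np

image = np.random.randint(0, 256, (4, 4, 3))
dimmed = change_channel(image, channel_id=2, percentage=0.5)  # halve the blue channel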
Example #23
        def grad_fn_left(grad: Array) -> Array:
            adjoint = np.zeros_like(input.data)
            np.put_along_axis(adjoint, target_exp, -1, axis=1)
            adjoint += special.softmax(input.data, axis=1)
            adjoint /= np.prod(input.shape) / input.shape[1]

            return grad * adjoint
Example #24
    def _lle(self, idx_nn):
        nn = self.X_fit[idx_nn]  # (N, n_neighbors, n_features)
        Z = nn - np.expand_dims(self.X_fit, 1)  # (N, n_neighbors, n_features)
        G = np.matmul(Z,
                      np.transpose(Z,
                                   (0, 2, 1)))  # (N, n_neighbors, n_neighbors)
        one = np.ones((len(self.X_fit), self.n_neighbors, 1))
        G_ = np.linalg.pinv(G)
        a = np.matmul(G_, one)  # (N, n_neighbors, 1)
        b = np.matmul(np.matmul(np.transpose(one, [0, 2, 1]), G_),
                      one)  # (N, 1, 1)
        value = np.squeeze(a / b, 2)  # (N, n_neighbors)

        W = np.zeros((len(self.X_fit), len(self.X_fit)))
        np.put_along_axis(W, idx_nn, value, 1)

        I = np.eye(len(self.X_fit))
        tmp = I - W
        M = np.matmul(tmp.T, tmp)
        evl, evc = sp.linalg.eigh(
            M,
            subset_by_index=(self.k_skip, self.n_components + self.k_skip - 1))
        idx = np.abs(evl).argsort()
        evl, evc = evl[idx], evc[:, idx]
        self.embeddings = evc[:, :self.n_components]
        return self.embeddings
Example #25
    def _fix_number_of_classes(
        self,
        n_classes_training: NDArray,
        y_proba: NDArray
    ) -> NDArray:
        """
        Fix shape of y_proba of validation set if number of classes
        of the training set used for cross-validation is different than
        number of classes of the original dataset y.

        Parameters
        ----------
        n_classes_training : NDArray
            Classes of the training set.
        y_proba : NDArray
            Probabilities of the validation set.

        Returns
        -------
        NDArray
            Probabilities with the right number of classes.
        """
        y_pred_full = np.zeros(
            shape=(len(y_proba), self.n_classes_)
        )
        y_index = np.tile(n_classes_training, (len(y_proba), 1))
        np.put_along_axis(
            y_pred_full,
            y_index,
            y_proba,
            axis=1
        )
        return y_pred_full
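A standalone illustration of the padding step with hypothetical numbers (self.n_classes_ = 4 and a training fold that only saw classes [0, 2, 3]):

import numpy as np

y_proba = np.array([[0.2, 0.5, 0.3]])
n_classes_training = np.array([0, 2, 3])
y_pred_full = np.zeros((len(y_proba), 4))
y_index = np.tile(n_classes_training, (len(y_proba), 1))
np.put_along_axis(y_pred_full, y_index, y_proba, axis=1)
# y_pred_full -> [[0.2, 0.0, 0.5, 0.3]]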
Example #26
def softmax_with_cross_entropy(predictions, target_index):
    """
    Computes softmax and cross-entropy loss for model predictions,
    including the gradient

    Arguments:
      predictions: np array, shape is either (N) or (batch_size, N) -
        classifier output
      target_index: np array of int, shape is (1) or (batch_size) -
        index of the true class for given sample(s)

    Returns:
      loss, single value - cross-entropy loss
      dprediction, np array same shape as predictions - gradient of loss by predictions
    """
    # Softmax and cross-entropy loss
    probs = softmax(predictions)
    loss = cross_entropy_loss(probs, target_index)

    # Gradient
    dprediction = np.zeros(probs.shape)
    if isinstance(target_index, int):
        dprediction[target_index] = 1
        dprediction = probs - dprediction
    else:
        if target_index.ndim == 1:
            target_index = target_index[:, None]
        np.put_along_axis(dprediction, target_index, 1, axis=1)
        dprediction = (probs - dprediction) / dprediction.shape[0]

    return loss, dprediction
Example #27
    def gen() -> Generator[SignalDataFrame, None, None]:
        for i in range(n_samples):
            data = np.random.randint(0, modulator.n_channels, sample_len,
                                     np.int32)
            target_one_hot = np.zeros((sample_len, modulator.n_channels),
                                      dtype=np.float32)
            np.put_along_axis(target_one_hot,
                              np.expand_dims(data, 1),
                              1,
                              axis=1)

            wave = modulator.modulate(data)
            if add_noise is not None:
                wave = add_noise(wave)
            wave = np.expand_dims(wave, 1)
            x = wave

            if return_sg:
                spectrogram = modulator.wave_to_spectrogram(wave.ravel()).T
                spectrogram = np.expand_dims(spectrogram, 2)
                if normalize:
                    _norm_spectrogram(spectrogram)
                x = (wave, spectrogram) if return_wave else spectrogram

            if normalize:
                _norm_wave(wave)

            yield x, target_one_hot
Example #28
    def _get_ndarray(self, statuses, revcomp=False):
        """
        `statuses` is an N-array of 1s and 0s, denoting positives and negatives.
        Returns a NumPy array of one-hot encoded sequences parallel to
        `statuses` (i.e. wherever there is a 0, a negative sequence is returned,
        and wherever there is a 1, a positive sequence with a motif is
        returned). If `revcomp` is True, also include the reverse complement
        sequences concatenated at the end (i.e. the returned array will be
        twice as long as `statuses`).
        """
        onehot = np.empty((len(statuses), self.sequence_length, 4))
        pos_inds = np.where(statuses == 1)[0]
        neg_inds = np.where(statuses == 0)[0]

        if len(pos_inds):
            pos_onehot = self._get_one_hot_seqs(self.pos_seq_generator,
                                                len(pos_inds))
            np.put_along_axis(onehot,
                              pos_inds[:, None, None],
                              pos_onehot,
                              axis=0)

        if len(neg_inds):
            neg_onehot = self._get_one_hot_seqs(self.neg_seq_generator,
                                                len(neg_inds))
            np.put_along_axis(onehot,
                              neg_inds[:, None, None],
                              neg_onehot,
                              axis=0)

        if revcomp:
            rc = onehot[:, ::-1, ::-1]
            onehot = np.concatenate([onehot, rc])

        return onehot
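The put_along_axis calls above broadcast a (k, 1, 1) index array against (k, L, 4) values, scattering whole sequences into rows of onehot; a standalone check of the same pattern (shapes are illustrative):

import numpy as np

onehot = np.zeros((5, 3, 4))
pos_inds = np.array([1, 3])
pos_onehot = np.ones((2, 3, 4))
np.put_along_axis(onehot, pos_inds[:, None, None], pos_onehot, axis=0)
# equivalent, and arguably simpler: onehot[pos_inds] = pos_onehot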
Example #29
def filter_to_topp(top_p, dist):
    """
    Args:
        top_k (int):
        dist (torch.Tensor): (num_batch, -1) dimensioned torch tensor.
    Returns:
        torch.Tensor: (num_batch, -1) dimensioned torch tensor.
    """
    dist = np.copy(dist)

    batch_size = dist.shape[0]

    sorted_index = np.argsort(-dist)
    sorted_value = np.take_along_axis(dist, sorted_index, axis=-1)

    sorted_prob = softmax(sorted_value, axis=1)
    sorted_prob_cumsum = np.cumsum(sorted_prob, axis=-1)

    # shift right side
    # Algorithm
    # cumsum = [0.1, 0.3, 0.93, 0.1] and top_p = 0.9
    # cumsum > top_p -> [False, False, True,  True]
    # shift          -> [False, False, False, True]
    sorted_index_mask = np.concatenate([
        np.full((batch_size, 1), False), (sorted_prob_cumsum > top_p)[:, :-1]
    ],
                                       axis=-1)

    mask = np.full(dist.shape, False)
    np.put_along_axis(mask, sorted_index, sorted_index_mask, axis=-1)

    dist[mask] = -float("Inf")

    return dist
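Example usage (a sketch; the softmax referenced inside filter_to_topp is assumed to be scipy.special.softmax or an equivalent helper):

import numpy as np
from scipy.special import softmax

logits = np.random.randn(2, 10)
filtered = filter_to_topp(0.9, logits)
# entries outside the top-p nucleus are now -inf and vanish after a softmax
probs = softmax(filtered, axis=-1)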
Example #30
File: fourier.py Project: bennosski/elph
def w2t_boson(I, beta, axis):
    Ishape = np.shape(I)
    dim = len(Ishape)

    N = 2 * (Ishape[axis] - 1)
    delta = beta / N
    w = np.fft.fftfreq(N, beta / (np.pi * 2 * N))
    theta = w * delta

    indices = expand(np.arange(N // 2 - 1, 0, -1), axis, dim)

    I = np.concatenate((I, np.take_along_axis(np.conj(I), indices, axis)),
                       axis)

    I = I / (delta * expand(W_boson(theta), axis, dim))

    shape = np.array(np.shape(I))
    shape[axis] = N + 1
    out = np.zeros(shape, dtype=complex)

    indices = expand(np.arange(N), axis, dim)
    np.put_along_axis(out, indices, np.fft.fft(I, axis=axis) / N, axis)
    indices = expand(np.arange(N, N + 1), axis, dim)
    np.put_along_axis(
        out, indices,
        np.take_along_axis(out, expand(np.arange(1), axis, dim), axis), axis)
    return out
Example #31
    def agregar(símismo, nuevos, edad=0, etapas=None):

        # Clear the ages of empty cohorts
        símismo._edades[símismo._pobs == 0] = 0

        if etapas is None:
            rbn = slice(None)
            nuevos = símismo._proc_matr_datos(nuevos)
        else:
            rbn = símismo.rebanar(etapas)

        # The current ages and populations of the stages
        pobs = símismo._pobs[rbn]
        edades = símismo._edades[rbn]

        eje_coh = símismo.eje_coh()

        # The indices of the days whose cohorts have the minimum age. If more than one day (cohort) has the
        # minimum age, the first one is taken.
        í_cohs = np.expand_dims(np.argmin(edades, axis=eje_coh), axis=eje_coh)

        # The ages of the cohorts with the minimum ages.
        eds_mín = np.take_along_axis(edades, í_cohs, axis=eje_coh)

        # The populations corresponding to these minimum ages.
        pobs_coresp = np.take_along_axis(pobs, í_cohs, axis=eje_coh)

        # Where there is no existing population, reset the age.
        eds_mín = np.where(pobs_coresp == 0, [0], eds_mín)

        # Compute the weight of the existing ages according to their existing populations (to combine with the
        # new cohort if it has to be merged with an existing cohort).
        peso_ed_ya = np.divide(pobs_coresp, np.add(nuevos, pobs_coresp))
        peso_ed_ya[np.isnan(peso_ed_ya)] = 0

        # The average ages. If there was no need to merge cohorts, this will simply be the age of the new cohort.
        eds_prom = np.add(np.multiply(eds_mín, peso_ed_ya), np.multiply(edad, np.subtract(1, peso_ed_ya)))

        # Store the updated ages at the appropriate indices
        np.put_along_axis(edades, í_cohs, eds_prom, axis=eje_coh)

        # Store the updated populations at the appropriate indices
        np.put_along_axis(pobs, í_cohs, nuevos + pobs_coresp, axis=eje_coh)

        símismo._pobs[rbn] = pobs
        símismo._edades[rbn] = edades