Example #1
def minimize(self):

    # self.create_3d_mask()

    p = cupy.array(self.gradient(0 * self.inputImage))
    self.iterationNumber += 10
    for ind in tqdm(range(0, self.iterationNumber), desc="tvmin"):
        if self.verbose:
            print("Iteration: ", ind)
        if ind < 10:
            lamb = 0.5
        else:
            lamb = self.lamb

        midP = self.divergence(p) - self.inputImage / lamb
        psi = cupy.array(self.gradient(midP))
        r = self.getSquareSum(psi)
        p = (p + self.to * psi) / (1 + self.to * r)
        self.resultImage = (self.inputImage - self.divergence(p) * lamb)
        self.resultImage = self.positive_constrain(self.resultImage, self.dF_3D_2)
        if ind == 9:
            # Binarise the intermediate result with an Otsu threshold
            # (computed on the CPU with skimage) to build a 3-D mask.
            n_3D = cupy.asnumpy(self.resultImage).astype(float)
            otsu_val = filters.threshold_otsu(n_3D)
            n_3D_bi = np.zeros_like(n_3D)
            n_3D_bi[n_3D >= otsu_val] = 1
            self.dn_3D_bi = cupy.asarray(n_3D_bi)
        elif ind > 9:
            # Zero out voxels that fall outside the Otsu mask.
            self.resultImage[cupy.equal(self.dn_3D_bi, 0)] = 0
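
The Otsu step above computes the threshold on the CPU and pushes the binary mask back to the GPU. A minimal standalone sketch of that pattern, assuming skimage is available (the data here is illustrative):

import cupy
import numpy as np
from skimage import filters

# Threshold a volume on the CPU with Otsu's method, then move the
# binary mask back to the GPU for masking.
n_3D = np.random.rand(8, 8, 8).astype(float)
otsu_val = filters.threshold_otsu(n_3D)
mask = cupy.asarray((n_3D >= otsu_val).astype(float))
print(mask.sum())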
Example #2
def test_preprocess():
    expected_tokens, expected_masks, expected_metadata = get_expected_preprocess()
    actual_tokens, actual_masks, actual_metadata = cyparse.preprocess(input_logs)
    assert actual_tokens.equal(expected_tokens)
    assert actual_masks.equal(expected_masks)
    assert cupy.equal(actual_metadata, expected_metadata).all()
Example #3
def equal(x1: Array, x2: Array, /) -> Array:
    """
    Array API compatible wrapper for :py:func:`np.equal <numpy.equal>`.

    See its docstring for more information.
    """
    # Call result type here just to raise on disallowed type combinations
    _result_type(x1.dtype, x2.dtype)
    x1, x2 = Array._normalize_two_args(x1, x2)
    return Array._new(np.equal(x1._array, x2._array))
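
A brief usage sketch of this wrapper, assuming the experimental array-API namespace it lives in (e.g. numpy.array_api, present in NumPy 1.22–1.26):

import numpy.array_api as xp

a = xp.asarray([1, 2, 3])
b = xp.asarray([1, 0, 3])
print(xp.equal(a, b))              # elementwise comparison
print(xp.equal(a, xp.asarray(3)))  # the 0-d operand broadcasts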
Example #4
def test_accuracy(W1, b1, W2, b2, W3, b3, images, labels):
    nums = labels.shape[1]
    z1 = np.matmul(W1, images) + b1
    a1 = common.relu(z1)
    z2 = np.matmul(W2, a1) + b2
    a2 = common.relu(z2)
    z3 = np.matmul(W3, a2) + b3
    cost = common.cross_entropy_with_softmax(labels, z3) / nums
    pred = np.argmax(z3, axis=0)       # predicted class per sample
    label = np.argmax(labels, axis=0)  # true class per sample
    return (np.sum(np.equal(pred, label)) / nums, cost)
Example #5
def test_accuracy(W1, W2, W3, images, labels):
    nums = labels.shape[1]
    z1 = np.matmul(W1, images)
    a1 = para_func.relu(z1)
    z2 = np.matmul(W2, a1)
    a2 = para_func.relu(z2)
    z3 = np.matmul(W3, a2)
    # print("output shape", z3.shape, labels.shape)
    cost = para_func.cross_entropy_with_softmax(labels, z3) / nums
    pred = np.argmax(z3, axis=0)       # predicted class per sample
    label = np.argmax(labels, axis=0)  # true class per sample
    return (100.0 * np.sum(np.equal(pred, label)) / nums, cost)
Example #6
def fetch_neighborhood(user, tabular_np, k):
    user_vector, user_indeces = fetch_user_vector(user, tabular_np)
    data_vector = cupy.take(tabular_np, user_indeces,
                            axis=1).astype(cupy.int64)
    neighbors_data = cupy.zeros(k, dtype=cupy.float64)
    neighbors_indeces = cupy.zeros(k, dtype=int)
    for i in range(data_vector.shape[0]):
        if i != user:
            pearson = evaluate_chromosome(data_vector[i][0], user_vector)
            # Check whether the two vectors have zeros at exactly the
            # same indices (i.e. the same non-zero pattern).
            zero_idx_user = cupy.where(user_vector == 0)[0]
            zero_idx_data = cupy.where(data_vector[i][0] == 0)[0]
            if zero_idx_user.shape == zero_idx_data.shape:
                if cupy.equal(zero_idx_user, zero_idx_data).all():
                    # Set the Pearson coefficient to the minimum so this
                    # vector is never chosen.
                    pearson = -2
                    print(
                        'Warning [2]: Same indices are evaluated [at fetch neighborhood]'
                    )
            # Sort the neighbour buffers in ascending order of their
            # Pearson coefficients (simple in-place exchange sort).
            for a in range(k):
                for b in range(k):
                    if neighbors_data[a] < neighbors_data[b]:
                        neighbors_data[a], neighbors_data[b] = \
                            neighbors_data[b], neighbors_data[a]
                        neighbors_indeces[a], neighbors_indeces[b] = \
                            neighbors_indeces[b], neighbors_indeces[a]
            # check if greater pearson coefficient was found
            for j in range(k):
                if neighbors_data[j] < pearson:
                    neighbors_data[j] = pearson
                    neighbors_indeces[j] = i
                    break
    return neighbors_data, neighbors_indeces
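
The nested loops above run an O(k²) swap pass per candidate, element by element on the GPU, which is slow. A hypothetical alternative (not part of the original code) that picks the top-k neighbours in one vectorised pass with cupy.argsort, assuming a 1-D array of Pearson scores for all candidates:

import cupy

def top_k_neighbors(scores, k):
    # scores: 1-D cupy array of Pearson coefficients, one per candidate.
    order = cupy.argsort(scores)[::-1]  # candidate indices, best score first
    top = order[:k]
    return scores[top], top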
Example #7
def gpu_resize(dPhi, dAmp, src_x, src_y, nx, ny):
    ratio_x = nx/src_x
    ratio_y = ny/src_y
    # print(ratio_x , ratio_y)
    # Sanitise the inputs: replace NaN/Inf values and clamp zero
    # amplitudes so cupy.log stays finite.
    dPhi[cupy.isnan(dPhi)] = 0
    dPhi[cupy.isinf(dPhi)] = 0
    dAmp[cupy.isnan(dAmp)] = 1
    dAmp[cupy.isinf(dAmp)] = 1
    dAmp[cupy.equal(dAmp, 0.0)] = 0.01
    dAmp = cupy.absolute(dAmp)
    dPhi = cupyx.scipy.ndimage.zoom(dPhi, (ratio_y, ratio_x))
    dAmp = cupyx.scipy.ndimage.zoom(dAmp, (ratio_y, ratio_x))
    dField = cupy.log(dAmp) + 1j * dPhi
    return dField
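
The cupy.equal(dAmp, 0.0) mask above exists to keep cupy.log from producing -inf. A tiny sketch of that sanitisation pattern in isolation:

import cupy

amp = cupy.array([0.0, 0.5, 2.0])
amp[cupy.equal(amp, 0.0)] = 0.01  # clamp zeros so log() stays finite
print(cupy.log(amp))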
Example #8
    def criterion(self, y_gt: np.ndarray, y_pred: np.ndarray) -> float:
        '''
        Criterion for the KNN model: returns the accuracy of the predictions.
        :param y_gt: ground truth as a numpy array
        :param y_pred: predicted result as a numpy array
        :return: accuracy as a float
        '''
        assert y_gt.shape == y_pred.shape

        total = y_gt.shape[0]

        correct = np.equal(y_gt, y_pred).sum()

        return correct / total
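
A short usage sketch of the same accuracy computation outside the class (labels here are illustrative):

import numpy as np

y_gt = np.array([0, 1, 2, 1])
y_pred = np.array([0, 1, 1, 1])
# np.equal yields an elementwise boolean mask; its sum counts matches.
print(np.equal(y_gt, y_pred).sum() / y_gt.shape[0])  # 0.75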
Example #9
def disposeRepeated(ex_mat):
    # Remove all repeated source-sink pairs from ex_mat (information
    # about them is kept in the new_ind array).
    index_XM = cp.sum(cp.equal(ex_mat[:, None, :], ex_mat[None]), axis=2) == 2
    indices = cp.where(index_XM)
    ind = (indices[0] > indices[1])
    indices = [indices[0][ind], indices[1][ind]]
    i = cp.ones(len(ex_mat), dtype='i4')
    indices = cp.unique(indices[0])
    i[indices] = 0
    i = i.astype(bool)
    ex_mat = ex_mat[i]

    return ex_mat
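
The broadcasting trick above compares every row against every other row: cp.equal on the (N, 1, 2) and (1, N, 2) views produces an (N, N, 2) boolean array, and a pair of rows is a duplicate exactly when the sum over the last axis equals 2. A minimal demonstration with illustrative data:

import cupy as cp

ex_mat = cp.array([[0, 1], [2, 3], [0, 1]])  # row 2 repeats row 0
same = cp.sum(cp.equal(ex_mat[:, None, :], ex_mat[None]), axis=2) == 2
print(same)  # True wherever two rows are element-for-element equal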
Example #10
def isDifferent(self, other):
    # Check whether the input is a vector
    if not isinstance(other, VectorCupy):
        raise TypeError("Provided input vector is not a %s!" % self.whoami)
    if not self._check_same_device(other):
        raise ValueError('Provided input has to live on the same device')
    # Use a hash of the array buffer for Python 2 and an element-wise
    # cp.equal comparison otherwise
    if version_info[0] == 2:
        # First make both array buffers read-only
        self.arr.flags.writeable = False
        other.arr.flags.writeable = False
        chcksum1 = hash(self.getNdArray().data)
        chcksum2 = hash(other.getNdArray().data)
        # Remake array buffers writable
        self.arr.flags.writeable = True
        other.arr.flags.writeable = True
        isDiff = (chcksum1 != chcksum2)
    else:
        isDiff = (not cp.equal(self.getNdArray(),
                               other.getNdArray()).all())
    return isDiff
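
For the Python 3 branch, cupy also offers array_equal, which collapses the comparison into a single call and returns False for mismatched shapes instead of broadcasting; a possible simplification (sketch, same semantics for equal shapes):

import cupy as cp

a = cp.arange(5)
b = cp.arange(5)
isDiff = not bool(cp.array_equal(a, b))
print(isDiff)  # False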
Example #11
z = squared_diff(x, y)
print(z)
z = squared_diff(x, 5)
print(z)

# call vector_add kernel
print('test vector_add kernel')
x = cp.random.random_sample(5, dtype=np.float32)
print(x)
y = cp.random.random_sample(5, dtype=np.float32)
print(y)
z = vector_add(x, y)
print(z)
w = x + y
print(w)
print(cp.equal(w, z))

# call mat_add kernel
print('test mat_add kernel')
x = cp.random.random_sample((2, 3), dtype=np.float32)
# test broadcast op
# x = cp.random.random_sample((2, 1), dtype=np.float32)
print(x)
y = cp.random.random_sample((2, 3), dtype=np.float32)
print(y)
z = mat_add(x, y)
print(z)
w = x + y
print(w)
print(cp.equal(w, z))
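
One caveat for the element-wise checks above: cp.equal tests exact float equality, which can fail when a custom kernel reorders floating-point operations. cp.allclose is the usual tolerance-based alternative (sketch):

import cupy as cp

w = cp.random.random_sample(5, dtype=cp.float32)
z = w + cp.float32(1e-7)  # simulate a tiny rounding difference
print(cp.equal(w, z))     # exact comparison; may be False everywhere
print(cp.allclose(w, z, atol=1e-6))  # True within tolerance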
Example #12
def test___array__():
    a = ones((2, 3), dtype=int16)
    assert cp.asarray(a) is a._array
    b = cp.asarray(a, dtype=cp.float64)
    assert cp.all(cp.equal(b, cp.ones((2, 3), dtype=cp.float64)))
    assert b.dtype == cp.float64
Example #13
def accuracy(a_hat, a_true):
    result = cp.equal(a_hat, a_true)
    right = cp.count_nonzero(result)
    return right / a_hat.shape[1]
Example #14
def getNextPrediction(fileJac: str, measuring_electrodes: np.ndarray, voltages: np.ndarray, 
              num_returned: int=10, n_el: int=20, n_per_el: int=3, n_pix: int=64, pert: float=0.5, 
              p_influence: float=-10., p_rec: float=10., p: float=0.2, lamb: float=0.1) -> np.ndarray:
    # extract the constant-permittivity Jacobian and voltage (& other data)
    file = h5.File(fileJac, 'r')

    meas = file['meas'][()]
    new_ind = file['new_ind'][()]
    nodes = file['p'][()]  # renamed from p so the regularisation parameter is not shadowed
    t = file['t'][()]
    file.close()
    # initialise constant permittivity and el_pos variables
    perm = np.ones(t.shape[0], dtype=np.float32)
    el_pos = np.arange(n_el * n_per_el).astype(np.int16)
    mesh_obj = {'element': t,
        'node': nodes,
        'perm': perm}
    # list all possible active/measuring electrode permutations of this measurement
    meas = cp.array(meas)
    # find their indices in the already calculated constant-permittivity Jacobian (CPJ)
    measuring_electrodes = cp.array(measuring_electrodes)
    measurements_0 = cp.amin(measuring_electrodes[:, :2], axis=1)
    measurements_1 = cp.amax(measuring_electrodes[:, :2], axis=1)
    measurements_2 = cp.amin(measuring_electrodes[:, 2:], axis=1)
    measurements_3 = cp.amax(measuring_electrodes[:, 2:], axis=1)
    measuring_electrodes = cp.empty((len(measuring_electrodes), 4))
    measuring_electrodes[:, 0] = measurements_0
    measuring_electrodes[:, 1] = measurements_1
    measuring_electrodes[:, 2] = measurements_2
    measuring_electrodes[:, 3] = measurements_3
    index = (cp.sum(cp.equal(measuring_electrodes[:, None, :], meas[None, :, :]), axis=2) == 4)
    index = cp.where(index)
    #print(index)
    ind = cp.unique(index[1])
    #print(ind)
    i = cp.asnumpy(ind)
    j = index[0]
    mask = np.zeros(len(meas), dtype=int)
    mask[i] = 1
    mask = mask.astype(bool)
    # take a slice of Jacobian, voltage readings and B matrix (the one corresponding to the performed measurements)
    file = h5.File(fileJac, 'r')
    jac = file['jac'][mask, :][()]
    v = file['v'][mask][()]
    b = file['b'][mask, :][()]
    file.close()
    # put them in the form desired by the GREIT function
    pde_result = train.namedtuple("pde_result", ['jac', 'v', 'b_matrix'])
    f = pde_result(jac=jac, v=v, b_matrix=b)
    
    # now we can use the real voltage readings and the GREIT algorithm to reconstruct
    greit = train.greit.GREIT(mesh_obj, el_pos, f=f, ex_mat=(meas[index[1], :2]), step=None)
    greit.setup(p=p, lamb=lamb, n=n_pix)
    h_mat = greit.H
    reconstruction = greit.solve(voltages, f.v).reshape(n_pix, n_pix)
    # fix_electrodes_multiple is in meshing.py
    _, el_coords = train.fix_electrodes_multiple(centre=None, edgeX=0.1, edgeY=0.1, a=2, b=2, ppl=n_el, el_width=0.02, num_per_el=3)
    # find the distances between each existing electrode pair and the pixels lying on the line that connects them
    pixel_indices, voltage_all_possible = measopt.find_all_distances(reconstruction, h_mat, el_coords, n_el, cutoff=0.8)
    # call function get_total_map that generates the influence map, the gradient map and the log-reconstruction
    total_map, grad_mat, rec_log = np.abs(measopt.get_total_map(reconstruction, voltages, h_mat, pert=pert, p_influence=p_influence, p_rec=p_rec))
    # get the indices of the total map along the lines connecting each possible electrode pair
    total_maps_along_lines = total_map[None] * pixel_indices
    # find how close each connecting line passes to the boundary of an anomaly (where the gradient is expected to be higher)
    proximity_to_boundary = np.sum(total_maps_along_lines, axis=(1, 2)) / np.sum(pixel_indices, axis=(1, 2))
    # rate the possible src-sink pairs by their proximity to existing anomalies
    proposed_ex_line = voltage_all_possible[np.argsort(proximity_to_boundary)[::-1]][:num_returned]

    number_of_voltages = 10
    # generate the voltage measuring electrodes for this current driver pair
    proposed_voltage_pairs = measopt.findNextVoltagePair(proposed_ex_line[0], fileJac, total_map, number_of_voltages, 0, npix=n_pix, cutoff=0.97)
    return proposed_ex_line, proposed_voltage_pairs, reconstruction, total_map
Example #15
def __eq__(self, other):
    return cupy.equal(self, other)
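
Note that this __eq__ returns an element-wise boolean array rather than a single bool, mirroring NumPy/CuPy operator semantics; callers must collapse it explicitly, e.g.:

import cupy

a = cupy.arange(3)
eq = cupy.equal(a, cupy.arange(3))  # array of per-element results
print(bool(eq.all()))               # collapse to one scalar answer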
Example #16
def estimate_JEMMIG_cupy(z_mean,
                         z_stddev,
                         y,
                         x_weights=None,
                         num_samples=10000,
                         seed=None,
                         batch_size=10,
                         eps=1e-8,
                         gpu=0):
    """
    We simply need to care about log q(zi,yk) - log q(zi) - log q(yk) =
        log sum_x^(n) (q(zi | x^(n)) q(yk | x^(n)))
    Note that, yk^(m) for a particular x^(n) can be 0.

    :param z_mean: (N, z_dim), mean of q(z|x^(n))
    :param z_stddev: (N, z_dim), stddev of q(z|x^(n))
    :param y: (N, num_factors), an 2D int array contains values of factors
    :param x_weights: A vector of length N
    :param num_samples: Number of samples
    :param seed: Numpy random state used for sampling
    :param batch_size: Batch size during computation
    :param eps: Batch size during computation
    :param gpu: ID of the gpu
    :return:
    """
    print("num_samples: {}".format(num_samples))
    print("batch_size: {}".format(batch_size))

    # Processing z
    # ------------------------------------ #
    assert z_mean.shape == z_stddev.shape and len(z_mean.shape) == 2, \
        "z_mean.shape={} and z_stddev={}".format(z_mean.shape, z_stddev.shape)

    num_x, z_dim = z_mean.shape[0], z_mean.shape[1]
    print("num_x: {}".format(num_x))
    print("z_dim: {}".format(z_dim))

    if x_weights is None:
        x_weights = np.full([num_x], fill_value=1.0 / num_x, dtype=np.float32)

    assert 1.0 - 1e-5 <= np.sum(
        x_weights
    ) <= 1.0 + 1e-5, "'weights' should sum to 1. Found {:.5f}!".format(
        np.sum(x_weights))

    rs = np.random.RandomState(seed)
    rand_ids = rs.choice(num_x, size=num_samples, replace=True, p=x_weights)

    # (S, z_dim)
    noise = rs.randn(num_samples, z_dim)
    z_samples = z_mean[rand_ids] + noise * z_stddev[rand_ids]
    # ------------------------------------ #

    # Processing y
    # ------------------------------------ #
    print("Processing ground truth factors!")

    assert len(y.shape) == 2 and ('int' in y.dtype.name) and len(y) == num_x, \
        "y.shape={} and y.dtype={}!".format(y.shape, y.dtype.name)

    y_dim = y.shape[1]

    y_unique = []
    y_prob = []
    H_yk = []

    for k in range(y_dim):
        yk_unique, yk_count = np.unique(y[:, k], return_counts=True)
        y_unique.append(yk_unique)

        yk_prob = yk_count / (1.0 * np.sum(yk_count))
        y_prob.append(yk_prob)

        H_yk.append(-np.sum(yk_prob * np.log(np.maximum(yk_prob, eps))))

    H_yk = np.asarray(H_yk, dtype=np.float32)

    print("Done!")
    # ------------------------------------ #

    import cupy as cp
    with cp.cuda.Device(gpu):
        # (S, z_dim)
        z_samples = cp.asarray(z_samples)
        # (N, 1, z_dim)
        z_mean = cp.expand_dims(cp.asarray(z_mean), axis=1)
        # (N, 1, z_dim)
        z_stddev = cp.expand_dims(cp.asarray(z_stddev), axis=1)
        # (N, y_dim)
        y = cp.asarray(y)

        print("z_samples.shape: {}".format(z_samples.shape))
        print("z_mean.shape: {}".format(z_mean.shape))
        print("z_stddev.shape: {}".format(z_stddev.shape))

        x_weights = cp.asarray(x_weights)
        print("x_weights.shape: {}".format(x_weights.shape))

        # (z_dim,)
        H_zi_cond_x = cp.asarray(np.zeros(z_dim, dtype=np.float32))
        # (z_dim,)
        H_zi = cp.asarray(np.zeros(z_dim, dtype=np.float32))
        # (z_dim, y_dim)
        H_zi_yk = cp.asarray(np.zeros([z_dim, y_dim], dtype=np.float32))

        progress_bar = tqdm(total=num_samples)
        count = 0

        while count < num_samples:
            b = min(batch_size, num_samples - count)

            # (S_batch, z_dim)
            q_zi_cond_x_batch = normal_density_cupy(
                z_samples[count:count + b],
                z_mean[rand_ids[count:count + b], 0, :],
                z_stddev[rand_ids[count:count + b], 0, :],
                eps=eps,
                gpu=gpu)

            # (1, S_batch, z_dim)
            z_batch = cp.expand_dims(z_samples[count:count + b], axis=0)

            # (N, S_batch, z_dim)
            q_zi_cond_x_all_batch = normal_density_cupy(z_batch,
                                                        z_mean,
                                                        z_stddev,
                                                        eps=eps,
                                                        gpu=gpu)

            # Computing H_zi_cond_x
            # --------------------------------- #
            H_zi_cond_x += -cp.sum(cp.log(cp.maximum(q_zi_cond_x_batch, eps)),
                                   axis=0)
            # --------------------------------- #

            # Computing H_zi
            # --------------------------------- #
            # Sum (p(x^(n)) * q(zi|x^(n)))
            # (N, 1, 1)  * (N, S_batch, z_dim) then sum axis 0 => (S_batch, z_dim)
            q_zi_batch = cp.sum(
                cp.expand_dims(cp.expand_dims(x_weights, axis=-1), axis=-1) *
                q_zi_cond_x_all_batch,
                axis=0)

            # Sum (S_batch, z_dim) over S_batch => (z_dim,)
            H_zi += -cp.sum(cp.log(cp.maximum(q_zi_batch, eps)), axis=0)
            # --------------------------------- #

            for k in range(y_dim):
                # (N, 1) == (1, S_batch) => (N, S_batch)
                q_yk_cond_x_all_batch = cp.equal(
                    cp.expand_dims(y[:, k], axis=-1),
                    cp.expand_dims(y[rand_ids[count:count + b], k], axis=0))

                # print("q_yk_cond_x_all_batch[:10, :10]: {}".format(q_yk_cond_x_all_batch[:10, :10]))

                # (N, S_batch, 1)
                q_yk_cond_x_all_batch = cp.expand_dims(cp.asarray(
                    q_yk_cond_x_all_batch, dtype=cp.float32),
                                                       axis=-1)

                # (N, S_batch, z_dim) * (N, S_batch, 1) then sum over axis=0 => (S_batch, z_dim)
                q_zi_yk_batch = cp.sum(
                    cp.expand_dims(cp.expand_dims(x_weights, axis=-1), axis=-1)
                    * q_zi_cond_x_all_batch * q_yk_cond_x_all_batch,
                    axis=0)

                # Sum (S_batch, z_dim) over S_batch => (z_dim, )
                H_zi_yk[:,
                        k] += -cp.sum(cp.log(cp.maximum(q_zi_yk_batch, eps)),
                                      axis=0)

            count += b
            progress_bar.update(b)

        progress_bar.close()

        # Convert entropies back to numpy
        H_zi_cond_x = cp.asnumpy(H_zi_cond_x)
        H_zi = cp.asnumpy(H_zi)
        H_zi_yk = cp.asnumpy(H_zi_yk)

    # (z_dim, )
    H_zi_cond_x = H_zi_cond_x / (1.0 * num_samples)
    H_zi = H_zi / (1.0 * num_samples)
    H_zi_yk = H_zi_yk / (1.0 * num_samples)

    print("H_yk: {}".format(H_yk))
    print("\nH_zi: {}".format(H_zi))
    print("\nH_zi_yk:\n{}".format(H_zi_yk))

    MI_zi_yk = np.expand_dims(H_zi, axis=-1) + np.expand_dims(H_yk,
                                                              axis=0) - H_zi_yk

    ids_sorted = []
    MI_zi_yk_sorted = []
    H_zi_yk_sorted = []

    RMIG_yk = []
    RMIG_norm_yk = []
    JEMMIG_yk = []

    for k in range(y_dim):
        # Compute RMIG and JEMMI
        ids_sorted_at_k = np.argsort(MI_zi_yk[:, k], axis=0)[::-1]
        MI_zi_yk_sorted_at_k = np.take_along_axis(MI_zi_yk[:, k],
                                                  ids_sorted_at_k,
                                                  axis=0)
        H_zi_yk_sorted_at_k = np.take_along_axis(H_zi_yk[:, k],
                                                 ids_sorted_at_k,
                                                 axis=0)

        RMIG_yk_at_k = MI_zi_yk_sorted_at_k[0] - MI_zi_yk_sorted_at_k[1]
        RMIG_norm_yk_at_k = np.divide(RMIG_yk_at_k, H_yk[k])
        JEMMIG_yk_at_k = H_zi_yk_sorted_at_k[0] - MI_zi_yk_sorted_at_k[
            0] + MI_zi_yk_sorted_at_k[1]

        ids_sorted.append(ids_sorted_at_k)
        MI_zi_yk_sorted.append(MI_zi_yk_sorted_at_k)
        H_zi_yk_sorted.append(H_zi_yk_sorted_at_k)

        RMIG_yk.append(RMIG_yk_at_k)
        RMIG_norm_yk.append(RMIG_norm_yk_at_k)
        JEMMIG_yk.append(JEMMIG_yk_at_k)

    ids_sorted = np.stack(ids_sorted, axis=-1)
    MI_zi_yk_sorted = np.stack(MI_zi_yk_sorted, axis=-1)
    H_zi_yk_sorted = np.stack(H_zi_yk_sorted, axis=-1)

    RMIG_yk = np.asarray(RMIG_yk, dtype=np.float32)
    RMIG_norm_yk = np.asarray(RMIG_norm_yk, dtype=np.float32)
    RMIG_norm = np.mean(RMIG_norm_yk, axis=0)

    JEMMIG_yk = np.stack(JEMMIG_yk, axis=-1)

    results = {
        "H_yk": H_yk,
        "H_zi_cond_x": H_zi_cond_x,
        "H_zi": H_zi,
        "H_zi_yk": H_zi_yk,
        "H_zi_yk_sorted": H_zi_yk_sorted,
        "MI_zi_yk": MI_zi_yk,
        "MI_zi_yk_sorted": MI_zi_yk_sorted,
        "id_sorted": ids_sorted,
        "RMIG_yk": RMIG_yk,
        "RMIG_norm_yk": RMIG_norm_yk,
        "RMIG_norm": RMIG_norm,
        "JEMMIG_yk": JEMMIG_yk,
    }

    return results
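
The cp.equal call in the inner loop builds the indicator "density" q(yk | x^(n)): 1 where the factor value of x^(n) matches the sampled factor value, 0 otherwise. A standalone sketch of that construction with illustrative shapes (N=4 inputs, S=2 samples):

import cupy as cp

y_k = cp.array([0, 1, 1, 2])    # factor k for all N inputs
y_k_samples = cp.array([1, 2])  # factor k for the S sampled inputs
# (N, 1) == (1, S) broadcasts to an (N, S) indicator matrix.
indicator = cp.equal(y_k[:, None], y_k_samples[None, :]).astype(cp.float32)
print(indicator)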
Example #17
    h = x.dot(w1)
    h_relu = cp.maximum(h, 0)  # using ReLU as the activation function
    y_pred = h_relu.dot(w2)

    # Compute and print loss
    loss = cp.square(y_pred - y).sum()  # loss function
    loss_col.append(loss)
    print("第%d次訓練\n誤差:%f\n輸出:" % (t, loss))
    print(y_pred)
    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)  # the last layer's error
    grad_w2 = h_relu.T.dot(grad_y_pred)
    grad_h_relu = grad_y_pred.dot(w2.T)  # the second layer's error
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0  # the derivative of ReLU
    grad_w1 = x.T.dot(grad_h)

    # Update weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
    result = cp.equal(cp.argmax(y, 1), cp.argmax(y_pred, 1))
    for i in result:
        if i:
            k += 1
    t = t + 1
    print("答對筆數/總筆數: %d/%d" % (k, N))
    print()
    if t % 100 == 0:
        plt.plot(loss_col)
        plt.show()
Example #18
def accuracy(y_pred, y_true):
    if len(y_true[0]) == 1:
        return cp.asnumpy(cp.mean(cp.equal(cp.where(y_pred<0.5, 0, 1), y_true).astype(cp.float32)))
    else:
        return cp.asnumpy(cp.mean(cp.equal(cp.argmax(y_pred, 1), cp.argmax(y_true, 1)).astype(cp.float32)))
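
A brief usage sketch of this helper for the one-hot branch (data is illustrative; assumes the accuracy function above is in scope):

import cupy as cp

y_pred = cp.array([[0.9, 0.1], [0.2, 0.8]])  # model outputs
y_true = cp.array([[1, 0], [0, 1]])          # one-hot labels
print(accuracy(y_pred, y_true))              # 1.0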
Example #19
def simulateMeasurements(fileJac, anomaly=0, measurements=None, v_meas=None, n_el=20, n_per_el=3, n_pix=64, a=2.):
	# extract the constant-permittivity Jacobian and voltage (& other data)
	file = h5.File(fileJac, 'r')

	meas = file['meas'][()]
	new_ind = file['new_ind'][()]
	p = file['p'][()]
	t = file['t'][()]
	file.close()
	# initialise constant permittivity and el_pos variables
	perm = np.ones(t.shape[0], dtype=np.float32)
	el_pos = np.arange(n_el * n_per_el).astype(np.int16)
	mesh_obj = {'element': t,
				'node':	p,
				'perm':	perm}

	#for testing
	if measurements is None:
		el_dist = np.random.randint(1, 20)
		ex_mat = (cp.concatenate((cp.arange(20)[None], (cp.arange(20) + el_dist)[None])) % 20).T
		#print(ex_mat.shape)
		fem_all = Forward(mesh_obj, el_pos)
		measurements = fem_all.voltMeter(ex_mat)
		#ex_mat = mesurements[1]
		measurements = cp.concatenate((measurements[1], measurements[0]), axis=1)
		#print(measurements.shape)
	# list all possible active/measuring electrode permutations of this measurement
	meas = cp.array(meas)
	# find their indices in the already calculated constant-permittivity Jacobian (CPJ)
	measurements = cp.array(measurements)
	measurements_0 = cp.amin(measurements[:, :2], axis=1)
	measurements_1 = cp.amax(measurements[:, :2], axis=1)
	measurements_2 = cp.amin(measurements[:, 2:], axis=1)
	measurements_3 = cp.amax(measurements[:, 2:], axis=1)
	measurements = cp.empty((len(measurements), 4))
	measurements[:, 0] = measurements_0
	measurements[:, 1] = measurements_1
	measurements[:, 2] = measurements_2
	measurements[:, 3] = measurements_3
	index = (cp.sum(cp.equal(measurements[:, None, :], meas[None, :, :]), axis=2) == 4)
	index = cp.where(index)
	ind = cp.unique(index[1])
	i = cp.asnumpy(ind)
	j = index[0]
	mask = np.zeros(len(meas), dtype=int)
	mask[i] = 1
	mask = mask.astype(bool)
	# take a slice of Jacobian, voltage readings and B matrix
	file = h5.File(fileJac, 'r')
	jac = file['jac'][mask, :][()]
	v = file['v'][mask][()]
	b = file['b'][mask, :][()]
	file.close()
	pde_result = train.namedtuple("pde_result", ['jac', 'v', 'b_matrix'])
	f = pde_result(jac=jac,
				   v=v,
				   b_matrix=b)
	
	# simulate voltage readings if not given
	true = None  # the ground-truth image is only generated when simulating
	if v_meas is None:
		if np.isscalar(anomaly):
			print("generating new anomaly")
			anomaly = train.generate_anoms(a, a)
		true = train.generate_examplary_output(a, int(n_pix), anomaly)
		mesh_new = train.set_perm(mesh_obj, anomaly=anomaly, background=1)
		fem = FEM(mesh_obj, el_pos, n_el)
		new_ind = cp.array(new_ind)
		f2, raw = fem.solve_eit(volt_mat_all=meas[ind, 2:], new_ind=new_ind[ind], ex_mat=meas[ind, :2], parser=None, perm=mesh_new['perm'].astype('f8'))
		v_meas = f2.v
		'''
		#plot
		fig = plt.figure(3)
		x, y = p[:, 0], p[:, 1]
		ax1 = fig.add_subplot(111)
		# draw equi-potential lines
		print(raw.shape)
		raw = cp.asnumpy(raw[5]).ravel()
		vf = np.linspace(min(raw), max(raw), 32)
		ax1.tricontour(x, y, t, raw, vf, cmap=plt.cm.viridis)
		# draw mesh structure
		ax1.tripcolor(x, y, t, np.real(perm),
					  edgecolors='k', shading='flat', alpha=0.5,
					  cmap=plt.cm.Greys)

		ax1.plot(x[el_pos], y[el_pos], 'ro')
		for i, e in enumerate(el_pos):
			ax1.text(x[e], y[e], str(i+1), size=12)
		ax1.set_title('Equipotential Lines of Uniform Permittivity')
		# clean up
		ax1.set_aspect('equal')
		ax1.set_ylim([-1.2, 1.2])
		ax1.set_xlim([-1.2, 1.2])
		fig.set_size_inches(6, 6)
		#plt.show()'''
	elif len(measurements) == len(v_meas):
		measurements = np.array(measurements)
		v_meas = np.array(v_meas[j[:len(ind)]])
	else:
		raise ValueError('Sizes of arrays do not match (have to have voltage reading for each measurement). If you don\'t have readings, leave empty for simulation.')
	print('Number of measurements:', len(v_meas), len(f.v))

	# now we can use the real voltage readings and the GREIT algorithm to reconstruct
	greit = train.greit.GREIT(mesh_obj, el_pos, f=f, ex_mat=(meas[index[1], :2]), step=None)
	greit.setup(p=0.2, lamb=0.01, n=n_pix)
	h_mat = greit.H
	reconstruction = greit.solve(v_meas, f.v).reshape(n_pix, n_pix)
	
	# optional: see reconstruction
	'''
	plt.figure(1)
	im1 = plt.imshow(reconstruction, cmap=plt.cm.viridis, origin='lower', extent=[-1, 1, -1, 1])
	plt.title("Reconstruction")
	plt.colorbar(im1)
	plt.figure(2)
	im2 = plt.imshow(true, cmap=plt.cm.viridis, origin='lower', extent=[-1, 1, -1, 1])
	plt.colorbar(im2)
	plt.title("True Image")
	plt.show()
	'''
	return reconstruction, h_mat, v_meas, f.v, true, len(v_meas)