Example #1
    def make_step(self, signals, dt, rng):
        if self.conv.dimensions > 2:
            # note: we raise the error here, rather than earlier, because
            # other backends might support different convolutions
            raise NotImplementedError("Convolution > 2D not supported")

        W = signals[self.W]
        X = signals[self.X]
        Y = signals[self.Y]
        pad = self.conv.padding.upper()
        stride = self.conv.strides

        X = X.reshape(self.conv.input_shape.shape)
        Y = Y.reshape(self.conv.output_shape.shape)

        if not self.conv.channels_last:
            X = np.moveaxis(X, 0, -1)
            Y = np.moveaxis(Y, 0, -1)

        if self.conv.dimensions == 1:
            # add extra dimension to make it a 2D convolution
            X = X[None, :, :]
            W = W[None, :, :, :]
            Y = Y[None, :, :]
            stride = (1,) + stride

        # add empty batch dimension
        X = X[None, ...]

        def step_conv():
            Y[...] += conv2d.conv2d(X, W, pad=pad, stride=stride)[0]

        return step_conv
Example #2
def get_float_tensor_from_cntk_convolutional_weight_parameter(tensorParameter):
    """Returns an ELL.FloatTensor from a trainable parameter
       Note that ELL's ordering is row, column, channel.
       4D parameters (e.g. those that represent convolutional weights) are stacked vertically in the row dimension.
       CNTK has them in filter, channel, row, column order.
    """
    tensorShape = tensorParameter.shape
    tensorValue = tensorParameter.value

    if (len(tensorShape) == 4):
        orderedWeights = np.moveaxis(tensorValue, 1, -1)
        orderedWeights = orderedWeights.ravel().astype(float).reshape(
            tensorShape[0] * tensorShape[2], tensorShape[3], tensorShape[1])
    elif (len(tensorShape) == 3):
        orderedWeights = np.moveaxis(tensorValue, 0, -1)
        orderedWeights = orderedWeights.ravel().astype(float).reshape(
            tensorShape[1], tensorShape[2], tensorShape[0])
    elif (len(tensorShape) == 2):
        orderedWeights = np.moveaxis(tensorValue, 0, -1)
        orderedWeights = orderedWeights.ravel().astype(float).reshape(
            tensorShape[1], tensorShape[0], 1)
    else:
        orderedWeights = tensorValue.ravel().astype(float).reshape(
            1, 1, tensorValue.size)
    return ELL.FloatTensor(orderedWeights)
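A quick way to sanity-check the reordering above is to trace shapes on a dummy array; a minimal sketch (shapes only, no CNTK or ELL involved):

import numpy as np

# Hypothetical 4D weight in CNTK order: (filter, channel, row, column)
w = np.zeros((2, 3, 4, 5))
# moveaxis(w, 1, -1) puts channels last: (filter, row, column, channel)
assert np.moveaxis(w, 1, -1).shape == (2, 4, 5, 3)
# the reshape then stacks filters vertically in the row dimension:
# (filter * row, column, channel)
assert np.moveaxis(w, 1, -1).reshape(2 * 4, 5, 3).shape == (8, 5, 3)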
Example #3
File: numbers.py Project: SKIRT/PTS
def weighed_arithmetic_mean_numpy(data, weights=None):

    """
    Calculate the weighted mean of an array/list using numpy
    """

    # Not weighted
    if weights is None: return arithmetic_mean_numpy(data)

    import numpy as np

    # Get the number of dimensions
    ndim_data = len(data.shape)
    ndim_weights = len(weights.shape)

    if ndim_weights > 1:

        weights = np.copy(weights)
        divisors = np.sum(weights, axis=-1)

        # normalize along the last axis: move it to the front so each
        # slice can be divided by its sum, then move the axis back
        norm_weights = np.moveaxis(weights, -1, 0)
        for index in range(norm_weights.shape[0]): norm_weights[index] /= divisors
        norm_weights = np.moveaxis(norm_weights, 0, 1)

    else: norm_weights = weights / float(np.sum(weights))

    return np.dot(data, norm_weights)
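For 2D weights, the moveaxis/divide/moveaxis dance above is equivalent to a keepdims division along the last axis; a minimal sketch (note that the final moveaxis(..., 0, 1) only restores the original axis order for 2D inputs):

import numpy as np

weights = np.random.rand(3, 5)
norm_weights = weights / np.sum(weights, axis=-1, keepdims=True)
assert np.allclose(norm_weights.sum(axis=-1), 1.0)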
Example #4
    def calc(self, pars, elo, xlo, ylo, ehi, xhi, yhi):
        etrue_centers = self.true_energy.log_centers
        if self.use_psf:
            # Convolve the spatial model * exposure by the psf in etrue
            spatial = np.zeros((self.dim_Etrue, self.dim_x, self.dim_y))
            a = self.spatial_model.calc(pars[self._spatial_pars], self.xx_lo.ravel(), self.xx_hi.ravel(),
                                        self.yy_lo.ravel(), self.yy_hi.ravel()).reshape(self.xx_lo.shape)
            for ind_E in range(self.dim_Etrue):
                spatial[ind_E, :, :] = self._fftconvolve(a * self.exposure.data[ind_E, :, :],
                                                         self.psf.data[ind_E, :, :] /
                                                         (self.psf.data[ind_E, :, :].sum()), mode='same')
                # Zero out NaNs so that true-energy bins where the PSF is not
                # defined (the interpolation returns NaN outside its range) do
                # not contribute to the sum over true energy that gives the
                # expected counts in each reconstructed energy bin.
                spatial[np.isnan(spatial)] = 0
        else:
            spatial_2d = self.spatial_model.calc(pars[self._spatial_pars], self.xx_lo.ravel(), self.xx_hi.ravel(),
                                                 self.yy_lo.ravel(), self.yy_hi.ravel()).reshape(self.xx_lo.shape)
            spatial = np.tile(spatial_2d, (len(etrue_centers), 1, 1))
        # Calculate the spectral model in etrue
        spectral_1d = self.spectral_model.calc(pars[self._spectral_pars], etrue_centers)
        spectral = spectral_1d.reshape(len(etrue_centers), 1, 1) * np.ones_like(self.xx_lo)

        # Convolve by the energy resolution
        etrue_band = self.true_energy.bands
        for ireco in range(self.dim_Ereco):
            self.convolve_edisp[:, :, :, ireco] = np.moveaxis(spatial, 0, -1) * np.moveaxis(spectral, 0, -1) * \
                                                  self.edisp[:, ireco] * etrue_band
        # Integration in etrue
        model = np.moveaxis(np.sum(self.convolve_edisp, axis=2), -1, 0)
        if not self.select_region:
            return model.ravel()
        else:
            return model[self.index_selected_region].ravel()
Example #5
def get_float_tensor_from_cntk_dense_weight_parameter(tensorParameter):
    """Returns an ELL.FloatTensor from a trainable parameter
       Note that ELL's ordering is row, column, channel.
       CNTK has them in channel, row, column, filter order.
       4D parameters are converted to ELL Tensor by stacking vertically in the row dimension.
    """
    tensorShape = tensorParameter.shape
    tensorValue = tensorParameter.value

    if (len(tensorShape) == 4):
        orderedWeights = tensorValue
        orderedWeights = np.moveaxis(orderedWeights, 0, -1)
        orderedWeights = np.moveaxis(orderedWeights, 2, 0)
        orderedWeights = orderedWeights.ravel().astype(float).reshape(
            tensorShape[3] * tensorShape[1], tensorShape[2], tensorShape[0])
    elif (len(tensorShape) == 3):
        orderedWeights = np.moveaxis(tensorValue, 0, -1)
        orderedWeights = orderedWeights.ravel().astype(float).reshape(
            tensorShape[1], tensorShape[2], tensorShape[0])
    elif (len(tensorShape) == 2):
        orderedWeights = np.moveaxis(tensorValue, 0, -1)
        orderedWeights = orderedWeights.ravel().astype(float).reshape(
            tensorShape[1], 1, tensorShape[0])
    else:
        orderedWeights = tensorValue.ravel().astype(float).reshape(
            1, 1, tensorValue.size)

    return ELL.FloatTensor(orderedWeights)
Example #6
    def test_exceptions(self):
        # test axis must be in bounds
        for ndim in [1, 2, 3]:
            a = np.ones((1,)*ndim)
            np.concatenate((a, a), axis=0)  # OK
            assert_raises(np.AxisError, np.concatenate, (a, a), axis=ndim)
            assert_raises(np.AxisError, np.concatenate, (a, a), axis=-(ndim + 1))

        # Scalars cannot be concatenated
        assert_raises(ValueError, concatenate, (0,))
        assert_raises(ValueError, concatenate, (np.array(0),))

        # test shapes must match except for concatenation axis
        a = np.ones((1, 2, 3))
        b = np.ones((2, 2, 3))
        axis = list(range(3))
        for i in range(3):
            np.concatenate((a, b), axis=axis[0])  # OK
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[1])
            assert_raises(ValueError, np.concatenate, (a, b), axis=axis[2])
            a = np.moveaxis(a, -1, 0)
            b = np.moveaxis(b, -1, 0)
            axis.append(axis.pop(0))

        # No arrays to concatenate raises ValueError
        assert_raises(ValueError, concatenate, ())
Example #7
def coral_numpy(source, target):
    n_channels = source.shape[-1]

    source = np.moveaxis(source, -1, 0)  # HxWxC -> CxHxW
    target = np.moveaxis(target, -1, 0)  # HxWxC -> CxHxW

    source_flatten = source.reshape(n_channels, source.shape[1]*source.shape[2])
    target_flatten = target.reshape(n_channels, target.shape[1]*target.shape[2])

    source_flatten_mean = source_flatten.mean(axis=1, keepdims=True)
    source_flatten_std = source_flatten.std(axis=1, keepdims=True)
    source_flatten_norm = (source_flatten - source_flatten_mean) / source_flatten_std

    target_flatten_mean = target_flatten.mean(axis=1, keepdims=True)
    target_flatten_std = target_flatten.std(axis=1, keepdims=True)
    target_flatten_norm = (target_flatten - target_flatten_mean) / target_flatten_std

    source_flatten_cov_eye = source_flatten_norm.dot(source_flatten_norm.T) + np.eye(n_channels)
    target_flatten_cov_eye = target_flatten_norm.dot(target_flatten_norm.T) + np.eye(n_channels)

    source_flatten_norm_transfer = matSqrt_numpy(target_flatten_cov_eye).dot(np.linalg.inv(matSqrt_numpy(source_flatten_cov_eye))).dot(source_flatten_norm)
    source_flatten_transfer = source_flatten_norm_transfer * target_flatten_std + target_flatten_mean

    coraled = source_flatten_transfer.reshape(source.shape)
    coraled = np.moveaxis(coraled, 0, -1)  # CxHxW -> HxWxC

    return coraled
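coral_numpy relies on a matSqrt_numpy helper that is not shown here; a minimal sketch of a symmetric positive-semidefinite matrix square root via eigendecomposition, with an assumed HxWxC float input:

import numpy as np

def matSqrt_numpy(m):
    # square root of a symmetric PSD matrix: V diag(sqrt(w)) V^T
    w, v = np.linalg.eigh(m)
    return (v * np.sqrt(np.clip(w, 0, None))).dot(v.T)

source = np.random.rand(64, 48, 3)
target = np.random.rand(32, 32, 3)
out = coral_numpy(source, target)  # color statistics of target, shape of source
assert out.shape == (64, 48, 3)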
Example #8
def test_pick():
    group = icosahedral.Pyritohedral()
    full = group.group

    from pycomplex.math import linalg
    N = 128
    points = np.moveaxis(np.indices((N, N)).astype(float), 0, -1) / (N - 1) * 2 - 1
    z = np.sqrt(np.clip(1 - linalg.dot(points, points), 0, 1))
    points = np.concatenate([points, z[..., None]], axis=-1)


    element_idx, sub_idx, quotient_idx, bary = group.pick(points.reshape(-1, 3))


    if False:
        col = bary
    else:
        col = np.array([
            sub_idx.astype(float) / sub_idx.max(),
            sub_idx * 0,
            quotient_idx.astype(float) / quotient_idx.max()
        ]).T


    plt.figure()
    img = np.flip(np.moveaxis(col.reshape(N, N, 3), 0, 1), axis=0)
    # img = (img * 255).astype(np.uint8)
    plt.imshow(img)

    plt.show()
Example #9
def load_and_subsample(raw_img_path, substep, low_freq_percent):
    """
    Loads and subsamples an MR image in Analyze format

    Parameters
    ------------
    raw_img_path : str
        The path to the MR image
    substep : int
        The substep to use when subsampling image slices
    low_freq_percent : float
        The percentage of low frequency data to retain when subsampling slices

    Returns
    ------------
    tuple
        A triple containing the following ordered numpy arrays:

        1. The subsampled MR image (datatype `np.float32`)
        2. The k-space representation of the subsampled MR image (datatype `np.complex128`)
        3. The original MR image (datatype `np.float32`)
    """
    original_img = load_image_data(analyze_img_path=raw_img_path)
    subsampled_img, subsampled_k = subsample(
        analyze_img_data=original_img,
        substep=substep,
        low_freq_percent=low_freq_percent)

    original_img = np.moveaxis(original_img, -1, 0)
    subsampled_img = np.moveaxis(subsampled_img, -1, 0)
    subsampled_k = np.moveaxis(subsampled_k, -1, 0)

    return subsampled_img, subsampled_k, original_img
Example #10
def load_data(dirp,nb_classes):

    # load the training and validation splits from pickle files
    X_train = pickle.load(open(os.path.join(dirp, 'X_train.pkl'), "rb"))

    y_train = pickle.load( open(os.path.join(dirp,'y_train.pkl'), "rb" ) )
    X_val = pickle.load( open( os.path.join(dirp,'X_val.pkl'), "rb" ) )
    y_val = pickle.load( open( os.path.join(dirp,'y_val.pkl'), "rb" ) )
    if len(X_train.shape)>3:

        X_train=np.moveaxis(X_train,3,1)
        X_val=np.moveaxis(X_val,3,1)
    else:
        X_train = np.expand_dims(X_train, 1)
        X_val = np.expand_dims(X_val, 1)
    print('Xtrain :', X_train.shape)
    print('X_train min max :', X_train.min(), X_train.max())

    # labels to categorical vectors
#    uniquelbls = np.unique(y_train)
#    nb_classes = int( uniquelbls.shape[0])
    print('number of classes :', int(nb_classes))
  
#    zbn = np.min(uniquelbls) # zero based numbering
    y_train = np_utils.to_categorical(y_train, nb_classes)
    y_val = np_utils.to_categorical(y_val, nb_classes)

    return (X_train, y_train), (X_val, y_val)
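The moveaxis(X, 3, 1) calls above convert batches from channels-last (N, H, W, C) to channels-first (N, C, H, W); a minimal shape check:

import numpy as np

X = np.zeros((10, 28, 28, 3))                         # (N, H, W, C)
assert np.moveaxis(X, 3, 1).shape == (10, 3, 28, 28)  # (N, C, H, W)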
Example #11
def test_pick():
    group = Cyclic(2)
    complex = MultiComplex.generate(group, 6)

    from pycomplex.math import linalg
    N = 1024
    points = np.moveaxis(np.indices((N, N)).astype(float), 0, -1) / (N - 1) * 2 - 1
    z = np.sqrt(np.clip(1 - linalg.dot(points, points), 0, 1))
    points = np.concatenate([points, z[..., None]], axis=-1)

    element_idx, sub_idx, quotient_idx, triangle_idx, bary = complex[-1].pick(points.reshape(-1, 3))

    print(bary.min(), bary.max())

    if True:
        col = bary
    else:
        col = np.array([
            sub_idx.astype(float) / sub_idx.max(),
            sub_idx * 0,
            quotient_idx.astype(float) / quotient_idx.max()
        ]).T

    plt.figure()
    img = np.flip(np.moveaxis(col.reshape(N, N, 3), 0, 1), axis=0)
    plt.imshow(img)

    plt.show()
Example #12
def apply_transform(matrix, image, params):
    """
    Apply a transformation to an image.

    The origin of transformation is at the top left corner of the image.

    The matrix is interpreted such that a point (x, y) on the original image is moved to transform * (x, y) in the generated image.
    Mathematically speaking, that means that the matrix is a transformation from the transformed image space to the original image space.

    Parameters:
      matrix: A homogeneous 3 by 3 matrix representing the transformation to apply.
      image:  The image to transform.
      params: The transform parameters (see TransformParameters)
    """
    if params.channel_axis != 2:
        image = np.moveaxis(image, params.channel_axis, 2)

    output = cv2.warpAffine(
        image,
        matrix[:2, :],
        dsize       = (image.shape[1], image.shape[0]),
        flags       = params.cvInterpolation(),
        borderMode  = params.cvBorderMode(),
        borderValue = params.cval,
    )

    if params.channel_axis != 2:
        output = np.moveaxis(output, 2, params.channel_axis)
    return output
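A usage sketch with a hypothetical stand-in for TransformParameters (the real class provides channel_axis, cval and the cv* accessors used above):

import numpy as np
import cv2

class FakeParams:  # hypothetical stand-in, just enough for apply_transform
    channel_axis = 2
    cval = 0
    def cvInterpolation(self):
        return cv2.INTER_LINEAR
    def cvBorderMode(self):
        return cv2.BORDER_CONSTANT

translate = np.array([[1., 0., 5.],   # shift 5 px in x, 10 px in y
                      [0., 1., 10.],
                      [0., 0., 1.]])
image = np.zeros((64, 64, 3), dtype=np.uint8)
output = apply_transform(translate, image, FakeParams())
assert output.shape == image.shape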
Example #13
def scattering_matrix(vp1, vs1, rho1, vp2, vs2, rho2, theta1=0):
    """
    Full Zoeppritz solution, considered the definitive solution.
    Calculates the angle-dependent P-wave reflectivity of an interface
    between two media.

    Originally written by: Wes Hamlyn, vectorized by Agile.

    Returns the complex reflectivity.

    Args:
        vp1 (float): The upper P-wave velocity.
        vs1 (float): The upper S-wave velocity.
        rho1 (float): The upper layer's density.
        vp2 (float): The lower P-wave velocity.
        vs2 (float): The lower S-wave velocity.
        rho2 (float): The lower layer's density.
        theta1 (ndarray): The incidence angle; float or 1D array length n.

    Returns:
        ndarray. The exact Zoeppritz solution for all modes at the interface.
            A 4x4 array representing the scattering matrix at the incident
            angle theta1.
    """
    theta1 = np.radians(theta1).astype(complex) * np.ones_like(vp1)
    p = np.sin(theta1) / vp1  # Ray parameter.
    theta2 = np.arcsin(p * vp2)  # Trans. angle of P-wave.
    phi1 = np.arcsin(p * vs1)    # Refl. angle of converted S-wave.
    phi2 = np.arcsin(p * vs2)    # Trans. angle of converted S-wave.

    # Matrix form of Zoeppritz equations... M & N are matrices.
    M = np.array([[-np.sin(theta1), -np.cos(phi1), np.sin(theta2), np.cos(phi2)],
                  [np.cos(theta1), -np.sin(phi1), np.cos(theta2), -np.sin(phi2)],
                  [2 * rho1 * vs1 * np.sin(phi1) * np.cos(theta1),
                   rho1 * vs1 * (1 - 2 * np.sin(phi1) ** 2),
                   2 * rho2 * vs2 * np.sin(phi2) * np.cos(theta2),
                   rho2 * vs2 * (1 - 2 * np.sin(phi2) ** 2)],
                  [-rho1 * vp1 * (1 - 2 * np.sin(phi1) ** 2),
                   rho1 * vs1 * np.sin(2 * phi1),
                   rho2 * vp2 * (1 - 2 * np.sin(phi2) ** 2),
                   -rho2 * vs2 * np.sin(2 * phi2)]])

    N = np.array([[np.sin(theta1), np.cos(phi1), -np.sin(theta2), -np.cos(phi2)],
                  [np.cos(theta1), -np.sin(phi1), np.cos(theta2), -np.sin(phi2)],
                  [2 * rho1 * vs1 * np.sin(phi1) * np.cos(theta1),
                   rho1 * vs1 * (1 - 2 * np.sin(phi1) ** 2),
                   2 * rho2 * vs2 * np.sin(phi2) * np.cos(theta2),
                   rho2 * vs2 * (1 - 2 * np.sin(phi2) ** 2)],
                  [rho1 * vp1 * (1 - 2 * np.sin(phi1) ** 2),
                   -rho1 * vs1 * np.sin(2 * phi1),
                   - rho2 * vp2 * (1 - 2 * np.sin(phi2) ** 2),
                   rho2 * vs2 * np.sin(2 * phi2)]])

    M_ = np.moveaxis(np.squeeze(M), [0, 1], [-2, -1])
    A = np.linalg.inv(M_)
    N_ = np.moveaxis(np.squeeze(N), [0, 1], [-2, -1])
    Z_ = np.matmul(A, N_)

    return np.transpose(Z_, axes=list(range(Z_.ndim - 2)) + [-1, -2])
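In the usual Zoeppritz ordering the [..., 0, 0] element of the returned matrix is the downgoing-P to upgoing-P (PP) reflectivity, so a usage sketch with made-up layer properties looks like:

import numpy as np

theta = np.arange(0, 41, 5)          # incidence angles in degrees
Z = scattering_matrix(vp1=2400., vs1=1200., rho1=2300.,
                      vp2=2800., vs2=1500., rho2=2400., theta1=theta)
rpp = Z[..., 0, 0]                   # complex PP reflectivity per angle
print(np.abs(rpp))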
Example #14
def test_convinc_2d(
        channels_last, stride0, stride1, kernel0, kernel1, padding, rng):
    correlate2d = pytest.importorskip("scipy.signal").correlate2d

    shape0 = 16
    shape1 = 17
    in_channels = 32
    out_channels = 64
    x_shape = (shape0, shape1, in_channels) if channels_last else (
        in_channels, shape0, shape1)
    x = Signal(rng.randn(*x_shape))
    w = Signal(rng.randn(kernel0, kernel1, in_channels, out_channels))

    conv = Convolution(out_channels,
                       x_shape,
                       kernel_size=(kernel0, kernel1),
                       strides=(stride0, stride1),
                       padding=padding,
                       channels_last=channels_last)

    y = Signal(np.zeros(conv.output_shape.shape))

    signals = {sig: np.array(sig.initial_value) for sig in (x, w, y)}
    step = ConvInc(w, x, y, conv).make_step(signals, None, None)

    step()

    x0 = x.initial_value

    if not channels_last:
        x0 = np.moveaxis(x0, 0, -1)

    if padding == "same":
        strides = np.asarray([stride0, stride1])
        padding = np.ceil(np.asarray([shape0, shape1]) / strides)
        padding = np.maximum(
            (padding - 1) * strides + (kernel0, kernel1) - (shape0, shape1),
            0).astype(np.int64)
        x0 = np.pad(x0, [
            (padding[0] // 2, padding[0] - padding[0] // 2),
            (padding[1] // 2, padding[1] - padding[1] // 2),
            (0, 0),
        ], "constant")

    y0 = np.stack([
        np.sum([
            correlate2d(x0[..., j], w.initial_value[..., j, i], mode="valid")
            for j in range(in_channels)
        ], axis=0) for i in range(out_channels)
    ], axis=-1)
    y0 = y0[::stride0, ::stride1, :]
    if not channels_last:
        y0 = np.moveaxis(y0, -1, 0)

    assert np.allclose(signals[y], y0)
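The padding block in the test reproduces TensorFlow-style "same" padding, where the total pad is max((ceil(n/s) - 1)*s + k - n, 0) and the extra pixel goes on the trailing side; a minimal standalone sketch:

import math

def same_pad(n, k, s):
    # total "same" padding for input size n, kernel size k, stride s
    total = max((math.ceil(n / s) - 1) * s + k - n, 0)
    return total // 2, total - total // 2  # (before, after)

assert same_pad(16, 3, 2) == (0, 1)  # padded width 17 -> ceil(16 / 2) == 8 outputs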
Example #15
    def __init__(self):
        train = scipy.io.loadmat('/home/roliveira/.keras/datasets/svhn/train_32x32.mat')
        test = scipy.io.loadmat('/home/roliveira/.keras/datasets/svhn/test_32x32.mat')

        self.X_train = np.moveaxis(train['X'], [0, 1, 2, 3], [2, 3, 1, 0])
        self.y_train = train['y'].reshape(-1)
        self.y_train[self.y_train == 10] = 0

        self.X_test = np.moveaxis(test['X'], [0, 1, 2, 3], [2, 3, 1, 0])
        self.y_test = test['y']
        self.y_test[self.y_test == 10] = 0
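SVHN .mat files store images as (H, W, C, N); the source/destination lists above send H to position 2, W to 3, C to 1 and N to 0, giving (N, C, H, W). A minimal shape check:

import numpy as np

X = np.zeros((32, 32, 3, 100))   # (H, W, C, N) as stored in the .mat file
assert np.moveaxis(X, [0, 1, 2, 3], [2, 3, 1, 0]).shape == (100, 3, 32, 32)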
Example #16
  def run_same(self, batch, input_support, channels, filters, kernel_support,
               corr, strides_down, strides_up, padding, extra_pad_end,
               channel_separable, data_format, activation, use_bias):
    assert channels == filters == 1

    # Create input array.
    input_shape = (batch, 1) + input_support
    inputs = np.arange(np.prod(input_shape))
    inputs = inputs.reshape(input_shape).astype(np.float32)
    if data_format != "channels_first":
      tf_inputs = tf.constant(np.moveaxis(inputs, 1, -1))
    else:
      tf_inputs = tf.constant(inputs)

    # Create kernel array. This is an identity kernel, so the outputs should
    # be equal to the inputs except for up- and downsampling.
    tf_kernel = parameterizers.StaticParameterizer(
        initializers.IdentityInitializer())

    # Run SignalConv* layer.
    layer_class = {
        3: signal_conv.SignalConv1D,
        4: signal_conv.SignalConv2D,
        5: signal_conv.SignalConv3D,
    }[inputs.ndim]
    layer = layer_class(
        1, kernel_support, corr=corr, strides_down=strides_down,
        strides_up=strides_up, padding=padding, extra_pad_end=extra_pad_end,
        channel_separable=channel_separable, data_format=data_format,
        activation=activation, use_bias=use_bias,
        kernel_parameterizer=tf_kernel)
    tf_outputs = layer(tf_inputs)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      outputs = sess.run(tf_outputs)

    # Check that SignalConv* computes the correct output size.
    predicted_shape = layer.compute_output_shape(tf_inputs.shape)
    self.assertEqual(outputs.shape, tuple(predicted_shape.as_list()))

    # If not using channels_first, convert back to it to compare to input.
    if data_format != "channels_first":
      outputs = np.moveaxis(outputs, -1, 1)

    # Upsample and then downsample inputs.
    expected = inputs
    if not all(s == 1 for s in strides_up):
      expected = self.numpy_upsample(expected, strides_up, extra_pad_end)
    slices = (slice(None), slice(None))
    slices += tuple(slice(None, None, s) for s in strides_down)
    expected = expected[slices]

    self.assertAllClose(expected, outputs, rtol=0, atol=1e-3)
Example #17
    def gen(self, snr, x_axis=0, ivar_precision=.05, structure_shape=(1, )):
        '''
        generate data from full PC basis, and noisify according to snr
        '''

        if x_axis < 0:
            raise ValueError('x axis index must be non-negative')

        # since in this case we're using all PCs to construct fake data
        q = self.n
        self.x_axis = x_axis

        # if SNR is a single number, just return a single spectrum
        if not hasattr(snr, '__len__'):
            snr = snr * np.ones_like(self.x)
            fulldata_shape = (self.n, )
            coeffs_shape = (q, )
        # if SNR is given as a map (i.e., has an incompatible shape to self.x),
        # then add a dimension where specified in x_axis to make shapes compatible
        elif self.n not in snr.shape:
            # define higher-dimensional data structure shape
            # that delimits separate measurements
            structure_shape = snr.shape
            snr = np.expand_dims(snr, x_axis)
            snr = np.repeat(snr, self.n, axis=x_axis)
            fulldata_shape = snr.shape
            coeffs_shape = tuple_insert(structure_shape, x_axis, q)
        else:
            structure_shape = tuple_delete(snr.shape, x_axis)
            fulldata_shape = snr.shape
            coeffs_shape = tuple_insert(structure_shape, x_axis, q)

        self.snr = snr

        self.A0 = np.random.randn(*coeffs_shape)
        # generate centered data, and then add mean
        self.obs0_ctrd = np.moveaxis(
            (np.moveaxis(self.A0, x_axis, -1) @ self.E_full.T), -1, x_axis)
        self.obs0 = np.moveaxis(
            np.moveaxis(self.obs0_ctrd, x_axis, -1) + self.M, -1, x_axis)
        obs_noise = self.obs0 * np.random.randn(*fulldata_shape) / snr
        spectrophotometric_noise = np.moveaxis(
            np.random.multivariate_normal(
                np.zeros(self.n), self.K_inst.covariance_, structure_shape),
            -1, x_axis)

        self.obs = self.obs0 + obs_noise + spectrophotometric_noise
        self.ivar0 = (snr / self.obs)**2.
        self.ivar = (self.ivar0 * (1. + ivar_precision *
                     np.random.randn(*self.ivar0.shape))).clip(min=0.)
Example #18
    def __call__(self, l, flam, ivar=None, axis=0, *args, **kwargs):
        if len(l.shape) > 1:
            raise ValueError('wavelength array (l) must be 1D')

        flam[flam == 0.] = eps
        if ivar is None:
            ivar = np.ones_like(flam)
        ivar[ivar == 0.] = eps

        # rearrange axes to make broadcasting work
        ivar = np.moveaxis(ivar, axis, -1)
        flam = np.moveaxis(flam, axis, -1)

        return getattr(self, self._func)(l, flam, ivar, axis, *args, **kwargs)
Example #19
  def run_valid(self, batch, input_support, channels, filters, kernel_support,
                corr, strides_down, strides_up, padding, extra_pad_end,
                channel_separable, data_format, activation, use_bias):
    assert padding == "valid"

    # Create input array.
    inputs = np.random.randint(32, size=(batch, channels) + input_support)
    inputs = inputs.astype(np.float32)
    if data_format != "channels_first":
      tf_inputs = tf.constant(np.moveaxis(inputs, 1, -1))
    else:
      tf_inputs = tf.constant(inputs)

    # Create kernel array.
    kernel = np.random.randint(16, size=kernel_support + (channels, filters))
    kernel = kernel.astype(np.float32)
    tf_kernel = parameterizers.StaticParameterizer(
        tf.constant_initializer(kernel))

    # Run SignalConv* layer.
    layer_class = {
        3: signal_conv.SignalConv1D,
        4: signal_conv.SignalConv2D,
        5: signal_conv.SignalConv3D,
    }[inputs.ndim]
    layer = layer_class(
        filters, kernel_support, corr=corr, strides_down=strides_down,
        strides_up=strides_up, padding="valid", extra_pad_end=extra_pad_end,
        channel_separable=channel_separable, data_format=data_format,
        activation=activation, use_bias=use_bias,
        kernel_parameterizer=tf_kernel)
    tf_outputs = layer(tf_inputs)
    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      outputs = sess.run(tf_outputs)

    # Check that SignalConv* computes the correct output size.
    predicted_shape = layer.compute_output_shape(tf_inputs.shape)
    self.assertEqual(outputs.shape, tuple(predicted_shape.as_list()))

    # If not using channels_first, convert back to it to compare to SciPy.
    if data_format != "channels_first":
      outputs = np.moveaxis(outputs, -1, 1)

    # Compute the equivalent result using SciPy and compare.
    expected = self.scipy_convolve_valid(
        corr, inputs, kernel, strides_down, strides_up, extra_pad_end,
        channel_separable)
    self.assertAllClose(expected, outputs, rtol=0, atol=1e-3)
Example #20
    def run_resample(self, src_data, interp_method='nearest', fill_value=np.nan, nprocs=1, print_msg=True):
        """Run interpolation operation for input 2D/3D data
        Parameters: src_data      : 2D/3D np.array, source data to be geocoded
                    interp_method : string, nearest | linear
                    fill_value    : NaN or number
                    nprocs        : int, number of processes to be used
                    print_msg     : bool
        Returns:    geo_data      : 2D/3D np.array
        """
        # use pyresample
        if self.processor == 'pyresample':
            if len(src_data.shape) == 3:
                src_data = np.moveaxis(src_data, 0, -1)

            if src_data.dtype == np.bool_:
                fill_value = False
                print('restrict fill value to False for bool type source data')

            # resample source data into target data
            geo_data = self.run_pyresample(src_data=src_data,
                                           interp_method=interp_method,
                                           fill_value=fill_value,
                                           nprocs=nprocs,
                                           radius=None,
                                           print_msg=True)

            if len(geo_data.shape) == 3:
                geo_data = np.moveaxis(geo_data, -1, 0)

        # use scipy.interpolate.RegularGridInterpolator
        else:
            if print_msg:
                print('resampling using scipy.interpolate.RegularGridInterpolator ...')
            if len(src_data.shape) == 3:
                geo_data = np.empty((src_data.shape[0], self.length, self.width), src_data.dtype)
                prog_bar = ptime.progressBar(maxValue=src_data.shape[0])
                for i in range(src_data.shape[0]):
                    geo_data[i, :, :] = self.run_regular_grid_interpolator(src_data=src_data[i, :, :],
                                                                           interp_method=interp_method,
                                                                           fill_value=fill_value,
                                                                           print_msg=True)
                    prog_bar.update(i+1)
                prog_bar.close()
            else:
                geo_data = self.run_regular_grid_interpolator(src_data=src_data,
                                                              interp_method=interp_method,
                                                              fill_value=fill_value,
                                                              print_msg=True)
        return geo_data
Example #21
def each_channel(image_filter, image, *args, **kwargs):
    """Return color image by applying `image_filter` on channels of `image`.

    Note that this function is intended for use with `adapt_rgb`.

    Parameters
    ----------
    image_filter : function
        Function that filters a gray-scale image.
    image : array
        Input image.
    """
    c_new = [image_filter(c, *args, **kwargs)
             for c in np.moveaxis(image, -1, 0)]
    return np.moveaxis(np.array(c_new), 0, -1)
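each_channel is the per-channel dispatcher used with scikit-image's adapt_rgb decorator; typical usage wraps a gray-scale filter so it applies to each RGB channel:

from skimage.color.adapt_rgb import adapt_rgb, each_channel
from skimage import data, filters

@adapt_rgb(each_channel)
def sobel_each(image):
    return filters.sobel(image)

edges = sobel_each(data.astronaut())   # Sobel run independently per channel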
Example #22
    def __getitem__(self, idx):
        a = np.load(os.path.join(self.tile_dir, '{}anchor.npy'.format(idx)))
        n = np.load(os.path.join(self.tile_dir, '{}neighbor.npy'.format(idx)))
        if self.pairs_only:
            name = np.random.choice(['anchor', 'neighbor', 'distant'])
            d_idx = np.random.randint(0, self.n_triplets)
            d = np.load(os.path.join(self.tile_dir, '{}{}.npy'.format(d_idx, name)))
        else:
            d = np.load(os.path.join(self.tile_dir, '{}distant.npy'.format(idx)))
        a = np.moveaxis(a, -1, 0)
        n = np.moveaxis(n, -1, 0)
        d = np.moveaxis(d, -1, 0)
        sample = {'anchor': a, 'neighbor': n, 'distant': d}
        if self.transform:
            sample = self.transform(sample)
        return sample
Example #23
    def _calc_sensib(símismo):

        # Dictionary for the results
        d_sens = {}

        matr_t = símismo.res.matr_t
        for índs in matr_t.iter_índs(excluir=['días', 'estoc', 'parám']):
            ejes_orig = np.argsort([matr_t.í_eje('días'), matr_t.í_eje('estoc'), matr_t.í_eje('parám')])

            vals_res = matr_t.obt_valor_t(índs=índs)
            vals_res = np.moveaxis(vals_res, ejes_orig, [0, 1, 2])
            vals_res = vals_res.reshape(vals_res.shape[:3]).mean(axis=(1, 2))

            l_llaves = list(str(ll) for ll in índs.values())
            # walk down the nested results dict, creating levels as needed,
            # without losing the reference to the root
            d_nivel = d_sens
            for ll in l_llaves[:-1]:
                if ll not in d_nivel:
                    d_nivel[ll] = {}
                d_nivel = d_nivel[ll]

            d_sensib_índ = {}
            for í, vals_t in enumerate(vals_res):
                sensib = símismo.func(vals_t)
                for egr, m_egr in sensib.items():
                    # For each type of output of the sensitivity analysis...

                    # Create the empty results matrix, if necessary
                    if egr not in d_sensib_índ:
                        d_sensib_índ[egr] = np.zeros((*m_egr.shape, len(vals_res)))

                    # Fill in the data for this day
                    d_sensib_índ[egr][..., í] = sensib[egr]

            d_nivel[l_llaves[-1]] = d_sensib_índ

        return d_sens
Example #24
def collapse_psd(psd, kind, index=None, average=False):
    """Collapse a PSD"""
    if index is None:
        if 'pseudophase' == kind:
            psd_selected = (ap[...,None] * psd).sum(axis=tuple(range(psd.ndim - 1))) / ap.sum()
        else:
            psd_selected = psd.mean(axis=tuple(range(psd.ndim - 1)))
    elif isinstance(index, slice):
        psd_selected = np.moveaxis(psd[index], -1, 0).squeeze()
    else:
        index = tuple(index) + (slice(None, None),)
        psd_selected = np.moveaxis(psd[index], -1, 0).squeeze()
    
    if average and psd_selected.ndim > 1:
        psd_selected = psd_selected.mean(axis=tuple(range(1, psd_selected.ndim)))
    return psd_selected
Example #25
    def _comp_samples(self, point=None, size=None,
                      comp_dist_shapes=None,
                      broadcast_shape=None):
        if self.comp_is_distribution:
            samples = self._comp_dists.random(point=point, size=size)
        else:
            if comp_dist_shapes is None:
                comp_dist_shapes = self._comp_dist_shapes
            if broadcast_shape is None:
                broadcast_shape = self._sample_shape
            samples = []
            for dist_shape, generator in zip(comp_dist_shapes,
                                             self._generators):
                sample = generate_samples(
                    generator=generator,
                    dist_shape=dist_shape,
                    broadcast_shape=broadcast_shape,
                    point=point,
                    size=size,
                    not_broadcast_kwargs={'raw_size_': size},
                )
                samples.append(sample)
            samples = np.array(
                broadcast_distribution_samples(samples, size=size)
            )
            # In the logp we assume the last axis holds the mixture components
            # so we move the axis to the last dimension
            samples = np.moveaxis(samples, 0, -1)
        return samples.astype(self.dtype)
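The final moveaxis just moves the stacked component axis to the end so that samples[..., k] indexes mixture component k; a minimal sketch:

import numpy as np

samples = np.stack([np.zeros((4, 2)), np.ones((4, 2))])  # (components, *batch)
out = np.moveaxis(samples, 0, -1)                        # (*batch, components)
assert out.shape == (4, 2, 2) and np.all(out[..., 1] == 1)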
Example #26
def _import_field_image(filename, hotpixelremove=False):
    '''
    Import an image file as a Field. Image data are converted from y-major
    to x-major order and the y axis is flipped to grow bottom-up.
    '''
    from ..datahandling import Field, Axis
    if filename.lower().endswith('png'):
        data = _readpng(filename)
    else:
        data = _read_image_pil(filename)

    if hotpixelremove:
        import scipy.ndimage
        if data.ndim == 3 and data.shape[2] < 5:
            # assume multichannel image file like rgb or rgba
            for i in range(data.shape[2]):
                data[..., i] = scipy.ndimage.morphology.grey_opening(data[..., i], size=(3, 3))
        else:
            data = scipy.ndimage.morphology.grey_opening(data, size=(3, 3))

    # image data are usually in y-major order, but postpic Fields assume x-major order
    # and rows are stored from top to bottom while y axes coordinate grows from bottom to top
    data = np.moveaxis(data, 0, 1)[:, ::-1, ...]

    axes = []
    for i, (name, axlen) in enumerate(zip(['x', 'y', 'channel'], data.shape)):
        ax = Axis(name=name, unit='px' if i < 2 else '',
                  grid=np.linspace(0, axlen-1, axlen))
        axes.append(ax)

    basename = osp.basename(filename)
    return Field(data, unit='counts', name=basename, axes=axes)
Example #27
def grad(X):
    if X.shape==(1,):
        shape=(X.dim,)
    else:
        shape=X.shape+(X.dim,)
    name='grad({0})'.format(X.name[:10])
    gX=Tensor(name=name, shape=shape, N=X.N,
              Fourier=True, fft_form=X.fft_form)
    if X.Fourier:
        FX=X
    else:
        F=DFT(N=X.N, fft_form=X.fft_form) # TODO:change to X.fourier()
        FX=F(X)

    dim=len(X.N)
    freq=Grid.get_freq(X.N, X.Y, fft_form=X.fft_form)
    strfreq='xyz'
    coef=2*np.pi*1j
    val=np.empty((X.dim,)+X.shape+X.N_fft, dtype=complex)

    for ii in range(X.dim):
        mul_str='{0},...{1}->...{1}'.format(strfreq[ii], strfreq[:dim])
        val[ii]=np.einsum(mul_str, coef*freq[ii], FX.val, dtype=complex)

    if X.shape==(1,):
        gX.val=np.squeeze(val)
    else:
        gX.val=np.moveaxis(val, 0, X.order)

    if not X.Fourier:
        iF=DFT(N=X.N, inverse=True, fft_form=gX.fft_form)
        gX=iF(gX)
    gX.name='grad({0})'.format(X.name[:10])
    return gX
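The einsum multiplies every Fourier mode by 2*pi*1j*freq, which is a spectral derivative; a minimal 1D numpy sketch of the same idea:

import numpy as np

N = 64
x = np.arange(N) / N                    # periodic grid on [0, 1)
u = np.sin(2 * np.pi * x)
freq = np.fft.fftfreq(N, d=1.0 / N)     # integer frequencies in cycles per unit
du = np.fft.ifft(2j * np.pi * freq * np.fft.fft(u)).real
assert np.allclose(du, 2 * np.pi * np.cos(2 * np.pi * x))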
Example #28
File: core.py Project: adrn/gala
    def hessian(self, q, t=0.):
        """
        Compute the Hessian of the potential at the given position(s).

        Parameters
        ----------
        q : `~gala.dynamics.PhaseSpacePosition`, `~astropy.units.Quantity`, array_like
            The position to compute the value of the potential. If the
            input position object has no units (i.e. is an `~numpy.ndarray`),
            it is assumed to be in the same unit system as the potential.

        Returns
        -------
        hess : `~astropy.units.Quantity`
            The Hessian matrix of second derivatives of the potential. If the input
            position has shape ``q.shape``, the output will have shape
            ``(q.shape[0],q.shape[0]) + q.shape[1:]``. That is, an ``n_dim`` by
            ``n_dim`` array (matrix) for each position.
        """
        if (self.R is not None and
                not np.allclose(np.diag(self.R), 1., atol=1e-15, rtol=0)):
            raise NotImplementedError("Computing Hessian matrices for rotated "
                                      "potentials is currently not supported.")
        q = self._remove_units_prepare_shape(q)
        orig_shape, q = self._get_c_valid_arr(q)
        t = self._validate_prepare_time(t, q)
        ret_unit = 1 / self.units['time']**2
        hess = np.moveaxis(self._hessian(q, t=t), 0, -1)
        return hess.reshape((orig_shape[0], orig_shape[0]) + orig_shape[1:]) * ret_unit
Example #29
def test_outer_indexer_consistency_with_broadcast_indexes_vectorized():
    def nonzero(x):
        if isinstance(x, np.ndarray) and x.dtype.kind == 'b':
            x = x.nonzero()[0]
        return x

    original = np.random.rand(10, 20, 30)
    v = Variable(['i', 'j', 'k'], original)
    I = ReturnItem()  # noqa: E741  # allow ambiguous name
    # test orthogonally applied indexers
    indexers = [I[:], 0, -2, I[:3], np.array([0, 1, 2, 3]), np.array([0]),
                np.arange(10) < 5]
    for i, j, k in itertools.product(indexers, repeat=3):

        if isinstance(j, np.ndarray) and j.dtype.kind == 'b':  # match size
            j = np.arange(20) < 4
        if isinstance(k, np.ndarray) and k.dtype.kind == 'b':
            k = np.arange(30) < 8

        _, expected, new_order = v._broadcast_indexes_vectorized((i, j, k))
        expected_data = nputils.NumpyVIndexAdapter(v.data)[expected.tuple]
        if new_order:
            old_order = range(len(new_order))
            expected_data = np.moveaxis(expected_data, old_order,
                                        new_order)

        outer_index = indexing.OuterIndexer((nonzero(i), nonzero(j),
                                             nonzero(k)))
        actual = indexing._outer_to_numpy_indexer(outer_index, v.shape)
        actual_data = v.data[actual]
        np.testing.assert_array_equal(actual_data, expected_data)
Example #30
    def load_dataset(self):
        dataset  = self.cfg.dataset
        dataset_phase = self.cfg.dataset_phase
        dataset_ann = self.cfg.dataset_ann

        # initialize COCO api
        annFile = '%s/annotations/%s_%s.json'%(dataset,dataset_ann,dataset_phase)
        self.coco = COCO(annFile)

        imgIds = self.coco.getImgIds()

        data = []

        # loop through each image
        for imgId in imgIds:
            item = DataItem()

            img = self.coco.loadImgs(imgId)[0]
            item.im_path = "%s/images/%s/%s"%(dataset, dataset_phase, img["file_name"])
            item.im_size = [3, img["height"], img["width"]]
            item.coco_id = imgId
            annIds = self.coco.getAnnIds(imgIds=img['id'], iscrowd=False)
            anns = self.coco.loadAnns(annIds)

            all_person_keypoints = []
            masked_persons_RLE = []
            visible_persons_RLE = []
            all_visibilities = []

            # Consider only images with people
            has_people = len(anns) > 0
            if not has_people and self.cfg.coco_only_images_with_people:
                continue

            for ann in anns: # loop through each person
                person_keypoints = []
                visibilities = []
                if ann["num_keypoints"] != 0:
                    for i in range(self.cfg.num_joints):
                        x_coord = ann["keypoints"][3 * i]
                        y_coord = ann["keypoints"][3 * i + 1]
                        visibility = ann["keypoints"][3 * i + 2]
                        visibilities.append(visibility)
                        if visibility != 0: # i.e. if labeled
                            person_keypoints.append([i, x_coord, y_coord])
                    all_person_keypoints.append(np.array(person_keypoints))
                    visible_persons_RLE.append(maskUtils.decode(self.coco.annToRLE(ann)))
                    all_visibilities.append(visibilities)
                if ann["num_keypoints"] == 0:
                    masked_persons_RLE.append(self.coco.annToRLE(ann))

            item.joints = all_person_keypoints
            item.im_neg_mask = maskUtils.merge(masked_persons_RLE)
            if self.cfg.use_gt_segm:
                item.gt_segm = np.moveaxis(np.array(visible_persons_RLE), 0, -1)
                item.visibilities = all_visibilities
            data.append(item)

        self.has_gt = self.cfg.dataset != "image_info"
        return data
Example #31
def test(cfg, net):
    _, data = get_data(cfg)

    # Initialize the network the right way
    # net.train and net.eval account for differences in dropout/batch norm
    # during training and testing
    net.eval().cuda()
    metrics = {'acc_all': [], 'acc_nonzero': [], 'loss': []}
    metrics_mean = {'acc_all': [], 'acc_nonzero': [], 'loss': []}
    metrics_std = {'acc_all': [], 'acc_nonzero': [], 'loss': []}
    durations_mean = {'cuda': [], 'loss': [], 'forward': [], 'acc': []}
    durations_std = {'cuda': [], 'loss': [], 'forward': [], 'acc': []}
    # Only enable gradients if we are training
    # with torch.set_grad_enabled(is_training):
    durations, durations_cuda, durations_loss, durations_acc = [], [], [], []
    steps = []
    print('Listing weights...')
    weights = glob.glob(os.path.join(cfg.WEIGHTS_FILE_BASE, "*.ckpt"))
    weights.sort()
    print('Done.')

    blobs = []
    print('Fetch data...')
    for i in range(cfg.MAX_STEPS):
        print("%d/%d" % (i, cfg.MAX_STEPS))
        blob = data.forward()
        blob.pop('data')
        blob['label_voxels'], blob['label_values'] = extract_voxels(
            blob['labels'])
        blob.pop('labels')
        blobs.append(blob)
    print('Done.')

    for w in weights:
        step = int(re.findall(r'model-(\d+)', w)[0])
        steps.append(step)
        print('Restoring weights from %s...' % w)
        with open(w, 'rb') as f:
            checkpoint = torch.load(f)
            net.load_state_dict(checkpoint['state_dict'])
        print('Done.')
        for i, blob in enumerate(blobs):  # FIXME
            print("Step %d/%d" % (i, cfg.MAX_STEPS))
            if sparse:
                start = time.time()
                coords = torch.from_numpy(blob['voxels']).cuda()
                features = torch.from_numpy(
                    np.reshape(blob['voxels_value'], (-1, 1))).cuda()
                label_voxels, labels = blob['label_voxels'], blob[
                    'label_values']
                labels = torch.from_numpy(labels).cuda().type(
                    torch.cuda.LongTensor)
                end = time.time()
                durations_cuda.append(end - start)

                start = time.time()
                predictions_raw = net(coords,
                                      features)  # size N_voxels x num_classes
                end = time.time()
                durations.append(end - start)

            else:
                start = time.time()
                image = torch.from_numpy(np.moveaxis(blob['data'], -1,
                                                     1)).cuda()
                labels = torch.from_numpy(blob['labels']).cuda().type(
                    torch.cuda.LongTensor)
                end = time.time()
                durations_cuda.append(end - start)

                start = time.time()
                predictions_raw = net(image)
                end = time.time()
                durations.append(end - start)

            start = time.time()
            loss = criterion(predictions_raw, labels)
            end = time.time()
            durations_loss.append(end - start)
            metrics['loss'].append(loss.item())
            print("\tLoss = ", metrics['loss'][-1])

            # Accuracy
            start = time.time()
            predicted_labels = torch.argmax(predictions_raw, dim=1)
            acc_all = (predicted_labels == labels).sum().item() / float(
                labels.numel())
            nonzero_px = labels > 0
            nonzero_prediction = predicted_labels[nonzero_px]
            nonzero_label = labels[nonzero_px]
            acc_nonzero = (nonzero_prediction
                           == nonzero_label).sum().item() / float(
                               nonzero_label.numel())
            end = time.time()
            durations_acc.append(end - start)
            metrics['acc_all'].append(acc_all)
            metrics['acc_nonzero'].append(acc_nonzero)
            print("\tAccuracy = ", metrics['acc_all'][-1],
                  " - Nonzero accuracy = ", metrics['acc_nonzero'][-1])

        metrics_mean['loss'].append(np.array(metrics['loss']).mean())
        metrics_std['loss'].append(np.array(metrics['loss']).std())
        metrics_mean['acc_all'].append(np.array(metrics['acc_all']).mean())
        metrics_std['acc_all'].append(np.array(metrics['acc_all']).std())
        metrics_mean['acc_nonzero'].append(
            np.array(metrics['acc_nonzero']).mean())
        metrics_std['acc_nonzero'].append(
            np.array(metrics['acc_nonzero']).std())
        durations_mean['cuda'].append(np.array(durations_cuda).mean())
        durations_std['cuda'].append(np.array(durations_cuda).std())
        durations_mean['loss'].append(np.array(durations_loss).mean())
        durations_std['loss'].append(np.array(durations_loss).std())
        durations_mean['forward'].append(np.array(durations).mean())
        durations_std['forward'].append(np.array(durations).std())
        durations_mean['acc'].append(np.array(durations_acc).mean())
        durations_std['acc'].append(np.array(durations_acc).std())
        durations, durations_cuda, durations_loss, durations_acc = [], [], [], []
        metrics = {'acc_all': [], 'acc_nonzero': [], 'loss': []}

        print('Mean cuda duration = %f s' % durations_mean['cuda'][-1])
        print('Mean loss duration = %f s' % durations_mean['loss'][-1])
        print('Mean acc duration = %f s' % durations_mean['acc'][-1])
        print('Mean forward duration = %f s' % durations_mean['forward'][-1])

        print('Mean acc = %f s' % metrics_mean['acc_nonzero'][-1])

        np.savetxt(os.path.join(cfg.OUTPUT_DIR, 'steps_%d.csv' % step),
                   steps,
                   delimiter=',')
        for attr in metrics:
            np.savetxt(os.path.join(cfg.OUTPUT_DIR,
                                    '%s_mean_%d.csv' % (attr, step)),
                       metrics_mean[attr],
                       delimiter=',')
            np.savetxt(os.path.join(cfg.OUTPUT_DIR,
                                    '%s_std_%d.csv' % (attr, step)),
                       metrics_std[attr],
                       delimiter=',')
        for attr in durations_mean:
            np.savetxt(os.path.join(cfg.OUTPUT_DIR,
                                    'durations_%s_mean_%d.csv' % (attr, step)),
                       durations_mean[attr],
                       delimiter=',')
            np.savetxt(os.path.join(cfg.OUTPUT_DIR,
                                    'durations_%s_std_%d.csv' % (attr, step)),
                       durations_std[attr],
                       delimiter=',')
Example #32
def img_to_tensor(im, normalize=None):
    tensor = torch.from_numpy(np.moveaxis(im / (255.0 if im.dtype == np.uint8 else 1), -1, 0).astype(np.float32))
    if normalize is not None:
        return F.normalize(tensor, **normalize)
    return tensor
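A usage sketch, assuming F is torchvision.transforms.functional; the normalization statistics here are just example values:

import numpy as np

img = (np.random.rand(224, 224, 3) * 255).astype(np.uint8)   # HWC uint8
tensor = img_to_tensor(img, normalize={'mean': [0.485, 0.456, 0.406],
                                       'std': [0.229, 0.224, 0.225]})
print(tensor.shape)   # torch.Size([3, 224, 224])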
Example #33
def ATL15_write2nc(args):

    def make_dataset(field,fieldout,data,field_attrs,file_obj,group_obj,nctype,dimScale=False):
        # where field is the name from ATL15_output_attrs.csv file
        # where fieldout is the name of the output variable in the .nc file
        dimensions = field_attrs[field]['dimensions'].split(',')
        dimensions = tuple(x.strip() for x in dimensions)
        if field_attrs[field]['datatype'].startswith('int'):
            fill_value = np.iinfo(np.dtype(field_attrs[field]['datatype'])).max
        elif field_attrs[field]['datatype'].startswith('float'):
            fill_value = np.finfo(np.dtype(field_attrs[field]['datatype'])).max
        data = np.nan_to_num(data,nan=fill_value)

        if dimScale:
            group_obj.createDimension(field_attrs[field]['dimensions'],data.shape[0])

        dsetvar = group_obj.createVariable(fieldout,
                                           nctype[field_attrs[field]['datatype']],
                                           dimensions,
                                           fill_value=fill_value, zlib=True,
                                           least_significant_digit=ast.literal_eval(field_attrs[field]['least_significant_digit']))
            
        dsetvar[:] = data
        for attr in attr_names:
            if attr != 'group description':
                dsetvar.setncattr(attr,field_attrs[field][attr])
        # add attributes for projection
        if not field.startswith('time'):
            dsetvar.setncattr('grid_mapping','Polar_Stereographic')
        if field == 'x':
            dsetvar.standard_name = 'projection_x_coordinate'
        if field == 'y':
            dsetvar.standard_name = 'projection_y_coordinate'

        return file_obj
    
    dz_dict = {'time': 't',   # for non-lagged vars. {ATL15 outgoing var name: hdf5 incoming var name}
              'time_lag1':'t',
              'time_lag4':'t',
              'time_lag8':'t',
              'x':'x',
              'y':'y',
              'cell_area':'cell_area',
              'ice_mask':'mask',
              'data_count':'count',
              'misfit_rms':'misfit_rms',
              'misfit_scaled_rms':'misfit_scaled_rms',
              'delta_h':'dz',
              'delta_h_sigma':'sigma_dz',
              'delta_h_10km':'avg_dz_10000m',
              'delta_h_sigma_10km':'sigma_avg_dz_10000m',
              'delta_h_20km':'avg_dz_20000m',
              'delta_h_sigma_20km':'sigma_avg_dz_20000m',
              'delta_h_40km':'avg_dz_40000m',
              'delta_h_sigma_40km':'sigma_avg_dz_40000m',
              }
    nctype = {'float64':'f8',
              'float32':'f4',
              'int8':'i1'}

    lags = {
            'file' : ['FH','FH_lag1','FH_lag4','FH_lag8'],
            'vari' : ['','_lag1','_lag4','_lag8'],
            'varigrp' : ['delta_h','dhdt_lag1','dhdt_lag4','dhdt_lag8']
           }
    avgs = ['','_10km','_20km','_40km']
    # open data attributes file
    attrFile = pkg_resources.resource_filename('surfaceChange','resources/ATL15_output_attrs.csv')
    with open(attrFile,'r',encoding='utf-8-sig') as attrfile:
        reader=list(csv.DictReader(attrfile))

    attr_names=[x for x in reader[0].keys() if x != 'field' and x != 'group']
    
    for ave in avgs:  
        # establish output file, one per average
        if ave=='':
            fileout = args.base_dir.rstrip('/') + '/ATL15_' + args.region + '_' + args.cycles + '_01km_' + args.Release + '_' + args.version + '.nc'
        else:
            fileout = args.base_dir.rstrip('/') + '/ATL15_' + args.region + '_' + args.cycles + ave + '_' + args.Release + '_' + args.version + '.nc'
#        print('output file:',fileout)
    
        with Dataset(fileout,'w',clobber=True) as nc:
            nc.setncattr('GDAL_AREA_OR_POINT','Area')
            nc.setncattr('Conventions','CF-1.6')
            
            # make tile_stats group (ATBD 4.1.2.1, Table 3)
            tilegrp = nc.createGroup('tile_stats')   
            tileFile = pkg_resources.resource_filename('surfaceChange','resources/tile_stats_output_attrs.csv')
            with open(tileFile,'r', encoding='utf-8-sig') as tilefile:
                tile_reader=list(csv.DictReader(tilefile))
        
            tile_attr_names=[x for x in tile_reader[0].keys() if x != 'field' and x != 'group']
    
            tile_field_names = [row['field'] for row in tile_reader]
    
            tile_stats={}        # dict for appending data from the tile files
            for field in tile_field_names:
                if field not in tile_stats:
                    tile_stats[field] = { 'data': [], 'mapped':np.array(())}
                        
            
            # work through the tiles in all three subdirectories
            for sub in ['centers','edges','corners']:
                files = os.listdir(os.path.join(args.base_dir,sub))
                files = [f for f in files if f.endswith('.h5')]
                for file in files:
                    try:
                        tile_stats['x']['data'].append(int(re.match(r'^.*E(.*)\_.*$',file).group(1)))
                    except Exception as e:
                        print(f"problem with [ {file} ], skipping")
                        continue
                    tile_stats['y']['data'].append(int(re.match(r'^.*N(.*)\..*$',file).group(1)))
    
                    with h5py.File(os.path.join(args.base_dir,sub,file),'r') as h5:
                        tile_stats['N_data']['data'].append( np.sum(h5['data']['three_sigma_edit'][:]) )
                        tile_stats['RMS_data']['data'].append( h5['RMS']['data'][()] )  # use () for getting a scalar.
                        tile_stats['RMS_bias']['data'].append( np.sqrt(np.mean((h5['bias']['val'][:]/h5['bias']['expected'][:])**2)) )
                        tile_stats['N_bias']['data'].append( len(h5['bias']['val'][:]) )  #### or all BUT the zeros.
                        tile_stats['RMS_d2z0dx2']['data'].append( h5['RMS']['grad2_z0'][()] )
                        tile_stats['RMS_d2zdt2']['data'].append( h5['RMS']['d2z_dt2'][()] )
                        tile_stats['RMS_d2zdx2dt']['data'].append( h5['RMS']['grad2_dzdt'][()] )
                        tile_stats['sigma_xx0']['data'].append( h5['E_RMS']['d2z0_dx2'][()] )
                        tile_stats['sigma_tt']['data'].append( h5['E_RMS']['d2z_dt2'][()] )
                        tile_stats['sigma_xxt']['data'].append( h5['E_RMS']['d3z_dx2dt'][()] )
                        
            # establish output grids from min/max of x and y
            for key in tile_stats.keys():
                if key == 'N_data' or key == 'N_bias':  # key == 'x' or key == 'y' or 
                    tile_stats[key]['mapped'] = np.zeros( [len(np.arange(np.min(tile_stats['y']['data']),np.max(tile_stats['y']['data'])+40,40)),
                                                            len(np.arange(np.min(tile_stats['x']['data']),np.max(tile_stats['x']['data'])+40,40))], 
                                                            dtype=int)
                else:
                    tile_stats[key]['mapped'] = np.zeros( [len(np.arange(np.min(tile_stats['y']['data']),np.max(tile_stats['y']['data'])+40,40)),
                                                            len(np.arange(np.min(tile_stats['x']['data']),np.max(tile_stats['x']['data'])+40,40))],
                                                            dtype=float)
            # put data into grids
            for key in tile_stats.keys():
                # x and y are handled separately below as dimension variables
                if key == 'x' or key == 'y':
                    continue
                for (yt, xt, dt) in zip(tile_stats['y']['data'], tile_stats['x']['data'], tile_stats[key]['data']):
                    if not np.isfinite(dt):
                        print(f"ATL14_write2nc: found bad tile_stats value in field {key} at x={xt}, y={yt}")
                        continue
                    row=int((yt-np.min(tile_stats['y']['data']))/40)
                    col=int((xt-np.min(tile_stats['x']['data']))/40)
                    tile_stats[key]['mapped'][row,col] = dt
                tile_stats[key]['mapped'] = np.ma.masked_where(tile_stats[key]['mapped'] == 0, tile_stats[key]['mapped'])   

            # make dimensions, fill them as variables
            tilegrp.createDimension('y', ny)
            tilegrp.createDimension('x', nx)
    
            # create tile_stats/ variables in .nc file
            for field in tile_field_names:
                tile_field_attrs = {row['field']: {tile_attr_names[ii]:row[tile_attr_names[ii]] for ii in range(len(tile_attr_names))} for row in tile_reader if field in row['field']}
                if field == 'x':
                    dsetvar = tilegrp.createVariable('x', tile_field_attrs[field]['datatype'], ('x',), fill_value=np.finfo(tile_field_attrs[field]['datatype']).max, zlib=True)
                    dsetvar[:] = np.arange(np.min(tile_stats['x']['data']),np.max(tile_stats['x']['data'])+40,40.) * 1000 # convert from km to meter
                elif field == 'y':
                    dsetvar = tilegrp.createVariable('y', tile_field_attrs[field]['datatype'], ('y',), fill_value=np.finfo(tile_field_attrs[field]['datatype']).max, zlib=True)
                    dsetvar[:] = np.arange(np.min(tile_stats['y']['data']),np.max(tile_stats['y']['data'])+40,40.) * 1000 # convert from km to meter
                elif field == 'N_data' or field == 'N_bias': 
                    dsetvar = tilegrp.createVariable(field, tile_field_attrs[field]['datatype'],('y','x'),fill_value=np.iinfo(tile_field_attrs[field]['datatype']).max, zlib=True)
                else:
                    dsetvar = tilegrp.createVariable(field, tile_field_attrs[field]['datatype'],('y','x'),fill_value=np.finfo(tile_field_attrs[field]['datatype']).max, zlib=True)

                if field != 'x' and field != 'y':
                    dsetvar[:] = tile_stats[field]['mapped'][:]
                
                for attr in ['units','dimensions','datatype','coordinates','description','long_name','source']:
                    dsetvar.setncattr(attr,tile_field_attrs[field][attr])
                dsetvar.setncattr('grid_mapping','Polar_Stereographic')
    
            
            crs_var = projection_variable(args.region,tilegrp)

#            # make comfort figures
#            extent=[np.min(tile_stats['x']['data'])*fact,np.max(tile_stats['x']['data'])*fact,
#                    np.min(tile_stats['y']['data'])*fact,np.max(tile_stats['y']['data'])*fact]
#            cmap = mpl.cm.get_cmap("viridis").copy()
#            cmnan = mpl.cm.get_cmap(cmap)
#            cmnan.set_bad(color='white')
#            
#            for i,key in enumerate(tile_stats.keys()):
#                makey = np.ma.masked_where(tile_stats[key]['mapped'] == 0, tile_stats[key]['mapped'])   
#                fig,ax=plt.subplots(1,1)
#                ax = plt.subplot(1,1,1,projection=ccrs.NorthPolarStereo(central_longitude=-45))
#                ax.add_feature(cartopy.feature.LAND)
#                ax.coastlines(resolution='110m',linewidth=0.5)
#                ax.set_extent([-10,90,70,90],crs=ccrs.PlateCarree())
#                ax.gridlines(crs=ccrs.PlateCarree())
#                h=ax.imshow(makey,extent=extent,vmin=np.min(makey.ravel()),vmax=np.max(makey.ravel()),cmap=cmnan,origin='lower')
#                fig.colorbar(h,ax=ax)
#                ax.set_title(f'{key}')
#                fig.savefig(f"{os.path.join(args.base_dir,'tile_stats_' + key + '.png')}",format='png')
#                
#            plt.show(block=False)
#            plt.pause(0.001)
#            input('Press enter to end.')
#            plt.close('all')
#            exit(-1)

            # loop over dz*.h5 files for one ave
            for jj in range(len(lags['file'])):
                filein = args.base_dir.rstrip('/')+'/dz'+ave+lags['vari'][jj]+'.h5'
                if not os.path.isfile(filein):
                    print('No file:',args.base_dir.rstrip('/')+'/'+os.path.basename(filein))
                    continue
                else:
                    print('Reading file:',args.base_dir.rstrip('/')+'/'+os.path.basename(filein))
                lags['file'][jj] = h5py.File(filein,'r')  # file object
                dzg=list(lags['file'][jj].keys())[0]      # dzg is group in input file
    
                nc.createGroup(lags['varigrp'][jj])
                # print(lags['varigrp'][jj])
                
                # make projection variable for each group
                crs_var = projection_variable(args.region,nc.groups[lags['varigrp'][jj]])
                                   
                # dimension scales for each group
                for field in ['x','y']:
                    data = np.array(lags['file'][jj][dzg][dz_dict[field]])
                    if field == 'x':
                        x = data
                        xll = np.min(x)
                        dx = x[1]-x[0]
                    if field == 'y':
                        y = data
                        yll = np.max(y)
                        dy = y[0]-y[1]
                    field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field'] if row['group']=='height_change'+ave}
                    make_dataset(field,field,data,field_attrs,nc,nc.groups[lags['varigrp'][jj]],nctype,dimScale=True)
                crs_var.GeoTransform = (xll,dx,0,yll,0,dy)

                if jj==0:  # no lag
                    field = 'time'
                    data = np.array(lags['file'][jj][dzg]['t'])
                    # convert decimal years to days since 2018-01-01
                    data = (data-2018.)*365.25
                    field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field'] if row['group']=='height_change'+ave}
                    make_dataset(field,field,data,field_attrs,nc,nc.groups[lags['varigrp'][jj]],nctype,dimScale=True)
                    
                    for fld in ['cell_area','delta_h','delta_h_sigma','ice_mask','data_count','misfit_rms','misfit_scaled_rms']:  # fields that can be ave'd but not lagged
                        if (len(ave) > 0) and (fld.startswith('misfit') or fld=='ice_mask' or fld=='data_count'): # not in ave'd groups
                            break
                        field = fld+ave
                        field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field == row['field'] if row['group']=='height_change'+ave}
                        if fld.startswith('delta_h'):  # fields with complicated name changes
                            data = np.array(lags['file'][jj][dzg][dz_dict[field]]) 
                            for tt in range(data.shape[-1]):
                                data[:,:,tt][np.isnan(cell_area_mask)] = np.nan
                            if fld=='delta_h':  # add group description
                                nc.groups[lags['varigrp'][jj]].setncattr('description',field_attrs[field]['group description'])
                        else:
                            data = np.array(lags['file'][jj][dzg][dz_dict[fld]])
                        if len(data.shape)==3:
                            data = np.moveaxis(data,2,0)  # t, y, x
                        if fld == 'cell_area':
                            data[data==0.0] = np.nan 
                            cell_area_mask = data # where cell_area is invalid, so are delta_h and dhdt variables.

                        make_dataset(field,fld,data,field_attrs,nc,nc.groups[lags['varigrp'][jj]],nctype,dimScale=False)

                else:  # one of the lags
                    field = 'time'+lags['vari'][jj]
                    data = np.array(lags['file'][jj][dzg]['t'])
                    # convert decimal years to days since 2018-01-01
                    data = (data-2018.)*365.25
                    field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field'] if row['group']=='height_change'+ave}
                    make_dataset(field,'time',data,field_attrs,nc,nc.groups[lags['varigrp'][jj]],nctype,dimScale=True)
                    
                    field = 'dhdt'+lags['vari'][jj]+ave
                    data = np.array(lags['file'][jj][dzg][dzg])
                    for tt in range(data.shape[-1]):
                        data[:,:,tt][np.isnan(cell_area_mask)] = np.nan
                    data = np.moveaxis(data,2,0)  # t, y, x
                    field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field'] if row['group']=='height_change'+ave}
                    make_dataset(field,'dhdt',data,field_attrs,nc,nc.groups[lags['varigrp'][jj]],nctype,dimScale=False)
                    # add group description
                    nc.groups[lags['varigrp'][jj]].setncattr('description',field_attrs[field]['group description'])
                    
                    field = 'dhdt'+lags['vari'][jj]+'_sigma'+ave
                    data = np.array(lags['file'][jj][dzg]['sigma_'+dzg])
                    for tt in range(data.shape[-1]):
                        data[:,:,tt][np.isnan(cell_area_mask)] = np.nan
                    data = np.moveaxis(data,2,0)  # t, y, x
                    field_attrs = {row['field']: {attr_names[ii]:row[attr_names[ii]] for ii in range(len(attr_names))} for row in reader if field in row['field'] if row['group']=='height_change'+ave}
                    make_dataset(field,'dhdt_sigma',data,field_attrs,nc,nc.groups[lags['varigrp'][jj]],nctype,dimScale=False)
                        
            for jj in range(len(lags['file'])):
                try:
                    lags['file'][jj].close()
                except Exception:
                    pass  # entry may be a placeholder that was never opened
        
            ncTemplate=pkg_resources.resource_filename('surfaceChange','resources/atl15_metadata_template.nc')
            write_atl14meta(nc, fileout, ncTemplate, args)

    return fileout
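A minimal sketch of the gridding idiom used above, with made-up coordinates: scattered tile values at (x, y) positions on a 40 km grid are dropped into a 2D array indexed by (row, col) offsets from the grid origin, and cells that never received data are masked, exactly as done for tile_stats[key]['mapped'].

import numpy as np

xs = np.array([0, 40, 80, 40])          # km, hypothetical tile centers
ys = np.array([0, 0, 40, 40])           # km
vals = np.array([1.5, 2.0, 0.7, 3.1])   # hypothetical per-tile statistic

step = 40
grid = np.zeros((len(np.arange(ys.min(), ys.max() + step, step)),
                 len(np.arange(xs.min(), xs.max() + step, step))))

for x, y, v in zip(xs, ys, vals):
    row = int((y - ys.min()) / step)
    col = int((x - xs.min()) / step)
    grid[row, col] = v

grid = np.ma.masked_where(grid == 0, grid)  # hide cells with no tile, as above
print(grid)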
Ejemplo n.º 34
0
def collect_images_and_labels(directory):
   """
      This function collects the images in a directory together with their labels.
      The directory must be organized into folders; every image inside a folder
      is labeled with that folder's name.

      example:
        -my_directory/
            |----A/
            |    |- image1.jpg
            |    |- image2.jpg
            |
            |----B/
            |    |- image3.jpg
            |
            |----C/
                 |- image4.jpg
                 |- image5.jpg
                 |- image6.jpg

         The images get classified as follows
            class   file_name
               A    image1.jpg
               A    image2.jpg
               B    image3.jpg
               C    image4.jpg
               C    image5.jpg
               C    image6.jpg

      @returns:
         @labels     - list of strings of corresponding labels to each file in @file_names
         @file_names - list of strings corresponding to the file names of each image

         @grayscale_images  - 3D numpy array of grayscale images, indexed along
                              the last axis: grayscale_images[:,:,i] is the i'th image.
                              dtype is uint8, values range from 0 to 255.
                              Images normalized using the mean_filter_normalize function

         @saturation_images - 3D numpy array of saturation images, indexed along
                              the last axis: saturation_images[:,:,i] is the i'th image.
                              dtype is uint8, values range from 0 to 255.
                              Images normalized using the mean_filter_normalize function
   """
   labels = []
   file_names = []
   grayscale_images = []
   saturation_images = []

   # collect images and labels
   for label in os.listdir(directory):
      path = '{}/{}'.format(directory, label)
      image_names = os.listdir(path) # names of images in label folder

      # record labels and names
      labels.extend([label]*len(image_names))
      file_names.extend(image_names)

      for file_nm in image_names:
         name = '{}/{}'.format(path, file_nm) # name of file of image
         img = skimio.imread(name) # read image

         img = img[:-128,:,:] # crop scalebar
         img_gray = cv.cvtColor(img, cv.COLOR_RGB2GRAY)      # grayscale
         img_sat = cv.cvtColor(img, cv.COLOR_RGB2HSV)[:,:,1] # saturation

         # collect images
         grayscale_images.append(img_gray)
         saturation_images.append(img_sat)

   # convert to numpy arrays
   grayscale_images = np.moveaxis(np.array(grayscale_images), 0, -1)
   saturation_images = np.moveaxis(np.array(saturation_images), 0, -1)

   # normalize images, this removes artifacts
   grayscale_images = mean_filter_normalize(grayscale_images)
   saturation_images = mean_filter_normalize(saturation_images)

   return labels, file_names, grayscale_images, saturation_images
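A minimal sketch of the stacking step performed at the end of collect_images_and_labels, using random arrays in place of real image files: np.array stacks the list along a new leading axis, and np.moveaxis moves that image index to the end so stack[:, :, i] is the i'th image.

import numpy as np

frames = [np.random.randint(0, 256, (64, 64), dtype=np.uint8) for _ in range(5)]
stack = np.moveaxis(np.array(frames), 0, -1)
print(stack.shape)                                # (64, 64, 5)
assert np.array_equal(stack[:, :, 2], frames[2])  # last axis indexes images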
Ejemplo n.º 35
0
def MDD(G,
        d,
        dt=0.004,
        dr=1.,
        nfmax=None,
        wav=None,
        twosided=True,
        causality_precond=False,
        adjoint=False,
        psf=False,
        dtype='float64',
        dottest=False,
        saveGt=True,
        add_negative=True,
        smooth_precond=0,
        **kwargs_solver):
    r"""Multi-dimensional deconvolution.

    Solve multi-dimensional deconvolution problem using
    :py:func:`scipy.sparse.linalg.lsqr` iterative solver.

    Parameters
    ----------
    G : :obj:`numpy.ndarray`
        Multi-dimensional convolution kernel in time domain of size
        :math:`[n_s \times n_r \times n_t]` for ``twosided=False`` or
        ``twosided=True`` and ``add_negative=True``
        (with only positive times) or size
        :math:`[n_s \times n_r \times 2*n_t-1]` for ``twosided=True`` and
        ``add_negative=False``
        (with both positive and negative times)
    d : :obj:`numpy.ndarray`
        Data in time domain :math:`[n_s (\times n_{vs}) \times n_t]` if
        ``twosided=False`` or ``twosided=True`` and ``add_negative=True``
        (with only positive times) or size
        :math:`[n_s (\times n_{vs}) \times 2*n_t-1]` if ``twosided=True``
    dt : :obj:`float`, optional
        Sampling of time integration axis
    dr : :obj:`float`, optional
        Sampling of receiver integration axis
    nfmax : :obj:`int`, optional
        Index of max frequency to include in deconvolution process
    wav : :obj:`numpy.ndarray`, optional
        Wavelet to convolve to the inverted model and psf
        (must be centered around its index in the middle of the array).
        If ``None``, the outputs of the inversion are returned directly.
    twosided : :obj:`bool`, optional
        MDC operator and data both negative and positive time (``True``)
        or only positive (``False``)
    add_negative : :obj:`bool`, optional
        Add negative side to MDC operator and data (``True``) or not
        (``False``); in the latter case the operator and data must already
        contain both positive and negative sides. To be used only with
        ``twosided=True``.
    causality_precond : :obj:`bool`, optional
        Apply causality mask (``True``) or not (``False``)
    smooth_precond : :obj:`int`, optional
        Length of smoothing to apply to the causality preconditioner
    adjoint : :obj:`bool`, optional
        Compute and return adjoint(s)
    psf : :obj:`bool`, optional
        Compute and return Point Spread Function (PSF) and its inverse
    dtype : :obj:`bool`, optional
        Type of elements in input array.
    dottest : :obj:`bool`, optional
        Apply dot-test
    saveGt : :obj:`bool`, optional
        Save ``G`` and ``G^H`` to speed up the computation of adjoint of
        :class:`pylops.signalprocessing.Fredholm1` (``True``) or create
        ``G^H`` on-the-fly (``False``) Note that ``saveGt=True`` will be
        faster but double the amount of required memory
    **kwargs_solver
        Arbitrary keyword arguments for chosen solver
        (:py:func:`scipy.sparse.linalg.cg` and
        :py:func:`pylops.optimization.solver.cg` are used as default for numpy
        and cupy `data`, respectively)

    Returns
    -------
    minv : :obj:`numpy.ndarray`
        Inverted model of size :math:`[n_r (\times n_{vs}) \times n_t]`
        for ``twosided=False`` or
        :math:`[n_r (\times n_{vs}) \times 2*n_t-1]` for ``twosided=True``
    madj : :obj:`numpy.ndarray`
        Adjoint model of size :math:`[n_r (\times n_{vs}) \times n_t]`
        for ``twosided=False`` or
        :math:`[n_r (\times n_{vs}) \times 2*n_t-1]` for ``twosided=True``
    psfinv : :obj:`numpy.ndarray`
        Inverted psf of size :math:`[n_r \times n_r \times n_t]`
        for ``twosided=False`` or
        :math:`[n_r \times n_r \times 2*n_t-1]` for ``twosided=True``
    psfadj : :obj:`numpy.ndarray`
        Adjoint psf of size :math:`[n_r \times n_r \times n_t]`
        for ``twosided=False`` or
        :math:`[n_r \times n_r \times 2*n_t-1]` for ``twosided=True``

    See Also
    --------
    MDC : Multi-dimensional convolution

    Notes
    -----
    Multi-dimensional deconvolution (MDD) is an ill-posed mathematical problem,
    well known in the image processing and geophysics communities [1]_.

    MDD aims at removing the effects of a Multi-dimensional Convolution
    (MDC) kernel or the so-called blurring operator or point-spread
    function (PSF) from a given data. It can be written as

    .. math::
        \mathbf{d}= \mathbf{D} \mathbf{m}

    or, equivalently, by means of its normal equation

    .. math::
        \mathbf{m}= (\mathbf{D}^H\mathbf{D})^{-1} \mathbf{D}^H\mathbf{d}

    where :math:`\mathbf{D}^H\mathbf{D}` is the PSF.

    .. [1] Wapenaar, K., van der Neut, J., Ruigrok, E., Draganov, D., Hunziker,
       J., Slob, E., Thorbecke, J., and Snieder, R., "Seismic interferometry
       by crosscorrelation and by multi-dimensional deconvolution: a
       systematic comparison", Geophyscial Journal International, vol. 185,
       pp. 1335-1364. 2011.

    """
    ncp = get_array_module(d)

    ns, nr, nt = G.shape
    if len(d.shape) == 2:
        nv = 1
    else:
        nv = d.shape[1]
    if twosided:
        if add_negative:
            nt2 = 2 * nt - 1
        else:
            nt2 = nt
            nt = (nt2 + 1) // 2
        nfmax_allowed = int(np.ceil((nt2 + 1) / 2))
    else:
        nt2 = nt
        nfmax_allowed = nt

    # Cap nfmax at the number of positive frequencies in the FFT
    if nfmax is None or nfmax > nfmax_allowed:
        nfmax = nfmax_allowed
        logging.warning('nfmax set equal to maximum allowed value of %d' % nfmax)

    # Add negative part to data and model
    if twosided and add_negative:
        G = np.concatenate((ncp.zeros((ns, nr, nt - 1)), G), axis=-1)
        d = np.concatenate((np.squeeze(np.zeros((ns, nv, nt - 1))), d),
                           axis=-1)
    # Bring kernel to frequency domain
    Gfft = np.fft.rfft(G, nt2, axis=-1)
    Gfft = Gfft[..., :nfmax]

    # Bring frequency/time to first dimension
    Gfft = np.moveaxis(Gfft, -1, 0)
    d = np.moveaxis(d, -1, 0)
    if psf:
        G = np.moveaxis(G, -1, 0)

    # Define MDC linear operator
    MDCop = MDC(Gfft,
                nt2,
                nv=nv,
                dt=dt,
                dr=dr,
                twosided=twosided,
                transpose=False,
                saveGt=saveGt)
    if psf:
        PSFop = MDC(Gfft,
                    nt2,
                    nv=nr,
                    dt=dt,
                    dr=dr,
                    twosided=twosided,
                    transpose=False,
                    saveGt=saveGt)
    if dottest:
        Dottest(MDCop,
                nt2 * ns * nv,
                nt2 * nr * nv,
                verb=True,
                backend=get_module_name(ncp))
        if psf:
            Dottest(PSFop,
                    nt2 * ns * nr,
                    nt2 * nr * nr,
                    verb=True,
                    backend=get_module_name(ncp))

    # Adjoint
    if adjoint:
        madj = MDCop.H * d.flatten()
        madj = np.squeeze(madj.reshape(nt2, nr, nv))
        madj = np.moveaxis(madj, 0, -1)
        if psf:
            psfadj = PSFop.H * G.flatten()
            psfadj = np.squeeze(psfadj.reshape(nt2, nr, nr))
            psfadj = np.moveaxis(psfadj, 0, -1)

    # Inverse
    if twosided and causality_precond:
        P = np.ones((nt2, nr, nv))
        P[:nt - 1] = 0
        if smooth_precond > 0:
            P = filtfilt(np.ones(smooth_precond) / smooth_precond,
                         1,
                         P,
                         axis=0)
        P = to_cupy_conditional(d, P)
        Pop = Diagonal(P)
        minv = PreconditionedInversion(MDCop,
                                       Pop,
                                       d.flatten(),
                                       returninfo=False,
                                       **kwargs_solver)
    else:
        if ncp == np:
            minv = lsqr(MDCop, d.flatten(), **kwargs_solver)[0]
        else:
            minv = cgls(MDCop, d.flatten(),
                        ncp.zeros(int(MDCop.shape[1]), dtype=MDCop.dtype),
                        **kwargs_solver)[0]
    minv = np.squeeze(minv.reshape(nt2, nr, nv))
    minv = np.moveaxis(minv, 0, -1)

    if wav is not None:
        wav1 = wav.copy()
        for _ in range(minv.ndim - 1):
            wav1 = wav1[ncp.newaxis]
        minv = get_fftconvolve(d)(minv, wav1, mode='same')

    if psf:
        if ncp == np:
            psfinv = lsqr(PSFop, G.flatten(), **kwargs_solver)[0]
        else:
            psfinv = cgls(PSFop, G.flatten(),
                          ncp.zeros(int(PSFop.shape[1]), dtype=PSFop.dtype),
                          **kwargs_solver)[0]
        psfinv = np.squeeze(psfinv.reshape(nt2, nr, nr))
        psfinv = np.moveaxis(psfinv, 0, -1)
        if wav is not None:
            wav1 = wav.copy()
            for _ in range(psfinv.ndim - 1):
                wav1 = wav1[np.newaxis]
            psfinv = get_fftconvolve(d)(psfinv, wav1, mode='same')
    if adjoint and psf:
        return minv, madj, psfinv, psfadj
    elif adjoint:
        return minv, madj
    elif psf:
        return minv, psfinv
    else:
        return minv
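A minimal numpy-only sketch of the kernel preparation inside MDD, with illustrative shapes: the time axis of G (n_s x n_r x n_t) is transformed with a real FFT, truncated to nfmax frequencies, and the frequency axis is moved to the front so the operator can batch over frequencies.

import numpy as np

ns, nr, nt, nfmax = 4, 6, 100, 30
G = np.random.randn(ns, nr, nt)

Gfft = np.fft.rfft(G, nt, axis=-1)[..., :nfmax]   # (ns, nr, nfmax)
Gfft = np.moveaxis(Gfft, -1, 0)                   # (nfmax, ns, nr)
print(Gfft.shape)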
Ejemplo n.º 36
0
                 running_tar_loss / ite_num4val))

            #save entire model
            # torch.save(net, "saved_models/u2netp/itr_%d_train_%3f_tar_%3f.pth" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val))

            running_loss = 0.0
            running_tar_loss = 0.0
            net.train()  # resume train

            # middle_output = (d0[0][0] + d1[0][0] + d2[0][0] + d3[0][0] + d4[0][0] + d5[0][0] + d6[0][0]) / 7 * 255

            middle_output = d0[0][0] * 255

            middle_output = middle_output.cpu().detach().numpy()
            middle_input = inputs.cpu().detach().numpy()[0][:3]
            middle_input = np.moveaxis(middle_input, 0, 2) * 255
            middle_prior = inputs.cpu().detach().numpy()[0][3] * 255

            middle_label = labels.cpu().detach().numpy()[0][0] * 255

            # print('middle_input', middle_input.shape)
            # print('middle_prior', middle_prior.shape)

            # cv2.imwrite(model_dir + "saved_models/u2netp/_output_bce_itr_%d_train_%3f_tar_%3f.png" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val), middle_output)
            # cv2.imwrite(model_dir + "_input_bce_itr_%d_train_%3f_tar_%3f.png" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val), cv2.cvtColor(middle_input, cv2.COLOR_BGR2RGB))
            # cv2.imwrite(model_dir + "_prior_bce_itr_%d_train_%3f_tar_%3f.png" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val), middle_prior)
            # cv2.imwrite(model_dir + "_difference_prior_output_bce_itr_%d_train_%3f_tar_%3f.png" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val), middle_prior - middle_output)
            # cv2.imwrite(model_dir + "_difference_prior_label_bce_itr_%d_train_%3f_tar_%3f.png" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val), middle_prior - middle_label)
            # cv2.imwrite(model_dir + "_difference_output_label_bce_itr_%d_train_%3f_tar_%3f.png" % (ite_num, running_loss / ite_num4val, running_tar_loss / ite_num4val), middle_output - middle_label)

            cv2.imwrite("saved_models/u2netp/output_itr_%d.png" % (ite_num),
Ejemplo n.º 37
0
    #print(Y)
    #print('*************************************************************************************')

    # remove unphysical values
    X[X < 1e-6] = 0

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        train_size=0.05,
                                                        test_size=0.05)

    # add a trailing channel axis, then move it to axis 1 (channels-first ordering)
    X_train = np.expand_dims(X_train, axis=-1)
    X_test = np.expand_dims(X_test, axis=-1)
    print(X_train.shape)
    X_train = np.moveaxis(X_train, -1, 1)
    X_test = np.moveaxis(X_test, -1, 1)

    y_train = y_train / 100
    y_test = y_test / 100
    print(X_train.shape)
    print(X_test.shape)
    print(y_train.shape)
    print(y_test.shape)
    print(
        '*************************************************************************************'
    )

    nb_train, nb_test = X_train.shape[0], X_test.shape[0]

    X_train = X_train.astype(np.float32)
Ejemplo n.º 38
0
def Detect(img, model):
    transform = tv.transforms.Compose(
        [tv.transforms.Resize(224),
         tv.transforms.ToTensor()])
    img = transform(img)
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')
    thre = 0.5  # confidence threshold
    class_names = [
        'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat',
        'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person',
        'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor'
    ]
    model.eval()
    with torch.no_grad():
        prediction = model([img.to(device)])
    prediction = prediction[0]
    img = np.array(img)
    img = np.moveaxis(img, [0, 1, 2], [2, 0, 1])

    fig, ax = plt.subplots(1, figsize=(12, 12))
    ax.imshow(img)

    # prediction items
    boxes = prediction['boxes']
    scores = prediction['scores']
    labels = prediction['labels']

    # Bounding-box colors
    cmap = plt.get_cmap("tab20b")
    colors = [cmap(i) for i in np.linspace(0, 1, 20)]

    # assign a distinct color to each predicted class
    unique_labels = labels.cpu().unique()
    n_cls_preds = len(unique_labels)
    bbox_colors = random.sample(colors, n_cls_preds)

    for row in range(len(scores[scores > thre])):
        box = boxes[row, :].reshape(1, -1)
        box = box[0]
        x1 = box[0]
        y1 = box[1]
        box_w = box[2] - box[0]
        box_h = box[3] - box[1]
        cls_pred = labels[row]
        cls_conf = scores[row]

        color = bbox_colors[int(np.where(unique_labels == int(cls_pred))[0])]
        bbox = patches.Rectangle((x1, y1),
                                 box_w,
                                 box_h,
                                 linewidth=2,
                                 edgecolor=color,
                                 facecolor="none")
        ax.add_patch(bbox)
        plt.text(x1,
                 y1,
                 fontsize=14,
                 s=class_names[int(cls_pred) - 1] + ' ' + ('%.4f' % cls_conf),
                 color="white",
                 verticalalignment="top",
                 bbox={
                     "color": color,
                     "pad": 0
                 })

    plt.show()
Ejemplo n.º 39
0
    os.makedirs(log_dir)

#
# load the data
#
fds = xr.open_dataset(feature_data)
lds = xr.open_dataset(label_data)
feature = fds.QRAIN.values
label = lds.W.values

#
# move the channels from position 1 to position 3
# goes from [time,channel,height,width] to [time, height, width, channel]
# which is the default for Conv2D.
#
feature = np.moveaxis(feature, 1, 3)
label = np.moveaxis(label, 1, 3)

#
# random shuffle
#
s = np.arange(feature.shape[0])
np.random.shuffle(s)

#
#
#
num_images = feature.shape[0]

train_data_start = 0
train_data_end = int(
Ejemplo n.º 40
0
    def verify_ell_model(self,
                         onnx_model_path,
                         verify_compiled=True,
                         arg_max_only=False):
        """
        Test each operation in the onnx graph by creating a custom pytorch layer for each node then
        run forward with the onnx node weight on both ell and pytorch node.  If verify_compiled is
        True then also test compiled ELL model.
        """

        self._logger.info("Model verification started")
        try:
            # install debug hooks into torch model so we capture output of every layer.
            info = LayerInfo(self.torch_model)

            # get the pytorch model output
            self.torch_model.eval()

            model_name = os.path.basename(onnx_model_path)
            model_name = os.path.splitext(model_name)[0]
            ell_map, ordered_importer_nodes = onnx_to_ell.convert_onnx_to_ell(
                onnx_model_path)
            ell_map.Save(model_name + ".ell")

            # Get compiled ELL result
            if verify_compiled:
                self._logger.info("Getting compiled ELL results")
                compiler_options = ell.model.MapCompilerOptions()
                compiler_options.useBlas = True
                compiled_ell_map = ell_map.Compile(
                    "host",
                    "model",
                    "predict",
                    compilerOptions=compiler_options,
                    dtype=np.float32)

            input_index = 0
            for test_input in self.input_tensors:

                # get torch model output
                info.clear()
                torch_out = self.torch_model.forward(
                    test_input).data.numpy().ravel()

                test_input = test_input.detach().cpu().numpy(
                )  # convert to numpy
                order = self.get_order(test_input.shape)
                ell_input_tensor = memory_shapes.get_tensor_in_ell_order(
                    test_input, order)
                ell_flat_input = ell_input_tensor.ravel().astype(np.float32)
                if verify_compiled:
                    ell_out_compiled = np.array(
                        compiled_ell_map.Compute(ell_flat_input,
                                                 dtype=np.float32))

                # must come after the compiled Compute so that map contains valid outputs for layer by layer test
                ell_out = np.array(
                    ell_map.Compute(ell_input_tensor, dtype=np.float32))
                ell_nodes = get_nodes(ell_map)

                if not arg_max_only:
                    # Compare the layers of the torch model with the corresponding layers of the ELL model
                    for key in info.layers.keys():
                        if input_index == 0:
                            self._logger.info(
                                "----- Comparing Layer {} output -----".format(
                                    key))
                        torch_output = info.layers[key]["output"].detach(
                        ).numpy().ravel()
                        node = get_matching_node_output(ell_nodes, key)
                        if node is not None:
                            port = node.GetOutputPort("output")
                            shape = tuple(port.GetMemoryLayout().size)
                            extent = tuple(port.GetMemoryLayout().extent)
                            offset = tuple(port.GetMemoryLayout().offset)

                            # padding = tuple(port.GetMemoryLayout().padding) # output shape includes padding
                            ell_output = np.array(
                                port.GetDoubleOutput()).astype(np.float32)
                            # now to compare ell (row,col,channel) with torch (channel,row,col) we have to reorder
                            ell_output = get_active_region(
                                ell_output, shape, extent, offset)
                            ell_output = np.moveaxis(ell_output, 2, 0).ravel()

                            # close = np.allclose(torch_output, ell_output, atol=1e-3)
                            np.testing.assert_almost_equal(
                                torch_output,
                                ell_output,
                                decimal=3,
                                err_msg=
                                ('results for ELL layer {} do not match torch output for row {}'
                                 .format(node.GetRuntimeTypeName(),
                                         input_index)))

                # compare whole model output but only the argmax of it,
                # because sometimes model has Softmax but ELL does not.
                torch_prediction = np.argmax(torch_out)
                ell_prediction = np.argmax(ell_out)
                if verify_compiled:
                    compiled_prediction = np.argmax(ell_out_compiled)
                    msg = "argmax of ELL result {}, ELL compiled result {} and argmax of torch output {} on row {}"
                    msg = msg.format(ell_prediction, compiled_prediction,
                                     torch_prediction, input_index)
                else:
                    msg = "argmax of ELL result {} and argmax of torch output {} on row {}".format(
                        ell_prediction, torch_prediction, input_index)

                self._logger.info(msg)
                np.testing.assert_equal(torch_prediction, ell_prediction, msg)

                if verify_compiled:
                    np.testing.assert_equal(torch_prediction,
                                            compiled_prediction, msg)
                input_index += 1

        except BaseException as exception:
            self._logger.error("Verification of model output failed: " +
                               str(exception))
            raise

        self._logger.info("Verification of model output complete")
def reverse_channels(img):
    return np.moveaxis(img, 0, -1)  # source, dest
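A quick usage sketch for the reverse_channels helper defined above: a channels-first (C, H, W) array becomes channels-last (H, W, C), the layout matplotlib and most image libraries expect.

import numpy as np

chw = np.zeros((3, 32, 48))
hwc = reverse_channels(chw)
print(hwc.shape)   # (32, 48, 3)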
Ejemplo n.º 42
0
np.tensordot(B, A)
np.tensordot(A, A)
np.tensordot(A, A, axes=0)
np.tensordot(A, A, axes=(0, 1))

np.isscalar(i8)
np.isscalar(A)
np.isscalar(B)

np.roll(A, 1)
np.roll(A, (1, 2))
np.roll(B, 1)

np.rollaxis(A, 0, 1)

np.moveaxis(A, 0, 1)
np.moveaxis(A, (0, 1), (1, 2))

np.cross(B, A)
np.cross(A, A)

np.indices([0, 1, 2])
np.indices([0, 1, 2], sparse=False)
np.indices([0, 1, 2], sparse=True)

np.binary_repr(1)

np.base_repr(1)

np.allclose(i8, A)
np.allclose(B, A)
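A short demonstration of the np.moveaxis calls listed above: only the named axes move, and every other axis keeps its relative order.

import numpy as np

A = np.empty((2, 3, 4))
print(np.moveaxis(A, 0, 1).shape)             # (3, 2, 4)
print(np.moveaxis(A, (0, 1), (1, 2)).shape)   # (4, 2, 3)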
Ejemplo n.º 43
0
def skeletonize(imgs, cases, args, skelDir, miFile):

    target = load(args.template)
    targetData = target.get_data()
    X, Y, Z = targetData.shape[0], targetData.shape[1], targetData.shape[2]

    # provide the user with the allFA sequence so they know which volume they are looking at while scrolling through allFA
    seqFile = pjoin(args.statsDir, f'all_{args.modality}_sequence.txt')
    with open(seqFile, 'w') as f:
        f.write('index,caseid\n')
        for i, c in enumerate(cases):
            f.write(f'{i},{c}\n')

    print(f'Calculating mean {args.modality} over all the cases ...')
    allFAdata, cumsumFA = calc_mean(imgs, (X, Y, Z), args.qc)

    if args.qc:
        allFA = pjoin(args.statsDir, f'all_{args.modality}.nii.gz')
        save_nifti(allFA, np.moveaxis(allFAdata, 0, -1), target.affine,
                   target.header)

        print(
            f'''\n\nQC the warped {args.modality} images: {allFA}, view {seqFile} for index of volumes in all_FA.nii.gz. 
You may use fsleyes/fslview to load {allFA}.

MI metric b/w the warped images and target are stored in {miFile}

It might be helpful to re-run registration for warped images that are bad.

Moving images are   :   {args.outDir}/preproc/
Target is           :   {args.template}
Transform files are :   {args.xfrmDir}/
Warped images are   :   {args.outDir}/warped/

Save any re-registered images in {args.outDir}/warped/ with the same name as before

For re-registration of any subject, output the transform files to a temporary directory:
        
        mkdir /tmp/badRegistration/
        
        antsRegistrationSyNQuick.sh -d 3 \\
        -f TEMPLATE \\
        -m FA/preproc/caseid_FA.nii.gz \\
        -o /tmp/badRegistration/caseid_FA
        
        antsApplyTransforms -d 3 \\
        -i FA/preproc/caseid_FA.nii.gz \\
        -o FA/warped/caseid_[FA/MD/AD/RD]_to_target.nii.gz \\
        -r TEMPLATE \\
        -t /tmp/badRegistration/caseid_FA1Warp.nii.gz /tmp/badRegistration/caseid_FA0GenericAffine.mat
    
Finally, if wanted, you can copy the transform files to {args.xfrmDir}/ directory.

Note: Replace all the above directories with absolute paths.\n\n''')

        while input('Press Enter when you are done with QC/re-registration: '):
            pass

        allFAdata, cumsumFA = calc_mean(imgs, targetData.shape, args.qc)

    meanFAdata = cumsumFA / len(imgs)
    meanFA = pjoin(args.statsDir, 'mean_FA.nii.gz')

    # outDir should contain
    # all_{modality}.nii.gz
    # mean_FA.nii.gz
    # mean_FA_mask.nii.gz
    # mean_FA_skeleton.nii.gz
    # mean_FA_skeleton_mask.nii.gz
    # mean_FA_skeleton_mask_dst.nii.gz

    if args.modality == 'FA':

        if not args.templateMask:
            print('Creating template mask ...')
            args.templateMask = pjoin(args.statsDir, 'mean_FA_mask.nii.gz')
            meanFAmaskData = (meanFAdata > 0) * 1
            save_nifti(args.templateMask, meanFAmaskData.astype('uint8'),
                       target.affine, target.header)

        else:
            meanFAmaskData = load(args.templateMask).get_data()

        meanFAdata = meanFAdata * meanFAmaskData
        save_nifti(meanFA, meanFAdata, target.affine, target.header)

        # if skeleton is not given:
        #     create all three of skeleton, skeletonMask, and skeletonMaskDst

        # if skeleton is given and (neither skeletonMask nor skeletonMaskDst is given):
        #     create skeletonMask and skeletonMaskDst

        # if skeleton and skeletonMask is given and skeletonMaskDst is not given:
        #     create skeletonMaskDst

        if not args.skeleton:
            print(
                'Creating all three of skeleton, skeletonMask, and skeletonMaskDst ...'
            )
            args.skeleton = pjoin(args.statsDir, 'mean_FA_skeleton.nii.gz')
            args.skeletonMask = pjoin(args.statsDir,
                                      'mean_FA_skeleton_mask.nii.gz')
            args.skeletonMaskDst = pjoin(args.statsDir,
                                         'mean_FA_skeleton_mask_dst.nii.gz')

            _create_skeleton(meanFA, args.skeleton)
            _create_skeletonMask(args.skeleton, args.SKEL_THRESH,
                                 args.skeletonMask)
            _create_skeletonMaskDst(args.templateMask, args.skeletonMask,
                                    args.skeletonMaskDst)

        if args.skeleton and not (args.skeletonMask or args.skeletonMaskDst):
            print('Creating skeletonMask and skeletonMaskDst ...')
            args.skeletonMask = pjoin(args.statsDir,
                                      'mean_FA_skeleton_mask.nii.gz')
            args.skeletonMaskDst = pjoin(args.statsDir,
                                         'mean_FA_skeleton_mask_dst.nii.gz')

            _create_skeletonMask(args.skeleton, args.SKEL_THRESH,
                                 args.skeletonMask)
            _create_skeletonMaskDst(args.templateMask, args.skeletonMask,
                                    args.skeletonMaskDst)

        if args.skeleton and not args.skeletonMask and args.skeletonMaskDst:
            print('Creating skeletonMask ...')
            args.skeletonMask = pjoin(args.statsDir,
                                      'mean_FA_skeleton_mask.nii.gz')

            _create_skeletonMask(args.skeleton, args.SKEL_THRESH,
                                 args.skeletonMask)

        if (args.skeleton and args.skeletonMask) and not args.skeletonMaskDst:
            print('Creating skeletonMaskDst ...')
            args.skeletonMaskDst = pjoin(args.statsDir,
                                         'mean_FA_skeleton_mask_dst.nii.gz')

            _create_skeletonMaskDst(args.templateMask, args.skeletonMask,
                                    args.skeletonMaskDst)

    # mask allFA; this does not affect the pipeline and is done only to aid visualization
    if args.qc:
        check_call(
            (' ').join(['fslmaths', allFA, '-mas', args.templateMask, allFA]),
            shell=True)

    # projecting all {modality} data onto skeleton
    pool = Pool(args.ncpu)
    for c, imgPath in zip(cases, imgs):
        pool.apply_async(project_skeleton, (c, imgPath, args, skelDir),
                         error_callback=RAISE)

    pool.close()
    pool.join()

    if not args.noAllSkeleton:

        allFAskeletonized = pjoin(args.statsDir,
                                  f'all_{args.modality}_skeletonized.nii.gz')
        print('Creating ', allFAskeletonized)

        # this loop has been moved out of the multiprocessing block to prevent memory errors
        allFAskeletonizedData = np.zeros((len(imgs), X, Y, Z), dtype='float32')
        for i, c in enumerate(cases):
            allFAskeletonizedData[i, :] = load(
                pjoin(
                    skelDir,
                    f'{c}_{args.modality}_to_target_skel.nii.gz')).get_data()

        save_nifti(allFAskeletonized, np.moveaxis(allFAskeletonizedData, 0,
                                                  -1), target.affine,
                   target.header)
        print(
            f'Created {allFAskeletonized} and corresponding index file: {seqFile}'
        )

    return args
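A minimal sketch of the volume stacking done twice in skeletonize, with toy dimensions: N subject volumes collected as (N, X, Y, Z) are reordered to (X, Y, Z, N) so the subject index becomes the fourth, time-like NIfTI dimension expected by save_nifti.

import numpy as np

N, X, Y, Z = 3, 4, 5, 6
vols = np.zeros((N, X, Y, Z), dtype='float32')
nifti_ready = np.moveaxis(vols, 0, -1)
print(nifti_ready.shape)   # (4, 5, 6, 3)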
Ejemplo n.º 44
0
def scattering_matrix(vp1, vs1, rho1, vp2, vs2, rho2, theta1=0):
    """
    Full Zoeppritz solution, considered the definitive solution.
    Calculates the angle dependent p-wave reflectivity of an interface
    between two mediums.

    Originally written by: Wes Hamlyn, vectorized by Agile.

    Returns the complex reflectivity.

    Args:
        vp1 (float): The upper P-wave velocity.
        vs1 (float): The upper S-wave velocity.
        rho1 (float): The upper layer's density.
        vp2 (float): The lower P-wave velocity.
        vs2 (float): The lower S-wave velocity.
        rho2 (float): The lower layer's density.
        theta1 (ndarray): The incidence angle; float or 1D array length n.

    Returns:
        ndarray. The exact Zoeppritz solution for all modes at the interface.
            A 4x4 array representing the scattering matrix at the incident
            angle theta1.
    """
    theta1 *= np.ones_like(vp1)
    p = np.sin(theta1) / vp1  # Ray parameter.
    theta2 = np.arcsin(p * vp2)  # Trans. angle of P-wave.
    phi1 = np.arcsin(p * vs1)  # Refl. angle of converted S-wave.
    phi2 = np.arcsin(p * vs2)  # Trans. angle of converted S-wave.

    # Matrix form of Zoeppritz equations... M & N are matrices.
    M = np.array(
        [[-np.sin(theta1), -np.cos(phi1),
          np.sin(theta2),
          np.cos(phi2)],
         [np.cos(theta1), -np.sin(phi1),
          np.cos(theta2), -np.sin(phi2)],
         [
             2 * rho1 * vs1 * np.sin(phi1) * np.cos(theta1),
             rho1 * vs1 * (1 - 2 * np.sin(phi1)**2),
             2 * rho2 * vs2 * np.sin(phi2) * np.cos(theta2),
             rho2 * vs2 * (1 - 2 * np.sin(phi2)**2)
         ],
         [
             -rho1 * vp1 * (1 - 2 * np.sin(phi1)**2),
             rho1 * vs1 * np.sin(2 * phi1),
             rho2 * vp2 * (1 - 2 * np.sin(phi2)**2),
             -rho2 * vs2 * np.sin(2 * phi2)
         ]])

    N = np.array(
        [[np.sin(theta1),
          np.cos(phi1), -np.sin(theta2), -np.cos(phi2)],
         [np.cos(theta1), -np.sin(phi1),
          np.cos(theta2), -np.sin(phi2)],
         [
             2 * rho1 * vs1 * np.sin(phi1) * np.cos(theta1),
             rho1 * vs1 * (1 - 2 * np.sin(phi1)**2),
             2 * rho2 * vs2 * np.sin(phi2) * np.cos(theta2),
             rho2 * vs2 * (1 - 2 * np.sin(phi2)**2)
         ],
         [
             rho1 * vp1 * (1 - 2 * np.sin(phi1)**2),
             -rho1 * vs1 * np.sin(2 * phi1),
             -rho2 * vp2 * (1 - 2 * np.sin(phi2)**2),
             rho2 * vs2 * np.sin(2 * phi2)
         ]])

    M_ = np.moveaxis(np.squeeze(M), [0, 1], [-2, -1])
    A = np.linalg.inv(M_)
    N_ = np.moveaxis(np.squeeze(N), [0, 1], [-2, -1])
    Z_ = np.matmul(A, N_)

    return np.transpose(Z_, axes=list(range(Z_.ndim - 2)) + [-1, -2])
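A hedged usage sketch for the scattering_matrix function above, with made-up rock properties for a two-layer interface. Each angle yields a 4x4 scattering matrix; by the row/column ordering of M and N, the P-to-P reflection coefficient is the [0, 0] element.

import numpy as np

theta = np.radians(np.linspace(0, 40, 41))
Z = scattering_matrix(vp1=2750., vs1=1400., rho1=2400.,
                      vp2=3050., vs2=1600., rho2=2450., theta1=theta)
rpp = Z[..., 0, 0]           # PP reflectivity versus angle
print(Z.shape, rpp.shape)    # (41, 4, 4) (41,)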
Ejemplo n.º 45
0
  def lime_segmentation(self,forward_func, inp_image, inp_transform_flag,transform_func, device,
                        output_path, input_file, tran_func, is_3d, target, batch_dim_present,
                        top_labels, num_samples, num_features, depth=None):

    self.device = device
    indx = 0
    target = target if type(target) == int else int(target[0])
    if inp_transform_flag:
      ip = transform_func(inp_image).to(device)
    else:
      ip = inp_image.to(device)

    if not batch_dim_present:
        ip = ip.unsqueeze(0)

    def batch_func(imgs):
      forward_func.to(device)
      imgs = torch.from_numpy(imgs).transpose(-1,1).float().to(device)
      op = forward_func(imgs).detach().cpu().numpy()
      return op

    def batch_func_3d(imgs):
      forward_func.to(device)
      outputArray = []
      for i in range(imgs.shape[0]):
          ip[0,0,indx]= torch.from_numpy(imgs[i,:,:,0]).float().to(device)
          outputArray.append(forward_func(ip).detach().cpu().numpy().transpose())
      return np.array(outputArray).squeeze(-1)


    explainer = lime_image.LimeImageExplainer()
    if not is_3d:
      if type(inp_image) == torch.Tensor:
          input = np.array(inp_image.squeeze().type(torch.DoubleTensor).cpu())
          if input.shape[0] == 3:
              input = np.moveaxis(input, 0, -1)
      else:
          input = np.array(tran_func(inp_image))
      explanation = explainer.explain_instance(input,
                                             batch_func,
                                             top_labels=top_labels,
                                             hide_color=0,
                                             num_samples=num_samples,)
      temp, mask = explanation.get_image_and_mask(target, positive_only=True,
                                                num_features=num_features, hide_rest=True)
      img_boundry1 = mark_boundaries(temp / 255.0, mask)
      save_im = Image.fromarray((img_boundry1 * 255).astype(np.uint8))
      save_im.save(output_path + "/" + input_file + '_lime_' + "_towards_prediction_class_" + str(target) + ".png",
                 format='png')

      temp, mask = explanation.get_image_and_mask(target, positive_only=False,
                                                num_features=num_features, hide_rest=False)
      img_boundry2 = mark_boundaries(temp / 255.0, mask)
      save_im = Image.fromarray((img_boundry2 * 255).astype(np.uint8))
      save_im.save(output_path + "/" + input_file + '_lime_' + "_against_prediction_class_" + str(target) + ".png",
                 format='png')
    else:
      #print(target, type(target))
      if not batch_dim_present:
          inp_image = inp_image.unsqueeze(0)
      if not os.path.exists(output_path + '/Lime_'+input_file):
        os.mkdir(output_path + '/Lime_'+input_file)
      while indx < depth:
        explanation = explainer.explain_instance(np.array(inp_image[0,0,indx].type(torch.DoubleTensor)),
                                             batch_func_3d,
                                             top_labels=top_labels,
                                             hide_color=0,
                                             num_samples=num_samples)
        temp, mask = explanation.get_image_and_mask(target, positive_only=False,
                                                    num_features=num_features, hide_rest=False)
        img_boundry1 = mark_boundaries(temp / 255.0, mask)
        save_im = Image.fromarray((img_boundry1 * 255).astype(np.uint8))
        save_im.save(output_path + '/Lime_'+input_file +"/" + str(indx)+"_towards_prediction_class_" + str(target) + ".png",
                     format='png')

        indx += 1
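A minimal sketch of the LIME call pattern used in lime_segmentation, assuming the lime and scikit-image packages are installed. The classifier below is a dummy returning random two-class probabilities; a real model's batched prediction function goes in its place.

import numpy as np
from lime import lime_image
from skimage.segmentation import mark_boundaries

def dummy_classifier(images):                    # images: (N, H, W, 3)
    return np.random.rand(len(images), 2)        # fake class probabilities

image = np.random.rand(64, 64, 3)
explainer = lime_image.LimeImageExplainer()
explanation = explainer.explain_instance(image, dummy_classifier,
                                         top_labels=2, hide_color=0,
                                         num_samples=100)
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0],
                                            positive_only=True,
                                            num_features=5, hide_rest=True)
overlay = mark_boundaries(temp, mask)            # boundary image, as saved above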
Ejemplo n.º 46
0
# Load Dataset
if dataset_name.lower() in ["mnist", "mnist_digits", "mnistdigits"]:
    X, y = mnist.load_data()[0]
    selected_idx = mnist_idx

elif dataset_name.lower() in ["cifar", "cifar10"]:
    X, y = cifar10.load_data()[0]
    selected_idx = cifar_idx

elif dataset_name.lower() in ['cifar_gray', 'cifar10_gray', 'cifargray']:
    from skimage.color import rgb2gray

    X, y = cifar10.load_data()[0]
    selected_idx = cifar_idx
    X = np.moveaxis(X, 1, 3)
    X = rgb2gray(X) * 255
    print("Array Shape:", X.shape)

elif dataset_name.lower() in ["fashion", "fashion_mnist", "fashionmnist"]:
    X, y = fashion_mnist.load_data()[0]
    selected_idx = fashion_idx

else:
    print("Dataset not found.")
    sys.exit(1)

y = np.array([selected_idx[int(val)] for val in y])

print("Dataset loaded.")
Ejemplo n.º 47
0
    def f_lut(array):
        if len(axis) > 1 and list(axis) != list(range(len(axis))):
            # only reorder when the source axes are not already leading
            array = np.moveaxis(array,
                                source=axis,
                                destination=np.arange(len(axis)))
        elif add_empty_axis:
            array = array.reshape((1, ) + array.shape)

        # if 'int' not in str(array.dtype):
        #     log.warn('Array passed to apply_lut was converted to int32. Numeric precision may have been lost.')

        # Read array shape
        a_source_shape = array.shape[:n_axis]
        map_shape = array.shape[n_axis:]
        map_size = int(np.prod(map_shape))

        # Check source shape
        if a_source_shape != source_shape:
            raise ValueError(
                'Invalid dimensions on axis: %s. (expected: %s, received: %s)'
                % (str(axis), str(source_shape), str(a_source_shape)))

        # Prepare table
        array = np.moveaxis(array.reshape(source_shape + (map_size, )), -1,
                            0).reshape((map_size, source_size))

        if isinstance(sampling, str):
            id_mapped = None
        else:
            if sampling != 1:
                array = (array / sampling).astype(np.int32)
            id_mapped = np.logical_not(
                np.any(np.logical_or(array > maxs, array < mins), axis=1))
            array = np.sum((array - mins) * stride, axis=1).astype(np.uint32)

        # Map values
        if isinstance(lut_sources, str):  # 'nearest': nearest-neighbour match
            a = np.sum(
                (array[np.newaxis, :, :] - sources[:, np.newaxis, :])**2,
                axis=-1)
            a = np.argmin(a, axis=0) + 1
        elif lut_sources is not None:
            a = np.zeros(shape=(map_size, ), dtype=np.uint32)
            a[id_mapped] = lut_sources[array[id_mapped]]
        else:
            a = array + 1
        array = lut_dests[a]

        del a
        del id_mapped

        # Reshape
        array = array.reshape(map_shape + dest_shape)

        array = np.moveaxis(
            array, np.arange(len(map_shape), array.ndim),
            np.arange(dest_axis, dest_axis + len(dest_shape))
            if len(dest_shape) != len(axis) else axis)
        if not keep_dims and dest_shape == (1, ):
            array = array.reshape(map_shape)

        return array
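A minimal sketch of the idea behind f_lut above: map each pixel's RGB triplet to a class id through a flat lookup table, using np.moveaxis to split the channel axis into separate planes that can be packed into single integer keys. The three-color palette is a made-up example, not taken from the code above.

import numpy as np

palette = np.array([[255, 0, 0], [0, 255, 0], [0, 0, 255]], dtype=np.uint32)

def apply_palette_lut(img):                                # img: (H, W, 3) uint8
    r, g, b = np.moveaxis(img, -1, 0).astype(np.uint32)    # three (H, W) planes
    keys = (r << 16) | (g << 8) | b                        # 24-bit color keys
    lut = np.zeros(2**24, dtype=np.uint8)                  # 0 = "not in palette"
    for i, (pr, pg, pb) in enumerate(palette, start=1):
        lut[(pr << 16) | (pg << 8) | pb] = i
    return lut[keys]                                       # (H, W) class ids

img = np.zeros((2, 2, 3), dtype=np.uint8)
img[0, 0] = (255, 0, 0)
print(apply_palette_lut(img))   # [[1 0], [0 0]]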
Ejemplo n.º 48
0
def train_demo(cfg, net, criterion, optimizer, lr_scheduler):
    # Data generator
    train_data, test_data = get_data(cfg)
    if is_training:
        data = train_data
    else:
        data = test_data

    # Initialize the network the right way
    # net.train and net.eval account for differences in dropout/batch norm
    # during training and testing
    start = 0
    if is_training:
        net.train().cuda()
    else:
        net.eval().cuda()
    if cfg.WEIGHTS_FILE_BASE is not None and cfg.WEIGHTS_FILE_BASE != '':
        print('Restoring weights from %s...' % cfg.WEIGHTS_FILE_BASE)
        with open(cfg.WEIGHTS_FILE_BASE, 'rb') as f:
            checkpoint = torch.load(f)
            net.load_state_dict(checkpoint['state_dict'])
            # print(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            start = checkpoint['epoch'] + 1
        print('Done.')
    print('Done.')

    metrics = {'acc_all': [], 'acc_nonzero': [], 'loss': []}
    # Only enable gradients if we are training
    # with torch.set_grad_enabled(is_training):
    durations = []
    for i in range(cfg.MAX_STEPS):  # use with torch.no_grad() for test network
        # Check parameters for nan
        # print('Check for nan...')
        # had_nan = False
        # for p in net.parameters():
        #     if torch.isnan(p).any():
        #         print(i, p)
        #         had_nan = True
        #
        # for name in net.state_dict():
        #     tensor = net.state_dict()[name]
        #     if name == 'sparseModel.2.4.1.2.4.1.2.4.1.2.4.1.2.0.1.0.runningVar':
        #         print(i, name, tensor)
        #     if torch.isnan(tensor).any():
        #         print(i, name, tensor)
        #         had_nan = True
        # if had_nan:
        #     break
        # print('Done.')

        # inputs, label = dataloader(i)
        print("Step %d/%d" % (i, cfg.MAX_STEPS))
        blob = data.forward()
        print(blob['voxels'].shape, blob['voxels_value'].shape,
              blob['data'].shape, blob['labels'].shape)
        if sparse:
            coords = torch.from_numpy(blob['voxels']).cuda()
            features = torch.from_numpy(
                np.reshape(blob['voxels_value'], (-1, 1))).cuda()
            # print(coords.type(), features.type())
            start = time.time()
            predictions_raw = net(coords,
                                  features)  # size N_voxels x num_classes
            end = time.time()
            durations.append(end - start)
            # print(predictions_raw.size())
            label_voxels, labels = extract_voxels(blob['labels'])
            labels = torch.from_numpy(labels).cuda().type(
                torch.cuda.LongTensor)
            # print(labels, label_voxels, blob['voxels'])
            # print(net.parameters())
        else:
            image = torch.from_numpy(np.moveaxis(blob['data'], -1, 1)).cuda()
            labels = torch.from_numpy(blob['labels']).cuda().type(
                torch.cuda.LongTensor)
            start = time.time()
            predictions_raw = net(image)
            end = time.time()
            durations.append(end - start)

        loss = criterion(predictions_raw, labels)
        if is_training:
            lr_scheduler.step()  # Decay learning rate
            optimizer.zero_grad()  # Clear previous gradients
            loss.backward()  # Compute gradients of all variables wrt loss
            nn.utils.clip_grad_norm_(net.parameters(), 1.0)  # Clip gradient
            optimizer.step()  # update using computed gradients
        metrics['loss'].append(loss.item())
        print("\tLoss = ", metrics['loss'][-1])

        # Accuracy
        predicted_labels = torch.argmax(predictions_raw, dim=1)
        acc_all = (predicted_labels == labels).sum().item() / float(
            labels.numel())
        nonzero_px = labels > 0
        nonzero_prediction = predicted_labels[nonzero_px]
        nonzero_label = labels[nonzero_px]
        acc_nonzero = (nonzero_prediction
                       == nonzero_label).sum().item() / float(
                           nonzero_label.numel())
        metrics['acc_all'].append(acc_all)
        metrics['acc_nonzero'].append(acc_nonzero)
        print("\tAccuracy = ", metrics['acc_all'][-1],
              " - Nonzero accuracy = ", metrics['acc_nonzero'][-1])

        if is_training and i % 100 == 0:
            # Dump metrics to CSV
            for attr in metrics:
                np.savetxt(os.path.join(cfg.OUTPUT_DIR,
                                        '%s_%d.csv' % (attr, i)),
                           metrics[attr],
                           delimiter=',')
                # metrics[attr] = []

            # Save a checkpoint
            filename = os.path.join(cfg.OUTPUT_DIR, 'model-%d.ckpt' % i)
            torch.save(
                {
                    'epoch': i,
                    'state_dict': net.state_dict(),
                    'optimizer': optimizer.state_dict(),
                    'lr_scheduler': lr_scheduler.state_dict()
                }, filename)

        if not is_training:
            print('Display...')
            if sparse:
                final_predictions = np.zeros(
                    (1, cfg.IMAGE_SIZE, cfg.IMAGE_SIZE, cfg.IMAGE_SIZE))
                indices = label_voxels.T
                final_predictions[
                    0, indices[0], indices[1],
                    indices[2]] = predicted_labels.cpu().data.numpy()
                display_uresnet(blob,
                                cfg,
                                index=i,
                                predictions=final_predictions)
            else:
                display_uresnet(
                    blob,
                    cfg,
                    index=i,
                    predictions=predicted_labels.cpu().data.numpy())
            print('Done.')
    print("Average duration = %f s" % np.array(durations).mean())
Example No. 49
def load_h5(fpath,
            clip_flyscan=True,
            xbic_on_dsic=False,
            quant_scaler="us_ic"):
    """
    Loads a MAPS-generated .h5 file (raw + fitted data from 2IDD, fitted from 26IDC)

    fpath: path to file
    clip_flyscan: boolean, removes last two columns of map (which are corrupted when running flyscans)
    xbic_on_dsic: boolean. if True, considers the downstream ion chamber to be connected in an XBIC setup, adds to output['maps']['XBIC']
    quant_scaler: one of ['sr_current', 'us_ic', 'ds_ic']. quantification is normalized to some metric of beam power deposited in sample. typically we use upstream ion chamber

    returns a dictionary with scan info and maps.
    """

    quant_scaler_key = {
        "sr_current": 0,  # storage ring current
        "us_ic": 1,  # upstream ion chamber (incident flux)
        "ds_ic": 2,  # downstream ion chamber (transmitted flux - make sure this sensor was not used for XBIC!)
    }
    if quant_scaler not in quant_scaler_key:
        raise ValueError(
            f"Quantification normalization must be one of sr_current, us_ic, or ds_ic; user provided {quant_scaler}."
        )

    output = {"filepath": fpath}
    if clip_flyscan:
        xmask = slice(0, -2)  # last two columns are garbage from flyscans, omit them
    else:
        xmask = slice(0, None)  # no clipping

    with h5py.File(fpath, "r") as dat:
        output["x"] = dat["MAPS"]["x_axis"][()]
        output["y"] = dat["MAPS"]["y_axis"][()]
        output["spectra"] = np.moveaxis(
            dat["MAPS"]["mca_arr"][()], 0,
            2)[:, xmask]  # y by x by energy, full XRF spectra
        output["energy"] = dat["MAPS"]["energy"][:output["spectra"].shape[
            -1]]  # specmap only has 2000 bins sometimes, match energy to that
        output["intspectra"] = dat["MAPS"][
            "int_spec"][:output["spectra"].shape[-1]]  # integrated spectra
        output["extent"] = [
            output["x"][0],
            output["x"][-1],
            output["y"][0],
            output["y"][-1],
        ]

        scaler_names = dat["MAPS"]["scaler_names"][()].astype("U13").tolist()
        quant_scaler_values = np.array(
            dat["MAPS"]["scalers"][scaler_names.index(quant_scaler)])[:, xmask]
        # Replace zeros with the mean of the positive values to avoid divide-by-zero
        fillval = np.nanmean(quant_scaler_values[quant_scaler_values > 0])
        quant_scaler_values = np.where(quant_scaler_values == 0.0, fillval,
                                       quant_scaler_values)
        if "/MAPS/XRF_fits" in dat:
            xrf = []
            raw = dat["MAPS"][
                "XRF_fits"][:, :,
                            xmask]  # xrf elemental maps, elements by y by x
            quant = np.moveaxis(
                dat["MAPS"]["XRF_fits_quant"][()], 2, 0
            )  # quantification factors from MAPS fitting, elements by factors
            for x, q in zip(raw, quant):
                x = np.divide(
                    x,
                    quant_scaler_values)  # normalize to quantification scaler
                quantfactor = q[quant_scaler_key[quant_scaler]]
                xrf.append(
                    x / quantfactor / 4
                )  # factor of 4 came from discussion w/ Arthur Glowacki @ APS, and brings this value in agreement with MAPS displayed value. not sure why it is necessary though...
                # update 20221228: this factor changes from run to run, but is always a round number (have seen 1, 4, and 10). I expect it could be related to usic amplifier settings or similar, but cant find a related value in the h5 file REK
            output["fitted"] = True
        else:
            xrf = dat["MAPS"]["XRF_roi"][:, :, xmask]
            output["fitted"] = False

        allchan = dat["MAPS"]["channel_names"][()].astype("U13").tolist()
        if xbic_on_dsic:
            # Append DSIC to the channels; used for XBIC
            xrf.append(dat["MAPS"]["scalers"][scaler_names.index("ds_ic")][:, xmask])
            allchan.append("XBIC")
    output["maps"] = {}
    for channel, xrf_ in zip(allchan, xrf):
        output["maps"][channel] = np.array(xrf_)
    return output
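A minimal usage sketch; the file path and the "Cu" channel below are hypothetical placeholders for a real MAPS output file and one of its fitted element maps:

import matplotlib.pyplot as plt

scan = load_h5("/data/sector2/sample_scan.h5", quant_scaler="us_ic")  # hypothetical path
cu = scan["maps"]["Cu"]  # hypothetical channel; y-by-x quantified map
plt.imshow(cu, extent=scan["extent"], origin="lower")
plt.colorbar(label="Cu loading (quantified, a.u.)")
plt.show()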
Example No. 50
def load_fan_data(return_crlb=False, data_name='supersampled'):
    if data_name == 'fan':
        file_name = 'runs_2017_01_07_lineardet'
    elif data_name == 'supersampled':
        file_name = '10_Jan_2017_17_51_44_simulation_forbild_head'
    elif data_name == 'fan_circ':
        file_name = 'simulated_images_2017_01_06'
    else:
        raise ValueError('unknown data_name: {}'.format(data_name))

    current_path = os.path.dirname(os.path.realpath(__file__))
    data_path = os.path.join(current_path, 'data', file_name, 'head_image.mat')

    try:
        data_mat = sio.loadmat(data_path)
    except IOError:
        raise IOError('{} missing; contact the developers for a copy of the '
                      'data or use another data source.'.format(data_path))

    # print(sorted(data_mat.keys()))
    data = data_mat['decomposedbasisProjectionsmm']
    data = data.swapaxes(0, 2)

    if data_name == 'fan':
        det_size = 853

        angle_partition = odl.uniform_partition(0.5 * np.pi,
                                                2.5 * np.pi,
                                                360,
                                                nodes_on_bdry=[[True, False]])
        detector_partition = odl.uniform_partition(-det_size / 2.0,
                                                   det_size / 2.0, 853)

        geometry = odl.tomo.FanFlatGeometry(angle_partition,
                                            detector_partition,
                                            src_radius=500,
                                            det_radius=500)

        data[:] = data[:, ::-1]

        if not return_crlb:
            return data, geometry
        else:
            crlb = data_mat['CRLB']
            crlb = crlb.swapaxes(0, 1)
            crlb[:] = crlb[::-1]
            crlb = swap_axes(crlb)

            # Negative correlation
            #crlb[0, 1] *= -1
            #crlb[1, 0] *= -1

            return data, geometry, crlb
    elif data_name == 'supersampled':
        det_size = 853

        angle_partition = odl.uniform_partition(0 * np.pi, 2 * np.pi, 360)
        detector_partition = odl.uniform_partition(-det_size / 2.0,
                                                   det_size / 2.0, 853)

        geometry = odl.tomo.FanFlatGeometry(angle_partition,
                                            detector_partition,
                                            src_radius=500,
                                            det_radius=500)

        #data[:] = data[:, ::-1]

        if not return_crlb:
            return data, geometry
        else:
            crlb = data_mat['CRLB']
            crlb = crlb.swapaxes(0, 1)
            #crlb[:] = crlb[::-1]
            crlb = swap_axes(crlb)

            # Negative correlation
            #crlb[0, 1] *= -1
            #crlb[1, 0] *= -1

            return data, geometry, crlb
    elif data_name == 'fan_circ':
        # Create approximate fan flat geometry.
        det_size = 883 * (500 + 500)

        angle_partition = odl.uniform_partition(0.5 * np.pi,
                                                2.5 * np.pi,
                                                360,
                                                nodes_on_bdry=[[True, False]])
        detector_partition = odl.uniform_partition(-det_size / 2.0,
                                                   det_size / 2.0, 883)

        geometry = odl.tomo.FanFlatGeometry(angle_partition,
                                            detector_partition,
                                            src_radius=500,
                                            det_radius=500)

        # Convert to true fan flat geometry
        data[0][:] = fan_to_fan_flat(geometry, data[0])
        data[1][:] = fan_to_fan_flat(geometry, data[1])

        if not return_crlb:
            return data, geometry
        else:
            crlb = data_mat['CRLB']
            crlb = crlb.swapaxes(0, 1)
            crlb = np.moveaxis(crlb, [-2, -1], [0, 1])

            crlb[0, 0][:] = fan_to_fan_flat(geometry, crlb[0, 0])
            crlb[0, 1][:] = fan_to_fan_flat(geometry, crlb[0, 1])
            crlb[1, 0][:] = fan_to_fan_flat(geometry, crlb[1, 0])
            crlb[1, 1][:] = fan_to_fan_flat(geometry, crlb[1, 1])

            # Negative correlation
            # crlb[0, 1] *= -1
            # crlb[1, 0] *= -1

            return data, geometry, crlb
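For reference, the np.moveaxis(crlb, [-2, -1], [0, 1]) call above moves the trailing 2x2 covariance axes to the front so each component can be passed to fan_to_fan_flat as a full sinogram. A shape-only sketch using the 360 x 883 geometry defined above:

import numpy as np

crlb = np.zeros((360, 883, 2, 2))  # angles x detector pixels x 2x2 covariance
crlb = np.moveaxis(crlb, [-2, -1], [0, 1])
print(crlb.shape)  # (2, 2, 360, 883): crlb[i, j] is now one full sinogram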
Example No. 51
def find_block(a,
               axis=0,
               ignore_masked=True,
               as_slice=False,
               block_value=None):
    """Find blocks of contiguous values along a given dimension of a.

    This function will identify the index ranges for which there are contiguous values in a and the values themselves.
    For example, for::

        a = np.array([0, 1, 1, 1, 2, 3, 3, 4, 5])

    then ``find_block(a)`` would return ``[(1, 4), (5, 7)]`` and ``[1, 3]``. Note that the index ranges follow the
    Python convention that the last index is exclusive, so that ``a[1:4]`` would give ``[1, 1, 1]``.

    If ``a`` is a 2D or higher array, then the whole slice ``a[i+1, :]`` must equal ``a[i, :]`` to be considered
    contiguous. For::

        a = [[1, 2, 3],
             [1, 2, 3],
             [1, 2, 4]]

    the first two rows only would be considered a contiguous block because all their values are identical, while the
    third row is not. Finally, if ``a`` is a masked array, then by default the masked *values* are ignored but the masks
    themselves must be the same. With the same 2D ``a`` as above, if all the values in the last column were masked:

        a = [[1, 2, --],
             [1, 2, --],
             [1, 2, --]]

    then all three rows would be considered a block, but if only the bottom right element was masked:

        a = [[1, 2,  3],
             [1, 2,  3],
             [1, 2, --]]

    then the third row would *not* be part of the block, because even though its unmasked values are identical to the
    corresponding values in the previous row, the mask is not.

    Parameters
    ----------
    a : array-like
        the array or array-like object to find contiguous values in. Can be given as anything that `numpy.array`
        can turn into a proper array.

    axis : int
        which axis of ``a`` to operate along.

    ignore_masked : bool
        setting this to ``False`` modifies the behavior described above regarding masked arrays. When
        this is ``False``, then the underlying values are considered for equality. In the example where the entire last
        column of ``a`` was masked, setting ``ignore_masked = False`` would cause the third row to be excluded from the
        block again, because the values under the mask are compared.

    as_slice : bool
        set to ``True`` to return the indices as slice instances instead; these can be used directly on
        ``a`` to retrieve the contiguous blocks.

    block_value : array-like
        if given, this will only look for blocks matching this value. In the first example where ``a``
        was a vector, setting this to ``1`` would only return the indices (1, 4) since only the first block has values of
        1. If ``a`` is multidimensional, keep in mind that the explicit check is that ``np.all(a[i,:] == block_value)``,
        so this may be a scalar or an array, as long as each slice passes that test. If this is a masked array, then the
        mask of each slice of ``a`` must also match its mask.

    Returns
    -------
    list[tuple[int]]
        A list of block start/end indices as two-element tuples (or slices, if ``as_slice = True``)

    list[numpy.ndarray]
        a list of the values of each block.

    Notes
    -----
    Even if you pass ``a`` in as something other than a numpy array (e.g. a list of lists) the second return value will
    be numpy arrays. This is due to how ``a`` is handled internally.

    If ``block_value`` is a masked array and ``a`` is not, then most likely nothing will match. However, you should
    not rely on this behavior, since there may be some corner cases where this is not true, particularly if
    ``block_value`` and ``a`` are boolean arrays.
    """
    if not isinstance(a, np.ndarray):
        a = np.array(a)

    block_mask = None
    if block_value is not None:
        if not isinstance(block_value, np.ndarray):
            block_value = np.array(block_value)
        elif isinstance(block_value, np.ma.masked_array):
            # and not isinstance(block_value.mask, np.bool_): # TODO: figure out why
            # the extra isinstance check was needed here and for the rows
            block_mask = block_value.mask
            if ignore_masked:
                # If ignoring masked values, we cut down the value the block is supposed to equal just like we cut
                # down each row
                block_value = block_value[~block_mask]

    a = np.moveaxis(a, axis, 0)

    last_row = None
    last_mask = None
    blocks = []
    values = []
    block_start = None

    for idx, row in enumerate(a):
        if isinstance(row, np.ma.masked_array):  # and not isinstance(row.mask, np.bool_):
            mask = row.mask
            if ignore_masked:
                # Ignoring masked values: masked values do not contribute one way or another to determining if two rows
                # are equal, *however*, rows must have the *same* mask, so we store the mask but remove masked values.
                # If the whole row was masked, then the last row will have to have been the same. That's why we extract
                # the mask before cutting row down.
                row = row[~mask]
            # If we're not ignoring masked values, then don't cut down row: np.array_equal will test the underlying data
            # even under masked values

        else:
            # Not a masked array? Just make the mask equal to the row so they are both equal or not together - this
            # removes the need for logic to determine whether or not to test the masks
            mask = row

        if last_row is None:
            # On the first row. Can't do anything yet.
            pass
        elif block_start is not None and not (np.array_equal(row, last_row) and
                                              np.array_equal(mask, last_mask)):
            # End of block. Add to the list of blocks. The current index is not in the block, but since Python's
            # convention is that the ending index is exclusive, we make the end of the block the first index not
            # included in it.
            blocks.append((block_start, idx))
            block_start = None
        elif block_start is None and np.array_equal(
                row, last_row) and np.array_equal(mask, last_mask):
            # If the current row equals the last row, then we're in a block.
            if block_value is None:
                # If no value to check against the block was given, then set the start of the block to the last index
                # (where the block actually started).
                block_start = idx - 1
                values.append(row)
            elif np.all(row.shape == block_value.shape) and np.all(
                    row == block_value):
                # If a value to check against the block was given, then only start the block if the current row
                # matches that value. If that value was a masked array, then the masks must be equal as well.
                if block_mask is None or np.all(
                        block_mask == mask
                ):  # if the row is the same shape, then the mask will be too
                    block_start = idx - 1
                    values.append(row)

        last_row = row
        last_mask = mask

    if block_start is not None:
        # If block_start isn't None after we're done with the for loop, then there's a block that goes to the end of
        # the array, so add that.
        blocks.append((block_start, a.shape[0]))

    if as_slice:
        blocks = [slice(*b) for b in blocks]

    return blocks, values
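A quick usage sketch mirroring the docstring's first example:

import numpy as np

a = np.array([0, 1, 1, 1, 2, 3, 3, 4, 5])
blocks, values = find_block(a)
print(blocks)        # [(1, 4), (5, 7)]
print(values)        # the repeated values, 1 and 3
print(find_block(a, block_value=1)[0])   # [(1, 4)]: only the block of 1s
slices, _ = find_block(a, as_slice=True)
print(a[slices[0]])  # [1 1 1]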
Example No. 52
    field_data,
    analytic_data,
    chop_size=chop_size,
    chunk_size=chunk_size,
    network_sequence_size=network_time_sequence)

#field_data = GIGANTIC_DATA_PROCESSOR.rescale_arr(field_data, 1, 255)
#test = field_data.astype(np.uint8)
#imsave('test2.tif', test[0:500, : , :, 3 ])

mean_version = False
if mean_version:
    analytic_data = GIGANTIC_DATA_PROCESSOR.analysis_mean_calculation(
        analytic_data)

field_data = np.moveaxis(field_data, 3, 1)
#analytic_data = np.moveaxis(analytic_data, 3, 1)
field_data = np.expand_dims(field_data, axis=4)
analytic_data = np.expand_dims(analytic_data, axis=1)
#field_data = np.expand_dims(field_data, axis=1) # [ amount, 1, :, : , 25 ]
#analytic_data = np.expand_dims(analytic_data, axis=1)

freq_set = 3
input_data = field_data[freq_set, :, :, :]
input_data = np.expand_dims(input_data, axis=0)

from keras.models import load_model

model = load_model('test.hdf5')
output = model.predict(input_data)
print(output.shape)
Example No. 53
import numpy as np
from numpy.random import rand

import argparse

parser = argparse.ArgumentParser(description='Run experiment on data')
parser.add_argument('-n', type=int, default=10000)
args = parser.parse_args()
l = 20

randstart = rand(args.n) * 0.5
randterm = rand(args.n) * (1.03 - 0.77) + 0.77
v1 = np.arange(l).reshape(1, -1).repeat(args.n * 2, axis=0).reshape(args.n, 2, l)
v1 = v1 * randterm.reshape(-1, 1, 1) + randstart.reshape(-1, 1, 1)
v1[:, 0, :] = np.sin(v1[:, 0, :])
v1[:, 1, :] = np.cos(v1[:, 1, :])
v1 = np.moveaxis(v1, 1, 2)

np.savetxt("generate_iterative_multijump.dat", v1.reshape(args.n, -1), fmt="%.3f")
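The moveaxis call above swaps the channel and time axes so the final reshape interleaves the sin/cos pair at each time step. A shape-only check:

import numpy as np

v = np.zeros((5, 2, 20))       # (samples, channels, time)
v = np.moveaxis(v, 1, 2)       # -> (samples, time, channels)
print(v.reshape(5, -1).shape)  # (5, 40): one sin/cos pair per time step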
Example No. 54
def tensorboard_image(name, image, iteration, writer):
    out_im = np.moveaxis(image.data.cpu().numpy(), 0, 2)  # CHW -> HWC
    writer.add_image(name, out_im, iteration)
Example No. 55
    def push_datas(self, datas, axis=0):
        if axis != 0:
            # Move the chosen axis to the front so we can iterate over it
            datas = np.moveaxis(datas, axis, 0)
        assert datas.shape[1:] == self._accum.shape_data
        for data in datas:
            self._values.append(data)
Example No. 56
def test_moveaxis(source, destination):
    x = sparse.random((2, 3, 4, 5), density=0.25)
    y = x.todense()
    xx = sparse.moveaxis(x, source, destination)
    yy = np.moveaxis(y, source, destination)
    assert_eq(xx, yy)
Example No. 57
    def _prepare_grating(self):
        '''Create temporospatial grating
        '''

        spatial_frequency = self.options["spatial_frequency"]
        temporal_frequency = self.options["temporal_frequency"]
        fps = self.options["fps"]
        duration_seconds = self.options["duration_seconds"]
        orientation = self.options["orientation"]

        if not spatial_frequency:
            print('Spatial_frequency missing, setting to 1')
            spatial_frequency = 1
        if not temporal_frequency:
            print('Temporal_frequency missing, setting to 1')
            temporal_frequency = 1

        # Create sine wave
        one_cycle = 2 * np.pi
        cycles_per_degree = spatial_frequency
        #image_width_in_degrees = self.options["image_width_in_deg"]
        image_width = self.options["image_width"]
        image_height = self.options["image_height"]
        image_width_in_degrees = image_width / self.options["pix_per_deg"]

        # Calculate larger image size to allow rotations
        diameter = np.ceil(np.sqrt(image_height**2 + image_width**2)).astype(int)
        image_width_diameter = diameter
        image_height_diameter = diameter
        image_width_diameter_in_degrees = (
            image_width_diameter / self.options["pix_per_deg"])

        # Draw temporospatial grating
        # NB! one_cycle * cycles_per_degree needs to be multiplied with the scaled width to have
        # the desired number of cpd in output image
        image_position_vector = np.linspace(
            0, one_cycle * cycles_per_degree * image_width_diameter_in_degrees,
            image_width_diameter)
        n_frames = self.frames.shape[2]

        # Recycling large_frames and self.frames below, instead of descriptive variable names for the evolving video, saves a lot of memory
        # Create large 3D frames array covering the most distant corner when rotated
        large_frames = np.tile(image_position_vector,
                               (image_height_diameter, n_frames, 1))
        # Correct dimensions to image[0,1] and time[2]
        large_frames = np.moveaxis(large_frames, 2, 1)
        total_temporal_shift = temporal_frequency * one_cycle * duration_seconds
        one_frame_temporal_shift = (temporal_frequency * one_cycle) / fps
        temporal_shift_vector = np.arange(0, total_temporal_shift,
                                          one_frame_temporal_shift)
        # Shift grating phase in time. Broadcasting temporal vector automatically to correct dimension.
        large_frames = large_frames + temporal_shift_vector

        # Rotate to desired orientation
        large_frames = ndimage.rotate(large_frames, orientation, reshape=False)

        # Cut back to original image dimensions
        marginal_height = (diameter - image_height) / 2
        marginal_width = (diameter - image_width) / 2
        marginal_height = np.round(marginal_height).astype(int)
        marginal_width = np.round(marginal_width).astype(int)
        self.frames = large_frames[marginal_height:-marginal_height,
                                   marginal_width:-marginal_width, :]
        # remove rounding error
        self.frames = self.frames[0:image_height, 0:image_width, :]
Example No. 58
    def train(self, epochs, batch_size=4):
        # Load the LFW dataset
        print("Loading the dataset: this step can take a few minutes.")
        # Complete LFW dataset
        # lfw_people = fetch_lfw_people(color=True, resize=1.0, \
        #                               slice_=(slice(0, 250), slice(0, 250)))

        # Smaller dataset used for implementation evaluation
        lfw_people = fetch_lfw_people(color=True, resize=1.0, \
                                      slice_=(slice(0, 250), slice(0, 250)), \
                                      min_faces_per_person=3)

        images_rgb = lfw_people.images
        images_rgb = np.moveaxis(images_rgb, -1, 1)  # NHWC -> NCHW (channels-first)

        # Zero pad them to get 256 x 256 inputs
        images_rgb = np.pad(images_rgb, ((0,0), (0,0), (3,3), (3,3)), 'constant')
        self.images_lfw = images_rgb

        # Convert images from RGB to YCbCr and from RGB to grayscale
        images_ycc = np.zeros(images_rgb.shape)
        secret_gray = np.zeros((images_rgb.shape[0], 1, images_rgb.shape[2], images_rgb.shape[3]))
        for k in range(images_rgb.shape[0]):
            images_ycc[k, :, :, :] = rgb2ycc(images_rgb[k, :, :, :])
            secret_gray[k, 0, :, :] = rgb2gray(images_rgb[k, :, :, :])

        # Rescale to [-1, 1]
        X_train_ycc = (images_ycc.astype(np.float32) - 127.5) / 127.5
        X_train_gray = (secret_gray.astype(np.float32) - 127.5) / 127.5

        # Adversarial ground truths
        original = np.ones((batch_size, 1))
        encrypted = np.zeros((batch_size, 1))

        for epoch in range(epochs):
            # Select a random batch of cover images
            idx = np.random.randint(0, X_train_ycc.shape[0], batch_size)
            imgs_cover = X_train_ycc[idx]

            # Idem for secret images
            idx = np.random.randint(0, X_train_ycc.shape[0], batch_size)
            imgs_gray = X_train_gray[idx]

            # Predict the generator output for these images
            imgs_stego, _ = self.base_model.predict([imgs_cover, imgs_gray])
            # imgs_stego, _, _ = self.adversarial.predict([imgs_cover, imgs_gray])

            # Train the discriminator
            d_loss_real = self.discriminator.train_on_batch(imgs_cover, original)
            d_loss_encrypted = self.discriminator.train_on_batch(imgs_stego, encrypted)
            d_loss = 0.5 * np.add(d_loss_real, d_loss_encrypted)

            # Train the generator
            g_loss = self.adversarial.train_on_batch([imgs_cover, imgs_gray], [imgs_cover, imgs_gray, original])

            # Print the progress
            print("{} [D loss: {}] [G loss: {}]".format(epoch, d_loss, g_loss[0]))

            self.adversarial.save('adversarial.h5')
            self.discriminator.save('discriminator.h5')
            self.base_model.save('base_model.h5')
Example No. 59
C3 = np.exp(-(X - 0.1)**2 - (Y + 1)**2) - 0.8 * np.exp(-(X + 1)**2 - (Y + 1)**2)
C3 = C3 - C3.min()

test_compositions = [C1, C2, C3]

if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from matplotlib import cm

    C1 = (C1 - C1.min()) / (C1.max() - C1.min())
    C2 = (C2 - C2.min()) / (C2.max() - C2.min())
    C3 = (C3 - C3.min()) / (C3.max() - C3.min())

    RGB_style = np.array([C1, C2, C3])         # (3, H, W) channel stack
    RGB_style = np.moveaxis(RGB_style, 0, -1)  # -> (H, W, 3) RGB image

    fig, axes = plt.subplots(ncols=4)
    axes[0].set_title('Red')
    axes[0].imshow(C1,
                   vmax=1.0,
                   vmin=0,
                   cmap=cm.gray,
                   origin='lower',
                   extent=[-imsize, imsize, -imsize, imsize])

    axes[1].set_title('Green')
    axes[1].imshow(C2,
                   vmax=1.0,
                   vmin=0,
                   cmap=cm.gray,
                   origin='lower',
                   extent=[-imsize, imsize, -imsize, imsize])
Example No. 60
        # Get meta to pass to input data
        meta = src.meta.copy()
        meta['transform'] = src.transform

        # Reset meta for our outputs
        meta['count'] = 3
        meta['dtype'] = 'uint8'

        # Build API call and request our image
        url = f'https://api.mapbox.com/styles/v1/{STYLE_USER}/{STYLE_ID}/static/{im_bounds}/{src.width}x{src.height}?access_token={MAPBOX_API_KEY}&attribution=false&logo=false'
        r = requests.get(url)

        # Read in our data and convert it to RGB
        map_im = Image.open(BytesIO(r.content))
        map_conv = map_im.convert(mode='RGB')

        # Get our image as an np array and move bands first (HWC -> CHW) for rio
        a = np.asarray(map_conv)
        move = np.moveaxis(a, [0, 1, 2], [1, 2, 0])

        # Add geospatial data and write
        with rasterio.open(new_file, 'w', **meta) as outds:
            outds.write(move)

        # Sleep to prevent getting rate limited (1,250/hr)
        sleep(.05)

elapsed = timeit.default_timer() - t0
print(f'Run complete in {elapsed / 60:.3f} min')
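As an aside on the band reordering above, np.moveaxis(a, [0, 1, 2], [1, 2, 0]) is the same permutation as the simpler np.moveaxis(a, -1, 0). A shape-only check:

import numpy as np

a = np.zeros((512, 512, 3))  # height x width x channels (HWC)
print(np.moveaxis(a, [0, 1, 2], [1, 2, 0]).shape)  # (3, 512, 512): bands first
print(np.moveaxis(a, -1, 0).shape)                 # (3, 512, 512): same result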