Example #1
    def test_extremes(self):
        # Test extremes of alpha correspond to boxcar and hann
        tuk0 = windows.tukey(100, 0)
        box0 = windows.boxcar(100)
        assert_array_almost_equal(tuk0, box0)

        tuk1 = windows.tukey(100, 1)
        han1 = windows.hann(100)
        assert_array_almost_equal(tuk1, han1)
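The test above relies on the Tukey window reducing to a boxcar at alpha=0 and to a Hann window at alpha=1. A small standalone sketch (not part of the SciPy test suite) showing how the flat central region shrinks as alpha grows:

import numpy as np
from scipy.signal import windows

for alpha in (0.0, 0.5, 1.0):
    win = windows.tukey(51, alpha)
    flat = np.sum(np.isclose(win, 1.0))
    print(f"alpha={alpha}: {flat} of {win.size} samples sit on the flat top")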
Example #3
def test_Vx_1D(opt='R'):
    ''' opt :
        Q(plot q,Vq) w(show window function),
        R(plot r,Vr) i(show imaginary)'''
    npts, Npad = 1000, 10**4
    elts, qmax = ['C'], 6
    q, fq = get_elec_atomic_factors(elts, qmax=qmax, npts=npts)
    fq = fq[0]
    w = tukey(2 * npts, alpha=0.5, sym=False)[npts:]
    fqw = w * fq

    #qpad,fqpad = q,fq
    qpad, fqpad = np.linspace(0, int(qmax * Npad / npts), Npad), np.zeros(
        (Npad))
    fqpad[:npts] = fqw
    qpad, fqpad = np.concatenate((-np.flip(qpad), qpad)), np.concatenate(
        (np.flip(fqpad), fqpad))
    if 'Q' in opt:
        plts = [[qpad, fqpad, 'b', '$fq_{pad}$', 2]]
        if 'w' in opt:
            plts += [[q, fq, 'b', '$fq$', 2], [q, fqw, 'r', '$fq_w$', 2],
                     [q, w, 'r--', '$w$', 1]]
        stddisp(plts, labs=['$q(A^{-1})$', '$Fq(A)$'], opt='p')

    ##perform 1D fft
    if 'R' in opt:
        r, Vr = fu.get_iFFT(qpad, fqpad)
        plts = [[r, np.abs(Vr), 'g', '$V$']]
        if 'i' in opt:
            plts += [[r, Vr.real, 'b', '$Re$'], [r, Vr.imag, 'r', '$Im$']]
        stddisp(plts, labs=['r(A)', '$V$'], opt='p', lw=2)
Example #4
def fade_on(timeseries, alpha=0.25):
    """
    Take a PyCBC time series and use a one-sided Tukey window to "fade
    on" the waveform (to reduce discontinuities in the amplitude).

    Args:
        timeseries (pycbc.types.timeseries.TimeSeries): The PyCBC
            TimeSeries object to be faded on.
        alpha (float): The alpha parameter for the Tukey window.

    Returns:
        The `timeseries` which has been faded on.
    """

    # Save the parameters from the time series we are about to fade on
    delta_t = timeseries.delta_t
    epoch = timeseries.start_time
    duration = timeseries.duration
    sample_rate = timeseries.sample_rate

    # Create a one-sided Tukey window for the turn on
    window = tukey(M=int(duration * sample_rate), alpha=alpha)
    window[int(0.5 * len(window)):] = 1

    # Apply the one-sided Tukey window for the fade-on
    ts = window * np.array(timeseries)

    # Create and return a TimeSeries object again from the resulting array
    # using the original parameters (delta_t and epoch) of the time series
    return TimeSeries(initial_array=ts, delta_t=delta_t, epoch=epoch)
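For reference, a dependency-free sketch (hypothetical duration and sample rate, no pycbc) of the same one-sided window: the first half of a Tukey window provides the fade-on, and the second half is overwritten with ones so the end of the signal is left untouched.

import numpy as np
from scipy.signal.windows import tukey

duration, sample_rate, alpha = 4.0, 2048, 0.25     # hypothetical values
window = tukey(M=int(duration * sample_rate), alpha=alpha)
window[int(0.5 * len(window)):] = 1
print(window[0], window[-1])                       # 0.0 at the start, 1.0 at the end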
Example #5
    def time_domain_window(self, roll_off=None, alpha=None):
        """
        Window function to apply to time domain data before FFTing.

        This defines self.window_factor as the power loss due to the windowing.
        See https://dcc.ligo.org/DocDB/0027/T040089/000/T040089-00.pdf

        Parameters
        ==========
        roll_off: float
            Rise time of window in seconds
        alpha: float
            Parameter to pass to the Tukey window; the fraction of the
            segment that falls into the tapered (windowed) part

        Returns
        =======
        window: array
            Window function over time array
        """
        from scipy.signal.windows import tukey
        if roll_off is not None:
            self.roll_off = roll_off
        elif alpha is not None:
            self.roll_off = alpha * self.duration / 2
        window = tukey(len(self._time_domain_strain), alpha=self.alpha)
        self.window_factor = np.mean(window**2)
        return window
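A minimal standalone sketch (made-up numbers, outside the class) of the relation used above, roll_off = alpha * duration / 2, i.e. alpha = 2 * roll_off / duration:

import numpy as np
from scipy.signal.windows import tukey

duration, sampling_frequency, roll_off = 4.0, 2048, 0.2   # s, Hz, s
alpha = 2 * roll_off / duration
window = tukey(int(duration * sampling_frequency), alpha=alpha)
window_factor = np.mean(window**2)    # power loss due to the windowing
print(alpha, window_factor)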
Example #6
def get_solved_episode(input_: np.ndarray,
                       N: int, V: float, geometry: BaseGeometry, t_list: np.ndarray,
                       ghz_state: BaseGHZState,
                       interpolation_timesteps: int = 3000) -> EvolvingQubitSystem:
    timesteps = len(t_list) - 1

    Omega_params = input_[:timesteps]
    Delta_params = input_[timesteps:]

    _t_list = np.linspace(0, t_list[-1], interpolation_timesteps + 1)
    interp = partial(interp1d,
                     # kind="cubic",
                     kind="quadratic",
                     # kind="linear",
                     # kind="previous",
                     )

    Omega_func: Callable[[float], float] = interp(t_list, np.hstack((Omega_params, Omega_params[-1])))
    Omega_shape_window = tukey(interpolation_timesteps + 1, alpha=0.2)
    Omega = np.array([Omega_func(_t) * Omega_shape_window[_i] for _i, _t in enumerate(_t_list[:-1])])

    Delta_func: Callable[[float], float] = interp(t_list, np.hstack((Delta_params, Delta_params[-1])))
    Delta = np.array([Delta_func(_t) for _t in _t_list[:-1]])

    e_qs = EvolvingQubitSystem(
        N, V, geometry,
        Omega, Delta,
        _t_list,
        ghz_state=ghz_state
    )
    start_time = time.time()
    e_qs.solve()
    print(f"Solved in {time.time() - start_time:.3f}s")
    return e_qs
Example #7
 def test_basic(self):
     # Test against hardcoded data
     for k, v in tukey_data.items():
         if v is None:
             assert_raises(ValueError, windows.tukey, *k)
         else:
             win = windows.tukey(*k)
             assert_allclose(win, v, rtol=1e-14)
Example #9
def traveltime_adjoint_source(tr,
                              time_window=None,
                              reverse=True,
                              save=False,
                              zeros=False):
    """
    Define a traveltime adjoint source, used to generate 'banana-doughnut'
    kernels. Traveltime adjoint sources are not data dependent; rather, they
    are sensitivity kernels that illuminate the finite-frequency ray path
    of the waveforms.

    Equation and variable naming are based on Tromp et al. (2005) Eq. 45.
    Implementation is based on Pyadjoint's cc_traveltime adjoint source.
    Tapering is done with a Tukey window.

    :type tr: obspy.core.trace.Trace
    :param tr: Synthetic data to be converted to a traveltime adjoint source
    :type time_window: list of float
    :param time_window: [t_start, t_end] window to cut phases from waveform
    :rtype: np.array
    :return: a numpy array that defines the adjoint source
    """
    s = tr.data
    deltat = tr.stats.delta
    offset = float(tr.stats.starttime)
    times = tr.times() + offset

    # Generate the adjoint source 'fp'
    dsdt = np.gradient(s, deltat)
    nnorm = simps(y=dsdt * dsdt, dx=deltat)
    fp = 1 / nnorm * dsdt[:]

    if zeros:
        # Only write zeros for empty adjoint sources
        fp = np.zeros(tr.stats.npts)
    else:
        # Window the adjoint source based on given start and end times
        if time_window is not None:
            t_start, t_end = time_window
            overlay = np.zeros(tr.stats.npts)
            samp_start = int((t_start - offset) / deltat)
            samp_end = int((t_end - offset) / deltat)

            window = tukey(samp_end - samp_start, alpha=0.5)
            overlay[samp_start:samp_end] = window

            fp = np.multiply(overlay, fp)

        # Adjoint sources need to be time reversed
        if reverse:
            fp = fp[::-1]

    data = np.vstack((times, fp)).T

    if save:
        np.savetxt(save, data, "%14.7f %20.8E")

    return data
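The windowing step above can be illustrated in isolation; the following sketch (arbitrary trace length and window times) drops a Tukey taper into a zero array between the requested start and end samples before multiplying the adjoint source.

import numpy as np
from scipy.signal.windows import tukey

npts, deltat, offset = 1000, 0.01, 0.0     # hypothetical trace parameters
t_start, t_end = 2.0, 6.0                  # hypothetical window, in seconds
overlay = np.zeros(npts)
samp_start = int((t_start - offset) / deltat)
samp_end = int((t_end - offset) / deltat)
overlay[samp_start:samp_end] = tukey(samp_end - samp_start, alpha=0.5)
fp = np.multiply(overlay, np.random.randn(npts))   # stand-in adjoint source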
Example #10
def fft(t, a, n=None):
    freq = []
    amp = []
    ac = np.copy(a)
    if len(t.shape) == 1:  # only a single time vector
        dt = np.mean(np.diff(t))
        if len(ac.shape) == 1:
            if n is None:
                n = len(ac)
            window = windows.tukey(len(ac), 0.05, sym=False)
            ac *= window
            amp.append(np.fft.fft(ac, n))
            freq.append(np.fft.fftfreq(n) / dt)
        else:
            if n is None:
                n = len(a[0])
            i = 0
            window = windows.tukey(
                len(ac[0]), 0.05, sym=False
            )  # with a single time vector, all amplitude vectors should have the same length
            for temp in a:
                ac[i] *= window
                amp.append(np.fft.fft(ac[i], n))
                i += 1
            freq.append(np.fft.fftfreq(n) / dt)  # only one time vector
    else:  # more time vectors
        if len(ac.shape) == len(
                t.shape
        ):  # then the number of time vectors should equal amplitude vectors
            i = 0
            for temp1, temp2 in zip(t, ac):
                if n is None:
                    n = len(ac[i])
                dt = np.mean(np.diff(temp1))
                window = windows.tukey(len(temp2), 0.05, sym=False)
                temp2 *= window
                amp.append(np.fft.fft(temp2, n))
                freq.append(np.fft.fftfreq(n) / dt)
                i += 1
        else:
            print(
                'Warning: number of time vectors does not match the number of amplitude vectors'
            )
    return np.asarray(freq), np.asarray(amp)
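A hedged usage sketch of the fft helper above with a synthetic two-tone signal (values are illustrative; the helper definition above, with its scipy.signal windows import, is assumed to be in scope):

import numpy as np

t = np.linspace(0, 1, 1000, endpoint=False)
a = np.sin(2 * np.pi * 50 * t) + 0.5 * np.sin(2 * np.pi * 120 * t)
freq, amp = fft(t, a)
dominant = np.abs(freq[0][np.argmax(np.abs(amp[0][: len(a) // 2]))])
print(f"dominant frequency ~ {dominant:.1f} Hz")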
Example #11
    def cosine_taper(self, width):
        """Apply cosine taper to time series.

        Parameters
        ----------
        width : {0.-1.}
            Amount of the time series to be tapered.
            `0` is equal to a rectangular and `1` a Hann window.

        Returns
        -------
        None
            Applies cosine taper to attribute `amp`.

        """
        self._amp = self._amp * tukey(self.nsamples, alpha=width)
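A small standalone check (synthetic amplitude array) of the width parameter described in the docstring: width=0 leaves the series untouched (rectangular window), while width=1 applies a full Hann taper.

import numpy as np
from scipy.signal.windows import tukey

amp = np.ones(100)
for width in (0.0, 0.5, 1.0):
    tapered = amp * tukey(100, alpha=width)
    print(width, tapered[0], tapered[50])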
Example #12
    def process(self, data):
        data = deepcopy(data)

        winData = data.data
        if self.windowType == FourierTransform.WindowTypes.Hann:
            winData *= windows.hann(len(winData), sym=False)
        elif self.windowType == FourierTransform.WindowTypes.Blackman:
            winData *= windows.blackman(len(winData), sym=False)
        elif self.windowType == FourierTransform.WindowTypes.Flattop:
            winData *= windows.flattop(len(winData), sym=False)
        elif self.windowType == FourierTransform.WindowTypes.Tukey:
            winData *= windows.tukey(len(winData), sym=False, alpha=self.alpha)

        data.data = np.fft.rfft(winData, axis=0, norm='ortho')
        data.axes[0] = np.fft.rfftfreq(len(data.axes[0]),
                                       np.mean(np.diff(data.axes[0])))
        return data
Example #13
def test_Vx_2D(KE=100, tmax=0.2, npts=100, Npad=1000, opt='QRwp'):
    '''
KE          : incident energy (keV)
tmax        : maximum scattering angle (rad)
npts,Npad   : number of points for the normal and padded regions
opt         : Q(plot q,Vq) R(plot r,Vr) w(apply window) p(apply padding)
    '''
    lam = wavelength(KE)
    ikxy = np.linspace(-1, 1, npts)
    qmax = tmax / lam
    qx, qy = np.array(np.meshgrid(ikxy, ikxy)) * qmax
    q = np.sqrt(qx**2 + qy**2)
    fq = get_elec_atomic_factors(['C'], q)[1][0]
    fq[q > qmax] = 0

    if 'w' in opt:
        wf = interp1d(ikxy * qmax, tukey(npts, alpha=0.5))
        w = np.zeros(q.shape)
        w[q <= qmax] = wf(q[q <= qmax])
        fqw = fq * w
        fq = fqw
    if 'p' in opt:
        qM = qmax * Npad / npts
        iqxy = np.linspace(-qM, qM, Npad)
        (qxpad, qypad), fqpad = np.meshgrid(iqxy, iqxy), np.zeros((Npad, Npad))
        qpad = np.sqrt(qxpad**2 + qypad**2)
        fqpad[(np.abs(qxpad) <= qmax)
              & (np.abs(qypad) <= qmax)] = fq[(qx <= qmax) & (qy <= qmax)]
        qx, qy, fq, npts = qxpad, qypad, fqpad, Npad

    if 'Q' in opt:
        stddisp(im=[qx, qy, fq], labs=['$q_x$', '$q_y$'], imOpt='c', pOpt='t')

    if 'R' in opt:
        Vr = np.fft.fftshift(np.fft.ifft2(fq))
        dq = qx[0, 1] - qx[0, 0]
        #print(dq)
        r = np.fft.fftshift(np.fft.fftfreq(npts, dq))
        x, y = np.meshgrid(r, r)
        im = [x, y, np.abs(Vr)]
        print('im_max/re_max=%.2f' % (Vr.imag.max() / Vr.real.max()))
        stddisp(im=im, labs=['$x$', '$y$'], imOpt='c', pOpt='t')

    print('tmax=%.2f deg\nqmax=%.2f A^-1\nmax_res=%.2f A' %
          (tmax * 180 / pi, qmax, lam / tmax))
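The 'w' branch above builds a circularly symmetric taper by interpolating a 1D Tukey profile onto the 2D radius; a standalone sketch of just that step (hypothetical grid size and qmax):

import numpy as np
from scipy.interpolate import interp1d
from scipy.signal.windows import tukey

npts, qmax = 100, 6.0
ikxy = np.linspace(-1, 1, npts)
qx, qy = np.meshgrid(ikxy * qmax, ikxy * qmax)
q = np.sqrt(qx**2 + qy**2)
wf = interp1d(ikxy * qmax, tukey(npts, alpha=0.5))
w = np.zeros(q.shape)
w[q <= qmax] = wf(q[q <= qmax])     # radial Tukey taper, zero outside qmax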
Example #14
def _getfft(xy, photons, imgshape, zoom, ctx: Context):
    spots = np.zeros((len(xy), 5))
    spots[:, [0, 1]] = xy * zoom
    spots[:, 4] = photons
    spots[:, [2, 3]] = 0.5

    w = np.max(imgshape) * zoom
    img = np.zeros((w, w))
    img = GaussianPSFMethods(ctx).Draw(img, spots)
    img = np.array(img, dtype=np.float32)

    # alpha = 1/4: the tapered edge region spans a quarter of the image width
    wnd = tukey(w, 1 / 4).astype(np.float32)
    #plt.plot(wnd)
    img = (img * wnd[:, None]) * wnd[None, :]
    f_img = np.fft.fftshift(ctx.smlm.FFT2(img))
    #f_img = np.fft.fftshift(np.fft.fft2(img))

    return f_img
Example #15
 def window(wlen):
     return tukey(wlen, alpha=apodization_window)
Example #16
    def __specMath__(self, winIndZ, winIndX, rangeBinCenter):
        complexVoltage = np.mean(
            self.data.complex, axis=2
        )  # get the complex voltage for each sample from the 4 channels
        cvWindow = (np.mean(
            complexVoltage[self.windowX * (winIndX):self.windowX *
                           (winIndX + 1)],
            axis=0))[winIndZ:(
                winIndZ +
                self.nIndsZ)]  # grab the complex voltages in the window
        rWindow = self.data.get_power().range[winIndZ:(
            winIndZ + self.nIndsZ)]  # grab the ranges of the window
        sVector = cvWindow * rWindow  #scale the complex voltage by range to account for spreading
        b = tukey(self.nIndsZ, 0.1) / (
            np.linalg.norm(tukey(self.nIndsZ, 0.1)) / np.sqrt(self.nIndsZ)
        )  # build the tukey window, using a 10% taper
        sVector = sVector * b  # apply the tukey window
        sVector = fft(
            sVector,
            self.nfft)  # run the fft on the now windowed, scaled voltages
        fsdec = 1 / self.data.sample_interval[
            0]  # sampling rate, parameter value in the config
        FFTvec_tmp, ftmp = self.freqtransf(
            sVector, fsdec, self.freq
        )  # Apply the frequency transformation, return the FFT and frequency vectors

        # Not every ping group contains the environmental parameters. The placement of the environment datagram in the order of the ping groups
        # also varies, particularly in mission-plan files, so sometimes the environmental parameters are assigned to later ping groups. Hence:
        env = self.data.environment[self.data.environment != np.array(None)][0]
        self.alpha = [
            self.alphaFG(env['sound_speed'], env['acidity'],
                         env['temperature'], env['depth'], env['salinity'],
                         nomf / 1000) / 1000 for nomf in ftmp
        ]

        f = interp1d(
            self.calF, self.calG, fill_value=np.nan
        )  # 1-d interpolation of the calibration gains to fit the size of our frequency vector
        ftmp[ftmp < min(self.calF)] = np.nan
        ftmp[ftmp > max(
            self.calF
        )] = np.nan  # nan-out the frequency vector outside of our calibration range
        calPsi = self.calPsi + 20 * np.log10(
            self.freq /
            ftmp)  # Use the empirical relationship to extrapolate out EBA
        G = f(ftmp)  # Gain interpolated to match the frequency values
        dt = 2 * (self.range[winIndZ + self.nIndsZ] -
                  self.range[winIndZ]) / env['sound_speed']

        # The following are the way CB handles Sv(f), terms vary slightly from Anderson et al., 2021 for calculating Sv(f) due to
        # consolidation of terms, though results are consistent. This can easily be replaced if an Sv(f) function exists elsewhere
        pr = np.abs(FFTvec_tmp)**2  # power
        zet = self.data.ZTRANSDUCER  # transducer impedance
        zer = self.data.ZTRANSCEIVER  # transceiver impedance
        pTr = self.data.transmit_power[0]  #Transmit power

        svtmp = 10*np.log10(pr) +\
           [(2*rangeBinCenter*a) for a in self.alpha] - 2*G - calPsi - \
           10*np.log10(dt) +\
           10*np.log10(4/zet/pTr/(2*np.sqrt(2))**2) +\
           10*np.log10((zer+zet)/zer) - \
           10*np.log10(env['sound_speed']**3/(32*np.pi**2*ftmp**2))
        return ftmp, svtmp
Example #17
def autoencoder_body(self, features):
  """ Customized body function for autoencoders acting on continuous images.
  This is based on tensor2tensor.models.research.AutoencoderBasic.body
  and should be compatible with most derived classes.

  The original autoencoder class relies on embedding the channels to a discrete
  vocabulary and defines the loss on that vocab. It's cool and all, but here we
  prefer expressing the reconstruction loss as an actual continuous likelihood
  function.
  """
  hparams = self.hparams
  is_training = hparams.mode == tf.estimator.ModeKeys.TRAIN

  output_activation = tf.nn.softplus if hparams.output_activation == 'softplus' else None
  input_shape =  [None, ] + common_layers.shape_list(features["inputs"])[1:]

  if hparams.mode == tf.estimator.ModeKeys.PREDICT:
    # In predict mode, we also define TensorFlow Hub modules for all pieces of
    # the autoencoder
    if hparams.encode_psf and 'psf' in features:
      psf_shape =  [None, ] + common_layers.shape_list(features["psf"])[1:]
    # First build encoder spec
    def make_model_spec():
      input_layer = tf.placeholder(tf.float32, shape=input_shape)
      x = self.embed(tf.expand_dims(input_layer, -1))
      x, encoder_layers = self.encoder(x)
      b, b_loss = self.bottleneck(x)
      hub.add_signature(inputs=input_layer, outputs=b)

    def make_model_spec_psf():
      input_layer = tf.placeholder(tf.float32, shape=input_shape)
      psf_layer = tf.placeholder(tf.float32, shape=psf_shape)
      x = self.embed(tf.expand_dims(input_layer, -1))

      # If we have access to the PSF, we add this information to the encoder
      if hparams.encode_psf and 'psf' in features:
        psf_image = tf.expand_dims(tf.signal.irfft2d(tf.cast(psf_layer[...,0], tf.complex64)), axis=-1)
        # Roll the image to undo the fftshift, assuming x1 zero padding and x2 subsampling
        psf_image = tf.roll(psf_image, shift=[input_shape[1], input_shape[2]], axis=[1,2])
        psf_image = tf.image.resize_with_crop_or_pad(psf_image, input_shape[1], input_shape[2])
        net_psf = tf.layers.conv2d(psf_image,
                                   hparams.hidden_size // 4, 5,
                                   padding='same', name="psf_embed_1")
        net_psf = common_layers.layer_norm(net_psf, name="psf_norm")
        x, encoder_layers = self.encoder(tf.concat([x, net_psf], axis=-1))
      else:
        x, encoder_layers = self.encoder(x)
      b, b_loss = self.bottleneck(x)
      hub.add_signature(inputs={'input':input_layer, 'psf':psf_layer}, outputs=b)

    spec = hub.create_module_spec(make_model_spec_psf if hparams.encode_psf else make_model_spec, drop_collections=['checkpoints'])
    encoder = hub.Module(spec, name="encoder_module")
    hub.register_module_for_export(encoder, "encoder")

    if hparams.encode_psf:
      code = encoder({'input':features["inputs"], 'psf': features['psf']})
    else:
      code = encoder(features["inputs"])
    b_shape = [None, ] + common_layers.shape_list(code)[1:]
    res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers
    res_size = min(res_size, hparams.max_hidden_size)

    # Second build decoder spec
    def make_model_spec():
      input_layer = tf.placeholder(tf.float32, shape=b_shape)
      x = self.unbottleneck(input_layer, res_size)
      x = self.decoder(x, None)
      reconstr = tf.layers.dense(x, input_shape[-1], name="autoencoder_final",
                                 activation=output_activation)
      hub.add_signature(inputs=input_layer, outputs=reconstr)
      hub.attach_message("stamp_size", tf.train.Int64List(value=[hparams.problem_hparams.img_len]))
      try:
        hub.attach_message("pixel_size", tf.train.FloatList(value=[hparams.problem_hparams.pixel_scale[res] for res in hparams.problem_hparams.resolutions]))
      except AttributeError:
        hub.attach_message("pixel_size", tf.train.FloatList(value=[hparams.problem_hparams.pixel_scale]))
    spec = hub.create_module_spec(make_model_spec, drop_collections=['checkpoints'])
    decoder = hub.Module(spec, name="decoder_module")
    hub.register_module_for_export(decoder, "decoder")

    reconstr = decoder(code)
    return reconstr , {"bottleneck_loss": 0.0}

  encoder_layers = None
  self.is1d = hparams.sample_width == 1
  if (hparams.mode != tf.estimator.ModeKeys.PREDICT
      or self._encode_on_predict):
    labels = features["targets_raw"]
    labels_shape = common_layers.shape_list(labels)

    shape = common_layers.shape_list(labels)
    with tf.variable_scope('encoder_module'):
      x = self.embed(tf.expand_dims(labels, -1))

    if shape[2] == 1:
      self.is1d = True

    # Run encoder.
    with tf.variable_scope('encoder_module'):
      # If we have access to the PSF, we add this information to the encoder
      # Note that we only support single band images so far...
      if hparams.encode_psf and 'psf' in features:
        psf_image = tf.expand_dims(tf.signal.irfft2d(tf.cast(features['psf'][...,0], tf.complex64)), axis=-1)
        # Roll the image to undo the fftshift, assuming x1 zero padding and x2 subsampling
        psf_image = tf.roll(psf_image, shift=[input_shape[1], input_shape[2]], axis=[1,2])
        psf_image = tf.image.resize_with_crop_or_pad(psf_image, input_shape[1], input_shape[2])
        net_psf = tf.layers.conv2d(psf_image,
                                   hparams.hidden_size // 4, 5,
                                   padding='same', name="psf_embed_1")
        net_psf = common_layers.layer_norm(net_psf, name="psf_norm")
        x, encoder_layers = self.encoder(tf.concat([x, net_psf], axis=-1))
      else:
        x, encoder_layers = self.encoder(x)

    # Bottleneck.
    with tf.variable_scope('encoder_module'):
      b, b_loss = self.bottleneck(x)

    xb_loss = 0.0
    b_shape = common_layers.shape_list(b)
    self._cur_bottleneck_tensor = b
    res_size = common_layers.shape_list(x)[-1]
    with tf.variable_scope('decoder_module'):
      b = self.unbottleneck(b, res_size)
    if not is_training:
      x = b
    else:
      l = 2**hparams.num_hidden_layers
      warm_step = int(hparams.bottleneck_warmup_steps * 0.25 * l)
      nomix_p = common_layers.inverse_lin_decay(warm_step) + 0.01
      if common_layers.should_generate_summaries():
        tf.summary.scalar("nomix_p_bottleneck", nomix_p)
      rand = tf.random_uniform(common_layers.shape_list(x))
      # This is the distance between b and x. Having this as loss helps learn
      # the bottleneck function, but if we back-propagated to x it would be
      # minimized by just setting x=0 and b=0 -- so we don't want too much
      # of the influence of this, and we stop-gradient to not zero-out x.
      x_stop = tf.stop_gradient(x)
      xb_loss = tf.reduce_mean(tf.reduce_sum(
          tf.squared_difference(x_stop, b), axis=-1))
      # To prevent this loss from exploding we clip at 1, but anneal clipping.
      clip_max = 1.0 / common_layers.inverse_exp_decay(
          warm_step, min_value=0.001)
      xb_clip = tf.maximum(tf.stop_gradient(xb_loss), clip_max)
      xb_loss *= clip_max / xb_clip
      x = tf.where(tf.less(rand, nomix_p), b, x)
  else:
    if self._cur_bottleneck_tensor is None:
      b = self.sample()
    else:
      b = self._cur_bottleneck_tensor
    self._cur_bottleneck_tensor = b
    res_size = self.hparams.hidden_size * 2**self.hparams.num_hidden_layers
    res_size = min(res_size, hparams.max_hidden_size)

    with tf.variable_scope('decoder_module'):
      x = self.unbottleneck(b, res_size)

  # Run decoder.
  with tf.variable_scope('decoder_module'):
    x = self.decoder(x, encoder_layers)

  # Cut to the right size and mix before returning.
  res = x
  if hparams.mode != tf.estimator.ModeKeys.PREDICT:
    res = x[:, :shape[1], :shape[2], :]

  with tf.variable_scope('decoder_module'):
    reconstr = tf.layers.dense(res, shape[-1], name="autoencoder_final",
                               activation=output_activation)

  # We apply an optional apodization of the output before computing the loss
  if hparams.output_apodization > 0:
    nx = reconstr.get_shape().as_list()[1]
    alpha = 2 * hparams.output_apodization / nx
    from scipy.signal.windows import tukey
    # Create a tukey window
    w = tukey(nx, alpha)
    w = np.outer(w,w).reshape((1, nx, nx,1)).astype('float32')
    # And penalize non zero things at the border
    apo_loss = tf.reduce_mean(tf.reduce_sum(((1.- w)*reconstr)**2, axis=[1,2,3]))
  else:
    w = 1.0
    apo_loss = 0.

  # We apply the window
  reconstr = reconstr * w

  # Optionally regularizes further the output
  # Anisotropic TV:
  tv = tf.reduce_mean(tf.image.total_variation(reconstr))
  # Smoothed Isotropic TV:
  #im_dx, im_dy = tf.image.image_gradients(reconstr)
  #tv = tf.reduce_sum(tf.sqrt(im_dx**2 + im_dy**2 + 1e-6), axis=[1,2,3])
  #tv = tf.reduce_mean(tv)

  image_summary("without_psf",tf.reshape(reconstr, labels_shape))
  # Apply channel-wise convolution with the PSF if requested
  if hparams.apply_psf and 'psf' in features:
    output_list = []
    for i in range(shape[3]):
      output_list.append(tf.squeeze(convolve(tf.expand_dims(reconstr[...,i],-1), tf.cast(features['psf'][...,i], tf.complex64),
                          zero_padding_factor=1)))
    reconstr = tf.stack(output_list,axis=-1)
    reconstr = tf.reshape(reconstr,shape)

  # Losses.
  losses = {
      "bottleneck_extra": b_loss,
      "bottleneck_l2": hparams.bottleneck_l2_factor * xb_loss,
      "total_variation": hparams.total_variation_loss * tv,
      "apodization_loss": hparams.apodization_loss * apo_loss,
  }

  loglik = loglikelihood_fn(labels, reconstr, features, hparams)
  targets_loss = tf.reduce_mean(- loglik)

  tf.summary.scalar("negloglik", targets_loss)
  tf.summary.scalar("bottleneck_loss", b_loss)

  # Compute final loss
  losses["training"] = targets_loss + b_loss + hparams.bottleneck_l2_factor * xb_loss + hparams.total_variation_loss * tv +  hparams.apodization_loss * apo_loss
  logits = tf.reshape(reconstr, labels_shape)

  image_summary("ae", reconstr)
  image_summary("input", labels)

  return logits, losses
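The apodization block near the end of the body above (hparams.output_apodization) can be read in isolation: build a separable 2D Tukey window, penalize any energy that survives outside it, then multiply the output by the window. A NumPy-only sketch with made-up sizes:

import numpy as np
from scipy.signal.windows import tukey

nx, output_apodization = 64, 8                       # hypothetical stamp size / taper width
alpha = 2 * output_apodization / nx
w = np.outer(tukey(nx, alpha), tukey(nx, alpha))
w = w.reshape((1, nx, nx, 1)).astype('float32')
reconstr = np.random.randn(4, nx, nx, 1).astype('float32')   # stand-in batch
apo_loss = np.mean(np.sum(((1.0 - w) * reconstr) ** 2, axis=(1, 2, 3)))
reconstr = reconstr * w                              # apply the window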
Example #18
File: essn.py  Project: xuekaiyu/prop
    abs_dip = abs(station['dip_angle'])

    time_weight = np.sqrt(2 + np.cos((local_time - 14.5) * np.pi / 12.)) - 0.73
    if abs_dip < 25.:
        return 0.75 * time_weight
    elif abs_dip < 50.:
        return 1. * time_weight
    elif abs_dip < 56.:
        return 0.75 * time_weight
    else:
        return 0.5 * time_weight


tukey_int = interpolate.interp1d(
    np.linspace(0, 25, 2501),
    windows.tukey(2501, alpha=0.08),
)


def recency_weight(tm, now, recency):
    delta = now - tm
    hours = delta / timedelta(hours=1)

    if hours < 0:
        return 0

    if recency:
        if hours >= 24:
            return 0
        else:
            return np.power(2.0, -(hours / 6))
Example #19
    def smooth(x,
               y,
               x_out=None,
               fit_pl=True,
               fit_gp=True,
               ls=1.0,
               nl=0.1,
               n_restarts=1,
               optimizer=None,
               alpha=0):
        """ smooth input y array with power law and/or gaussian process
        x, y : input 1D x-array [MHz] and y-array
        x_out : output 1D x-array [MHz] to sample y-fit at
        fit_pl : fit a power law
        fit_gp : fit a gaussian process
        ls : gp length scale in MHz
        nl : gp noise level
        optimizer : optimizer to use in gp fitting: None is no optimization
        n_restarts : number of optimizer restarts
        """
        if fit_pl:
            # fit a power law and subtract it from y
            if np.any(y <= 0.0):
                pl = False
                fit = np.polyfit(x, y, 1)
                ypl = np.polyval(fit, x)
            else:
                pl = True
                fit = np.polyfit(np.log10(x), np.log10(y), 1)
                ypl = 10**np.polyval(fit, np.log10(x))
            y = y - ypl

        # make output x array and y array
        if x_out is None:
            x_out = x[:, None].copy()
        else:
            if x_out.ndim == 1:
                x_out = x_out[:, None]
        y_out = np.zeros_like(x_out.ravel())

        # GP smooth
        if fit_gp:
            # setup kernel
            kernel = 1**2 * gp.kernels.RBF(length_scale=ls, length_scale_bounds=(0.1, 1e3)) \
                     + gp.kernels.WhiteKernel(noise_level=nl, noise_level_bounds=(1e-5, 1e1))
            GP = gp.GaussianProcessRegressor(kernel=kernel,
                                             n_restarts_optimizer=n_restarts,
                                             optimizer=optimizer)
            GP.fit(x[:, None], y)
            y_out += GP.predict(x_out).ravel()

        # Add power law back in
        if fit_pl:
            if pl:
                y_out += 10**np.polyval(fit, np.log10(x_out.ravel()))
            else:
                y_out += np.polyval(fit, x_out.ravel())

        # interpolate to full frequency resolution if no smoothing
        if not fit_pl and not fit_gp:
            y_out = interp1d(x, y, kind='linear',
                             fill_value='extrapolate')(x_out.ravel())

        # add tapering to band edges
        y_out *= windows.tukey(len(x_out.ravel()), alpha)

        return y_out
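A hedged usage sketch of smooth() above with a synthetic power-law spectrum (all numbers are illustrative, and the function is assumed to be callable at module level together with its gp/windows/interp1d dependencies):

import numpy as np

x = np.linspace(100.0, 200.0, 64)                         # MHz
y = 10.0 * (x / 150.0) ** -2.5 + np.random.normal(0, 0.05, x.size)
y_fit = smooth(x, y, fit_pl=True, fit_gp=True, ls=5.0, nl=0.1, alpha=0.1)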
Example #20
def ann2bb_dataset(src, batch_percent, Xwindow, zwindow, nzd, nzf, md, nsy,
                   device):

    pbs = loadmat(src + "pbs.mat", squeeze_me=True,
                  struct_as_record=False)['pbs']
    spm = loadmat(src + "spm.mat", squeeze_me=True,
                  struct_as_record=False)['spm']
    rec = loadmat(src + "rec.mat", squeeze_me=True,
                  struct_as_record=False)['rec']
    rec_pbs = loadmat(src + "rec_rec.mat",
                      squeeze_me=True,
                      struct_as_record=False)['rec']
    rec_spm = loadmat(src + "rec_spm.mat",
                      squeeze_me=True,
                      struct_as_record=False)['spm']

    nsy = min(nsy, pbs.mon.na, rec_pbs.mon.na)
    md['dtm'] = round(rec_pbs.mon.dtm[0], 3)
    md['dtm1'] = round(pbs.mon.dtm[0], 3)
    vtm = md['dtm'] * np.arange(0, md['ntm'])
    md['vtm1'] = md['dtm1'] * np.arange(0, md['ntm'])
    w = windows.tukey(md['ntm'], 5 / 100)

    tar = np.zeros((nsy, 2))
    rec_set = -999.9 * np.ones(shape=(nsy, 3, md['ntm']))
    pbs_set = -999.9 * np.ones(shape=(nsy, 3, md['ntm']))
    spm_set = -999.9 * np.ones(shape=(nsy, 3, md['ntm']))
    fil_set = -999.9 * np.ones(shape=(nsy, 3, md['ntm']))
    sfm_set = -999.9 * np.ones(shape=(nsy, 3, md['ntm']))
    pga_rec_set = -999.9 * np.ones(shape=(nsy, 3))
    psa_rec_set = -999.9 * np.ones(shape=(nsy, 3, md['nTn']))
    pga_fil_set = -999.9 * np.ones(shape=(nsy, 3))
    psa_fil_set = -999.9 * np.ones(shape=(nsy, 3, md['nTn']))
    pga_pbs_set = -999.9 * np.ones(shape=(nsy, 3))
    psa_pbs_set = -999.9 * np.ones(shape=(nsy, 3, md['nTn']))
    pga_spm_set = -999.9 * np.ones(shape=(nsy, 3))
    psa_spm_set = -999.9 * np.ones(shape=(nsy, 3, md['nTn']))
    pga_sfm_set = -999.9 * np.ones(shape=(nsy, 3))
    psa_sfm_set = -999.9 * np.ones(shape=(nsy, 3, md['nTn']))
    # parse mat database
    bi = 0
    pr = max(0, (Xwindow - len(rec.syn[0].tha.__dict__['ew'])))
    ps = max(0, (Xwindow - len(pbs.syn[0].tha.__dict__['ew'])))
    pr = divmod(pr, 2)
    ps = divmod(ps, 2)
    pr = (pr[0] + pr[1], pr[0])
    ps = (ps[0] + ps[1], ps[0])

    for i in range(nsy):
        for d, j in dirs.items():
            rec_tha = np.pad(rec.syn[i].tha.__dict__[d], pr)
            pbs_tha = np.pad(pbs.syn[i].tha.__dict__[d], ps)
            rec_pbs_tha = np.pad(rec_pbs.syn[i].tha.__dict__[d], ps)
            spm_tha = np.pad(spm.__dict__[d].syn[i].tha.__dict__[d], ps)
            rec_spm_tha = np.pad(rec_spm.__dict__[d].syn[i].tha.__dict__[d],
                                 ps)
            #
            rec_set[i, j, :] = detrend(rec_tha[bi:bi + Xwindow]) * w
            pbs_set[i, j, :] = detrend(pbs_tha[bi:bi + Xwindow]) * w
            spm_set[i, j, :] = detrend(spm_tha[bi:bi + Xwindow]) * w
            fil_set[i, j, :] = detrend(rec_pbs_tha[bi:bi + Xwindow]) * w
            sfm_set[i, j, :] = detrend(rec_spm_tha[bi:bi + Xwindow]) * w
            #
            #             from matplotlib import pyplot as plt
            #             import seaborn as sns
            #             clr = sns.color_palette("coolwarm",6)
            #             _,hax=plt.subplots(nrows=3,ncols=2,sharex=True,sharey=True)
            #             hax = list(hax)
            #             hax[0][0].plot(pbs_set[i,j,:],label=r'$SPEED$',color=clr[0])
            #             hax[1][0].plot(fil_set[i,j,:],label=r'$rec_{fil}^1$',color=clr[1])
            #             hax[2][0].plot(filter_signal(rec_set[i,j,:].squeeze(),1.*2.*0.005),label=r'$rec_{fil}^2$',color=clr[2])
            #             hax[0][1].plot(spm_set[i,j,:],label=r'$ANN2BB^1$',color=clr[3])
            #             hax[1][1].plot(sfm_set[i,j,:],label=r'$ANN2BB^2$',color=clr[4])
            #             hax[2][1].plot(rec_set[i,j,:],label='rec',color=clr[5])
            #             hax[0][0].legend()
            #             hax[1][0].legend()
            #             hax[2][0].legend()
            #             hax[0][1].legend()
            #             hax[1][1].legend()
            #             hax[2][1].legend()
            #             plt.show()

            pga_rec_set[i, j] = np.abs(rec_set[i, j, :]).max()
            pga_pbs_set[i, j] = np.abs(pbs_set[i, j, :]).max()
            pga_fil_set[i, j] = np.abs(fil_set[i, j, :]).max()
            pga_spm_set[i, j] = np.abs(spm_set[i, j, :]).max()
            pga_sfm_set[i, j] = np.abs(sfm_set[i, j, :]).max()


#             _,psa,_,_,_ = rsp(md['dtm'],rec_set[i,j,:],md['vTn'],5.)
#             psar_set[i,j,:] = psa.reshape((md['nTn']))
#             _,psa,_,_,_ = rsp(md['dtm'],pbs_set[i,j,:],md['vTn'],5.)
#             psat_set[i,j,:] = psa.reshape((md['nTn']))
#
    rec_set = np2t(rec_set).float()
    pbs_set = np2t(pbs_set).float()
    fil_set = np2t(fil_set).float()
    spm_set = np2t(spm_set).float()
    sfm_set = np2t(sfm_set).float()

    wnz_set = tFT(nsy, nzd, zwindow)
    wnz_set.resize_(nsy, nzd, zwindow).normal_(**rndm_args)
    wnf_set = tFT(nsy, nzf, zwindow)
    wnf_set.resize_(nsy, nzf, zwindow).normal_(**rndm_args)
    pbs_set = lowpass_biquad(pbs_set, 1.5 / 0.01, md['cutoff'])

    for i in range(pbs_set.shape[0]):
        for j in range(3):
            #             _,psa,_,_,_ = rsp(md['dtm'],pbs_set[i,j,:].data.numpy(),md['vTn'],5.)
            #             psaf_set[i,j,:] = psa.reshape((md['nTn']))

            pga_pbs_set[i, j] = np.abs(pbs_set[i, j, :].data.numpy()).max()
            pga = np.max([pga_rec_set[i,j],pga_fil_set[i,j],\
                          pga_pbs_set[i,j],pga_spm_set[i,j],\
                          pga_sfm_set[i,j]])
            rec_set[i, j, :] = rec_set[i, j, :] / pga
            fil_set[i, j, :] = fil_set[i, j, :] / pga
            pbs_set[i, j, :] = pbs_set[i, j, :] / pga
            spm_set[i, j, :] = spm_set[i, j, :] / pga
            sfm_set[i, j, :] = sfm_set[i, j, :] / pga

    pga_rec_set = np2t(np.float32(pga_rec_set))
    pga_fil_set = np2t(np.float32(pga_fil_set))
    psa_pbs_set = np2t(np.float32(psa_pbs_set))
    psa_spm_set = np2t(np.float32(psa_spm_set))
    psa_sfm_set = np2t(np.float32(psa_sfm_set))
    ths_fsc = []

    partition = {'all': range(0, nsy)}
    trn = max(1, int(batch_percent[0] * nsy))
    tst = max(1, int(batch_percent[1] * nsy))
    vld = max(1, nsy - trn - tst)
    tar[:, 0] = np.float32(pbs.mon.dep[:nsy])
    tar[:, 1] = np.float32(pbs.mon.dep[:nsy])

    nhe = tar.shape[1]
    tar_fsc = select_scaler(method='fit_transform',\
                            scaler='StandardScaler')
    tar = operator_map['fit_transform'](tar_fsc, tar)
    lab_enc = LabelEncoder()
    for i in range(nhe):
        tar[:, i] = lab_enc.fit_transform(tar[:, i])
    from sklearn.preprocessing import MultiLabelBinarizer
    one_hot = MultiLabelBinarizer(classes=range(64))
    tar = np2t(np.float32(one_hot.fit_transform(tar)))
    from plot_tools import plot_ohe
    plot_ohe(tar)
    fsc = {'lab_enc':lab_enc,'tar':tar_fsc,'ths_fsc':ths_fsc,\
           'one_hot':one_hot,'ncat':tar.shape[1]}
    tar = (psa_rec_set, psa_fil_set, tar)
    ths = thsTensorData(rec_set, fil_set, wnz_set, wnf_set, tar,
                        partition['all'], pbs_set, spm_set, sfm_set)

    # RANDOM SPLIT
    idx,\
    ths_trn = random_split(ths,[trn,vld,tst])
    ths_trn,\
    ths_tst,\
    ths_vld = ths_trn
    return ths_trn, ths_tst, ths_vld, vtm, fsc, md
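Both this loader and stead_dataset below taper every trace with a 5% Tukey window after detrending and then normalize by the peak value; a standalone sketch of that preprocessing on a synthetic record (made-up length):

import numpy as np
from scipy.signal import detrend, windows

ntm = 4096                                   # hypothetical number of samples
tha = np.cumsum(np.random.randn(ntm))        # fake acceleration time history
w = windows.tukey(ntm, 5 / 100)
trace = detrend(tha) * w
pga = np.abs(trace).max()
trace = trace / pga                          # peak-normalized, tapered trace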
Example #21
def tukey_pulse(duration, sample_rate, amplitude, alpha):
    n_points = int(round(duration / sample_rate * 1e9))
    return windows.tukey(n_points, alpha) * amplitude
Example #22
class PyQtGraphPlotter(QtWidgets.QGroupBox):
    @enum.unique
    class WindowTypes(enum.Enum):
        Rectangular = 0
        Hann = 1
        Flattop = 2
        Tukey_5Percent = 3

    windowFunctionMap = {
        WindowTypes.Rectangular:
        lambda M: windows.boxcar(M, sym=False),
        WindowTypes.Hann:
        lambda M: windows.hann(M, sym=False),
        WindowTypes.Flattop:
        lambda M: windows.flattop(M, sym=False),
        WindowTypes.Tukey_5Percent:
        lambda M: windows.tukey(M, sym=False, alpha=0.05),
    }

    dataIsPower = False
    prevDataSet = None
    curDataSet = None

    axes_units = [ureg.dimensionless]
    data_unit = ureg.dimensionless

    def __init__(self, parent=None):
        _style_pg()

        super().__init__(parent)

        pal = self.palette()
        highlightPen = pg.mkPen(
            pal.color(QtGui.QPalette.Highlight).darker(120))
        darkerHighlightPen = pg.mkPen(highlightPen.color().darker(120))
        alphaColor = darkerHighlightPen.color()
        alphaColor.setAlphaF(0.25)
        darkerHighlightPen.setColor(alphaColor)

        self.toolbar = QtWidgets.QToolBar(self)
        self.toolbar.addWidget(QtWidgets.QLabel("Fourier transform window:"))
        self.windowComboBox = QtWidgets.QComboBox(self.toolbar)
        for e in self.WindowTypes:
            self.windowComboBox.addItem(e.name, e)
        self.toolbar.addWidget(self.windowComboBox)
        self.windowComboBox.currentIndexChanged.connect(self._updateFTWindow)

        self.pglwidget = pg.GraphicsLayoutWidget(self)
        self.pglwidget.setBackground(None)

        vbox = QtWidgets.QVBoxLayout(self)
        vbox.addWidget(self.toolbar)
        vbox.addWidget(self.pglwidget)

        self.plot = self.pglwidget.addPlot(row=0, col=0)
        self.ft_plot = self.pglwidget.addPlot(row=1, col=0)

        self.plot.setLabels(title="Data")
        self.ft_plot.setLabels(title="Magnitude spectrum")

        self.plot.showGrid(x=True, y=True)
        self.ft_plot.showGrid(x=True, y=True)

        self._make_plot_background(self.plot)
        self._make_plot_background(self.ft_plot)

        self._lines = []
        self._lines.append(self.plot.plot())
        self._lines.append(self.plot.plot())
        self._lines[0].setPen(darkerHighlightPen)
        self._lines[1].setPen(highlightPen)

        self._ft_lines = []
        self._ft_lines.append(self.ft_plot.plot())
        self._ft_lines.append(self.ft_plot.plot())
        self._ft_lines[0].setPen(darkerHighlightPen)
        self._ft_lines[1].setPen(highlightPen)

        self._lastPlotTime = time.perf_counter()

    def _make_plot_background(self, plot, brush=None):
        if brush is None:
            brush = pg.mkBrush(self.palette().color(QtGui.QPalette.Base))

        vb_bg = QtWidgets.QGraphicsRectItem(plot)
        vb_bg.setRect(plot.vb.rect())
        vb_bg.setBrush(brush)
        vb_bg.setFlag(QtWidgets.QGraphicsItem.ItemStacksBehindParent)
        vb_bg.setZValue(-1e9)
        plot.vb.sigResized.connect(lambda x: vb_bg.setRect(x.geometry()))

    def setLabels(self, axesLabels, dataLabel):
        self.axesLabels = axesLabels
        self.dataLabel = dataLabel

        self.updateLabels()

    def updateLabels(self):
        self.plot.setLabels(bottom='{} [{:C~}]'.format(self.axesLabels[0],
                                                       self.axes_units[0]),
                            left='{} [{:C~}]'.format(self.dataLabel,
                                                     self.data_unit))

        ftUnits = self.data_unit
        if not self.dataIsPower:
            ftUnits = ftUnits**2

        self.ft_plot.setLabels(bottom='1 / {} [{:C~}]'.format(
            self.axesLabels[0], (1 / self.axes_units[0]).units),
                               left='Power [dB-({:C~})]'.format(ftUnits))

    def get_ft_data(self, data):
        delta = np.mean(np.diff(data.axes[0]))
        winFn = self.windowFunctionMap[self.windowComboBox.currentData()]
        refUnit = 1 * self.data_unit
        Y = np.fft.rfft(data.data / refUnit * winFn(len(data.data)), axis=0)
        freqs = np.fft.rfftfreq(len(data.axes[0]), delta)
        dBdata = 10 * np.log10(np.abs(Y))
        if not self.dataIsPower:
            dBdata *= 2
        return (freqs, dBdata)

    def _updateFTWindow(self):
        if self.prevDataSet:
            F, dBdata = self.get_ft_data(self.prevDataSet)
            self._ft_lines[0].setData(x=F, y=dBdata)

        if self.curDataSet:
            F, dBdata = self.get_ft_data(self.curDataSet)
            self._ft_lines[1].setData(x=F, y=dBdata)

    def drawDataSet(self, newDataSet, *args):
        plotTime = time.perf_counter()

        # artificially limit the replot rate to 20 Hz
        if (plotTime - self._lastPlotTime < 0.05):
            return

        self._lastPlotTime = plotTime

        self.prevDataSet = self.curDataSet
        self.curDataSet = newDataSet

        if (self.curDataSet.data.units != self.data_unit
                or self.curDataSet.axes[0].units != self.axes_units[0]):
            self.data_unit = self.curDataSet.data.units
            self.axes_units[0] = self.curDataSet.axes[0].units
            self.updateLabels()

        if self.prevDataSet:
            self._lines[0].setData(x=self._lines[1].xData,
                                   y=self._lines[1].yData)
            self._ft_lines[0].setData(x=self._ft_lines[1].xData,
                                      y=self._ft_lines[1].yData)
        if self.curDataSet:
            self._lines[1].setData(x=self.curDataSet.axes[0],
                                   y=self.curDataSet.data)
            F, dBdata = self.get_ft_data(self.curDataSet)
            self._ft_lines[1].setData(x=F, y=dBdata)
Example #23
def stead_dataset(src, batch_percent, Xwindow, zwindow, nzd, nzf, md, nsy,
                  device):

    vtm = md['dtm'] * np.arange(0, md['ntm'])
    tar = np.zeros((nsy, 2))
    trn_set = -999.9 * np.ones(shape=(nsy, 3, md['ntm']))
    pgat_set = -999.9 * np.ones(shape=(nsy, 3))
    psat_set = -999.9 * np.ones(shape=(nsy, 3, md['nTn']))

    # parse hdf5 database
    eqd = h5py.File(src, 'r')['earthquake']['local']
    eqm = pd.read_csv(osj(src.split('/waveforms')[0],'metadata_'+\
                          src.split('waveforms_')[-1].split('.hdf5')[0]+'.csv'))
    eqm = eqm.loc[eqm['trace_category'] == 'earthquake_local']
    eqm = eqm.loc[eqm['source_magnitude'] >= 3.5]
    eqm = eqm.sample(frac=nsy / len(eqm)).reset_index(drop=True)
    w = windows.tukey(md['ntm'], 5 / 100)
    for i in eqm.index:
        tn = eqm.loc[i, 'trace_name']
        bi = int(eqd[tn].attrs['p_arrival_sample'])
        for j in range(3):
            trn_set[i, j, :] = detrend(eqd[tn][bi:bi + Xwindow, j]) * w
            pgat_set[i, j] = np.abs(trn_set[i, j, :]).max()
            trn_set[i, j, :] = trn_set[i, j, :] / pgat_set[i, j]
            pgat_set[i, j] = np.abs(trn_set[i, j, :]).max()
            _, psa, _, _, _ = rsp(md['dtm'], trn_set[i, j, :], md['vTn'], 5.)
            psat_set[i, j, :] = psa.reshape((md['nTn']))
    pgat_set = np2t(np.float32(pgat_set))
    trn_set = np2t(trn_set).float()

    ths_fsc = []

    partition = {'all': range(0, nsy)}
    trn = max(1, int(batch_percent[0] * nsy))
    tst = max(1, int(batch_percent[1] * nsy))
    vld = max(1, nsy - trn - tst)

    wnz_set = tFT(nsy, nzd, zwindow)
    wnz_set.resize_(nsy, nzd, zwindow).normal_(**rndm_args)
    wnf_set = tFT(nsy, nzf, zwindow)
    wnf_set.resize_(nsy, nzf, zwindow).normal_(**rndm_args)
    thf_set = lowpass_biquad(trn_set, 1. / md['dtm'], md['cutoff'])
    pgaf_set = -999.9 * np.ones(shape=(nsy, 3))
    psaf_set = -999.9 * np.ones(shape=(nsy, 3, md['nTn']))

    for i in range(thf_set.shape[0]):
        for j in range(3):
            pgaf_set[i, j] = np.abs(thf_set[i, j, :].data.numpy()).max(axis=-1)
            _, psa, _, _, _ = rsp(md['dtm'], thf_set.data[i, j, :].numpy(),
                                  md['vTn'], 5.)
            psaf_set[i, j, :] = psa.reshape((md['nTn']))
    pgat_set = np2t(np.float32(pgat_set))
    pgaf_set = np2t(np.float32(pgaf_set))
    psat_set = np2t(np.float32(psat_set))
    psaf_set = np2t(np.float32(psaf_set))

    tar[:, 0] = eqm['source_magnitude'].to_numpy(np.float32)
    tar[:, 1] = eqm['source_depth_km'].to_numpy(np.float32)

    nhe = tar.shape[1]
    tar_fsc = select_scaler(method='fit_transform',\
                            scaler='StandardScaler')
    tar = operator_map['fit_transform'](tar_fsc, tar)
    lab_enc = LabelEncoder()
    for i in range(nhe):
        tar[:, i] = lab_enc.fit_transform(tar[:, i])
    from sklearn.preprocessing import MultiLabelBinarizer
    one_hot = MultiLabelBinarizer(classes=range(64))
    tar = np2t(np.float32(one_hot.fit_transform(tar)))
    from plot_tools import plot_ohe
    plot_ohe(tar)
    fsc = {'lab_enc':lab_enc,'tar':tar_fsc,'ths_fsc':ths_fsc,\
           'one_hot':one_hot,'ncat':tar.shape[1]}
    tar = (psat_set, psaf_set, tar)
    ths = thsTensorData(trn_set, thf_set, wnz_set, wnf_set, tar,
                        partition['all'])

    # RANDOM SPLIT
    idx,\
    ths_trn = random_split(ths,[trn,vld,tst])
    ths_trn,\
    ths_tst,\
    ths_vld = ths_trn
    return ths_trn, ths_tst, ths_vld, vtm, fsc
Example #24
File: vortex.py  Project: ehpor/hcipy
    def make_instance(self, instance_data, input_grid, output_grid,
                      wavelength):
        pupil_diameter = input_grid.shape * input_grid.delta

        levels = int(np.ceil(
            np.log(self.q / 2) / np.log(self.scaling_factor))) + 1
        qs = [2 * self.scaling_factor**i for i in range(levels)]
        num_airys = [input_grid.shape / 2]

        focal_grids = []
        instance_data.props = []
        instance_data.jones_matrices = []

        for i in range(1, levels):
            num_airys.append(num_airys[i - 1] * self.window_size /
                             (2 * qs[i - 1] * num_airys[i - 1]))

        for i in range(levels):
            q = qs[i]
            num_airy = num_airys[i]

            focal_grid = make_focal_grid(q,
                                         num_airy,
                                         pupil_diameter=pupil_diameter,
                                         reference_wavelength=1,
                                         focal_length=1)

            fast_axis_orientation = Field(
                self.charge / 2 * focal_grid.as_('polar').theta, focal_grid)
            retardance = self.evaluate_parameter(self.phase_retardation,
                                                 input_grid, output_grid,
                                                 wavelength)

            focal_mask_raw = LinearRetarder(retardance, fast_axis_orientation)
            jones_matrix = focal_mask_raw.jones_matrix

            jones_matrix *= 1 - circular_aperture(1e-9)(focal_grid)

            if i != levels - 1:
                wx = windows.tukey(self.window_size, 1, False)
                wy = windows.tukey(self.window_size, 1, False)
                w = np.outer(wy, wx)

                w = np.pad(w, (focal_grid.shape - w.shape) // 2,
                           'constant').ravel()
                jones_matrix *= 1 - w

            for j in range(i):
                fft = FastFourierTransform(focal_grids[j])
                mft = MatrixFourierTransform(focal_grid, fft.output_grid)

                jones_matrix -= mft.backward(
                    fft.forward(instance_data.jones_matrices[j]))

            if i == 0:
                prop = FourierFilter(input_grid, jones_matrix, q)
            else:
                prop = FraunhoferPropagator(input_grid, focal_grid)

            focal_grids.append(focal_grid)
            instance_data.jones_matrices.append(jones_matrix)
            instance_data.props.append(prop)
Example #25
     temp_sig = G(N, L, L_0) * O(N, K, L) * T_PM(N) * match_amplitude
     I_t.append(temp_sig)
 sum_sig = sum(I_t, axis=0)
 tau_scan_ms = (tau_scan + SN / 2 / SR) * 1e3
 # plot(tau_scan_ms,sum(I_t,axis=0))
 subplot(221)
 gca().cla()
 # plot(tau_scan,sum_sig)
 plot(L * 1e3, sum_sig[::-1])
 xlabel('Space (mm)')
 # xlabel('Time (s)') # tau_scan
 # ylim((-0.05,0.05))
 ylim((-25000, 25000))
 subplot(222)
 gca().cla()
 tukey_win = windows.tukey(SN, alpha=0.5)
 sum_sig_win = tukey_win * sum_sig
 semilogy(fftfreq(n=len(tau_scan), d=1 / SR * 1e3),
          abs((fft(real(sum_sig_win)))))
 xlabel('Frequency (kHz)')
 xlim((0, 500))
 # ylim((1e-5,1e3)) # for unmatched amplitude
 ylim((1e4, 1e8))
 # pause(0.001)
 # savefig('movie_jpgs/movie_{:04d}.jpg'.format(counter))
 counter += 1
 print(counter)
 all_sigs.append(sum_sig)
 pause(0.1)
 while waitforbuttonpress() == 0:
     pass
Example #26
File: vortex.py  Project: ehpor/hcipy
    def __init__(self,
                 input_grid,
                 charge,
                 lyot_stop=None,
                 q=1024,
                 scaling_factor=4,
                 window_size=32):
        self.input_grid = input_grid
        pupil_diameter = input_grid.shape * input_grid.delta

        if hasattr(lyot_stop, 'forward') or lyot_stop is None:
            self.lyot_stop = lyot_stop
        else:
            self.lyot_stop = Apodizer(lyot_stop)

        levels = int(np.ceil(np.log(q / 2) / np.log(scaling_factor))) + 1
        qs = [2 * scaling_factor**i for i in range(levels)]
        num_airys = [input_grid.shape / 2]

        focal_grids = []
        self.focal_masks = []
        self.props = []

        for i in range(1, levels):
            num_airys.append(num_airys[i - 1] * window_size /
                             (2 * qs[i - 1] * num_airys[i - 1]))

        for i in range(levels):
            q = qs[i]
            num_airy = num_airys[i]

            focal_grid = make_focal_grid(q,
                                         num_airy,
                                         pupil_diameter=pupil_diameter,
                                         reference_wavelength=1,
                                         focal_length=1)
            focal_mask = Field(
                np.exp(1j * charge * focal_grid.as_('polar').theta),
                focal_grid)

            focal_mask *= 1 - circular_aperture(1e-9)(focal_grid)

            if i != levels - 1:
                wx = windows.tukey(window_size, 1, False)
                wy = windows.tukey(window_size, 1, False)
                w = np.outer(wy, wx)

                w = np.pad(w, (focal_grid.shape - w.shape) // 2,
                           'constant').ravel()
                focal_mask *= 1 - w

            for j in range(i):
                fft = FastFourierTransform(focal_grids[j])
                mft = MatrixFourierTransform(focal_grid, fft.output_grid)

                focal_mask -= mft.backward(fft.forward(self.focal_masks[j]))

            if i == 0:
                prop = FourierFilter(input_grid, focal_mask, q)
            else:
                prop = FraunhoferPropagator(input_grid, focal_grid)

            focal_grids.append(focal_grid)
            self.focal_masks.append(focal_mask)
            self.props.append(prop)
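In both vortex examples the inner part of each intermediate focal-plane mask is removed with a separable 2D Tukey window that is zero-padded up to the focal-grid size; a minimal NumPy sketch of that masking step (hypothetical sizes):

import numpy as np
from scipy.signal.windows import tukey

window_size = 32
grid_shape = np.array([256, 256])                    # hypothetical focal-grid shape
wx = tukey(window_size, 1, False)
wy = tukey(window_size, 1, False)
w = np.outer(wy, wx)
w = np.pad(w, (grid_shape - np.array(w.shape)) // 2, 'constant').ravel()
focal_mask = np.ones(grid_shape).ravel()             # stand-in for the vortex mask
focal_mask = focal_mask * (1 - w)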
Example #27
def SBIR(IR,
         t_IR,
         fmin,
         fmax,
         winCheck=False,
         spectraCheck=False,
         ms=32,
         method='constant',
         beta=1,
         cosWin=False,
         ABEC=False,
         delta_ABEC=52):
    """

    Function to calculate Speaker Boundary Interference Response

    Parameters
    ----------
    IR, t_IR: 1D arrays, containing both the Impulse Response magnitude and the time step values.
             freq, frf, t, IR = bem(args)

    fmin, fmax: int, minimum and maximum frequency of interest.
            fmin, fmax = 20, 100

    winCheck: bool, option to view windowing in time domain.
            winCheck = True or False

    spectraCheck: bool, option to view frequency response and SBIR in frequency domain.
            spectraCheck = True or False

    modalCheck: bool, option to view room modes prediction of BEM simulation and cuboid approximation.
            modalCheck = True or False
    """

    if len(IR) < 20:
        print('IR resolution not high enough to calculate SBIR')

    if method == 'constant':
        peak = 0  # Window from the start of the IR
        dt = (max(t_IR) / len(t_IR))  # Time axis resolution
        tt_ms = round(
            (ms / 1000) / dt)  # Number of samples equivalent to `ms` milliseconds

        # Windows
        post_peak = np.zeros((len(IR[:])))
        pre_peak = np.zeros((len(IR[:])))

        if cosWin is True:
            win_cos = win.cosine(int(2 * tt_ms))**2  # Cosine squared window
        else:
            win_cos = win.tukey(int(2 * tt_ms), beta)  # Cosine window

        window = np.zeros((len(IR[:])))  # Final window
        ##
        win_cos[0:int(tt_ms)] = 1
        window[0:int(2 * tt_ms)] = win_cos
        ##

    elif method == 'peak':
        # Sample of the initial peak
        peak = detect_peaks(IR,
                            mph=(max(IR) * 0.9),
                            threshold=0,
                            edge='rising',
                            show=False)
        if len(peak) > 1:
            peak = peak[0]
            print('More than one peak at the IR')
        #        ind[x] = 0; # Window max from the beginning
        # peak = 0  # Window from the start of the IR
        dt = (max(t_IR) / len(t_IR))  # Time axis resolution
        tt_ms = round(
            (ms / 1000) / dt)  # Number of samples equivalent to `ms` milliseconds

        # Windows
        post_peak = np.zeros((len(IR[:])))
        pre_peak = np.zeros((len(IR[:])))
        win_cos = win.tukey(int(2 * tt_ms), beta)  # Cosine window
        window = np.zeros((len(IR[:])))  # Final window

        ms = 64
        # Sample of the initial peak
        peak = detect_peaks(IR,
                            mph=(max(IR) * 0.9),
                            threshold=0,
                            edge='rising',
                            show=False)
        if len(peak) > 1:
            peak = peak[0]
            print('More than one peak at the IR')
        #        ind[x] = 0; # Window max from the beginning
        dt = (max(t_IR) / len(t_IR))  # Time axis resolution
        tt_ms = round(
            (ms / 1000) / dt)  # Number of samples equivalent to 64 ms

        # Windows
        post_peak = np.zeros((len(IR[:])))
        pre_peak = np.zeros((len(IR[:])))
        win_cos = win.cosine(int(2 * tt_ms))  # Cosine window
        window = np.zeros((len(IR[:])))  # Final window
        ##
        # Cosine window pre peak
        win_cos_b = win.cosine(2 * peak + 1)
        pre_peak[0:int(peak)] = win_cos_b[0:int(peak)]
        pre_peak[int(peak)::] = 1

        # Cosine window post peak
        post_peak[int(peak):int(peak + tt_ms
                                )] = win_cos[int(tt_ms):int(2 * tt_ms)] / max(
                                    win_cos[int(1 * tt_ms):int(2 * tt_ms)]
                                )  # Creating Hanning window array

        # Creating final window
        window[0:int(peak)] = pre_peak[0:int(peak)]
        #         window[0:int(peak)] = 1  # 1 from the beggining
        window[int(peak)::] = post_peak[int(peak)::]

    # Applying window
    IR_array = np.zeros((len(IR), 2))  # Creating matrix
    IR_array[:, 0] = IR[:]
    IR_array[:, 1] = IR[:] * window[:]  # FR and SBIR

    # Calculating FFT
    FFT_array = np.zeros((len(IR_array[:, 0]), len(IR_array[0, :])),
                         dtype='complex')  # Creating matrix

    if ABEC is True:
        FFT_array_dB = np.zeros(
            (round(len(IR_array[:, 0]) / 2) - delta_ABEC, len(IR_array[0, :])),
            dtype='complex')
        FFT_array_Pa = np.zeros(
            (round(len(IR_array[:, 0]) / 2) - delta_ABEC, len(IR_array[0, :])),
            dtype='complex')
    else:
        FFT_array_dB = np.zeros(
            (round(len(IR_array[:, 0]) / 2), len(IR_array[0, :])),
            dtype='complex')
        FFT_array_Pa = np.zeros(
            (round(len(IR_array[:, 0]) / 2), len(IR_array[0, :])),
            dtype='complex')

    for i in range(0, len(IR_array[0, :])):
        iIR = IR_array[:, i]
        FFT_array[:, i] = 2 / len(iIR) * np.fft.fft(iIR)
        if ABEC is True:
            FFT_array_Pa[:, i] = FFT_array[delta_ABEC:round(len(iIR) / 2), i]
        else:
            FFT_array_Pa[:, i] = FFT_array[0:round(len(iIR) / 2), i]
    for i in range(0, len(IR_array[0, :])):
        if ABEC is True:
            FFT_array_dB[:, i] = 20 * np.log10(
                np.abs(FFT_array[delta_ABEC:int(len(FFT_array[:, i]) / 2), i])
                / 2e-5)  # applying log and removing aliasing and first 20 Hz
        else:
            FFT_array_dB[:, i] = 20 * np.log10(
                np.abs(FFT_array_Pa[:, i]) /
                2e-5)  # applying log and removing aliasing and the first 20 Hz

    if ABEC is False:
        freq_FFT = np.linspace(0, len(IR) / 2, num=int(
            len(IR) / 2))  # Frequency vector for the FFT
    else:
        freq_FFT = np.linspace(fmin, fmax, num=len(
            FFT_array_dB[:, 0]))  # Frequency vector for the FFT

    # View windowed Impulse Response in time domain:
    if winCheck is True:
        figWin = plt.figure(figsize=(12, 5),
                            dpi=80,
                            facecolor='w',
                            edgecolor='k')
        win_index = 0
        plt.plot(t_IR, IR_array[:, win_index], linewidth=3)
        plt.plot(t_IR, IR_array[:, win_index + 1], '--', linewidth=5)
        plt.plot(t_IR, window[:] * (max(IR_array[:, 0])), '-.', linewidth=5)
        plt.title('Impulse Response Windowing', fontsize=20)
        plt.xlabel('Time [s]', fontsize=20)
        plt.xlim([t_IR[0], t_IR[int(len(t_IR) / 8)]])
        # plt.xticks(np.arange(t_IR[int(peak[0])], t_IR[int(len(t_IR))], 0.032), fontsize=15)
        plt.ylabel('Amplitude [-]', fontsize=20)
        plt.legend(['Modal IR', 'SBIR IR', 'Window'], loc='best', fontsize=20)
        plt.grid(True, 'both')
        plt.yticks(fontsize=15)
        plt.tight_layout()
        plt.show()

    # Frequency Response and SBIR
    if spectraCheck is True:
        figSpectra = plt.figure(figsize=(12, 5),
                                dpi=80,
                                facecolor='w',
                                edgecolor='k')

        if ABEC is False:
            plt.semilogx(freq_FFT[fmin:fmax + 1],
                         FFT_array_dB[fmin:fmax + 1, 0],
                         linewidth=3,
                         label='Full spectrum')
            plt.semilogx(freq_FFT[fmin:fmax + 1],
                         FFT_array_dB[fmin:fmax + 1, 1],
                         '-.',
                         linewidth=3,
                         label='SBIR')
        elif ABEC is True:
            plt.semilogx(freq_FFT,
                         FFT_array_dB[:, 0],
                         linewidth=3,
                         label='Full spectrum')
            plt.semilogx(freq_FFT,
                         FFT_array_dB[:, 1],
                         '-.',
                         linewidth=3,
                         label='SBIR')

        # plt.semilogx(freq_FFT[fmin:fmax + 1], 20 * np.log10(np.abs(FFT_array_Pa[fmin:fmax + 1, 1])/2e-5), ':',
        #              linewidth=3, label='SBIR 2')
        plt.legend(fontsize=15, loc='best')  # , bbox_to_anchor=(0.003, 0.13))
        # plt.title('Processed IR vs ABEC', fontsize=20)
        plt.xlabel('Frequency [Hz]', fontsize=20)
        plt.ylabel(r'SPL [dB ref. 20 $\mu$Pa]', fontsize=20)
        plt.gca().get_xaxis().set_major_formatter(
            ticker.ScalarFormatter())  # Remove scientific notation from xaxis
        plt.gca().get_xaxis().set_minor_formatter(
            ticker.ScalarFormatter())  # Remove scientific notation from xaxis
        plt.gca().tick_params(
            which='minor',
            length=5)  # Set major and minor ticks to same length
        plt.xticks(fontsize=15)
        plt.grid(True, 'both')
        plt.xlim([fmin, fmax])
        plt.ylim([55, 105])
        plt.yticks(fontsize=15)
        plt.tight_layout(pad=3)
        plt.show()

    return freq_FFT[fmin:fmax + 1], FFT_array_Pa[fmin:fmax + 1, 1], window
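# A minimal, self-contained sketch of the windowing-plus-FFT procedure
# documented above, using a made-up sample rate and a toy impulse response
# (neither comes from the function above): a Tukey window is applied from the
# start of the IR and the SPL spectra of the full and windowed responses are
# obtained the same way as in the 'constant' method.
import numpy as np
from scipy.signal import windows as win

fs = 44100                                    # assumed sample rate [Hz]
t_IR = np.arange(0, 0.5, 1 / fs)              # 0.5 s time axis
IR = np.exp(-5 * t_IR) * np.sin(2 * np.pi * 80 * t_IR)  # toy impulse response

ms = 64                                       # window length in milliseconds
tt_ms = round((ms / 1000) * fs)               # samples in 64 ms
win_cos = win.tukey(int(2 * tt_ms), 0.5)      # Tukey window
win_cos[:int(tt_ms)] = 1                      # flat first half, fade-out second half
window = np.zeros(len(IR))
window[:int(2 * tt_ms)] = win_cos             # window from the start of the IR

IR_win = IR * window
spec_full = 2 / len(IR) * np.fft.fft(IR)
spec_win = 2 / len(IR) * np.fft.fft(IR_win)
SPL_full = 20 * np.log10(np.abs(spec_full[:len(IR) // 2]) / 2e-5)
SPL_win = 20 * np.log10(np.abs(spec_win[:len(IR) // 2]) / 2e-5)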
Example #28
0
File: pyDSP.py  Project: amckenna41/pySAR
    def pre_processing(self):
        """
        Complete various pre-processing steps for encoded protein sequences before
        doing any of the DSP-related functions or transformations. Zero-pad
        the sequences, remove any +/- infinity or NAN values, get the approximate
        protein spectra and window function parameter names.

        Parameters
        ----------
        :self (PyDSP object): 
            instance of PyDSP class.
            
        Returns
        -------
        None

        """
        #zero-pad encoded sequences so they are all the same length
        self.protein_seqs = zero_padding(self.protein_seqs)

        #get shape parameters of proteins seqs
        self.num_seqs = self.protein_seqs.shape[0]
        self.signal_len = self.protein_seqs.shape[1]

        #replace any positive or negative infinity or NaN values with 0
        self.protein_seqs[self.protein_seqs == -np.inf] = 0
        self.protein_seqs[self.protein_seqs == np.inf] = 0
        self.protein_seqs[np.isnan(self.protein_seqs)] = 0

        #replace any NAN's with 0's
        #self.protein_seqs.fillna(0, inplace=True)
        self.protein_seqs = np.nan_to_num(self.protein_seqs)

        #initialise zeros array to store all protein spectra
        self.fft_power = np.zeros((self.num_seqs, self.signal_len))
        self.fft_real = np.zeros((self.num_seqs, self.signal_len))
        self.fft_imag = np.zeros((self.num_seqs, self.signal_len))
        self.fft_abs = np.zeros((self.num_seqs, self.signal_len))

        #list of accepted spectra, window functions and filters
        all_spectra = ['power', 'absolute', 'real', 'imaginary']
        all_windows = [
            'hamming', 'blackman', 'blackmanharris', 'gaussian', 'bartlett',
            'kaiser', 'barthann', 'bohman', 'chebwin', 'cosine', 'exponential',
            'flattop', 'hann', 'boxcar', 'hanning', 'nuttall', 'parzen',
            'triang', 'tukey'
        ]
        all_filters = [
            'savgol', 'medfilt', 'symiirorder1', 'lfilter', 'hilbert'
        ]

        #set required input parameters, raise error if spectrum is None
        if self.spectrum is None:
            raise ValueError(
                'Invalid input Spectrum type ({}) not available in valid spectra: {}'
                .format(self.spectrum, all_spectra))
        else:
            #get closest correct spectra from user input, if no close match then raise error
            spectra_matches = (get_close_matches(self.spectrum,
                                                 all_spectra,
                                                 cutoff=0.4))

            if spectra_matches == []:
                raise ValueError(
                    'Invalid input Spectrum type ({}) not available in valid spectra: {}'
                    .format(self.spectrum, all_spectra))
            else:
                self.spectra = spectra_matches[0]  #closest match in array

        if self.window_type is None:
            self.window = 1  #window = 1 is the same as applying no window
        else:
            #get closest correct window function from user input
            window_matches = (get_close_matches(self.window_type,
                                                all_windows,
                                                cutoff=0.4))

            #check if sym=True or sym=False
            #get window function specified by window input parameter, if no match then window = 1
            if window_matches != []:
                if window_matches[0] == 'hamming':
                    self.window = hamming(self.signal_len, sym=True)
                    self.window_type = "hamming"
                elif window_matches[0] == "blackman":
                    self.window = blackman(self.signal_len, sym=True)
                    self.window = "blackman"
                elif window_matches[0] == "blackmanharris":
                    self.window = blackmanharris(self.signal_len,
                                                 sym=True)  #**
                    self.window_type = "blackmanharris"
                elif window_matches[0] == "bartlett":
                    self.window = bartlett(self.signal_len, sym=True)
                    self.window_type = "bartlett"
                elif window_matches[0] == "gaussian":
                    self.window = gaussian(self.signal_len, std=7, sym=True)
                    self.window_type = "gaussian"
                elif window_matches[0] == "kaiser":
                    self.window = kaiser(self.signal_len, beta=14, sym=True)
                    self.window_type = "kaiser"
                elif window_matches[0] == "hanning":
                    self.window = hanning(self.signal_len, sym=True)
                    self.window_type = "hanning"
                elif window_matches[0] == "barthann":
                    self.window = barthann(self.signal_len, sym=True)
                    self.window_type = "barthann"
                elif window_matches[0] == "bohman":
                    self.window = bohman(self.signal_len, sym=True)
                    self.window_type = "bohman"
                elif window_matches[0] == "chebwin":
                    self.window = chebwin(self.signal_len, sym=True)
                    self.window_type = "chebwin"
                elif window_matches[0] == "cosine":
                    self.window = cosine(self.signal_len, sym=True)
                    self.window_type = "cosine"
                elif window_matches[0] == "exponential":
                    self.window = exponential(self.signal_len, sym=True)
                    self.window_type = "exponential"
                elif window_matches[0] == "flattop":
                    self.window = flattop(self.signal_len, sym=True)
                    self.window_type = "flattop"
                elif window_matches[0] == "boxcar":
                    self.window = boxcar(self.signal_len, sym=True)
                    self.window_type = "boxcar"
                elif window_matches[0] == "nuttall":
                    self.window = nuttall(self.signal_len, sym=True)
                    self.window_type = "nuttall"
                elif window_matches[0] == "parzen":
                    self.window = parzen(self.signal_len, sym=True)
                    self.window_type = "parzen"
                elif window_matches[0] == "triang":
                    self.window = triang(self.signal_len, sym=True)
                    self.window_type = "triang"
                elif window_matches[0] == "tukey":
                    self.window = tukey(self.signal_len, sym=True)
                    self.window_type = "tukey"

            else:
                self.window = 1  #window = 1 is the same as applying no window

        #calculate convolution from protein sequences
        if self.convolution is not None:
            if self.window is not None:
                self.convoled_seqs = signal.convolve(
                    self.protein_seqs, self.window, mode='same') / sum(
                        self.window)

        if self.filter is not None:
            #get closest correct filter from user input
            filter_matches = (get_close_matches(self.filter,
                                                all_filters,
                                                cutoff=0.4))

            #set filter attribute according to approximate user input
            if filter_matches != []:
                if filter_matches[0] == 'savgol':
                    self.filter = savgol_filter(self.signal_len,
                                                self.signal_len)
                elif filter_matches[0] == 'medfilt':
                    self.filter = medfilt(self.signal_len)
                elif filter_matches[0] == 'symiirorder1':
                    self.filter = symiirorder1(self.signal_len, c0=1, z1=1)
                elif filter_matches[0] == 'lfilter':
                    self.filter = lfilter(self.signal_len)
                elif filter_matches[0] == 'hilbert':
                    self.filter = hilbert(self.signal_len)
            else:
                self.filter = ""  #no filter
Example #29
0
exagy = 5

# Get ratio
yrat = np.abs((exagy*dy)/(maxlat-minlat))
xrat = np.abs((exagx*dx)/(maxlon-minlon))


# plt.subplot(312)
fc = 'k'
fig = plt.figure(facecolor=fc, figsize=(16, 9))
ax = plt.axes(facecolor=fc)
plt.subplots_adjust(left=0.0, right=1.0,
                    bottom=0.0, top=1.0)  # 1.0+yrat)

# Create linewidth taper for a fade
taper = tukey(len(nlon), alpha=0.25)
for _i, _ilat in enumerate(nlat[::ny]):

    # Get the data
    x = nlon
    y = _ilat * np.ones_like(nlon) + normtopo[_i * ny, :] * dy * exagy
    z = fdata[_i * ny, :]
    linewidths = normtopo[_i * ny, :]*taper
    baseline = minlat-(dy * exagy)

    # Plot polygons so lines in the back aren't visible
    plt.fill_between(x, y, y2=baseline, facecolor=fc,
                     edgecolor='none', zorder=-_i)

    # Plot lines with linewidth and topo cmap
    lines, sm = lplt.plot_xyz_line(
Example #30
0
def distributed_strategy(args):
    kappa_gen = NISGenerator( # only used to generate pixelated kappa fields
        kappa_fov=args.kappa_fov,
        src_fov=args.source_fov,
        pixels=args.kappa_pixels,
        z_source=args.z_source,
        z_lens=args.z_lens
    )

    min_theta_e = 0.1 * args.image_fov if args.min_theta_e is None else args.min_theta_e
    max_theta_e = 0.45 * args.image_fov if args.max_theta_e is None else args.max_theta_e

    cosmos_files = glob.glob(os.path.join(args.cosmos_dir, "*.tfrecords"))
    cosmos = tf.data.TFRecordDataset(cosmos_files, compression_type=args.compression_type)
    cosmos = cosmos.map(decode_image).map(preprocess_image)
    if args.shuffle_cosmos:
        cosmos = cosmos.shuffle(buffer_size=args.buffer_size, reshuffle_each_iteration=True)
    cosmos = cosmos.batch(args.batch_size)

    window = tukey(args.src_pixels, alpha=args.tukey_alpha)
    window = np.outer(window, window)
    phys = PhysicalModel(
        image_fov=args.image_fov,
        kappa_fov=args.kappa_fov,
        src_fov=args.source_fov,
        pixels=args.lens_pixels,
        kappa_pixels=args.kappa_pixels,
        src_pixels=args.src_pixels,
        method="conv2d"
    )
    noise_a = (args.noise_rms_min - args.noise_rms_mean) / args.noise_rms_std
    noise_b = (args.noise_rms_max - args.noise_rms_mean) / args.noise_rms_std
    psf_a = (args.psf_fwhm_min - args.psf_fwhm_mean) / args.psf_fwhm_std
    psf_b = (args.psf_fwhm_max - args.psf_fwhm_mean) / args.psf_fwhm_std

    options = tf.io.TFRecordOptions(compression_type=args.compression_type)
    with tf.io.TFRecordWriter(os.path.join(args.output_dir, f"data_{THIS_WORKER}.tfrecords"), options) as writer:
        print(f"Started worker {THIS_WORKER} at {datetime.now().strftime('%y-%m-%d_%H-%M-%S')}")
        for i in range((THIS_WORKER - 1) * args.batch_size, args.len_dataset, N_WORKERS * args.batch_size):
            for galaxies in cosmos:
                break
            galaxies = window[np.newaxis, ..., np.newaxis] * galaxies

            noise_rms = truncnorm.rvs(noise_a, noise_b, loc=args.noise_rms_mean, scale=args.noise_rms_std, size=args.batch_size)
            fwhm = truncnorm.rvs(psf_a, psf_b, loc=args.psf_fwhm_mean, scale=args.psf_fwhm_std, size=args.batch_size)
            psf = phys.psf_models(fwhm, cutout_size=args.psf_cutout_size)

            batch_size = galaxies.shape[0]
            _r = tf.random.uniform(shape=[batch_size, 1, 1], minval=0, maxval=args.max_shift)
            _theta = tf.random.uniform(shape=[batch_size, 1, 1], minval=-np.pi, maxval=np.pi)
            x0 = _r * tf.math.cos(_theta)
            y0 = _r * tf.math.sin(_theta)
            ellipticity = tf.random.uniform(shape=[batch_size, 1, 1], minval=0., maxval=args.max_ellipticity)
            phi = tf.random.uniform(shape=[batch_size, 1, 1], minval=-np.pi, maxval=np.pi)
            einstein_radius = tf.random.uniform(shape=[batch_size, 1, 1], minval=min_theta_e, maxval=max_theta_e)

            kappa = kappa_gen.kappa_field(x0, y0, ellipticity, phi, einstein_radius)

            lensed_images = phys.noisy_forward(galaxies, kappa, noise_rms=noise_rms, psf=psf)

            records = encode_examples(
                kappa=kappa,
                galaxies=galaxies,
                lensed_images=lensed_images,
                z_source=args.z_source,
                z_lens=args.z_lens,
                image_fov=phys.image_fov,
                kappa_fov=phys.kappa_fov,
                source_fov=args.source_fov,
                noise_rms=noise_rms,
                psf=psf,
                fwhm=fwhm
            )
            for record in records:
                writer.write(record)
    print(f"Finished work at {datetime.now().strftime('%y-%m-%d_%H-%M-%S')}")
Example #31
0
plt.imshow(img)
plt.imshow(img[695:715, 510:525, :])

t = time.time()
led_R, led_G = prep_LED(raw_images[:-1])
r, g = np.mean(led_R, axis=(1, 2)), np.mean(led_G, axis=(1, 2))
elapsed = np.floor((time.time() - t) / 60)
print('The time cost is {} minutes'.format(elapsed))
print(np.size(led_R), np.size(led_G))

# median filter
k_size = 19
r_med, g_med = medfilt(r, kernel_size=k_size), medfilt(g, kernel_size=k_size)
#  Tukey window filter
alpha, n = 0.5, 15
sq_win = tukey(n, alpha=alpha)
r_in = r - r_med + abs(g - g_med)
w = []
for i in range(len(r) - n):
    w = np.append(w, r_in[i:i + n] @ sq_win)
xlim1, xlim2 = 2e4, 2.02e4
plt.plot(w)
plt.xlim(xlim1, xlim2)
plt.title('alpha = {}, n = {}'.format(alpha, n))

pks = find_peaks(w[1200:], height=20, distance=10)
p = pks[0] + 1200

I = w[p]
I_sort = np.sort(I)
I_sort_idx = np.argsort(I)
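# The loop above builds a sliding dot product between the detrended signal and
# the Tukey template. The same values can be computed in a single call by
# convolving with the reversed template; a self-contained sketch with a
# synthetic signal (the data below is made up for illustration):
import numpy as np
from scipy.signal.windows import tukey

rng = np.random.default_rng(0)
r_in_demo = rng.standard_normal(1000)        # synthetic detrended signal
alpha_demo, n_demo = 0.5, 15
sq_win_demo = tukey(n_demo, alpha=alpha_demo)

# Loop version (as above)
w_loop = np.array([r_in_demo[i:i + n_demo] @ sq_win_demo
                   for i in range(len(r_in_demo) - n_demo)])

# Vectorized: the sliding correlation equals convolution with the reversed template
w_vec = np.convolve(r_in_demo, sq_win_demo[::-1],
                    mode='valid')[:len(r_in_demo) - n_demo]

assert np.allclose(w_loop, w_vec)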
Example #32
0
def grab_random_model(
        NN=512,
        Nt=25,
        img_thresh=0.15,
        t1_lims=(900, 1200),
        t2_lims=(80, 400),
        # basepath="E:\\image_db\\",
        basepath="./image_db/",
        seed=-1,
        inflow_range=None,
        rseed=None,
        mseed=None):
    final_mask = make_random_mask(NN=NN, rseed=rseed)
    # final_mask = np.ones_like(final_mask)
    r_a, r_b, theta, extra_p = gen_motion_params(NN=NN,
                                                 rseed=mseed,
                                                 extra_poly=4)
    r_a *= 0.04
    r_b *= 0.04

    filt = windows.tukey(Nt, 0.3)[:, np.newaxis, np.newaxis]
    filt[0] = 0.0
    for i in range(Nt // 2, Nt):
        if filt[i] < 0.2:
            filt[i] = (0.2 + filt[i]) / 2
    xx = np.linspace(-1, 1, Nt)[:, np.newaxis, np.newaxis]

    p0, p1 = extra_p[0][np.newaxis], extra_p[1][np.newaxis]
    xmod = (p0 * xx**1.0 +
            p1 * xx**2.0) * filt * (.02 + .01 * np.random.standard_normal())

    p2, p3 = extra_p[2][np.newaxis], extra_p[3][np.newaxis]
    ymod = (p2 * xx**1.0 +
            p3 * xx**2.0) * filt * (.02 + .01 * np.random.standard_normal())

    r_a0 = r_a.copy()
    r_b0 = r_b.copy()
    theta0 = theta.copy()

    img = get_random_image(basepath, NN=NN, seed=seed)

    img_mask = img > img_thresh
    final_mask = final_mask & img_mask
    final_mask0 = final_mask.copy()
    final_mask = final_mask.ravel()

    s = img.ravel()
    # s[~final_mask] *= 0.0
    s = s[final_mask]

    if inflow_range is not None:
        inflow_mask = (img > inflow_range[0]) & (img < inflow_range[1])
        ii1 = gaussian(inflow_mask, 20.0)
        if ii1.max() > 0.0:
            ii1 /= ii1.max()
            ii1 = ii1 > 0.5
            ii1 = opening(ii1, selem=disk(5))
            inflow_mask = ii1.copy()
            inflow_lin = inflow_mask.ravel()
            inflow_lin = inflow_lin[final_mask]
        else:
            inflow_mask = None
    else:
        inflow_mask = None

    # t2 = gen_1D_poly(Nt, [0, 10], order=2)
    # t2 = np.hstack((0.0, t2))
    # t2 = np.cumsum(t2)
    # t2 *= 2*np.pi/t2[-1]
    # t2 = t2[:-1]

    temp_method = np.random.randint(2)
    if temp_method == 0:
        t2 = get_temporal_waveform(Nt)
    elif temp_method == 1:
        t2 = get_temporal_waveform2(Nt)

    # t = np.linspace(0, np.pi * 2, Nt, endpoint=False)
    t = t2.copy()
    t0 = t.copy()
    t = np.tile(t[:, np.newaxis], [1, s.size])

    r_a = r_a.ravel()
    r_a = r_a[final_mask]

    r_b = r_b.ravel()
    r_b = r_b[final_mask]

    theta = theta.ravel()
    theta = theta[final_mask]

    xmod0 = xmod.copy()
    ymod0 = ymod.copy()

    xmod = np.reshape(xmod, [Nt, -1])
    ymod = np.reshape(ymod, [Nt, -1])

    xmod = xmod[:, final_mask]
    ymod = ymod[:, final_mask]

    r_a = np.tile(r_a[np.newaxis, :], [t.shape[0], 1])
    r_b = np.tile(r_b[np.newaxis, :], [t.shape[0], 1])

    # print(t.shape)
    # print(r_a.shape, r_b.shape, theta.shape)

    ell_x = r_a * (np.cos(t) - 1.0)
    ell_y = r_b * np.sin(t)
    dx = np.cos(theta) * ell_x - np.sin(theta) * ell_y + xmod
    dy = np.sin(theta) * ell_x + np.cos(theta) * ell_y + ymod

    y, x = np.meshgrid(np.linspace(-0.5, 0.5, NN, False),
                       np.linspace(-0.5, 0.5, NN, False),
                       indexing="ij")

    x = x.ravel()
    x = x[final_mask]
    x = np.tile(x[np.newaxis, :], [t.shape[0], 1])

    y = y.ravel()
    y = y[final_mask]
    y = np.tile(y[np.newaxis, :], [t.shape[0], 1])

    z = np.zeros_like(x)

    x0 = x.copy()
    y0 = y.copy()
    z0 = z.copy()

    max_displace = np.hypot(dx, dy).max()
    displace_lim = 10 / 256
    descaler = 1.0
    if max_displace > displace_lim:
        descaler = displace_lim / max_displace
        dx *= descaler
        dy *= descaler

    # print(np.hypot(dx, dy).max())
    # print(x.shape, y.shape)

    # brownian_mod = 0.0
    # rrx = brownian_mod * np.random.standard_normal(x.shape)
    # rrx[0] *= 0.0
    # c_rrx = np.cumsum(rrx, axis=0)

    # rry = brownian_mod * np.random.standard_normal(y.shape)
    # rry[0] *= 0.0
    # c_rry = np.cumsum(rry, axis=0)

    x += dx
    y += dy

    r = np.stack((x, y, z), 2)
    if rseed is not None:
        np.random.seed(rseed)
    t1 = map_1Dpoly(s, t1_lims)
    t2 = map_1Dpoly(s, t2_lims)
    if inflow_mask is not None:
        s[inflow_lin > 0] = np.random.uniform(0.20, 0.50)
        t1[inflow_lin > 0] = np.random.uniform(30, 60)
        t2[inflow_lin > 0] = np.random.uniform(10, 20)

    return r, s, t1, t2, final_mask0, r_a0, r_b0, theta0, t0, img, inflow_mask, xmod0, ymod0, descaler