Example #1
import numpy as np
from scipy.stats import rice


def noise(X, spr):
    # Add salt-and-pepper noise, then Gaussian or Rician noise depending on spr
    X_copy = X.copy()
    row, col, _ = X_copy[0].shape
    salt_pepper_rate = 0.35
    amount = spr
    num_salt = np.ceil(amount * X_copy[0].size * salt_pepper_rate)
    num_pepper = np.ceil(amount * X_copy[0].size * (1.0 - salt_pepper_rate))

    for Xo in X_copy:
        # Add Salt noise
        coords = [np.random.randint(0, i - 1, int(num_salt)) for i in Xo.shape]
        Xo[coords[0], coords[1], :] = 255

        # Add Pepper noise
        coords = [
            np.random.randint(0, i - 1, int(num_pepper)) for i in Xo.shape
        ]
        Xo[coords[0], coords[1], :] = 0

    if spr > 0.25:
        # Add Gaussian noise
        mean = 0.0  # some constant
        std = 1.0  # some constant (standard deviation)
        noisy_img = X_copy + np.random.normal(mean, std, X_copy.shape)
        noisy_img_clipped = np.clip(noisy_img, 0, 255)
    else:
        # Add Rician noise
        b = 0.775
        r = X_copy + rice.rvs(b, size=X_copy.shape)
        noisy_img_clipped = np.clip(r, 0, 255)

    return noisy_img_clipped
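
A minimal usage sketch (not part of the scraped project): calling noise() on a small batch of RGB images, with spr above the 0.25 threshold so the Gaussian branch runs.

X = np.random.randint(0, 256, size=(4, 32, 32, 3)).astype(np.float64)
X_noisy = noise(X, spr=0.3)  # spr > 0.25: Gaussian noise is added after salt-and-pepper
print(X_noisy.shape, X_noisy.min(), X_noisy.max())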
Example #2
    def get_data(self, sigma, noise='normal'):
        """Generates noisy 3d data"""
        size = (50, 50, 50)
        test_data = np.ones(size)
        wmdata = np.zeros(size)
        bgdata = np.zeros(size)
        bgdata[:, :25, :] = 1
        wmdata[bgdata == 0] = 1

        bg_mean = 0
        wm_mean = 600
        test_data[bgdata > 0] = bg_mean
        test_data[wmdata > 0] = wm_mean

        if noise == 'rice':
            test_data += rice.rvs(0.77,
                                  scale=sigma * wm_mean,
                                  size=test_data.shape)
        elif noise == 'rayleigh':
            test_data += np.random.rayleigh(scale=sigma * wm_mean,
                                            size=test_data.shape)
        else:
            test_data += np.random.normal(0.,
                                          scale=sigma * wm_mean,
                                          size=test_data.shape)

        return test_data, wmdata, bgdata
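
A minimal way to exercise get_data; `PhantomTest` is a hypothetical stand-in for the test class this method belongs to (the scrape omits it), and the method does not actually touch `self`.

import numpy as np
from scipy.stats import rice

data, wmdata, bgdata = PhantomTest().get_data(sigma=0.05, noise='rice')
print(data[wmdata > 0].mean())  # white-matter mean near 600 plus the Rician offset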
Example #3
import numpy as np
from scipy.stats import norm, rice


def fading(typ: str, dim: tuple = (1,),
           shape: float = 6, seed: int = None) -> np.ndarray:
    """Create a sampled fading channel from a given distribution and given
    dimension.

    Parameters
    ----------
    typ : str in dic.channel_types,
        type of the fading channel to be used.
    dim : tuple,
        dimension of the resulting array.
    shape : float [dB],
        shape parameters used in the rice distribution modeling the power of
        the LOS respect to the NLOS rays.
    seed : int,
        seed for the random number generator, so that the same arrays are
        returned if the same seed is used.
    """
    if typ not in dic.channel_types:
        raise ValueError(f'Type can only be in {dic.channel_types}')
    elif typ == 'AWGN':
        return np.ones(dim)
    elif typ == "Rayleigh":
        vec = norm.rvs(size=2 * np.prod(dim), random_state=seed)
        return (vec[0::2] + 1j * vec[1::2]).reshape(dim)
    elif typ == "Rice":
        return rice.rvs(10 ** (shape / 10), size=np.prod(dim),
                        random_state=seed).reshape(dim)
    elif typ == "Shadowing":
        return norm.rvs(scale=10 ** (shape / 10), random_state=seed)
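
A minimal usage sketch, assuming `dic.channel_types` contains the four names handled above:

h_rice = fading("Rice", dim=(1000,), shape=6, seed=42)
h_rayleigh = fading("Rayleigh", dim=(1000,), seed=42)
print(np.mean(np.abs(h_rice) ** 2), np.mean(np.abs(h_rayleigh) ** 2))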
Example #4
    def noisyImage(self, inputPath, outputName, noiseType):

        for id, i in enumerate(self.noiseLevels):
            img = itk.imread(inputPath)
            dat = itk.GetArrayFromImage(img)
            dat = dat.astype(np.float32)
            # simulated CT noise: Poisson + Gaussian
            if noiseType == "poisson":
                datNoisy = 0.5 * np.random.poisson(
                    dat, None) + 0.5 * np.random.normal(dat, i, None)
            elif noiseType == "rician":
                datNoisy = rice.rvs(dat / i, scale=i)
            else:
                raise ValueError("noise type not supported: " + noiseType)

            datNoisy[datNoisy < 0] = 0
            datNoisy[datNoisy > 255] = 255
            # writing image on disk
            noisyImg = itk.GetImageFromArray(datNoisy.astype(np.uint8))

            print(i)
            outputPath = outputName + "_" + str(i) + ".nii"
            itk.imwrite(noisyImg, outputPath)

            print(outputPath)
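
The Rician branch in isolation, as a sketch on a plain NumPy array: with b = dat / i and scale = i, rice.rvs draws the magnitude of a complex Gaussian whose line-of-sight amplitude is the pixel value, so the result stays close to dat whenever dat >> i.

import numpy as np
from scipy.stats import rice

dat = np.full((8, 8), 120.0)  # hypothetical flat image patch
i = 10.0                      # noise level
print(rice.rvs(dat / i, scale=i).mean())  # near 120, plus a small Rician bias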
Example #5
    def FadingModel(self, Time=1, Graphs=False, Results=False):
        """Estimate the channel's mean power gain over the interval [0, Time]."""
        from scipy.stats import rice
        from scipy import integrate

        # CALCULATING THE RICE CONTINUOUS DISTRIBUTION
        shape = 0.775
        #MeanPowerGain = (1/Time) * integrate(pow(rice.logcdf(t, shape), 2), (t, 0, Time))
        h = lambda x: pow(rice.logpdf(x, shape), 2)
        MeanPowerGain, err = integrate.quad(h, 0, Time)

        # PLOTTING THE PROBABILITY DENSITY FUNCTION
        if Graphs is True:
            fig, ax = plt.subplots(1, 1)
            x = np.linspace(rice.ppf(0.01, shape), rice.ppf(0.99, shape), 100)
            ax.plot(x,
                    rice.pdf(x, shape),
                    'r-',
                    lw=5,
                    alpha=0.6,
                    label='Rice PDF')

            rv = rice(shape)
            ax.plot(x, rv.pdf(x), 'k-', lw=2, label='Frozen PDF')

            r = rice.rvs(shape, size=1000)
            ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
            ax.legend(loc='best', frameon=False)

        #PRINTING RESULTS
        if Results is True:
            print("Fading - Mean Power Gain: {}".format(
                (1 / Time) * MeanPowerGain))
        return (1 / Time) * MeanPowerGain
Example #6
    def __init__(self, L, c, start_point=0, end_point=5, level=60):

        # Sample from a Rice distribution using scipy.stats's random number generator
        self.samples = rice.rvs(c, size=L)
        self.bins = np.linspace(start_point, end_point, level)
        self.histogram, self.bins = np.histogram(self.samples, bins=self.bins, density=True)
        self.bin_centers = 0.5 * (self.bins[1:] + self.bins[:-1])
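
A usage sketch, with `RiceHistogram` as a hypothetical name for the class this __init__ belongs to (the scrape omits it):

import numpy as np
from scipy.stats import rice

rh = RiceHistogram(L=100000, c=2.0)
# with density=True the histogram approximates the analytic pdf at the bin centers
print(np.max(np.abs(rh.histogram - rice.pdf(rh.bin_centers, 2.0))))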
Example #7
File: stack.py Project: gbaier/sarsim
import numpy as np
from scipy.stats import rice


def get_outliers(shape, n_outliers):
    outliers = rice.rvs(4, size=n_outliers) * np.exp(
        1j * np.random.uniform(-np.pi, np.pi, size=n_outliers))
    x_coords = np.random.randint(0, shape[0], size=n_outliers)
    y_coords = np.random.randint(0, shape[1], size=n_outliers)
    for outlier, x, y in zip(outliers, x_coords, y_coords):
        yield outlier, (x, y)
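
A minimal usage sketch: scatter the generated outliers into a complex image.

img = np.zeros((64, 64), dtype=complex)
for outlier, (x, y) in get_outliers(img.shape, n_outliers=20):
    img[x, y] = outlier  # Rice-distributed amplitude, uniform phase
print(np.count_nonzero(img))  # up to 20; coordinate collisions are possible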
Example #8
    def noisyImage(self, DirPath, file, outputFile, noiseType):
        imgPath = DirPath + "/" + file

        print(DirPath)
        print(imgPath)

        for id, i in enumerate(self.noiseLevels):
            img = itk.imread(imgPath)
            dat = itk.GetArrayFromImage(img)

            # simulated CT noise: Poisson + Gaussian
            if noiseType == "poisson":
                datNoisy = 0.5 * np.random.poisson(
                    dat, None) + 0.5 * np.random.normal(dat, i, None)
            elif noiseType == "rician":
                datNoisy = rice.rvs(dat / i, scale=i)
            else:
                raise ValueError("noise type not supported: " + noiseType)

            datNoisy[datNoisy < 0] = 0
            datNoisy[datNoisy > 255] = 255
            # writing image on disk
            if dat.dtype == np.uint8:
                noisyImg = itk.GetImageFromArray(datNoisy.astype(np.uint8))
            else:
                noisyImg = itk.GetImageFromArray(
                    datNoisy.astype(np.float32)
                )  # the data may be float64, which itk does not support

            print(i)
            outputPath = DirPath + "/" + outputFile + "_" + str(i) + ".nii"
            itk.imwrite(noisyImg, outputPath)

            print(outputPath)
Example #9
 def __apply_rician_noise__(self, image, est_std):
     [img_shape, img_elem, _] = get_data_information(image)
     if image.is_cuda:
         device = torch.device("cuda:0")
     else:
         device = torch.device("cpu")
     # rice.rvs(b, loc, scale): b=1, loc=the image values, scale=est_std
     image_vec = torch.Tensor(
         rice.rvs(1, image.double().view(img_elem), est_std)
     ).double().to(device=device)
     return image_vec.view(img_shape)
Example #10
 def change_position(self):
     if p_true(self.config.move_p):
         if p_true(0.5):
             self.increase_distance()
         else:
             self.decrease_distance()
     # small-scale channel gain, re-drawn with probability 0.2
     if p_true(0.2):
         self.small_scale_gain = rice.rvs(self.shape, scale=self.scale)
Example #11
    def __init__(self, id, config):
        self.id = id
        self.config = config
        self.dis = 0
        self.init_distance()

        self.scale = 0.559
        self.shape = 0.612 / self.scale
        self.small_scale_gain = rice.rvs(self.shape, scale=self.scale)
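
A quick sanity check on this parameterization (a sketch, not part of the original class): nu = shape * scale = 0.612 and 2 * scale**2 = 0.625, so the second moment nu**2 + 2 * scale**2 is about 1, i.e. the small-scale gain has unit mean power.

from scipy.stats import rice

scale = 0.559
shape = 0.612 / scale
print(rice.moment(2, shape, scale=scale))  # ~1.0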
Example #12
 def takeSimulatedMeasurement(self,
                              ESSID,
                              distance,
                              v=2.4e9,
                              b=0.009,
                              loc=-7.001,
                              scale=12.551):
     rss_base = 147.55 - 20.0 * np.log10(v) - 20.0 * np.log10(distance)
     rss = rss_base - rice.rvs(b, loc=loc, scale=scale)
     return rss
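
The same model in isolation (a sketch, not the original class): rss_base is the negated free-space path loss in dB, since 147.55 ~ 20*log10(c / (4*pi)) with c in m/s, and the Rice draw adds a random fading loss on top.

import numpy as np
from scipy.stats import rice

v, b, loc, scale = 2.4e9, 0.009, -7.001, 12.551
for distance in (10.0, 100.0, 1000.0):
    rss_base = 147.55 - 20.0 * np.log10(v) - 20.0 * np.log10(distance)
    print(distance, rss_base - rice.rvs(b, loc=loc, scale=scale))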
Example #13
File: stack.py Project: gbaier/sarsim
import numpy as np
from scipy.stats import rice


def gen_outliers(amp):
    """An infinite generator of outliers.

    Outliers have Ricean-distributed amplitude and uniformly distributed phase
    between -pi and pi.

    :param amp: float
        amplitude of the Rice line-of-sight component

    :returns: the outliers

    """

    return iter(
        lambda: rice.rvs(amp) * np.exp(1j * np.random.uniform(-np.pi, np.pi)),
        1)
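
A minimal usage sketch: iter(callable, sentinel) keeps calling until the sentinel (1) is returned, which a complex draw essentially never is, so slice a finite batch off the infinite stream.

from itertools import islice

outliers = list(islice(gen_outliers(amp=4.0), 10))
print(np.abs(outliers))  # Ricean amplitudes clustered around the LOS component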
Example #14
	def resample(self):
		# The function that resamples the projected b/a and lgSMA values of the observed galaxies.
		# The probability distribution assumed for lgSMA and b/a are Gaussian and Rice, respectively,
		# with the central values being the ones in the catalog, and the dispersion being the 1-sigma
		# error given by the catalog.
		# ***Note*** that self.sma/ba will get overwritten by resampled values. But the real values are
		# stored in self.sma/ba_real in ReadData().
		try:
			sma_new = self.sma_real[:]
			ba_new = self.ba_real[:]
			for i in range(len(sma_new)):
				sma_new[i] = norm.rvs(loc=sma_new[i], scale=self.dsma[i])
				ba_new[i] = 1 - rice.rvs((1 - ba_new[i]) / self.dba[i]) * self.dba[i]
			self.sma = sma_new
			self.ba = ba_new
			self.bin_obs = np.histogram2d(self.ba, self.sma, bins=[int(1 / self.ba_step), round(2 / self.lgSMA_step)], range=[[0,1],[-1, 1]])[0].flatten()
		except AttributeError:
			print('no uncertainty data, unable to resample!')
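
The Rice-based resampling step in isolation (a sketch with made-up catalog values): because b/a cannot exceed 1, the error distribution is asymmetric near 1, and 1 - rice.rvs((1 - ba) / dba) * dba respects that bound while reproducing roughly the catalog dispersion.

import numpy as np
from scipy.stats import rice

ba, dba = 0.8, 0.04  # hypothetical catalog axis ratio and its 1-sigma error
draws = 1 - rice.rvs((1 - ba) / dba, size=10000) * dba
print(draws.mean(), draws.std())  # mean near 0.8, spread near 0.04, never above 1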
Example #16
	def GenerateModelHist(self, view_num=100000, save_name='./ETa_lgSMA.mat', oblate_only=False):
		# the function to generate the model ***projected*** b/a-lgSMA histograms
		# on a series of (E, T, a) grid points. The method is straightforward,
		# i.e. view the ellipsoid with (E, T, a) in a bunch of random directions
		# and bin the yielded b/a, and correct the intrinsic distribution to
		# the observed one, taking the asymmetric error into account. For 
		# detailed discussions on this asymmetric error, see Chang et al. (2013),
		# doi:10.1088/0004-637X/773/2/149.
		# 
		# parameters:
		# 	view_num: int, optional
		# 		the number of random directions used for each combination of (E, T, a). 
		# 		default: 100000
		# 	save_name: str, optional
		# 		the directory to save the generated histograms.
		# 		default: './ETa_lgSMA.mat'
		# oblate_only: Boolean, optional
		# 		Whether we only use the oblate (i.e. disky) galaxies in the modeling of
		# 		the observed ***projected*** b/a-lgSMA distributions.
		E_grid = np.linspace(self.E_range[0], self.E_range[-1] - self.E_step, \
					int((self.E_range[1] - self.E_range[0]) / self.E_step))
		# print E_grid.shape
		T_grid = np.linspace(self.T_range[0], self.T_range[-1] - self.T_step, 
					int((self.T_range[1] - self.T_range[0]) / self.T_step))
		# print T_grid.shape
		a_grid = np.linspace(np.log10(self.a_step), np.log10(self.a_range[-1]), 
					int((self.a_range[1] - self.a_range[0]) / self.a_step))

		self.E_grid = E_grid
		self.T_grid = T_grid
		self.a_grid = a_grid

		# Serialize the 3D grid in (E, T, a) param space.
		self.ETa_grid = np.array([cc for cc in product(E_grid, T_grid, a_grid)])

		# If the model ***projected*** b/a-lgSMA distributions are already calculated and
		# saved, just read it.
		if os.path.exists(save_name):
			try:
				hist = sio.loadmat(save_name)
				self.ba_lgSMA_bins = hist['ba_lgSMA_bins']
				self.ETa_grid = hist['grid_pts']
				self.grid_pts = hist['grid_pts']
			# if the file is too large for sio to read, use h5py instead.
			except NotImplementedError:
				hist = h5py.File(save_name, 'r')
				self.ba_lgSMA_bins = hist['ba_lgSMA_bins'][()].T  # h5py 3.x removed .value
				# renormalize the model ***projected*** b/a-lgSMA distributions again to ensure correct
				# answers. The index 12345 is arbitrarily picked, since all b/a-lgSMA distributions should
				# have the same normalization (no matter what that value is).
				self.ba_lgSMA_bins /= np.sum(self.ba_lgSMA_bins[12345])
				# Note the needed transposition.
				self.ETa_grid = hist['grid_pts'][()].T
			
			# Calculate the ***intrinsic*** c/a and b/a of galaxy shape with parameters (E, T, a),
			# according to their definitions.
			self.ca_set = 1 - self.ETa_grid[:,0]
			self.ba_set = ((1 - self.ETa_grid[:,1]) * 1 + self.ETa_grid[:,1] * self.ca_set**2)**0.5
			self.ind_type = {}
			# Pick out different shapes according to the definition used by van der Wel et al. (2014) and
			# Zhang et al. (2019).
			self.ind_type['prolate'] = np.where(((1 - self.ba_set)**2 + self.ca_set**2 > 0.16) & (self.ba_set  < 1 - self.ca_set))
			self.ind_type['oblate'] = np.where(((1 - self.ba_set)**2 + self.ca_set**2 <= 0.16))
			self.ind_type['spheroidal'] = np.where(((1 - self.ba_set)**2 + self.ca_set**2 > 0.16) & (self.ba_set  >= 1 - self.ca_set))
			if oblate_only:
				# If we only use the oblate galaxies, keep them and discard the prolate and spheroidal ones.
				oblate_inds = np.where((1 - self.ba_set)**2 + self.ca_set**2 <= 0.16)
				self.ba_lgSMA_bins = self.ba_lgSMA_bins[oblate_inds]
				self.ETa_grid = self.ETa_grid[oblate_inds]
				self.grid_pts = self.grid_pts[oblate_inds]
				# print 'the number of oblate elements: ', len(oblate_inds[0])
			# The calculation of dust extinction for galaxies is deprecated in the current version,
			# so please just ignore this.
			try:
				self.Av_bins = hist['AV_bins']
				if oblate_only:
					self.Av_bins = self.Av_bins[oblate_inds]
			except KeyError:
				print('no dust maps in precalculated data.')
				pass
			return
		
		# If, instead, there's no pre-calculated ***projected*** b/a-lgSMA distributions for different
		# intrinsic shapes of galaxies, we have to calculate this using the following codes.
		grid_pts = [] # The list that is to contain all the (E, T, a) grid points whose corresponding ***projected*** b/a-lgSMA distributions are calculated.
		ba_lgSMA_bins = [] # The list that is to contain all the ***projected*** b/a-lgSMA distributions.
		
		# initialize the MPI communicator, in order to scatter the calculation to different sub-processes,
		# which enhances the speed.
		comm = MPI.COMM_WORLD
		comm_rank = comm.Get_rank()
		comm_size = comm.Get_size()

		# naive application of MPI: only scatter different E values to sub-processes.
		sendbuf = None
		if not comm_rank:
			sendbuf = E_grid
		local_E_grid = np.empty(len(E_grid) // comm_size, dtype=np.float64)
		comm.Scatter(sendbuf, local_E_grid, root=0)
		# print 'local_E_grid: ', local_E_grid
		

		local_ba_lgSMA_bins = [] # The list that is to contain all the ***projected*** b/a-lgSMA distributions calculated in the sub-process.
		local_grid_pts = [] # The list that is to contain all the (E, T, a) calculated in the sub-process.
		for i in range(len(local_E_grid)):
			# if not comm_rank:
			# print 'start the %d-th outer loop.' %i
			E = local_E_grid[i]
			for j in range(len(T_grid)):
				if not comm_rank:
					print(j)
				T = T_grid[j]
				for k in range(len(a_grid)):
					a = 10**a_grid[k]
					c = a * (1 - E)
					b = ((1 - T) * a**2 + T * c**2)**0.5

					# Note that to generate random viewing angles uniformly in 4pi solid space, 
					# we need to generate cos(theta) that is uniformly distributed in [-1, 1].
					coss = np.random.uniform(-1, 1, size=view_num)
					theta = np.arccos(coss)
					phi = np.random.uniform(0, 2 * np.pi, size=view_num)

					# Calculate the ***projected*** semi-major axis and b/a axis ratio based on the
					# intrinsic main axis and viewing angles.
					SMA, ba = axis_ratio_2d([a, b, c], theta, phi)


					if b/a >= c/b:
						sma_face = a
					else:
						sma_face = b
					ba_face = np.max([b/a, c/b])

					lgSMA_obs = []
					ba_obs = []
					# randomly sampling, taking observation errors into account
					for l in range(view_num):
						# The distribution used here is called Rice distribution, which is implemented in SciPy.
						# For more discussions on this issue, see Chang et al. (2013), doi:10.1088/0004-637X/773/2/149.
						# Here we choose 0.04 as a typical uncertainty for projected b/a.
						ba_tmp = 1 - rice.rvs((1 - ba[l]) / (0.04 * ba[l])) * 0.04 * ba[l]
						# We assume that the ***projected*** semi-minor axis of the image will not be 
						# affected by the error in b/a, which can be wrong. Based on this assumption, 
						# the error in b/a would affect the value of semi-major axis in the following way:
						lgsma_tmp = np.log10(SMA[l] * ba[l] / ba_tmp)

						# # The correction of systematic trend, deprecated
						# lgsma_tmp = lgsma_tmp - 0.1237 * (ba_tmp - ba_face) + 0.008408
						# sigma_sma = 0.07 / np.log(10)
						# lgsma_tmp = np.random.normal(lgsma_tmp, sigma_sma)
						# lgsma_tmp = np.random.normal(np.log10(SMA[l]), sigma_sma)
						
						# Append the randomized b/a and lgSMA to the list for binning.
						ba_obs.append(ba_tmp)
						lgSMA_obs.append(lgsma_tmp)

					# Bin the data.
					ba_lgSMA_bin = np.histogram2d(ba_obs, lgSMA_obs, range=[[0,1],[-1, 1]], bins=[int(1 / self.ba_step), round(2 / self.lgSMA_step)], density=True)[0]
					# ba_lgSMA_bin = np.histogram2d(ba, np.log10(SMA), range=[[0,1],[-1, 1]], bins=[int(1 / self.ba_step), round(2 / self.lgSMA_step)], normed=True)[0]
					
					# Normalization. A special case is that the galaxy is observed in none of the bins.
					if np.sum(ba_lgSMA_bin) == 0:
						# ba_lgSMA_bin[:,:] = 0
						pass
					else:
						ba_lgSMA_bin = ba_lgSMA_bin / np.sum(ba_lgSMA_bin)

					# We further linearize the ***projected*** b/a-lgSMA 2D histograms, 
					# for the convenience of manipulations with tensor products.
					local_ba_lgSMA_bins.append(ba_lgSMA_bin.flatten())
					local_grid_pts.append([E, T, np.log10(a)])

					# Bug checking.
					# print ba_lgSMA_bin[0, 0], np.isnan(ba_lgSMA_bin).any()
					if (np.isnan(ba_lgSMA_bin).any() and (not np.isnan(ba_lgSMA_bin).all())):
						print('E, T, a: ', E, T, a)
						print('SMA: ', SMA)
						print('ba: ', ba)
						# plt.imshow(ba_lgSMA_bin)
						# plt.scatter(np.arange(len(ba_lgSMA_bin.flatten())), ba_lgSMA_bin)
						# plt.show()
						# plt.close()

		# Gather the calculated data back to the root process and write to the file.
		local_ba_lgSMA_bins = np.array(local_ba_lgSMA_bins)
		local_grid_pts = np.array(local_grid_pts)
		ba_lgSMA_bins = None
		grid_pts = None			
		if not comm_rank:
			grid_pts = np.empty([len(E_grid) * len(T_grid) * len(a_grid), 3], dtype=np.float64)
			ba_lgSMA_bins = np.empty((len(E_grid) * len(T_grid) * len(a_grid), int(round(1 / self.ba_step * 2 / self.lgSMA_step))), dtype=np.float64)
		comm.Gather(local_ba_lgSMA_bins, ba_lgSMA_bins, root=0)
		comm.Gather(local_grid_pts, grid_pts, root=0)
		print('finished gathering.')
		if not comm_rank:
			save_dict = {'grid_pts': grid_pts, 'ba_lgSMA_bins': ba_lgSMA_bins}
			sio.savemat(save_name, save_dict)
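
The uniform-direction trick used above, as a standalone sketch: drawing cos(theta) uniformly in [-1, 1] (rather than theta itself) yields viewing directions uniform over the sphere.

import numpy as np

n = 100000
theta = np.arccos(np.random.uniform(-1, 1, size=n))
phi = np.random.uniform(0, 2 * np.pi, size=n)  # azimuth is uniform as usual
z = np.cos(theta)
print(z.mean(), z.std())  # mean ~0, std ~1/sqrt(3) ~ 0.577 for uniform directions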
Example #17
  # Seed the random number generator for reproducibility
  np.random.seed(12345)

  input_image = nibabel.load(args.input)
  data = input_image.get_fdata()  # get_data() was removed from nibabel; get_fdata() returns floats
  
  maxval = 0
  if args.normalize:
    maxval = np.max(data)
    data /= maxval

  if not args.rician:
    data += np.random.normal(args.mean, args.std, data.shape)
  else:
    data += rice.rvs(args.b, loc=args.mean, scale=args.std, size=data.shape)

  if args.normalize:
    data *= maxval

  nibabel.save(nibabel.Nifti1Image(data, input_image.affine), args.output)

  """A Rice continuous random variable. (from scipy code)
    Notes
    -----
    The probability density function for `rice` is::
        rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
    for ``x > 0``, ``b > 0``.
  """
Example #18
                                                 ret, self.n_step)
        GLOBAL_LOGGER.get_tb_logger().add_scalar('N_CH_TX_OK_' + str(self.id),
                                                 n_successful_tx, self.n_step)
        self.change_position()

        return float(n_successful_tx)


if __name__ == '__main__':
    # err = tx_error_rate_for_n_bytes(32, 6, 1.0221785259170593, 0.000125, 180000.0)
    # print(err)

    for x in range(50):
        err = tx_error_rate_for_n_bytes(50., x + 1, db_to_dec(0), 1e-4, 180e3)
        print(err, x)

    from scipy.stats import expon
    import matplotlib.pyplot as plt

    scale = 0.559
    shape = 0.612 / scale
    print(rice.rvs(shape, scale=scale))
    fig, ax = plt.subplots(1, 1)
    x = np.linspace(rice.ppf(0.0001, shape, scale=scale),
                    rice.ppf(0.9999, shape, scale=scale), 10000)
    ax.plot(x, rice.pdf(x, shape, scale=scale), 'r-', label='rice pdf')
    x = np.linspace(expon.ppf(0.01), expon.ppf(0.99), 100)
    ax.plot(x, expon.pdf(x), 'r-', lw=5, alpha=0.6, label='expon pdf')
    print(rice.rvs(shape, scale=scale))
    plt.show()
Example #20
    def validate(self, val_loader, with_noise=False):
        self.logger.info('Validating...')

        val_losses = utils.RunningAverage()
        val_accuracy = utils.RunningAverage()

        try:
            with torch.no_grad():
                start_time = time.time()

                if self.dataset == 'malc':
                    dice_coeffs = torch.zeros((15, 28)).to(self.device)
                    count_b = np.zeros(15)

                for i, t in enumerate(val_loader):
                    if len(t) == 3:
                        input, target, b = t
                        input = input.unsqueeze(1).float()
                        input, target = input.to(self.device), target.to(self.device)
                        weight = None
                    else:
                        input, target, b, weight = t
                        input, target, weight = input.to(self.device), target.to(self.device), weight.to(self.device)

                    if hasattr(self.loss_criterion, 'ignore_index') and self.loss_criterion.ignore_index is not None:
                        unique_labels = torch.unique(target)
                        if len(unique_labels) == 1 and unique_labels.item() == self.loss_criterion.ignore_index:
                            self.logger.info(f'Skipping validation batch {i} (contains only ignore_index)...')
                            continue

                    target = target.squeeze(1)

                    if with_noise:
                        input = input + torch.from_numpy(
                            0.05 * rice.rvs(0.775, size=tuple(input.shape))
                        ).float().to(self.device)

                    output, loss, accuracy = self._forward_pass(input, target, weight,
                                                                is_training=(False if self.dataset == 'malc' else True))

                    val_losses.update(loss.item(), input.size(0))
                    if self.dataset == 'hippo':
                        val_accuracy.update(accuracy.item(), input.size(0))
                    else:
                        dice_coeffs[b.long()] += accuracy
                        # print(accuracy)
                        count_b[b.long().detach().cpu().numpy()] += 1

                    if self.validate_iters is not None and self.validate_iters <= i:
                        # stop validation
                        break
                    if i % self.log_after_iters == 0:
                        self.logger.info(f'Validation iteration {i}')
                        # self.logger.info(f'GPU Memory usage: {torch.cuda.memory_allocated()}')

                if self.dataset == 'malc':
                    for j in range(dice_coeffs.shape[0]):
                        dice_coeffs[j] /= count_b[j]
                        val_accuracy.update(np.mean(dice_coeffs[j].detach().cpu().numpy()), j)

                end_time = time.time()
                self._log_stats('val', val_losses.avg, val_accuracy.avg, end_time-start_time)
                self.logger.info(f'Validation finished. Loss: {val_losses.avg}. Accuracy: {val_accuracy.avg}')
                self.logger.info(f'Time elapsed for this validation run: {end_time - start_time} s')
                return val_accuracy.avg, end_time-start_time
        finally:
            self.model.train()
Example #21
def simulate(family,params,paramsList,bins,\
             seed=None,N=None,noise=None,output=None,\
             dump=None,version=2,verbose=False,area=None,\
             skadsf=None,pole_posns=None,simarrayf=None,\
             simdocatnoise=True):
    """
    Based on lumfunc.simtable()
    Specify family + parameters
    Specify number of sources
    Build CDF (or set up function)
    Draw deviates
    Sample CDF given deviates
    Add noise (None or some value)
    Bin
    Write out
    Return

    Look at simulate.ipynb for an example run

    Need to add normalization capability
    
    Families:
    ========

    skads:
    -----

    r=countUtils.simulate('skads',[0.01,85.0],['S0','S1'],numpy.linspace(-60.0,100.0,26),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)
    
    ppl:
    ---

    r=countUtils.simulate('ppl',[1000.0,5.0,75.0,-1.6],['C','S0','S1','a0'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    r=countUtils.simulate('ppl',[1000.0,5.0,25.0,75.0,-1.6,-2.5],['C','S0','S1','S2','a0','a1'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    r=countUtils.simulate('ppl',[1000.0,5.0,25.0,40.0,75.0,-1.6,-2.5,-1.0],['C','S0','S1','S2','S3','a0','a1','a2'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    r=countUtils.simulate('ppl',[1000.0,5.0,25.0,40.0,75.0,90.0,-1.6,-2.5,-1.0,2.0],['C','S0','S1','S2','S3','S4','a0','a1','a2','a3'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    poly:
    ----

    r=countUtils.simulate('poly',[5.0,75.0,1.0],['S0','S1','p0'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)

    r=countUtils.simulate('poly',[5.0,75.0,1.0,-1.0],['S0','S1','p0','p1'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)
    
    r=countUtils.simulate('poly',[5.0,75.0,1.0,-1.0,5.0],['S0','S1','p0','p1','p2'],numpy.linspace(-20.0,100.0,22),seed=1234,N=40000,noise=17.0,dump='R.txt',output='dummy.txt',verbose=True)
    
    bins:
    ----

    

    test:
    ----

    array:
    -----


    """

    # Initialize seed for variates AND any noise
    if seed is not None:
        numpy.random.seed(seed=SEED_SIM)

    if family == 'ppl':
        C = alpha = Smin = Smax = beta = S0 = gamma = S1 = delta = S2 = -99.0
        nlaws = int(0.5 * len(paramsList) - 1)
        C = params[paramsList.index('C')]
        Smin = params[paramsList.index('S0')]
        alpha = params[paramsList.index('a0')]
        if nlaws > 1:
            beta = params[paramsList.index('a1')]
            S0 = params[paramsList.index('S1')]
        if nlaws > 2:
            gamma = params[paramsList.index('a2')]
            S1 = params[paramsList.index('S2')]
        if nlaws > 3:
            delta = params[paramsList.index('a3')]
            S2 = params[paramsList.index('S3')]
        iSmax = int([i for i in paramsList if i.startswith('S')][-1][-1])
        Smax = params[paramsList.index('S%i' % iSmax)]

        function = lambda S:powerLawFuncWrap(nlaws,S,C,alpha,-99.0,beta,\
                                      Smin/1e6,Smax/1e6,S0/1e6,gamma,S1/1e6,delta,S2/1e6,1.0)

    elif family == 'test':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        function = lambda S: S**2

    elif family == 'poly':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        coeffs = [
            params[paramsList.index(p)] for p in paramsList
            if p.startswith('p')
        ]
        S_1 = 1.0
        function = lambda S: polyFunc(S, S_1, Smin, Smax, coeffs)

    elif family == 'bins':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        coeffs = [
            params[paramsList.index(p)] for p in paramsList
            if p.startswith('b')
        ]
        if pole_posns is None:
            pole_posns = numpy.logspace(numpy.log10(Smin), numpy.log10(Smax),
                                        len(coeffs) + 1)
        assert (len(coeffs) == len(pole_posns) -
                1), '***Mismatch in number of poles!!'
        Smin = pole_posns[0]
        Smax = pole_posns[-1]
        function = lambda S: polesFunc(S, pole_posns, Smin, Smax, coeffs)

    elif family == 'array':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        assert (simarrayf
                is not None), '***Need to specify an input simulation!'
        print('Reading %s...' % simarrayf)
        dataMatrix = numpy.genfromtxt(simarrayf)
        dndsInArr = dataMatrix[:, 4]
        binsDogleg = numpy.concatenate((dataMatrix[:, 0], [dataMatrix[-1, 1]]))
        binsMedian = dataMatrix[:, 2]
        assert ((
            medianArray(binsDogleg) == binsMedian).all()), '***bin mismatch!'
        Smin = binsDogleg[0]
        Smax = binsDogleg[-1]
        if not simdocatnoise:
            Smin = -5.01  #-2.01 # binsMedian[0]
        print(dndsInArr)
        function = lambda S: arrayFunc(S, binsMedian, dndsInArr, Smin, Smax)

        #function2=lambda S:arrayFunc(S,binsMedian,dndsInArr,Smin,Smax)
        #for x in numpy.linspace(-10.0,100.0,500):
        #    print x,function(x),function2(x)
        #sys.exit(0)

    elif family == 'skads':
        Smin = params[paramsList.index('S0')]
        Smax = params[paramsList.index('S1')]
        function = None
        assert (skadsf is not None), '***Need to specify input SKADS file!'
        print('Reading %s...' % skadsf)
        R = Jy2muJy * 10**numpy.genfromtxt(skadsf)
        numpy.ndarray.sort(R)
        iRmin, Rmin = find_nearest(R, Smin)
        iRmax, Rmax = find_nearest(R, Smax)
        F = R[iRmin:iRmax]
        print('%i/%i sources ingested after Smin/Smax cuts' % (len(F), len(R)))
        if N is not None:
            F = numpy.random.choice(F, size=N, replace=False)
        N = len(F)
        print('NSKADS = %i' % N)

    elif family == 'Lrad':
        Smin = params[paramsList.index('LoptMIN')]
        Smax = params[paramsList.index('LoptMAX')]
        A = params[paramsList.index('A')]
        B = params[paramsList.index('B')]
        sigma_Lrad = params[paramsList.index('sigma_Lrad')]
        #print Loptmin,Loptmax
        print('Doing LF simulation')
        inta = None
        #intg = integrate.quad(lambda Lopt:Lopt2Lrad(Lopt,A=A,B=B,flux=False),Loptmin,Loptmax,epsabs=0.)[0]

        function = lambda Lopt: Lopt2Lrad(Lopt, A=A, B=B, flux=False)
    elif family in ['LFsch', 'LFdpl']:
        redshift = 0.325
        z_min = 0.2
        z_max = 0.45
        Lmin = params[paramsList.index('LMIN')]
        Lmax = params[paramsList.index('LMAX')]
        [Smin, Smax] = SMIN_SIM, SMAX_SIM
        print(Smin, Smax)
        [Smin, Smax] = get_sbins([10**Lmin, 10**Lmax], redshift, dl) * 1e6
        print(Smin, Smax, Lmin, Lmax)
        print('Doing LF simulation')
        Vmax = get_Vmax(z_min, z_max)
        dsdl = get_dsdl(redshift, dl)
        inta = None
        intg = integrate.quad(lambda S:LF(S,redshift,dsdl,Vmax,dl,params=params,paramsList=paramsList,\
                inta=inta,area=area,family=family),Smin*1e-6,Smax*1e-6,epsabs=0.)[0]
        print(intg * Vmax)
        print(Vmax)
        area = N / (Vmax * intg)
        area1 = area
        print(N, area)

        function = lambda S:dNdS_LF(S,z_min,redshift,z_max,dl,params=params,paramsList=paramsList,\
                area=area,family=family)

    if family != 'skads':
        # Set up the 'rough' array
        gridlength = 10000  # Good enough to prevent bleeding at the edges
        Ss = numpy.linspace(Smin, Smax, gridlength)
        print(Smin, Smax)
        print('checking for one sample')
        kl = function(20 / 1e6)
        print(kl)
        #sys.exit()
        values = numpy.array([function(ix / 1e6) for ix in Ss])
        print(values[:10])
        # Build the CDF
        CDF = buildCDF(values)
        plt.plot(CDF, Ss)
        #plt.xscale('log')
        #plt.yscale('log')
        plt.ylabel('Flux')
        plt.xlabel('CDF')
        plt.show()
        print(CDF.max())
        # Create the interpolant object
        sampler = interp1d(CDF, Ss)

        plt.plot(Ss, values, '.')
        plt.xscale('log')
        plt.yscale('log')
        plt.xlabel('Flux')
        plt.ylabel('LF')
        plt.show()

        x = numpy.linspace(0., 1., 10000)
        z = numpy.logspace(0, 1, 1000) / 10.
        f = sampler(z)
        y = sampler(x)
        plt.yscale('log')
        #plt.xscale('log')
        plt.axhline(Smin)
        plt.axhline(Smax)
        plt.xlabel('R')
        plt.ylabel('Sampler(R) [flux]')
        #plt.plot(x,y)
        plt.plot(z, f)
        plt.show()
        #sys.exit()

        # Test that the sampler extrema match
        print(Smin, sampler(0.0))
        print(Smax, sampler(0.99999))
        #        assert(numpy.isclose(sampler(0.0),Smin)[0])
        #        assert(numpy.isclose(sampler(0.99999),Smax,atol=1.0e-3)[0])

        # Draw the random deviates
        R = numpy.random.rand(N)
        print(len(R))
        F = sampler(R)
        Nt = 0.
        for f in F:
            if f < 1.:
                Nt += 1.
        F = F[F > 1.]
        print(N, Nt)
        print(len(F))
        Nt = len(F)
        #sys.exit()

        # Normalize here - this is N2C
        # EITHER N is specified explicitly
        # BOTH N2C and C2N are useful
        # Integrate the original function
        #intg = integrate.quad(lambda S:LF(S,redshift,dsdl,Vmax,dl,params=params,paramsList=paramsList,\
        #    inta=inta,area=area,family=family),Smin*1e-6,Smax*1e-6,epsabs=0.)[0]
        #print intg*Vmax
        #print Vmax
        #area = Nt/(Vmax*intg)
        #print N,area,area1
        #plt.show()
        #sys.exit()
        A = integrate.quad(function, Smin, Smax)[0]
        #        print A,N
        # Bin the random samples
        bbins = numpy.linspace(Smin, Smax, 100)
        E = numpy.histogram(F, bins=bbins)[0]
        # And calculate their area
        G = integrate.trapz(E, x=medianArray(bbins))
        #        print G
        #        print G/A
        # Gunpowder, treason and....
        if False:
            plt.xlim(0.0, 100.0)
            plt.xlabel(r'S / $\mu$Jy')
            plt.hist(F, bins=bbins)
            plt.plot(Ss, values * G / A, 'r')
            plt.savefig('N2C.pdf')
            plt.close()

    # Want: C given N, to compare to original C
    numbins = 1000
    if family == 'ppl':
        C_calc = N / N2C(function, F, Smin, Smax, numbins)
        #print N2C(function,F,Smin,Smax,numbins),C
        print('For %i sources, C is %e (should be %e)' % (N, C_calc, C))
    elif family == 'poly':
        C_calc = log10(N / N2C(function, F, Smin, Smax, numbins))
        print('For %i sources, C is %e (should be %e)' % (N, C_calc, coeffs[0]))

    # Dump noiseless fluxes to file
    puredumpf = dump
    idl_style = False
    numpy.savetxt(puredumpf, F)
    print('Draws (noiseless) are in %s' % puredumpf)
    writeCountsFile(output[1],
                    bins,
                    F,
                    area,
                    idl_style=idl_style,
                    verbose=verbose)
    print(output[1])
    # Now add noise if requested
    if simdocatnoise:
        numpy.random.seed(seed=SEED_SIM)
        poln = False
        if poln:
            F += rice.rvs(F / noise, size=len(F))
        else:
            F += numpy.random.normal(0.0, noise, len(F))

    # Dump noisy fluxes to file
    if dump is not None:
        noisydumpf = '%s_noisy.txt' % puredumpf.split('.')[0]
        numpy.savetxt(noisydumpf, F)
        print('Draws (noisy) are in %s' % noisydumpf)
        print('Minimum flux in catalogue = %f' % F.min())
        print('Maximum flux in catalogue = %f' % F.max())

    # Write counts file
    print(output[0])
    writeCountsFile(output[0],
                    bins,
                    F,
                    area,
                    idl_style=idl_style,
                    verbose=verbose)
    print(N, area)  #,area1

    return F
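
The core of the sampling step above, as a self-contained sketch (buildCDF and the other project helpers are not reproduced): tabulate the target density on a grid, build its normalized CDF, and push uniform deviates through the interpolated inverse.

import numpy as np
from scipy.interpolate import interp1d

def sample_from_density(density, smin, smax, n, gridlength=10000):
    ss = np.linspace(smin, smax, gridlength)
    pdf = np.array([density(s) for s in ss])
    cdf = np.cumsum(pdf)
    cdf = (cdf - cdf[0]) / (cdf[-1] - cdf[0])  # normalize the CDF to [0, 1]
    sampler = interp1d(cdf, ss)                # inverse CDF by interpolation
    return sampler(np.random.rand(n))

fluxes = sample_from_density(lambda s: s ** -1.6, 5.0, 75.0, 40000)
print(fluxes.min(), fluxes.max())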
Example #22
 def gen_sample(self, n):
     return rice.rvs(self.b, loc=self.mu, scale=self.sigma, size=n)
Example #23
from scipy.stats import rice


def ricianNoise(data, b, scale):
    data += scale * rice.rvs(b, size=data.shape)
    return data
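
A minimal usage sketch; note that the in-place += mutates the caller's array, so pass a copy if the original is still needed.

import numpy as np

img = np.zeros((16, 16))
noisy = ricianNoise(img.copy(), b=0.775, scale=10.0)
print(noisy.mean())  # ~ scale * rice.mean(0.775) for an all-zero image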
Example #24

import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import rice

b = 0.775
fig, ax = plt.subplots(1, 1)

# Display the probability density function (``pdf``):

x = np.linspace(rice.ppf(0.01, b), rice.ppf(0.99, b), 100)
ax.plot(x, rice.pdf(x, b), 'r-', lw=5, alpha=0.6, label='rice pdf')

# Alternatively, the distribution object can be called (as a function)
# to fix the shape, location and scale parameters. This returns a "frozen"
# RV object holding the given parameters fixed.

# Freeze the distribution and display the frozen ``pdf``:

rv = rice(b)
ax.plot(x, rv.pdf(x), 'k-', lw=2, label='frozen pdf')

# Check accuracy of ``cdf`` and ``ppf``:

vals = rice.ppf([0.001, 0.5, 0.999], b)
np.allclose([0.001, 0.5, 0.999], rice.cdf(vals, b))
# True

# Generate random numbers:

r = rice.rvs(b, size=1000)

# And compare the histogram:

ax.hist(r, density=True, histtype='stepfilled', alpha=0.2)
ax.legend(loc='best', frameon=False)
plt.show()
Example #25

 def create_Rice_channel(self, size_x, size_y, b=2):
     h = rice.rvs(b, size=(size_x, size_y))
     return h