def generate_samples(self, eval_points, n, img_shape):
        tiled_points = tf.tile(tf.expand_dims(eval_points, 0), [n, 1, 1, 1, 1])
        noised_eval_im = tf.clip_by_value(
            tiled_points +
            tf.random_poisson(self.lamda, tf.shape(tiled_points)), 0, 1)

        return noised_eval_im
def add_channel_noise(chan, a_sd, b_si, batchsize, im_h, im_w):
    ##
    ## Determine sensor noise at each pixel using the non-clipped Poisson-Gaussian model from Foi et al.
    ##
    if a_sd == 0.0:
        chi = 0
        sigdep = chan
    else:
        chi = 1.0 / a_sd
        rate = tf.maximum(chi * chan, 0)
        sigdep = tf.random_poisson(rate, shape=[]) / chi
        #
    sigindep = tf.sqrt(b_si) * tf.random_normal(
        shape=(batchsize, im_h, im_w, 1), mean=0.0, stddev=1.0)
    # sum the two noise sources
    chan_noise = sigdep + sigindep
    #
    #sigdep = tf.sqrt(a_sd*chan)*tf.random_normal(shape=(batchsize,im_h, im_w, 1), mean=0.0, stddev=1.0)
    #sigindep = tf.sqrt(b_si)*tf.random_normal(shape=(batchsize,im_h, im_w, 1), mean=0.0, stddev=1.0)
    #chan_noise = chan + sigdep + sigindep
    #
    #chan_noise = chan + tf.sqrt(a_sd*chan + b_si)*tf.random_normal(shape=(batchsize,im_h, im_w, 1), mean=0.0, stddev=1.0)
    #
    # clip the noise between 0 and 1 (baking in 0 and 255 limits)
    clip_chan_noise = tf.clip_by_value(chan_noise, 0.0, 1.0)
    #
    return clip_chan_noise
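A small usage sketch for add_channel_noise; the image shape and the a_sd / b_si values below are illustrative assumptions, not taken from the original code:

import tensorflow as tf

batchsize, im_h, im_w = 4, 64, 64
clean_chan = tf.random_uniform([batchsize, im_h, im_w, 1], 0.0, 1.0)
# a_sd controls the signal-dependent (Poisson) component, b_si the
# signal-independent (Gaussian) variance; both values here are made up.
noisy_chan = add_channel_noise(clean_chan, a_sd=0.01, b_si=1e-4,
                               batchsize=batchsize, im_h=im_h, im_w=im_w)
with tf.Session() as sess:
    out = sess.run(noisy_chan)  # shape (4, 64, 64, 1), clipped to [0, 1]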
Example #3
def add_poisson_noise(input_tensor):
    if not isinstance(input_tensor, tf.Tensor):
        raise TypeError("Requre Tensor, got {}.".format(type(input_tensor)))
    with tf.name_scope('add_poisson_noise'):
        if input_tensor.dtype != tf.float32:
            input_tensor = tf.cast(input_tensor, tf.float32)
        return tf.random_poisson(input_tensor, [])
Example #4
    def Predict_image(self,src,noisy=False,max_noise_rms=0.1):
        '''
        Given an input for the source image (and a lens model),
        return an output image corresponding to the lens model raytraced
        image.
        '''
        self.src = src
        xsrc , ysrc = self.raytrace()
        
        xsrc = tf.reshape(xsrc,[-1])
        ysrc = tf.reshape(ysrc,[-1])

        img_pred = self._interpolate(self.src,xsrc,ysrc,[self.numpix_side,self.numpix_side],self.src_res)
        img_pred = tf.reshape(img_pred,[-1,self.numpix_side,self.numpix_side,1])
        
        if self.psf == True:
            #img_pred = batch_conv(img_pred,self.psf_pl)
            img_pred = tf.nn.conv2d(img_pred,self.psf_pl,strides=[1,1,1,1],padding='SAME')
        
        if self.poisson:
            counts = tf.random_uniform([1,1,1,1],minval=100,maxval=1000)
            img_pred = tf.random_poisson(counts*img_pred,[1])[0]/counts

        if noisy == True:
            noise_rms = tf.random_uniform(shape=[1],minval=max_noise_rms/100.,maxval=max_noise_rms)
            noise = tf.random_normal(tf.shape(img_pred),mean=0.0,stddev = noise_rms)
            img_pred = tf.add(img_pred,noise)
            self.noise_rms = noise_rms
        return img_pred
Example #5
    def _get_histogram_var_by_type(self,
                                   histogram_type,
                                   shape,
                                   name=None,
                                   **kwargs):
        with tf.name_scope(name, "get_hist_{}".format(histogram_type)):
            if histogram_type == "normal":
                # Make a normal distribution, with a shifting mean
                mean = tf.Variable(kwargs['mean'])
                stddev = tf.Variable(kwargs['stddev'])
                return tf.random_normal(
                    shape=shape, mean=mean, stddev=stddev), [mean, stddev]
            elif histogram_type == "gamma":
                # Add a gamma distribution
                alpha = tf.Variable(kwargs['alpha'])
                return tf.random_gamma(shape=shape, alpha=alpha), [alpha]
            elif histogram_type == "poisson":
                lam = tf.Variable(kwargs['lam'])
                return tf.random_poisson(shape=shape, lam=lam), [lam]
            elif histogram_type == "uniform":
                # Add a uniform distribution
                maxval = tf.Variable(kwargs['maxval'])
                return tf.random_uniform(shape=shape, maxval=maxval), [maxval]

            raise ValueError('Unknown histogram type %r; supported types: %s' %
                             (histogram_type, self._histogram_distribute_list))
Example #6
 def add_train_noise_tf(self, x: tf.Tensor) -> tf.Tensor:
     """Defines an operation to add poisson noise to a tensor of an image with a random chi-value
     in the range from 0.001 to the specified max value."""
     chi_rng = tf.random_uniform(shape=[1, 1, 1], minval=0.001, maxval=self.lam_max)
     # Add 0.5 to pixel to make it positive as arg for poisson distribution.
     # Subtract 0.5 at the end to fit in range again.
     return tf.random_poisson(chi_rng*(x+0.5), shape=[])/chi_rng - 0.5
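A quick sketch of the same pattern outside the class; the image shape and the 30.0 upper bound stand in for self.lam_max and are assumptions for illustration. With shape=[1, 1, 1] a single chi is shared across height, width and channels of one image:

# x is assumed to be a single HWC image roughly in [-0.5, 0.5].
x = tf.random_uniform([32, 32, 3], minval=-0.5, maxval=0.5)
chi_rng = tf.random_uniform(shape=[1, 1, 1], minval=0.001, maxval=30.0)
# Scale into a positive Poisson rate, sample, then rescale and shift back.
noisy = tf.random_poisson(chi_rng * (x + 0.5), shape=[]) / chi_rng - 0.5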
Example #7
	def log_prob(self, z_L, w_L, batch):
		weight_log_prob = tf.reduce_sum([tf.reduce_sum(gamma_log_prob(self.prior_W_alpha, self.prior_W_beta, w)) for w in w_L])
		z_log_prob = gamma_log_prob(self.prior_Z_alpha, self.prior_Z_beta, z_L[0])

		l_prob = weight_log_prob + tf.reduce_sum(z_log_prob)

		layer_wise = [z_log_prob,]

		z_count = 0
		for (z, w) in zip(z_L[:-1], w_L[:-1]):
			z_sum_w = tf.matmul(z, w)
			g_alpha = self.layer_alpha
			g_beta = g_alpha / z_sum_w
			l_p = tf.reduce_sum(gamma_log_prob(g_alpha, g_beta, z_L[z_count + 1]))
			layer_wise.append(l_p)
			l_prob += l_p
			z_count += 1

		obs_sum_w = tf.matmul(z_L[-1], w_L[-1])
		l_prob += tf.reduce_sum(poisson_log_prob(batch, obs_sum_w))

		for i in range(len(layer_wise)):
			tf.summary.scalar("l_p_%i" %(i), tf.reduce_mean(layer_wise[i]))

		#tf.summary.image("batch", tf.transpose(tf.reshape(batch, [320, 64, 64, 1]), [0, 2, 1, 3]), max_outputs = 4)
		#tf.summary.image("obs_sum_w", tf.transpose(tf.reshape(obs_sum_w, [320, 64, 64, 1]), [0, 2, 1, 3]), max_outputs=4)
		if do_images:
			tf.summary.image("sampled_reconstruction", tf.transpose(tf.reshape(tf.minimum(tf.random_poisson(obs_sum_w, [1]), 255.0), [320, 64, 64, 1]), [0, 2, 1, 3]), max_outputs = 4)

		return tf.reduce_sum(l_prob)
Example #8
 def _add_noise(
         x: typing.Union[np.ndarray, tf.Tensor],
         random_state: typing.Optional[np.random.RandomState] = None
 ) -> typing.Union[np.ndarray, tf.Tensor]:
     if random_state is None:
         return tf.squeeze(tf.random_poisson(x, [1]), axis=0)
     else:
         return random_state.poisson(x)
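A brief usage sketch of the helper above, showing both paths; the array contents are illustrative. The NumPy branch samples immediately and is deterministic given the RandomState, while the TensorFlow branch returns a graph tensor:

import numpy as np
import tensorflow as tf

rates = np.full((2, 3), 4.0)
# NumPy path: sampling happens eagerly via the supplied RandomState.
noisy_np = _add_noise(rates, random_state=np.random.RandomState(0))
# TensorFlow path: tf.random_poisson adds a leading sample dimension of
# size 1, which the helper squeezes away again.
noisy_tf = _add_noise(tf.constant(rates, dtype=tf.float32))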
Example #9
    def _sample_y(self):

        # expand dims to account for time and mc dims when applying mapping
        # now (1 x num_samples x num_time_pts x dim_latent)
        z_samples_ex = tf.expand_dims(self.z_samples_prior, axis=0)

        y_means_ls = []  # contribution from latent space
        y_means_lp = []  # contribution from linear predictors
        y_means = []
        for pop, pop_dim in enumerate(self.dim_obs):
            y_means_ls.append(
                tf.squeeze(self.networks[pop].apply_network(
                    z_samples_ex[:, :, :, self.latent_indxs[pop][0]:self.
                                 latent_indxs[pop][-1]]),
                           axis=0))
            if self.num_clusters is not None:
                F = tf.expand_dims(y_means_ls[-1], axis=2)
                y_means_ls[-1] = tf.squeeze(tf.matmul(F, self.mark_probs))
            if self.dim_predictors is not None:
                # append new list for this population
                y_means_lp.append([])
                for pred, pred_dim in enumerate(self.dim_predictors):
                    if self.predictor_indx[pop][pred] is not None:
                        net_out = self.networks_linear[pop][pred]. \
                            apply_network(self.linear_predictors_phs[pred])
                        y_means_lp[-1].append(net_out)
                    # else:
                    #     self.y_pred_lp[-1].append(0.0)
                y_means.append(tf.add(y_means_ls[-1],
                                      tf.add_n(y_means_lp[-1])))
            else:
                y_means.append(y_means_ls[-1])

        # get random samples from observation space
        if self.noise_dist == 'gaussian':
            obs_rand_samples = []
            for pop, pop_dim in enumerate(self.dim_obs):
                obs_rand_samples.append(
                    tf.random_normal(shape=[
                        self.num_samples_ph, self.num_time_pts, pop_dim
                    ],
                                     mean=0.0,
                                     stddev=1.0,
                                     dtype=self.dtype,
                                     name=str('obs_rand_samples_%02i' % pop)))
                self.y_samples_prior.append(
                    y_means[pop] +
                    tf.multiply(obs_rand_samples[pop], self.R_sqrt[pop]))

        elif self.noise_dist == 'poisson':
            for pop, pop_dim in enumerate(self.dim_obs):
                self.y_samples_prior.append(
                    tf.squeeze(tf.random_poisson(lam=y_means[pop],
                                                 shape=[1],
                                                 dtype=self.dtype),
                               axis=0))
Example #10
def random_poisson(lam: Any,
                   shape: Any,
                   dtype: DType = ztypes.float,
                   seed: Any = None,
                   name: Any = None):
    return tf.random_poisson(lam=lam,
                             shape=shape,
                             dtype=dtype,
                             seed=seed,
                             name=name)
Example #11
def run_all(logdir, verbose=False):
    """Generate a bunch of histogram data, and write it to logdir."""
    k = tf.placeholder(tf.float32)

    # Make a normal distribution, with a shifting mean
    mean_moving_normal = tf.random_normal(shape=[1000], mean=(5 * k), stddev=1)
    # Record that distribution into a histogram summary
    tf.summary.histogram("normal/moving_mean", mean_moving_normal)

    # Make a normal distribution with shrinking variance
    variance_shrinking_normal = tf.random_normal(shape=[1000],
                                                 mean=0,
                                                 stddev=1 - (k))
    # Record that distribution too
    tf.summary.histogram("normal/shrinking_variance",
                         variance_shrinking_normal)

    # Let's combine both of those distributions into one dataset
    normal_combined = tf.concat(
        [mean_moving_normal, variance_shrinking_normal], 0)
    # We add another histogram summary to record the combined distribution
    tf.summary.histogram("normal/bimodal", normal_combined)

    # Add a gamma distribution
    gamma = tf.random_gamma(shape=[1000], alpha=k)
    tf.summary.histogram("gamma", gamma)

    # And a poisson distribution
    poisson = tf.random_poisson(shape=[1000], lam=k)
    tf.summary.histogram("poisson", poisson)

    # And a uniform distribution
    uniform = tf.random_uniform(shape=[1000], maxval=k * 10)
    tf.summary.histogram("uniform", uniform)

    # Finally, combine everything together!
    all_distributions = [
        mean_moving_normal, variance_shrinking_normal, gamma, poisson, uniform
    ]
    all_combined = tf.concat(all_distributions, 0)
    tf.summary.histogram("all_combined", all_combined)

    summaries = tf.summary.merge_all()

    # Setup a session and summary writer
    sess = tf.Session()
    writer = tf.summary.FileWriter(logdir)

    # Setup a loop and write the summaries to disk
    N = 400
    for step in xrange(N):
        k_val = step / float(N)
        summ = sess.run(summaries, feed_dict={k: k_val})
        writer.add_summary(summ, global_step=step)
Example #12
 def _get_sinogram_mice(self):
     from ..raw.mice import Dataset
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     with tf.name_scope('mice_sinogram_dataset'):
         dataset = Dataset(self.name / 'mice_sinogram')
         self.register_node('id', dataset['id'])
         bs = dataset.param('batch_size')
         stat = {'mean': dataset.MEAN, 'std': dataset.STD}
         dataset = dataset['sinogram']
         if self.param('with_poission_noise'):
             with tf.name_scope('add_with_poission_noise'):
                 if self.param('low_dose'):
                     ratio = self.param('low_dose_ratio')
                     ratio_norm = 4e6 * bs / ratio
                     dataset = dataset / tf.reduce_sum(dataset) * ratio_norm
                     stat['mean'] = stat['mean'] / ratio
                     stat['std'] = stat['std'] / ratio
                 else:
                     dataset = dataset / tf.reduce_sum(dataset) * 4e6 * bs
                 noise = tf.random_poisson(dataset, shape=[])
                 dataset = tf.concat([noise, dataset], axis=0)
         if self.param('with_white_normalization'):
             dataset = FixWhite(name=self.name / 'fix_white',
                                inputs=dataset,
                                mean=stat['mean'],
                                std=stat['std']).as_tensor()
         dataset = tf.random_crop(dataset, [shape_as_list(dataset)[0]] +
                                  list(self.param('target_shape')) + [1])
         dataset = SuperResolutionDataset(
             self.name / 'super_resolution',
             lambda: {'image': dataset},
             input_key='image',
             nb_down_sample=self.param('nb_down_sample'))
         keys = [
             'image{}x'.format(2**i)
             for i in range(dataset.param('nb_down_sample') + 1)
         ]
         result = dict()
         if self.param('with_poission_noise'):
             result.update({
                 'noise/' + k:
                 dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
                 for k in keys
             })
             result.update({
                 'clean/' + k:
                 dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
                 for k in keys
             })
         else:
             result.update({'clean/' + k: dataset[k] for k in keys})
             result.update({'noise/' + k: dataset[k] for k in keys})
     return result
Example #13
	def estimate_elbo(self, batch):
		elbo_z_L = [z.elbo_sample() for z in self.latent_layers]
		elbo_w_L = [w.elbo_sample() for w in self.weight_layers]
		the_prob = self.log_prob(elbo_z_L, elbo_w_L, batch)

		new_sample = tf.matmul(elbo_z_L[-1], elbo_w_L[-1])

		if do_images:
			tf.summary.image("unconditional_sample", tf.transpose(tf.reshape(tf.minimum(tf.random_poisson(new_sample, [1]), 255.0), [320, 64, 64, 1]), [0, 2, 1, 3]), max_outputs = 4)

		return the_prob + tf.reduce_sum([z.entropy() for z in self.latent_layers]) + tf.reduce_sum([w.entropy() for w in self.weight_layers])
Example #14
 def _get_sinogram_aps(self):
     from ..raw.analytical_phantom_sinogram import Dataset
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     fields = ['sinogram', 'id', 'phantom']
     with tf.name_scope('aps_sinogram_dataset'):
         dataset = Dataset(self.name / 'analytical_phantom_sinogram',
                           fields=fields)
         self.register_node('id', dataset['id'])
         self.register_node('phantom', dataset['phantom'])
         if self.param('log_scale'):
             stat = dataset.LOG_SINO_STAT
         else:
             stat = dataset.SINO_STAT
         dataset = dataset['sinogram']
         if self.param('with_poission_noise'):
             with tf.name_scope('add_with_poission_noise'):
                 noise = tf.random_poisson(dataset, shape=[])
                 dataset = tf.concat([noise, dataset], axis=0)
             if self.param('log_scale'):
                 dataset = tf.log(dataset + 0.4)
         if self.param('with_white_normalization'):
             dataset = FixWhite(name=self.name / 'fix_white',
                                inputs=dataset,
                                mean=stat['mean'],
                                std=stat['std']).as_tensor()
         dataset = tf.random_crop(dataset, [shape_as_list(dataset)[0]] +
                                  list(self.param('target_shape')) + [1])
         dataset = SuperResolutionDataset(
             self.name / 'super_resolution',
             lambda: {'image': dataset},
             input_key='image',
             nb_down_sample=self.param('nb_down_sample'))
         keys = [
             'image{}x'.format(2**i)
             for i in range(dataset.param('nb_down_sample') + 1)
         ]
         result = dict()
         if self.param('with_poission_noise'):
             result.update({
                 'noise/' + k:
                 dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
                 for k in keys
             })
             result.update({
                 'clean/' + k:
                 dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
                 for k in keys
             })
         else:
             result.update({'clean/' + k: dataset[k] for k in keys})
             result.update({'noise/' + k: dataset[k] for k in keys})
     return result
Example #15
def build_denoising_unet(ggg, rgb, p=0.7, is_realnoisy=False):
    _, h, w, c = np.shape(ggg)
    ggg_tensor = tf.identity(ggg)
    rgb_tensor = tf.identity(rgb)
    # stain_tensor = tf.identity(stain)
    # stain_tensor = tf.transpose(stain_tensor, [0, 3, 1, 2])
    is_flip_lr = tf.placeholder(tf.int16)
    is_flip_ud = tf.placeholder(tf.int16)
    ggg_tensor = data_arg(ggg_tensor, is_flip_lr, is_flip_ud)
    rgb_tensor = data_arg(rgb_tensor, is_flip_lr, is_flip_ud)
    response = tf.transpose(ggg_tensor, [0, 3, 1, 2])
    mask_tensor = tf.ones_like(response)
    mask_tensor = tf.nn.dropout(mask_tensor, p) * p
    # mask_tensor = tf.multiply(mask_tensor,stain_tensor)
    response = tf.multiply(mask_tensor, response)
    slice_avg = tf.get_variable('slice_avg',
                                shape=[_, h, w, c],
                                initializer=tf.initializers.zeros())
    if is_realnoisy:
        response = tf.squeeze(tf.random_poisson(25 * response, [1]) / 25, 0)
    response = autoencoder(response,
                           mask_tensor,
                           channel=c,
                           width=w,
                           height=h,
                           p=p)
    response = tf.transpose(response, [0, 2, 3, 1])
    mask_tensor = tf.transpose(mask_tensor, [0, 2, 3, 1])
    data_loss = mask_loss(response, rgb_tensor, 1. - mask_tensor)
    response = data_arg(response, is_flip_lr, is_flip_ud)
    avg_op = slice_avg.assign(slice_avg * 0.99 + response * 0.01)
    our_image = response

    training_error = data_loss
    tf.summary.scalar('data loss', data_loss)

    merged = tf.summary.merge_all()
    saver = tf.train.Saver(max_to_keep=3)
    model = {
        'training_error': training_error,
        'data_loss': data_loss,
        'saver': saver,
        'summary': merged,
        'our_image': our_image,
        'is_flip_lr': is_flip_lr,
        'is_flip_ud': is_flip_ud,
        'avg_op': avg_op,
        'slice_avg': slice_avg,
    }

    return model
Example #16
 def _sample_n(self, n, seed=None):
     # Here we use the fact that if:
     # lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
     # then X ~ Poisson(lam) is Negative Binomially distributed.
     rate = tf.random_gamma(shape=[n],
                            alpha=self.total_count,
                            beta=tf.exp(-self.logits),
                            dtype=self.dtype,
                            seed=seed)
     return tf.random_poisson(rate,
                              shape=[],
                              dtype=self.dtype,
                              seed=distribution_util.gen_new_seed(
                                  seed, "negative_binom"))
Example #17
 def _sample_n(self, n, seed=None):
     # Here we use the fact that if:
     # lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
     # then X ~ Poisson(lam) is Negative Binomially distributed.
     stream = seed_stream.SeedStream(seed, salt="NegativeBinomial")
     rate = tf.random_gamma(shape=[n],
                            alpha=self.total_count,
                            beta=tf.exp(-self.logits),
                            dtype=self.dtype,
                            seed=stream())
     return tf.random_poisson(rate,
                              shape=[],
                              dtype=self.dtype,
                              seed=stream())
Example #18
 def _sample_n(self, n, seed=None):
   # Here we use the fact that if:
   # lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
   # then X ~ Poisson(lam) is Negative Binomially distributed.
   rate = tf.random_gamma(
       shape=[n],
       alpha=self.total_count,
       beta=tf.exp(-self.logits),
       dtype=self.dtype,
       seed=seed)
   return tf.random_poisson(
       rate,
       shape=[],
       dtype=self.dtype,
       seed=distribution_util.gen_new_seed(seed, "negative_binom"))
Example #19
 def _sample_n(self, n, seed=None):
   # Here we use the fact that if:
   # lam ~ Gamma(concentration=total_count, rate=(1-probs)/probs)
   # then X ~ Poisson(lam) is Negative Binomially distributed.
   stream = seed_stream.SeedStream(seed, salt="NegativeBinomial")
   rate = tf.random_gamma(
       shape=[n],
       alpha=self.total_count,
       beta=tf.exp(-self.logits),
       dtype=self.dtype,
       seed=stream())
   return tf.random_poisson(
       rate,
       shape=[],
       dtype=self.dtype,
       seed=stream())
Example #20
def analytical_super_resolution_dataset(image_type: str,
                                        poission_noise: bool,
                                        batch_size: int,
                                        nb_down_sample: int,
                                        target_shape: typing.List[int],
                                        idxs: typing.List[int],
                                        *,
                                        name: str = 'dataset',
                                        path_dataset: "str|None" = None):
    """
    Args:
        -   image_type: 'sinogram' or 'image'
        -   batch_size
    Returns:
        a `Graph` object, which has several nodes:
    Raises:
    """
    from ...model.normalizer.normalizer import FixWhite, ReduceSum
    from ..super_resolution import SuperResolutionDataset
    from ...model.tensor import ShapeEnsurer
    from ...config import config
    normalizer_configs = {'analytical_phantoms': {'mean': 4.88, 'std': 4.68}}
    config_origin = {}
    config_normalizer = normalizer_configs['analytical_phantoms']  # the only configured dataset
    config['dataset'] = {
        'origin': config_origin,
        'fix_white': config_normalizer
    }
    with tf.name_scope('{img_type}_dataset'.format(img_type=image_type)):
        if image_type == 'sinogram':
            dataset_origin = AnalyticalPhantomSinogramDataset(
                name=name, batch_size=batch_size, fields=['sinogram'], idxs=idxs)
        dataset_summed = ReduceSum('dataset/reduce_sum',
                                   dataset_origin[image_type],
                                   fixed_summation_value=1e6).as_tensor()

        dataset = FixWhite(name='dataset/fix_white', inputs=dataset_summed)()
        if poission_noise:
            with tf.name_scope('add_poission_noise'):
                dataset = tf.random_poisson(dataset, shape=[])
        dataset = tf.random_crop(dataset,
                                 [batch_size] + list(target_shape) + [1])
        dataset = SuperResolutionDataset('dataset/super_resolution',
                                         lambda: {'image': dataset},
                                         input_key='image',
                                         nb_down_sample=3)
    return dataset
Example #21
 def _get_sinogram_external(self):
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     with tf.name_scope('external_dataset'):
         dataset = self._make_input_place_holder()
         self.register_node('external_place_holder', dataset)
         if self.param('with_poission_noise'):
             with tf.name_scope('add_with_poission_noise'):
                 noise = tf.random_poisson(dataset, shape=[])
                 dataset = tf.concat([noise, dataset], axis=0)
         if self.param('with_white_normalization'):
             dataset = FixWhite(name=self.name / 'fix_white',
                                inputs=dataset,
                                mean=self.param('mean'),
                                std=self.param('std')).as_tensor()
         if self.param('with_random_crop'):
             dataset = tf.random_crop(dataset, self._target_shape(dataset))
         dataset = SuperResolutionDataset(
             self.name / 'super_resolution',
             lambda: {'image': dataset},
             input_key='image',
             nb_down_sample=self.param('nb_down_sample'))
         keys = [
             'image{}x'.format(2**i)
             for i in range(dataset.param('nb_down_sample') + 1)
         ]
         result = dict()
         if self.param('with_poission_noise'):
             result.update({
                 'noise/' + k:
                 dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
                 for k in keys
             })
             result.update({
                 'clean/' + k:
                 dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
                 for k in keys
             })
         else:
             result.update({'clean/' + k: dataset[k] for k in keys})
             result.update({'noise/' + k: dataset[k] for k in keys})
     return result
Example #22
  def _sample_n(self, n, seed=None):
    # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
    # ids as a [n]-shaped vector.
    batch_size = self.batch_shape.num_elements()
    if batch_size is None:
      batch_size = tf.reduce_prod(self.batch_shape_tensor())
    # We need to "sample extra" from the mixture distribution if it doesn't
    # already specify a probs vector for each batch coordinate.
    # We only support this kind of reduced broadcasting, i.e., there is exactly
    # one probs vector for all batch dims or one for each.
    stream = seed_stream.SeedStream(
        seed, salt="PoissonLogNormalQuadratureCompound")
    ids = self._mixture_distribution.sample(
        sample_shape=concat_vectors(
            [n],
            distribution_util.pick_vector(
                self.mixture_distribution.is_scalar_batch(),
                [batch_size],
                np.int32([]))),
        seed=stream())
    # We need to flatten batch dims in case mixture_distribution has its own
    # batch dims.
    ids = tf.reshape(
        ids,
        shape=concat_vectors([n],
                             distribution_util.pick_vector(
                                 self.is_scalar_batch(), np.int32([]),
                                 np.int32([-1]))))

    # Stride `quadrature_size` for `batch_size` number of times.
    offset = tf.range(
        start=0,
        limit=batch_size * self._quadrature_size,
        delta=self._quadrature_size,
        dtype=ids.dtype)
    ids += offset
    rate = tf.gather(tf.reshape(self.distribution.rate, shape=[-1]), ids)
    rate = tf.reshape(
        rate, shape=concat_vectors([n], self.batch_shape_tensor()))
    return tf.random_poisson(lam=rate, shape=[], dtype=self.dtype, seed=seed)
Example #23
    def _sample_n(self, n, seed=None):
        # Get ids as a [n, batch_size]-shaped matrix, unless batch_shape=[] then get
        # ids as a [n]-shaped vector.
        batch_size = self.batch_shape.num_elements()
        if batch_size is None:
            batch_size = tf.reduce_prod(self.batch_shape_tensor())
        # We need to "sample extra" from the mixture distribution if it doesn't
        # already specify a probs vector for each batch coordinate.
        # We only support this kind of reduced broadcasting, i.e., there is exactly
        # one probs vector for all batch dims or one for each.
        stream = seed_stream.SeedStream(
            seed, salt="PoissonLogNormalQuadratureCompound")
        ids = self._mixture_distribution.sample(sample_shape=concat_vectors(
            [n],
            distribution_util.pick_vector(
                self.mixture_distribution.is_scalar_batch(), [batch_size],
                np.int32([]))),
                                                seed=stream())
        # We need to flatten batch dims in case mixture_distribution has its own
        # batch dims.
        ids = tf.reshape(ids,
                         shape=concat_vectors([n],
                                              distribution_util.pick_vector(
                                                  self.is_scalar_batch(),
                                                  np.int32([]),
                                                  np.int32([-1]))))

        # Stride `quadrature_size` for `batch_size` number of times.
        offset = tf.range(start=0,
                          limit=batch_size * self._quadrature_size,
                          delta=self._quadrature_size,
                          dtype=ids.dtype)
        ids += offset
        rate = tf.gather(tf.reshape(self.distribution.rate, shape=[-1]), ids)
        rate = tf.reshape(rate,
                          shape=concat_vectors([n], self.batch_shape_tensor()))
        return tf.random_poisson(lam=rate,
                                 shape=[],
                                 dtype=self.dtype,
                                 seed=seed)
Example #24
def fixed_row_histogram():
    k = tf.placeholder(tf.float32)
    shape_s = 20
    t = tf.random_uniform([shape_s, 40], maxval=k * 1)
    # t = tf.Variable([])
    i = 0
    hist = tf.constant(0, shape=[0, shape_s], dtype=tf.int32)
    cond = lambda i, _: i < shape_s

    def loop_body(i, hist):
        h = tf.histogram_fixed_width(t[i, :], [0.0, 10.0], nbins=shape_s)
        return i + 1, tf.concat([hist, tf.expand_dims(h, 0)], axis=0)

    i, hist = tf.while_loop(
        cond,
        loop_body, [i, hist],
        shape_invariants=[tf.TensorShape([]),
                          tf.TensorShape([None, shape_s])])

    tf.summary.histogram("hist", hist)

    poisson = tf.random_poisson(shape=[1000], lam=k)
    tf.summary.histogram("poisson", poisson)

    # sess = tf.InteractiveSession()
    # print(hist.eval())

    summaries = tf.summary.merge_all()

    # Setup a session and summary writer
    sess = tf.Session()
    writer = tf.summary.FileWriter(LOGDIR)

    cols = np.arange(0., 100., 10)
    # Setup a loop and write the summaries to disk
    N = 20
    for step in range(N):
        k_val = step / float(N)
        summ = sess.run(summaries, feed_dict={k: k_val})
        writer.add_summary(summ, global_step=step)
Example #25
 def predict_f(self, mu, sigma, phi_h_decoder, n_sampling):
     f_predicted = 0
     for i in range(n_sampling):
         epsilon_sampling = tf.random_normal(tf.shape(mu), mean=0.0, stddev=1.0, dtype=tf.float32)
         z_sampling = tf.multiply(epsilon_sampling, sigma) + mu
         with tf.variable_scope("Decoder"):
             with tf.variable_scope("Phi_z"):
                 phi_z_decoder_sampling = tf.layers.dense(z_sampling,
                                                          self.n_phi_z_decoder,
                                                          name='layer1',
                                                          activation=tf.nn.relu,
                                                          reuse=True)
             with tf.variable_scope("Lambda_f"):
                 lmbda_sampling = tf.layers.dense(tf.concat([phi_h_decoder, phi_z_decoder_sampling], axis=1),
                                                  1,
                                                  name='layer1',
                                                  activation=tf.nn.softplus,
                                                  reuse=True)
         with tf.variable_scope("F"):
             f_predicted += tf.random_poisson(lmbda_sampling, dtype=tf.float32, shape=[])
     f_predicted = f_predicted / n_sampling
     return f_predicted
Example #26
def poisson_gauss_tf(img_batch, a, gauss_var, clip=(0., 1.)):
    # Apply the Poissonian-Gaussian noise model following A. Foi et al.
    # Foi, A., "Practical denoising of clipped or overexposed noisy images",
    # Proc. 16th European Signal Process. Conf., EUSIPCO 2008, Lausanne, Switzerland, August 2008.
    batch_shape = tf.shape(img_batch)

    a_p = a[:, None, None, None]
    out = tf.random_poisson(
        shape=[], lam=tf.maximum(img_batch / a_p, 0.0), dtype=tf.float32) * a_p
    #out = tf.Print(out, [tf.reduce_max(out), tf.reduce_min(out)])
    gauss_var = tf.maximum(gauss_var, 0.0)

    gauss_noise = tf.sqrt(gauss_var[:, None, None, None]) * tf.random_normal(
        shape=batch_shape, dtype=tf.float32)  #Gaussian component

    out += gauss_noise

    # Clipping
    if clip is not None:
        out = tf.clip_by_value(out, clip[0], clip[1])

    # Return the simulated image
    return out
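A minimal usage sketch for poisson_gauss_tf; the batch shape and the a / gauss_var values are illustrative assumptions. Both are per-image vectors that broadcast over height, width and channels:

batch = tf.random_uniform([8, 64, 64, 1], 0.0, 1.0)           # clean images in [0, 1]
a = tf.fill([8], 0.01)                                         # Poisson gain per image
gauss_var = tf.fill([8], 1e-4)                                 # Gaussian variance per image
noisy = poisson_gauss_tf(batch, a, gauss_var, clip=(0., 1.))   # same shape as batch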
Example #27
def make_batch_hqjitter(patches, burst_length, batch_size, repeats, height, width,
                        to_shift, upscale, jitter, smalljitter):
    # patches is [burst_length, h_up, w_up, 1]
    j_up = jitter * upscale
    h_up = height * upscale  # + 2 * j_up
    w_up = width * upscale  # + 2 * j_up

    bigj_patches = patches
    # print ('bigj_patches: ', bigj_patches.shape)
    delta_up = (jitter - smalljitter) * upscale
    smallj_patches = patches[:, delta_up:-delta_up, delta_up:-delta_up, ...]

    unique = batch_size//repeats
    batch = []
    for i in range(unique):
        for j in range(repeats):
            curr = [patches[i, j_up:-j_up, j_up:-j_up, :]]
            prob = tf.minimum(tf.cast(tf.random_poisson(
                1.5, []), tf.float32)/burst_length, 1.)
            for k in range(burst_length - 1):
                flip = tf.random_uniform([])
                p2use = tf.cond(flip < prob, lambda: bigj_patches,
                                lambda: smallj_patches)
                curr.append(tf.random_crop(p2use[i, ...], [h_up, w_up, 1]))
            # new added see comment below
            curr = [tf.random_crop(item, [height, width, 1]) for item in curr]
            curr = tf.stack(curr, axis=0)
            # original implementation
            # curr = tf.image.resize_images(
            #     curr, [height, width], method=tf.image.ResizeMethod.AREA)
            # use crop inside instead for raw
            curr = tf.transpose(curr, [1, 2, 3, 0])
            batch.append(curr)
    batch = tf.stack(batch, axis=0)
    # print ('batch_inside_make_batch_hqjitter shape: ', batch.shape)
    return batch
Example #28
def run_all(logdir, verbose=False):
  """Generate a bunch of histogram data, and write it to logdir."""
  del verbose

  tf.set_random_seed(0)

  k = tf.placeholder(tf.float32)

  # Make a normal distribution, with a shifting mean
  mean_moving_normal = tf.random_normal(shape=[1000], mean=(5*k), stddev=1)
  # Record that distribution into a histogram summary
  histogram_summary.op("normal/moving_mean",
                       mean_moving_normal,
                       description="A normal distribution whose mean changes "
                                   "over time.")

  # Make a normal distribution with shrinking variance
  shrinking_normal = tf.random_normal(shape=[1000], mean=0, stddev=1-(k))
  # Record that distribution too
  histogram_summary.op("normal/shrinking_variance", shrinking_normal,
                       description="A normal distribution whose variance "
                                   "shrinks over time.")

  # Let's combine both of those distributions into one dataset
  normal_combined = tf.concat([mean_moving_normal, shrinking_normal], 0)
  # We add another histogram summary to record the combined distribution
  histogram_summary.op("normal/bimodal", normal_combined,
                       description="A combination of two normal distributions, "
                                   "one with a moving mean and one with  "
                                   "shrinking variance. The result is a "
                                   "distribution that starts as unimodal and "
                                   "becomes more and more bimodal over time.")

  # Add a gamma distribution
  gamma = tf.random_gamma(shape=[1000], alpha=k)
  histogram_summary.op("gamma", gamma,
                       description="A gamma distribution whose shape "
                                   "parameter, α, changes over time.")

  # And a poisson distribution
  poisson = tf.random_poisson(shape=[1000], lam=k)
  histogram_summary.op("poisson", poisson,
                       description="A Poisson distribution, which only "
                                   "takes on integer values.")

  # And a uniform distribution
  uniform = tf.random_uniform(shape=[1000], maxval=k*10)
  histogram_summary.op("uniform", uniform,
                       description="A simple uniform distribution.")

  # Finally, combine everything together!
  all_distributions = [mean_moving_normal, shrinking_normal,
                       gamma, poisson, uniform]
  all_combined = tf.concat(all_distributions, 0)
  histogram_summary.op("all_combined", all_combined,
                       description="An amalgamation of five distributions: a "
                                   "uniform distribution, a gamma "
                                   "distribution, a Poisson distribution, and "
                                   "two normal distributions.")

  summaries = tf.summary.merge_all()

  # Setup a session and summary writer
  sess = tf.Session()
  writer = tf.summary.FileWriter(logdir)

  # Setup a loop and write the summaries to disk
  N = 400
  for step in xrange(N):
    k_val = step/float(N)
    summ = sess.run(summaries, feed_dict={k: k_val})
    writer.add_summary(summ, global_step=step)
Example #29
 def _process_sinogram(self, dataset):
     from ...model.normalizer.normalizer import ReduceSum, FixWhite
     from dxpy.learn.model.normalizer import FixWhite
     from ..super_resolution import SuperResolutionDataset
     from ...utils.tensor import shape_as_list
     if self.param('log_scale'):
         stat = dataset.LOG_SINO_STAT
     else:
         stat = dataset.SINO_STAT
     # dataset = ReduceSum(self.name / 'reduce_sum', dataset['sinogram'],
     # fixed_summation_value=1e6).as_tensor()
     if 'phantom' in dataset:
         phan = dataset['phantom']
     else:
         phan = None
     dataset = dataset['sinogram']
     if self.param('with_poission_noise'):
         with tf.name_scope('add_with_poission_noise'):
             noise = tf.random_poisson(dataset, shape=[])
             dataset = tf.concat([noise, dataset], axis=0)
     if self.param('log_scale'):
         dataset = tf.log(dataset + 0.4)
     if self.param('with_white_normalization'):
         dataset = FixWhite(name=self.name / 'fix_white',
                            inputs=dataset,
                            mean=stat['mean'],
                            std=stat['std']).as_tensor()
     # random phase shift
     # if self.param('with_phase_shift'):
     #     phase_view = tf.random_uniform(
     #         [], 0, shape_as_list(dataset)[1], dtype=tf.int64)
     #     dataset_l = dataset[:, phase_view:, :, :]
     #     dataset_r = dataset[:, :phase_view, :, :]
     #     dataset = tf.concat([dataset_l, dataset_r], axis=1)
     dataset = tf.random_crop(dataset, [shape_as_list(dataset)[0]] +
                              list(self.param('target_shape')) + [1])
     dataset = SuperResolutionDataset(
         self.name / 'super_resolution',
         lambda: {'image': dataset},
         input_key='image',
         nb_down_sample=self.param('nb_down_sample'))
     keys = [
         'image{}x'.format(2**i)
         for i in range(dataset.param('nb_down_sample') + 1)
     ]
     if self.param('with_poission_noise'):
         result = {
             'input/' + k: dataset[k][:shape_as_list(dataset[k])[0] // 2,
                                      ...]
             for k in keys
         }
         result.update({
             'label/' + k: dataset[k][shape_as_list(dataset[k])[0] // 2:,
                                      ...]
             for k in keys
         })
     else:
         result = {'input/' + k: dataset[k] for k in keys}
         result.update({'label/' + k: dataset[k] for k in keys})
     result.update({
         'noise/' + k: dataset[k][:shape_as_list(dataset[k])[0] // 2, ...]
         for k in keys
     })
     result.update({
         'clean/' + k: dataset[k][shape_as_list(dataset[k])[0] // 2:, ...]
         for k in keys
     })
     if phan is not None:
         result.update({'phantom': phan})
     return result
Example #30
 def _sample_n(self, n, seed=None):
   return tf.random_poisson(self.rate, [n], dtype=self.dtype, seed=seed)
# YOUR CODE

###############################################################################
# 1e: Create a diagonal 2-d tensor of size 6 x 6 with the diagonal values of 1,
# 2, ..., 6
# Hint: Use tf.range() and tf.diag().
###############################################################################
x = tf.diag(tf.range(start=1, limit=7))
# YOUR CODE

###############################################################################
# 1f: Create a random 2-d tensor of size 10 x 10 from any distribution.
# Calculate its determinant.
# Hint: Look at tf.matrix_determinant().
###############################################################################
x = tf.random_poisson(lam=10.0, shape=[10, 10])
det = tf.matrix_determinant(x)
# YOUR CODE

###############################################################################
# 1g: Create tensor x with value [5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9].
# Return the unique elements in x
# Hint: use tf.unique(). Keep in mind that tf.unique() returns a tuple.
###############################################################################

x = tf.constant([5, 2, 3, 5, 10, 6, 2, 3, 4, 2, 1, 1, 0, 9])
out, indexes = tf.unique(x)
# YOUR CODE

###############################################################################
# 1h: Create two tensors x and y of shape 300 from any normal distribution,
# as long as they are from the same distribution.
Example #32
    def __init__(self, is_training=True):
        self.graph = tf.Graph()
        with self.graph.as_default():
            if is_training:
                self.x, self.y, self.xloc, self.yloc, self.m, self.num_batch = get_batch_data(
                )  # (N, T)
            else:  # inference
                self.x = tf.placeholder(tf.int32, shape=(None, hp.x_maxlen))
                self.y = tf.placeholder(tf.int32, shape=(None, hp.y_maxlen))
                self.xloc = tf.placeholder(tf.int32, shape=(None, hp.x_maxlen))
                self.yloc = tf.placeholder(tf.int32, shape=(None, hp.y_maxlen))
                self.m = tf.placeholder(tf.int32, shape=(None, hp.x_maxlen))

            # define decoder inputs
            self.decoder_inputs = tf.concat(
                (tf.ones_like(self.y[:, :1]) * 2, self.y[:, :-1]), -1)  # 2:<S>

            # Load vocabulary
            src2idx, idx2src = load_src_vocab()
            des2idx, idx2des = load_des_vocab()

            self.hidden_units = hp.hidden_units

            # Encoder
            with tf.variable_scope("encoder"):
                ## Embedding
                self.enc = embedding(self.x,
                                     vocab_size=len(src2idx),
                                     num_units=self.hidden_units,
                                     scale=True,
                                     scope="enc_embed")
                clue_level = tf.random_poisson(shape=[1],
                                               lam=1,
                                               dtype=tf.int32)
                #clue_level = tf.Print(clue_level, [clue_level])
                #self.enc_mask = tf.expand_dims(tf.cast(tf.equal(self.m, 1), tf.float32), 2)
                self.enc_mask = tf.expand_dims(
                    tf.cast(
                        tf.logical_and(tf.greater_equal(self.m, 1),
                                       tf.less_equal(self.m, clue_level)),
                        tf.float32), 2)
                self.enc = tf.concat([self.enc, self.enc_mask], axis=2)
                self.hidden_units += 1

                ## Positional Encoding
                if hp.sinusoid:
                    self.enc += positional_encoding(
                        self.x,
                        num_units=self.hidden_units,
                        zero_pad=False,
                        scale=False,
                        scope="enc_pe")
                else:
                    self.enc += embedding(tf.tile(
                        tf.expand_dims(tf.range(tf.shape(self.x)[1]), 0),
                        [tf.shape(self.x)[0], 1]),
                                          vocab_size=hp.x_maxlen,
                                          num_units=self.hidden_units,
                                          zero_pad=False,
                                          scale=False,
                                          scope="enc_pe")

                tf.add_to_collection('explain_input', self.enc)

                ## Dropout
                self.enc = tf.layers.dropout(
                    self.enc,
                    rate=hp.dropout_rate,
                    training=tf.convert_to_tensor(is_training))

                ## Blocks
                for i in range(hp.num_blocks):
                    with tf.variable_scope("num_blocks_{}".format(i)):
                        ### Multihead Attention
                        self.enc = multihead_attention(
                            queries=self.enc,
                            keys=self.enc,
                            num_units=self.hidden_units,
                            num_heads=hp.num_heads,
                            dropout_rate=hp.dropout_rate,
                            is_training=is_training,
                            causality=False)

                        ### Feed Forward
                        self.enc = feedforward(self.enc,
                                               num_units=[
                                                   4 * self.hidden_units,
                                                   self.hidden_units
                                               ])

            # Decoder
            with tf.variable_scope("decoder"):
                ## Embedding
                self.dec = embedding(self.decoder_inputs,
                                     vocab_size=len(des2idx),
                                     num_units=self.hidden_units,
                                     scale=True,
                                     scope="dec_embed")

                ## Positional Encoding
                if hp.sinusoid:
                    self.dec += positional_encoding(
                        self.decoder_inputs,
                        vocab_size=hp.y_maxlen,
                        num_units=self.hidden_units,
                        zero_pad=False,
                        scale=False,
                        scope="dec_pe")
                else:
                    self.dec += embedding(tf.tile(
                        tf.expand_dims(
                            tf.range(tf.shape(self.decoder_inputs)[1]), 0),
                        [tf.shape(self.decoder_inputs)[0], 1]),
                                          vocab_size=hp.y_maxlen,
                                          num_units=self.hidden_units,
                                          zero_pad=False,
                                          scale=False,
                                          scope="dec_pe")

                tf.add_to_collection('explain_input', self.dec)
                ## Dropout
                self.dec = tf.layers.dropout(
                    self.dec,
                    rate=hp.dropout_rate,
                    training=tf.convert_to_tensor(is_training))

                ## Blocks
                for i in range(hp.num_blocks):
                    with tf.variable_scope("num_blocks_{}".format(i)):
                        ## Multihead Attention ( self-attention)
                        self.dec = multihead_attention(
                            queries=self.dec,
                            keys=self.dec,
                            num_units=self.hidden_units,
                            num_heads=hp.num_heads,
                            dropout_rate=hp.dropout_rate,
                            is_training=is_training,
                            causality=True,
                            scope="self_attention")

                        ## Multihead Attention ( vanilla attention)
                        self.dec = multihead_attention(
                            queries=self.dec,
                            keys=self.enc,
                            num_units=self.hidden_units,
                            num_heads=hp.num_heads,
                            dropout_rate=hp.dropout_rate,
                            is_training=is_training,
                            causality=False,
                            scope="vanilla_attention")

                        ## Feed Forward
                        with tf.variable_scope(
                                "num_blocks_fc_dec_{}".format(i)):
                            self.dec = feedforward(self.dec,
                                                   num_units=[
                                                       4 * self.hidden_units,
                                                       self.hidden_units
                                                   ])

            self.loc_enc = self.enc
            self.loc_logits = attention_matrix(queries=self.loc_enc,
                                               keys=self.dec,
                                               num_units=self.hidden_units,
                                               dropout_rate=hp.dropout_rate,
                                               is_training=is_training,
                                               causality=False,
                                               scope="copy_matrix")

            xloc_vec = tf.one_hot(self.xloc,
                                  depth=hp.y_maxlen,
                                  dtype=tf.float32)
            yloc_vec = tf.one_hot(self.yloc,
                                  depth=hp.y_maxlen,
                                  dtype=tf.float32)
            loc_label = tf.matmul(yloc_vec, tf.transpose(xloc_vec, [0, 2, 1]))
            self.loc_label_history = tf.cumsum(loc_label,
                                               axis=1,
                                               exclusive=True)

            # Final linear projection
            self.loc_logits = tf.transpose(self.loc_logits, [0, 2, 1])

            self.loc_logits = tf.stack(
                [self.loc_logits, self.loc_label_history], axis=3)
            self.loc_logits = tf.squeeze(tf.layers.dense(self.loc_logits, 1),
                                         axis=[3])

            x_masks = tf.tile(tf.expand_dims(tf.equal(self.x, 0), 1),
                              [1, hp.y_maxlen, 1])
            #y_masks = tf.tile(tf.expand_dims(tf.equal(self.y, 0), -1), [1, 1, hp.x_maxlen])
            paddings = tf.ones_like(self.loc_logits) * (-1e6)
            self.loc_logits = tf.where(x_masks, paddings,
                                       self.loc_logits)  # (N, T_q, T_k)
            #self.loc_logits = tf.where(y_masks, paddings, self.loc_logits) # (N, T_q, T_k)
            self.logits = tf.layers.dense(self.dec, len(des2idx))
            self.final_logits = tf.concat([self.logits, self.loc_logits],
                                          axis=2)
            tf.add_to_collection('explain_output', self.final_logits)
            #self.final_logits = tf.Print(self.final_logits, [self.final_logits[0][0][-3:]], message="final_logits_last")
            #self.final_logits = tf.Print(self.final_logits, [self.final_logits[0][0][:3]], message="final_logits_first")

            self.preds = tf.to_int32(tf.argmax(self.final_logits, axis=-1))
            self.istarget = tf.to_float(tf.not_equal(self.y, 0))

            if is_training:
                label = tf.one_hot(self.y,
                                   depth=len(des2idx),
                                   dtype=tf.float32)
                # A special case, when copy is open, we should not need unk label
                unk_pos = label[:, :, 1]
                copy_pos = tf.sign(tf.reduce_sum(loc_label, axis=2))
                fix_pos = unk_pos * copy_pos
                #fix_pos = tf.Print(fix_pos, [tf.reduce_sum(unk_pos, axis=-1), tf.shape(unk_pos)], message="\nunk_pos", summarize=16)
                #fix_pos = tf.Print(fix_pos, [tf.reduce_sum(fix_pos, axis=-1), tf.shape(fix_pos)], message="\nfix_pos", summarize=16)
                fix_label = tf.expand_dims(label[:, :, 1] - fix_pos, axis=2)
                label = tf.concat(
                    [label[:, :, :1], fix_label, label[:, :, 2:]], axis=-1)

                self.final_label = tf.concat([label, loc_label], axis=2)
                #self.final_label = tf.Print(self.final_label, [self.final_label[0][0][-3:]], message="final_label")
                # Loss
                self.min_logit_loc = min_logit_loc = tf.argmax(
                    self.final_logits + (-1e6) * (1.0 - self.final_label),
                    axis=-1)
                #min_logit_loc = tf.Print(min_logit_loc, [min_logit_loc[0]], message="min_logit_loc")
                self.min_label = tf.one_hot(min_logit_loc,
                                            depth=len(des2idx) + hp.x_maxlen,
                                            dtype=tf.float32)

                vocab_count = len(des2idx) + hp.x_maxlen - tf.reduce_sum(
                    tf.cast(tf.equal(self.x, 0), dtype=tf.int32), axis=-1)
                #vocab_count = tf.Print(vocab_count, [vocab_count[0]], message="vocab_count")
                self.y_smoothed = label_smoothing_mask(self.min_label,
                                                       vocab_count)
                #self.final_logits = tf.Print(self.final_logits, [self.final_logits[0][1][min_logit_loc[0][1]]], message="final_logits")
                #self.y_smoothed = tf.Print(self.y_smoothed, [self.y_smoothed[0][1][min_logit_loc[0][1]]], message="y_smoothed")
                self.loss = tf.nn.softmax_cross_entropy_with_logits_v2(
                    logits=self.final_logits, labels=self.y_smoothed)
                #self.loss = tf.Print(self.loss, [self.final_label[0][1][min_logit_loc[0][1]]], message="final_label")
                #self.loss = tf.Print(self.loss, [self.loss[0][-3:]], message="loss_last")
                #self.loss = tf.Print(self.loss, [self.loss[0][:3]], message="loss_first")
                self.mean_loss = tf.reduce_sum(
                    self.loss * self.istarget) / (tf.reduce_sum(self.istarget))

                # Training Scheme
                self.global_step = tf.Variable(0,
                                               name='global_step',
                                               trainable=False)
                self.optimizer = tf.train.AdamOptimizer(learning_rate=hp.lr,
                                                        beta1=0.9,
                                                        beta2=0.98,
                                                        epsilon=1e-8)
                self.train_op = self.optimizer.minimize(
                    self.mean_loss, global_step=self.global_step)

                # Summary
                tf.summary.scalar('mean_loss', self.mean_loss)
                self.merged = tf.summary.merge_all()
Example #33
mean_moving_normal = tf.random_normal(shape=[1000], mean=(5 * k), stddev=1)
tf.summary.histogram('normal/moving_mean', mean_moving_normal)

variance_shrinking_normal = tf.random_normal(shape=[1000],
                                             mean=0,
                                             stddev=1 - k)
tf.summary.histogram('normal/shrinking_variance', variance_shrinking_normal)

normal_combined = tf.concat([mean_moving_normal, variance_shrinking_normal], 0)
tf.summary.histogram('normal/bimodal', normal_combined)

gamma = tf.random_gamma(shape=[1000], alpha=k)
tf.summary.histogram('gamma', gamma)

poisson = tf.random_poisson(shape=[1000], lam=k)
tf.summary.histogram('poisson', poisson)

uniform = tf.random_uniform(shape=[1000], maxval=k * 10)
tf.summary.histogram('uniform', uniform)

all_distributions = [
    mean_moving_normal, variance_shrinking_normal, normal_combined, gamma,
    poisson, uniform
]
all_combined = tf.concat(all_distributions, 0)
tf.summary.histogram('all_combined', all_combined)

sess = tf.Session()

if tf.gfile.Exists('/tmp/histogram_example'):
Example #34
 def add_train_noise_tf(self, x):
     chi_rng = tf.random_uniform(shape=[1, 1, 1],
                                 minval=0.001,
                                 maxval=self.lam_max)
     return tf.random_poisson(chi_rng * (x + 0.5), shape=[]) / chi_rng - 0.5
Example #35
from typing import Dict, Any, Union
import matplotlib.pyplot as plt
import tensorflow as tf

a = tf.add(2, 5)
b = tf.multiply(a, 3)

sess = tf.Session()
replace_dict: Dict[Union[object, Any], int] = {a: 15}
print(sess.run(b, feed_dict=replace_dict))

plt.subplot(211)
num_list = tf.random_normal([2, 20])
out = sess.run(num_list)
x, y = out
plt.scatter(x, y)
#plt.show()
num_list_1 = tf.random_poisson([6, 20], [2, 20])
out1 = sess.run(num_list_1)
x1, y1 = out1
plt.subplot(212)
plt.scatter(x1, y1, c='red', label='red', alpha=0.35, edgecolors='black')
plt.show()