Example #1
    def __init__(self, params):

        self.params = params
        dist = self.params.noise['distribution']
        loc_normal = self.params.noise['loc_normal']
        scale_normal = self.params.noise['scale_normal']
        scale_uniform = self.params.noise['scale_uniform']
        low = self.params.noise['low']
        high = self.params.noise['high']

        if dist == "normal":
            self.noise = normal.Normal(loc_normal, scale_normal)
        elif dist == 'uniform':
            self.noise = Uniform(low, high, scale_uniform)
        elif dist == 'uniform+normal':
            noise1 = normal.Normal(loc_normal, scale_normal)
            noise2 = Uniform(low, high, scale_uniform)
            self.noise = MixtureNoise(noise1, noise2, method='sum')
        elif dist == 'mix_rand_uniform_normal':
            noise1 = normal.Normal(loc_normal, scale_normal)
            noise2 = Uniform(low, high, scale_uniform)
            self.noise = MixtureNoise(noise1, noise2, method='rand')
        else:
            raise ValueError('Noise not recognized.')
        logging.info('Noise Injection {}'.format(dist))
Example #2
File: loss.py Project: kushal-sa/VAE
def log_prob_ratio_normal(z, mu_z_prior, logstd_z_prior, mu_z, logstd_z):

    return (-normal.Normal(mu_z_prior.flatten(start_dim=1),
                           logstd_z_prior.exp().flatten(start_dim=1)).log_prob(
                               z.flatten(start_dim=1)) +
            normal.Normal(mu_z.flatten(start_dim=1),
                          logstd_z.exp().flatten(start_dim=1)).log_prob(
                              z.flatten(start_dim=1))).sum(1).mean()
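log_prob_ratio_normal is a single-sample Monte Carlo estimate of KL(q(z|x) || p(z)) for diagonal Gaussians when z is drawn from q. A small sketch with illustrative shapes (not from the project) comparing it against the closed-form KL in torch.distributions.kl:

import torch
from torch.distributions import normal, kl

mu_q, logstd_q = torch.zeros(4, 8), torch.zeros(4, 8)          # posterior parameters
mu_p, logstd_p = 0.5 * torch.ones(4, 8), torch.zeros(4, 8)     # prior parameters

q = normal.Normal(mu_q, logstd_q.exp())
p = normal.Normal(mu_p, logstd_p.exp())

z = q.rsample()
mc_kl = (q.log_prob(z) - p.log_prob(z)).sum(1).mean()          # same quantity as above, one sample
exact_kl = kl.kl_divergence(q, p).sum(1).mean()                # closed-form reference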
Example #3
    def down(self, input, sample=False):
        bs = input.shape[0]
        device = input.device
        x = self.celu(input)
        x = self.down_conv_a(x)

        pz_mean, pz_std, rz_mean, rz_std, down_context, h_det = x.split(
            [1] * 5 + [self.args.h_channel], 1)
        pz_std = torch.nn.functional.softplus(pz_std)
        rz_std = torch.nn.functional.softplus(rz_std)
        prior = normal.Normal(loc=pz_mean, scale=pz_std + 1e-4)

        if sample:
            z = prior.rsample()
            kl = torch.zeros(bs).to(device)
        else:
            posterior = normal.Normal(loc=rz_mean + self.qz_mean,
                                      scale=rz_std + self.qz_std + 1e-4)

            hard_encode = rz_mean + self.qz_mean

            z = posterior.rsample()
            logqs = posterior.log_prob(z)
            context = self.up_context + down_context

            if self.iaf:
                x = self.down_ar_conv(z, context)
                arw_mean, arw_std = x[0] * 0.1, x[1] * 0.1
                arw_std = torch.nn.functional.softplus(arw_std)
                z = (z - arw_mean) / (arw_std + 1e-4)
                # z = arw_mean + z * torch.exp(arw_logsd)
                # z = (z - arw_mean) / 0.001
                logqs += torch.log(arw_std + 1e-4)

                x_hard = self.down_ar_conv(rz_mean + self.qz_mean, context)
                arw_mean_hard, arw_std_hard = x_hard[0] * 0.1, x_hard[1] * 0.1
                arw_std_hard = torch.nn.functional.softplus(arw_std_hard)

                hard_encode = (rz_mean + self.qz_mean -
                               arw_mean_hard) / (arw_std_hard + 1e-4)
                # hard_encode = (rz_mean + self.qz_mean) * torch.exp(arw_logsd_hard) + arw_mean_hard

            logps = prior.log_prob(z)
            # logps = torch.ones_like(logqs)
            kl = logqs - logps

        h = torch.cat((z, h_det), 1)
        h = self.celu(h)

        h = self.down_conv_b(h)

        return input + 0.1 * h, kl, z, hard_encode
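In the IAF branch above, z is rescaled and log(arw_std + 1e-4) is added to logqs; this is the standard change-of-variables correction for an affine flow step. A minimal standalone sketch of the same bookkeeping, using illustrative tensors rather than the module's layers:

import torch
from torch.distributions import normal

base = normal.Normal(torch.zeros(4), torch.ones(4))
z = base.rsample()
logq = base.log_prob(z)

mean, std = torch.tensor(0.3), torch.tensor(0.8)
z_new = (z - mean) / std              # affine flow step, as in the IAF update
logq = logq + torch.log(std)          # log|det dz/dz_new| keeps logq a valid density for z_new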
Example #4
    def __init__(self, dist: str, config: Mapping):
        """
        Generate a set of random rotation and translation error based on a specified distribution

        :param dist: 'uniform' or 'normal', distributions where the random samples are picked from
        :param config: type=dict, keys=['R', 'T'], which give the configuration of the distribution,
                       default values are given in the code
        """
        self.error = None

        if dist == 'uniform':
            self.dist = 'uniform'
            R_range = config.get(
                'R', np.deg2rad(2))  # rotation error up to 2 degrees by default
            T_range = config.get(
                'T', 0.2)  # translation error up to 20 cm by default
            LOG.warning('Rotation error range (degrees): [-%.2f, %.2f]' %
                        (np.rad2deg(R_range), np.rad2deg(R_range)))
            LOG.warning('Translation error range (meters): [-%.3f, %.3f]' %
                        (T_range, T_range))
            self.R_dist = uniform.Uniform(-R_range, R_range)
            self.T_dist = uniform.Uniform(-T_range, T_range)

            # statistics
            self.R_mean, self.T_mean = 0, 0  # (a+b)/2
            self.R_std = 2 * R_range / np.sqrt(12)  # (b-a)/sqrt(12)
            self.T_std = 2 * T_range / np.sqrt(12)
        elif dist == 'normal':
            self.dist = 'normal'
            R_params = config.get(
                'R',
                (np.deg2rad(2), np.deg2rad(0.1)
                 ))  # rotation error at 2 degrees as mean, 0.1 degree for std.
            T_params = config.get(
                'T',
                (0.2, 0.01))  # translation error at 20cm as mean, 1cm for std.
            LOG.warning('Rotation error mean (degrees): %.2f, std: %.2f' %
                        (np.rad2deg(R_params[0]), np.rad2deg(R_params[1])))
            LOG.warning('Translation error mean (meters): %.3f, std: %.3f' %
                        (T_params[0], T_params[1]))
            self.R_dist = normal.Normal(*R_params)
            self.T_dist = normal.Normal(*T_params)

            # statistics
            self.R_mean, self.R_std = R_params
            self.T_mean, self.T_std = T_params
        else:
            LOG.error('Unknown distribution given: %s' % dist)
        return
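The statistics comments above use (b-a)/sqrt(12), the standard deviation of a uniform distribution on [a, b]. A quick self-contained check with a value matching the 2-degree default (illustrative, not from the project):

from torch.distributions import uniform

R_range = 0.0349                      # ~2 degrees in radians
d = uniform.Uniform(-R_range, R_range)
samples = d.sample((100000,))
print(samples.std())                  # ~ 2 * R_range / sqrt(12), about 0.0202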
Example #5
def loss_funct(out, x):
    prior_mu, prior_sig, decoder_mu, decoder_sig, x_decoded = out
    loss = 0.
    for i in range(x.shape[1]):

        #KL div
        a = Norm.Normal(prior_mu[i], prior_sig[i])
        b = Norm.Normal(decoder_mu[i], decoder_sig[i])
        kl_div = torch.mean(KL.kl_divergence(a, b))

        crossent = torch.mean(
            F.binary_cross_entropy(x_decoded[i], x[:, i, :], reduction='none'))
        loss += crossent + kl_div

    return loss
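kl_divergence(a, b) used above has a closed form for two univariate Normals: KL(a||b) = log(sigma_b/sigma_a) + (sigma_a^2 + (mu_a - mu_b)^2) / (2 sigma_b^2) - 1/2. A short self-contained check with arbitrary numbers:

import torch
from torch.distributions import normal, kl

a = normal.Normal(torch.tensor(0.0), torch.tensor(1.0))
b = normal.Normal(torch.tensor(1.0), torch.tensor(2.0))

closed_form = (torch.log(b.scale / a.scale)
               + (a.scale ** 2 + (a.loc - b.loc) ** 2) / (2 * b.scale ** 2)
               - 0.5)
assert torch.allclose(kl.kl_divergence(a, b), closed_form)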
Example #6
def get_normal_distribution_mappings(mean, sd, values):
    from torch.distributions import normal
    n = normal.Normal(mean, sd)
    y = dict()
    for i in values:
        y[round(i, 0)] = round(1 - n.cdf(i).item(), 3)
    return y
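An illustrative call (numbers are arbitrary, not from the project); each value maps to the probability mass above it, i.e. 1 - CDF(value):

mapping = get_normal_distribution_mappings(mean=100.0, sd=15.0,
                                           values=[85.0, 100.0, 115.0, 130.0])
# -> approximately {85.0: 0.841, 100.0: 0.5, 115.0: 0.159, 130.0: 0.023}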
Example #7
    def forward(self, observations):
        h1 = F.elu(self.lin1(observations))
        mu = self.mu(h1)
        sigma = self.softplus(self.sigma(h1))
        out = normal.Normal(mu, sigma)

        return out
Example #8
    def __init__(self,n,p,sig,o=0.0,bias=True):
        super(CUDAvoir,self).__init__()

        self.n = torch.tensor(n)
        self.p = torch.tensor(p)
        self.sig = torch.tensor(sig)

        self.v = torch.zeros(self.n) ## Recurrent Layer State Vector
        self.w = torch.zeros(self.n,self.n) ## Recurrent Layer Weight Matrix

        self.ol = nn.Linear(self.n, 1, bias=False) ## Linear Output Layer
        self.o = torch.tensor([o]) ## Initialize Output Neuron
        self.fb = nn.Linear(1, self.n, bias=False) ## Linear Feedback Layer

        if bias: ## Recurrent Layer Bias
            self.b = torch.FloatTensor(n).uniform_(0,1)
        else:
            self.b = torch.zeros(self.n)
        
        ## Populate Recurrent Layer Weight Matrix
        norm = normal.Normal(loc=0,scale=self.sig)
        uni = uniform.Uniform(0,1) 
        for i in range(self.n):
            for j in range(self.n):
                uni_draw = uni.sample()
                if uni_draw < self.p:
                    self.w[i,j] = norm.sample()
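The nested loop above decides entry by entry whether a weight is non-zero and then draws it from the normal; for large n an equivalent vectorized construction is much faster. A sketch under the same assumptions, with scalar n, p, sig chosen for illustration:

import torch
from torch.distributions import normal, uniform

n, p, sig = 100, 0.1, 1.0
mask = (uniform.Uniform(0, 1).sample((n, n)) < p).float()       # which weights are non-zero
w = mask * normal.Normal(loc=0.0, scale=sig).sample((n, n))     # sparse random recurrent weights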
Example #9
    def __init__(self,
                 z_dim=20,
                 h_dim_tar=100,
                 h_dim_q=10,
                 x_bin_n=19,
                 x_con_n=5):
        super().__init__()

        # init networks (overwritten per replication)
        self.p_x_za_dist = p_x_za(dim_in=z_dim + 1,
                                  nh=1,
                                  dim_h=20,
                                  dim_out_bin=x_bin_n,
                                  dim_out_con=x_con_n).cuda()
        self.p_t_za_dist = p_t_za(dim_in=z_dim,
                                  nh=1,
                                  dim_h=h_dim_tar,
                                  dim_out=1).cuda()
        self.p_y_zta_dist = p_y_zta(dim_in=z_dim, dim_h=h_dim_tar,
                                    dim_rep=20).cuda()
        self.q_z_xa_dist = q_z_xa(dim_in=x_bin_n + x_con_n + 1,
                                  nh=1,
                                  dim_h=h_dim_q,
                                  dim_out=z_dim).cuda()
        self.p_z_dist = normal.Normal(
            torch.zeros(z_dim).cuda(),
            torch.ones(z_dim).cuda())
Example #10
    def __init__(self):
        super(Model, self).__init__()
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.latent = 512
        self.normal_sampler = normal.Normal(torch.zeros(self.latent),
                                            torch.ones(self.latent))
        self.training = False

        self.relu = nn.ReLU(inplace=True)
        self.encoder = nn.Sequential(Conv2d(3, 32, 3),
                                     nn.MaxPool2d((4, 4), stride=(4, 4)),
                                     Conv2d(32, 64, 3),
                                     nn.MaxPool2d((4, 4), stride=(4, 4)),
                                     Conv2d(64, 256, 3),
                                     nn.MaxPool2d((2, 2), stride=(2, 2)),
                                     Conv2d(256, self.latent, 3),
                                     nn.MaxPool2d((2, 2), stride=(2, 2)))

        self.fc_mu = nn.Linear(self.latent, self.latent)
        self.fc_logvar = nn.Linear(self.latent, self.latent)
        self.fc_out = nn.Linear(self.latent, self.latent)

        self.decoder = nn.Sequential(ConvTransposes2d(self.latent, 256, 2),
                                     ConvTransposes2d(256, 64, 2),
                                     ConvTransposes2d(64, 32, 4),
                                     ConvTransposes2d(32, 3, 4))
Example #11
    def add_noise(self, data):
        norm = normal.Normal(0, 0.1)
        noise = norm.sample(data.shape)

        data = data + noise

        return F.relu(data)
Example #12
def create_actor_distribution(action_types, actor_output, action_size):
    '''
    :param action_types: "DISCRETE" for categorical actions; any other value is treated as continuous
    :param actor_output: raw output tensor of the actor network
    :param action_size: number of action dimensions
    :return: the action distribution for the agent to sample from
    '''
    if action_types == "DISCRETE":
        assert actor_output.size(
        )[1] == action_size, "Actor output the wrong size"
        action_distribution = Categorical(actor_output)  # create a distribution to sample from
    else:
        assert actor_output.size(
        )[1] == action_size * 2, "Actor output the wrong size"
        means = actor_output[:, :action_size].squeeze(0)
        stds = actor_output[:, action_size:].squeeze(0)
        if len(means.shape) == 2:
            means = means.squeeze(-1)
        if len(stds.shape) == 2:
            stds = stds.squeeze(-1)
        if len(stds.shape) > 1 or len(means.shape) > 1:
            raise ValueError("Wrong mean and std shapes - {} -- {}".format(
                stds.shape, means.shape))
        action_distribution = normal.Normal(means.squeeze(0), torch.abs(stds))
    return action_distribution
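An illustrative call for the continuous branch (tensor values are arbitrary): with action_size = 2 the actor output must hold 2 means followed by 2 standard deviations.

import torch

actor_output = torch.tensor([[0.1, -0.3, 0.5, 0.2]])            # shape (1, 2 * action_size)
dist = create_actor_distribution("CONTINUOUS", actor_output, action_size=2)
action = dist.sample()                                           # shape (2,)
log_prob = dist.log_prob(action).sum()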
Example #13
    def forward(self, x, only_mu=True):

        x = F.relu(self.bn1_fc(self.fc1(x)))
        x = F.dropout(x, training=self.training)

        fc2_sigma_pos = F.softplus(self.fc2_sigma - 2)
        fc2_distribution = normal.Normal(self.fc2_mu, fc2_sigma_pos)

        if self.training:
            classifiers = []
            for index in range(self.num_classifiers_train):
                fc2_w = fc2_distribution.rsample()
                classifiers.append(fc2_w)

            outputs = []
            for index in range(self.num_classifiers_train):
                out = F.linear(x, classifiers[index], self.fc2_bias)
                outputs.append(out)
            return outputs
        else:
            if only_mu:
                # Only use mu for classification
                out = F.linear(x, self.fc2_mu, self.fc2_bias)
                return [out]
            else:
                classifiers = []
                for index in range(self.num_classifiers_test):
                    fc2_w = fc2_distribution.rsample()
                    classifiers.append(fc2_w)

                outputs = []
                for index in range(self.num_classifiers_test):
                    out = F.linear(x, classifiers[index], self.fc2_bias)
                    outputs.append(out)
                return outputs
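The classifier above maintains a distribution over its last-layer weights and draws several weight samples per forward pass. A minimal standalone sketch of that idea with illustrative sizes (not the module's own layers):

import torch
import torch.nn.functional as F
from torch.distributions import normal

x = torch.randn(4, 16)                                   # a batch of features
w_mu = torch.randn(10, 16)                               # mean of the weight distribution
w_sigma = F.softplus(torch.randn(10, 16) - 2)            # positive std, small at init
w_dist = normal.Normal(w_mu, w_sigma)

logits = [F.linear(x, w_dist.rsample()) for _ in range(5)]   # 5 sampled classifiers
avg_logits = torch.stack(logits).mean(0)                     # ensemble prediction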
Example #14
    def __init__(self,
                 input_size,
                 output_size,
                 n_gaussians,
                 n_local,
                 n_global,
                 fix_values=False,
                 local1=None,
                 local2=None):
        super(SparseLayer, self).__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.shape = th.Tensor([input_size, output_size])
        self.standard = normal.Normal(th.tensor([0.]), th.tensor([1.]))

        # Hyperparameters
        self.n_gauss = n_gaussians
        self.n_local = n_local
        self.n_global = n_global
        self.tau = 0.1
        if local1 is None:
            local1 = local2 = 4.
        self.local_shape = th.Tensor([local1, local2])

        # Parameters
        self.D = nn.Parameter(th.randn(n_gaussians, 2))
        self.sigma = nn.Parameter(th.randn(n_gaussians))
        if fix_values:
            self.v = th.ones(n_gaussians)
        else:
            self.v = nn.Parameter(th.randn(n_gaussians))
Example #15
def loss(package, x):

    prior_means, prior_var, decoder_means, decoder_var, x_decoded = package
    loss = 0.
    for i in range(x.shape[1]):
        # Kl loss
        norm_dis1 = Norm.Normal(prior_means[i], prior_var[i])
        norm_dis2 = Norm.Normal(decoder_means[i], decoder_var[i])
        kl_loss = torch.mean(KL.kl_divergence(norm_dis1, norm_dis2))

        # reconstruction loss
        xent_loss = torch.mean(
            F.binary_cross_entropy(x_decoded[i], x[:, i, :], reduction='none'))
        loss += xent_loss + kl_loss

    return loss
Example #16
 def sample_z(self, mu, log_var):
     dis = normal.Normal(mu, torch.exp(log_var))
     '''
     sample() runs without error, but it does not propagate gradients;
     only rsample(), via the reparameterization trick, lets gradients flow
     back through the samples.
     '''
     return dis.rsample()
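A small check of the comment above: a loss built from rsample() backpropagates into mu and log_var, while one built from sample() has no grad_fn, so backward() would fail.

import torch
from torch.distributions import normal

mu = torch.zeros(3, requires_grad=True)
log_var = torch.zeros(3, requires_grad=True)

dist = normal.Normal(mu, torch.exp(log_var))
z = dist.rsample()              # reparameterized: mu + std * eps
(z ** 2).sum().backward()       # gradients flow into mu and log_var
print(mu.grad, log_var.grad)    # both populated

# dist.sample() returns a detached tensor; (dist.sample() ** 2).sum().backward()
# would raise a "does not require grad" RuntimeError.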
Example #17
 def forward(self, za):
     za_embed = F.elu(self.input(za))
     # No need for TAR heads because of simplicity and small dimensionality
     h1 = F.elu(self.h1(za_embed))
     h2 = F.elu(self.h2(h1))
     x = normal.Normal(self.mu_x(h2), torch.exp(self.sigma_x(h2)))
     return x
Example #18
    def __init__(self, latent, noise='uniform', path='.', batch_size=512):
        '''
        Initializes the Transporter, including creating the model.

        latent: (np array) Latent space distribution to map to. Must be an
        array of one dimensional vectors.
        noise: (str) Noise distribution to map from. Must be either 'uniform',
        'normal', or 'gaussian'
        path: (str) Path to store any images/weights of the model
        batch_size: (int) Batch Size
        '''
        self.latent = torch.Tensor(latent)
        self.dim = len(latent[0])

        if noise.lower() == 'uniform':
            self.noise = uniform.Uniform(-1, 1)
        elif noise.lower() == 'normal' or noise.lower() == 'gaussian':
            self.noise = normal.Normal(0, 1)
        else:
            raise Exception("{} has not been implemented yet".format(noise))

        self.path = path
        self.batch_size = batch_size

        self.create_model()
Example #19
    def forward(self, z, t, x):
        # Separated forwards for different t values, TAR

        x_t0 = F.elu(self.input_t0(z))
        for i in range(self.nh):
            x_t0 = F.elu(self.hidden_t0[i](x_t0))

        if x.shape[1] > -1:
            x_t0 = torch.cat([x, x_t0], axis=1)
            for i in range(self.nh):
                x_t0 = F.elu(self.hidden_x0[i](x_t0))

        mu_t0 = F.elu(self.mu_t0(x_t0))

        x_t1 = F.elu(self.input_t1(z))
        for i in range(self.nh):
            x_t1 = F.elu(self.hidden_t1[i](x_t1))

        if x.shape[1] > -1:
            x_t1 = torch.cat([x, x_t1], axis=1)
            for i in range(self.nh):
                x_t1 = F.elu(self.hidden_x1[i](x_t1))

        mu_t1 = F.elu(self.mu_t1(x_t1))

        # set mu according to t value
        y = normal.Normal((1 - t) * mu_t0 + t * mu_t1, 1)

        return y
Example #20
    def post_epoch_visualize(self, epoch, split):
        # if self.flags.visualize_only:
        #     self.do_plots()
        # else:
        if True:
            print('* Visualizing', split)
            Z = torch.linspace(0.0 + 1e-3, 1.0 - 1e-3, steps=20)
            Z = torch.cartesian_prod(Z, Z).view(20, 20, 2)
            if self.flags.z_size == 2 and self.flags.normal_latent:
                dist = normal.Normal(0.0, 1.0)
            x_gens = []
            for row in range(20):
                if self.flags.z_size == 2:
                    z = Z[row]
                    if self.flags.normal_latent:
                        z = dist.icdf(z)
                else:
                    if self.flags.normal_latent:
                        z = torch.randn(20, self.flags.z_size)
                    else:
                        z = torch.rand(20, self.flags.z_size)
                z = self.model.prepare_batch(z)
                x_gen = self.model.run_batch([z]).view(20, self.img_chan, self.img_size, self.img_size).detach().cpu()
                x_gens.append(x_gen)

            x_full = torch.cat(x_gens, dim=0).numpy()
            if split == 'test':
                fname = self.flags.log_dir + '/test.png'
            else:
                fname = self.flags.log_dir + '/vis_%03d.png' % self.model.get_train_steps()
            misc.save_comparison_grid(fname, x_full, border_width=0, retain_sequence=True)
            print('* Visualizations saved to', fname)
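The grid visualization above pushes evenly spaced points in (0, 1) through Normal.icdf, i.e. it walks the quantiles of the standard normal prior. A tiny illustration:

import torch
from torch.distributions import normal

u = torch.linspace(1e-3, 1.0 - 1e-3, steps=5)
z = normal.Normal(0.0, 1.0).icdf(u)   # approx [-3.09, -0.67, 0.00, 0.67, 3.09]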
Example #21
def create_actor_distribution(action_types, actor_output, action_size):

    if action_types == "DISCRETE":
        assert actor_output.size(
        )[1] == action_size, "Actor output the wrong size"
        action_distribution = Categorical(
            actor_output)  # this creates a distribution to sample from

    else:
        assert actor_output.size(
        )[1] == action_size * 2, "Actor output the wrong size"

        means = actor_output[:, :action_size]
        stds = actor_output[:, action_size:]

        means = means.squeeze(0)
        stds = stds.squeeze(0)

        if len(means.shape) == 2:
            means = means.squeeze(-1)

        if len(stds.shape) == 2:
            stds = stds.squeeze(-1)

        if len(stds.shape) > 1 or len(means.shape) > 1:
            raise ValueError("Wrong mean and std shapes")
        action_distribution = normal.Normal(means.squeeze(0), torch.abs(stds))

    return action_distribution
Example #22
 def __init__(self):
     super(SACActorNN, self).__init__()
     self.fc1 = nn.Linear(STATE_DIM, hidden_layer_size)
     self.fc2 = nn.Linear(hidden_layer_size, hidden_layer_size)
     self.mean = nn.Linear(hidden_layer_size, ACTION_DIM)
     self.log_stdev = nn.Linear(hidden_layer_size, ACTION_DIM)
     self.normal_dist = normal.Normal(0, 1)
Example #23
    def forward(self, z, t, a):
        # Separated forwards for different t values, TAR

        h = F.elu(self.h(z))
        rep = F.elu(self.rep(h))
        h_a0 = F.elu(self.h_a0(rep))
        h_a1 = F.elu(self.h_a1(rep))
        rep_a0 = F.elu(self.rep_a0(h_a0))
        rep_a1 = F.elu(self.rep_a1(h_a1))
        h_a0_t0 = F.elu(self.h_a0_t0(rep_a0))
        h_a0_t1 = F.elu(self.h_a0_t1(rep_a0))
        h_a1_t0 = F.elu(self.h_a1_t0(rep_a1))
        h_a1_t1 = F.elu(self.h_a1_t1(rep_a1))
        mu_a0_t0 = F.elu(self.mu_a0_t0(h_a0_t0))
        mu_a0_t1 = F.elu(self.mu_a0_t1(h_a0_t1))
        mu_a1_t0 = F.elu(self.mu_a1_t0(h_a1_t0))
        mu_a1_t1 = F.elu(self.mu_a1_t1(h_a1_t1))
        sigma_a0_t0 = torch.exp(self.sigma_a0_t0(h_a0_t0))
        sigma_a0_t1 = torch.exp(self.sigma_a0_t1(h_a0_t1))
        sigma_a1_t0 = torch.exp(self.sigma_a1_t0(h_a1_t0))
        sigma_a1_t1 = torch.exp(self.sigma_a1_t1(h_a1_t1))

        mu = (1-a)*(1-t) * mu_a0_t0 + \
             (1-a) * t * mu_a0_t1 + \
             a * (1-t) * mu_a1_t0 + \
             a * t * mu_a1_t1
        sigma = (1 - a) * (1 - t) * sigma_a0_t0 + \
             (1 - a) * t * sigma_a0_t1 + \
             a * (1 - t) * sigma_a1_t0 + \
             a * t * sigma_a1_t1

        # set mu according to t value
        y = normal.Normal(mu, sigma)

        return y
Example #24
def main():
    model_urls = {'alexnet': 'http://download.pytorch.org/models/alexnet-owt-4df8aa71.pth', }
    nn_model = ConvNet(rgb_sinusoid)
    nn_model.load_state_dict(model_zoo.load_url(model_urls['alexnet']))
    estimator = Estimator(nn_model, layer_idx=3)

    theta_range = np.linspace(0.0, np.pi, 100)
    mean_est = np.zeros(theta_range.shape)
    std_est  = np.zeros(theta_range.shape)

    for idx, theta in enumerate(theta_range):
        response = nn_model.orientation_response(theta, layer_idx=3)
        noise = torch_normal.Normal(loc=torch.tensor([0.0]),
                                    scale=torch.tensor([1.0]))

        estimates = np.array([estimator.mle(response + noise.sample(response.size()).view(1, -1),
                                            lb=theta-0.25 * np.pi,
                                            ub=theta+0.25 * np.pi) for _ in range(1000)])
        mean_est[idx] = np.mean(estimates)
        std_est[idx]  = np.std(estimates)

    np.save('./data_mean', mean_est)
    np.save('./data_std', std_est)

    plt.plot(theta_range / np.pi, mean_est)
    plt.show()

    plt.plot(theta_range / np.pi, std_est, 'o-')
    plt.show()
Example #25
    def forward(self, zb, a):
        h1 = F.elu(self.lin1(zb))
        h2 = F.elu(self.lin2(h1))

        # finish forward binary and categorical covariates
        bin_out_dict = dict()

        # for each categorical variable
        for i in range(len(self.headnames)):
            # calculate probability parameter
            p_a0 = self.binheads_a0[i](h2)
            p_a1 = self.binheads_a1[i](h2)
            dist_p_a0 = torch.sigmoid(p_a0)
            dist_p_a1 = torch.sigmoid(p_a1)
            # create distribution in dict
            if self.headnames[i] == 'BINARY':
                bin_out_dict[self.headnames[i]] = bernoulli.Bernoulli((1-a)*dist_p_a0 + a*dist_p_a1)
            else:
                bin_out_dict[self.headnames[i]] = OneHotCategorical((1-a)*dist_p_a0 + a*dist_p_a1)

        # finish forward continuous vars for the right TAR head
        mu_a0 = self.mu_a0(h2)
        mu_a1 = self.mu_a1(h2)
        sigma_a0 = self.softplus(self.sigma_a0(h2))
        sigma_a1 = self.softplus(self.sigma_a1(h2))
        # lower-bound sigma to prevent collapse when continuous vars are near 0
        sigma_a0 = torch.clamp(sigma_a0, min=0.1)
        sigma_a1 = torch.clamp(sigma_a1, min=0.1)
        con_out = normal.Normal((1-a) * mu_a0 + a * mu_a1, (1-a)* sigma_a0 + a * sigma_a1)

        return con_out, bin_out_dict
Example #26
def create_kl_gaussian_samples(cfg, sample_size):
    overall_scres = []
    if cfg.use_anal:
        if cfg.use_metric == 1:
            proc_calc = gauss.kl_ind_standard

        if cfg.use_metric == 2:

            proc_calc = wass.dist_W2_indepdnet_sn

            # proc_calc = wass.dist_W2_diag

    for i in range(sample_size):
        zrnd = torch.randn(cfg.batch_size, cfg.n_samples, cfg.latent_size)

        loc = torch.mean(zrnd, axis=1).unsqueeze(axis=1)
        scale = torch.std(zrnd, axis=1).unsqueeze(axis=1)
        idn_struc = independent.Independent(normal.Normal(loc, scale), 1)
        kl = proc_calc(idn_struc)
        real_score, _ = convert_metric_2_score(kl)
        overall_scres.append(real_score)

        # overall_scres.append(score.detach().numpy()[0])
        # print (overall_scres)

    return overall_scres
Example #27
 def __init__(self):
     super(SACRoboschoolHopperActorNN, self).__init__()
     self.fc1 = nn.Linear(15, 256)
     self.fc2 = nn.Linear(256, 256)
     self.mean = nn.Linear(256, 3)
     self.log_stdev = nn.Linear(256, 3)
     self.normal_dist = normal.Normal(0, 1)
Example #28
def generator_eval(generator):
    z = torch.zeros([2, args.latent_dim])
    z[0] = normal.Normal(0, 0.1).sample((1, args.latent_dim))
    z[1] = normal.Normal(0, 1).sample((1, args.latent_dim))
    z = z.float()

    z_diff = (z[0] - z[1]) / 9.0
    z_img = torch.zeros([9, args.latent_dim])

    # 9 steps from z[0] along the (z[0] - z[1]) direction
    for idx in range(9):
        z_img[idx] = z[0] + (idx * z_diff)

    gen_imgs = generator(z_img)  # batch_size * 28 * 28
    save_imgs = gen_imgs.view(-1, 1, 28, 28).cpu()
    save_image(save_imgs.data[0:9], 'images/1.png', nrow=9, normalize=True)
Example #29
def add_noise(img, sigma, mean):
    sigma = sigma * 2 / 255  # 255->[-1,1]: sigma*2/255
    gauss = normal.Normal(mean, sigma)
    noise = gauss.sample(img.size()).reshape(img.size())
    img_noisy = img + noise

    return img_noisy
Example #30
    def analytic_score(self, loc, scale, z):
        # scale = scale.pow(2)  # convert std to var

        d1 = independent.Independent(
            normal.Normal(torch.squeeze(loc), torch.squeeze(scale)), 1)
        anal_kl = gauss.kl_ind_standard(d1)

        return anal_kl