Example No. 1
import unittest


class TestBernoulliClass(unittest.TestCase):
    def setUp(self):
        self.bernoulli = Bernoulli('numbers_binomial.txt')

    def test_readdata(self):
        self.assertEqual(self.bernoulli.data,
                         [0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0],
                         'data not read in correctly')

    def test_meancalculation(self):
        mean = self.bernoulli.calculate_mean()
        self.assertEqual(round(mean, 1), 0.6,
                         'calculated mean not as expected')

    def test_stdevcalculation(self):
        stdev = self.bernoulli.calculate_stdev()
        self.assertEqual(round(stdev, 2), 0.49,
                         'calculated standard deviation not as expected')

    def test_pmf(self):
        self.assertEqual(round(self.bernoulli.pmf(1), 3), 0.615,
                         'probability mass function not as expected')
        self.assertEqual(round(self.bernoulli.pmf(0), 3), 0.385,
                         'probability mass function not as expected')

    def test_def(self):
        self.assertEqual(str(self.bernoulli),
                         'Probability of success p {}, probability of failure q {}'
                         .format(0.62, 0.38),
                         'parameters of the Bernoulli not as expected')
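For context, a minimal sketch of the Bernoulli class these tests imply. The file format, method names, and rounding behavior are inferred from the assertions above; this is not the original implementation.

class Bernoulli:
    """Bernoulli distribution estimated from a text file of 0/1 samples."""

    def __init__(self, filename):
        # One integer (0 or 1) per line; p is estimated as the sample mean.
        with open(filename) as f:
            self.data = [int(line) for line in f if line.strip()]
        self.p = self.calculate_mean()

    def calculate_mean(self):
        return sum(self.data) / len(self.data)

    def calculate_stdev(self):
        # Standard deviation of a Bernoulli variable: sqrt(p * (1 - p)).
        return (self.p * (1 - self.p)) ** 0.5

    def pmf(self, k):
        # P(X = 1) is p; P(X = 0) is 1 - p.
        return self.p if k == 1 else 1 - self.p

    def __repr__(self):
        return ('Probability of success p {}, probability of failure q {}'
                .format(round(self.p, 2), round(1 - self.p, 2)))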
Example No. 2
    def __init__(self, obs_shape, action_space, base=None, base_kwargs=None):
        super(Policy, self).__init__()
        if base_kwargs is None:
            base_kwargs = {}
        if base is None:
            if len(obs_shape) == 3:
                base = CNNBase
            elif len(obs_shape) == 1:
                base = MLPBase
            else:
                raise NotImplementedError

        self.base = base(obs_shape[0], **base_kwargs)

        if action_space.__class__.__name__ == "Discrete":
            num_outputs = action_space.n
            # Note: unlike the other branches, this one hard-codes the size of
            # the base's feature output (512) instead of using
            # self.base.output_size.
            num_action_outputs = 512
            self.dist = Categorical(num_action_outputs, num_outputs)
        elif action_space.__class__.__name__ == "Box":
            num_outputs = action_space.shape[0]
            self.dist = DiagGaussian(self.base.output_size, num_outputs)
        elif action_space.__class__.__name__ == "MultiBinary":
            num_outputs = action_space.shape[0]
            self.dist = Bernoulli(self.base.output_size, num_outputs)
        else:
            raise NotImplementedError
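A hypothetical construction call, assuming gym-style spaces (the gym import and shapes are illustrative, not part of the snippet):

from gym import spaces

# A 3-D observation shape selects CNNBase; Discrete(4) selects the
# Categorical action head.
policy = Policy(obs_shape=(4, 84, 84), action_space=spaces.Discrete(4))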
Example No. 3
    def test_init_with_errors(self):
        params = (None, '-3', '3', '-0.2', 'string')

        for param in params:
            with self.assertRaises(ValueError):
                Bernoulli(param)
Example No. 4
    def test_bernoulli():
        # Assumes a TensorFlow 1.x session named `sess` is available in the
        # enclosing scope (used by .eval(session=sess) below).
        bernoulli = Bernoulli(3)

        new_p = np.array([[0.5, 0.5, 0.5], [0.9, 0.9, 0.9]], dtype=np.float32)
        old_p = np.array([[0.9, 0.9, 0.9], [0.1, 0.1, 0.1]], dtype=np.float32)

        x = np.array([[1, 0, 1], [1, 1, 1]], dtype=np.float32)

        x_sym = tf.constant(x)
        new_p_sym = tf.constant(new_p)
        old_p_sym = tf.constant(old_p)

        new_info = dict(p=new_p)
        old_info = dict(p=old_p)

        new_info_sym = dict(p=new_p_sym)
        old_info_sym = dict(p=old_p_sym)

        # np.testing.assert_allclose(
        #     np.sum(bernoulli.entropy(dist_info=new_info)),
        #     np.sum(- new_p * np.log(new_p + 1e-8) - (1 - new_p) * np.log(1 - new_p + 1e-8)),
        # )

        # np.testing.assert_allclose(
        #     np.sum(bernoulli.kl(old_info_sym, new_info_sym).eval()),
        #     np.sum(old_p * (np.log(old_p + 1e-8) - np.log(new_p + 1e-8)) + (1 - old_p) * (np.log(1 - old_p + 1e-8) -
        #                                                                                   np.log(1 - new_p + 1e-8))),
        # )
        # np.testing.assert_allclose(
        #     np.sum(bernoulli.kl(old_info, new_info)),
        #     np.sum(old_p * (np.log(old_p + 1e-8) - np.log(new_p + 1e-8)) + (1 - old_p) * (np.log(1 - old_p + 1e-8) -
        #                                                                                   np.log(1 - new_p + 1e-8))),
        # )
        # np.testing.assert_allclose(
        #     bernoulli.likelihood_ratio_sym(x_sym, old_info_sym, new_info_sym).eval(),
        #     np.prod((x * new_p + (1 - x) * (1 - new_p)) / (x * old_p + (1 - x) * (1 - old_p) + 1e-8), axis=-1)
        # )
        np.testing.assert_allclose(
            bernoulli.logli(x_sym, old_info_sym).eval(session=sess),
            np.sum(x * np.log(old_p + 1e-8) +
                   (1 - x) * np.log(1 - old_p + 1e-8),
                   axis=-1))
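The live assertion checks the Bernoulli log-likelihood, sum_i [x_i * log(p_i) + (1 - x_i) * log(1 - p_i)], taken over the event dimension. A minimal sketch of the session wiring this test assumes (TensorFlow 1.x graph mode; the standalone `sess` and calling test_bernoulli at module level are assumptions):

import tensorflow as tf

# The test reads `sess` from the enclosing scope via .eval(session=sess).
sess = tf.Session()
test_bernoulli()
sess.close()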
Example No. 5
    def __init__(self, obs_shape, action_space, base_kwargs=None):
        super(Policy, self).__init__()
        if base_kwargs is None:
            base_kwargs = {}

        base = MLPBase
        self.base = base(obs_shape[0], **base_kwargs)
        if action_space.__class__.__name__ == "Discrete":
            num_outputs = action_space.n
            self.dist = Categorical(self.base.output_size, num_outputs)
        elif action_space.__class__.__name__ == "Box":
            num_outputs = action_space.shape[0]
            self.dist = DiagGaussian(self.base.output_size, num_outputs)
        elif action_space.__class__.__name__ == "MultiBinary":
            num_outputs = action_space.shape[0]
            self.dist = Bernoulli(self.base.output_size, num_outputs)
        else:
            raise NotImplementedError
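A hypothetical call for a continuous-control task, assuming gym-style spaces (shapes and the gym import are illustrative):

import numpy as np
from gym import spaces

# A Box action space selects the DiagGaussian head; observations always go
# through MLPBase in this variant.
action_space = spaces.Box(low=-1.0, high=1.0, shape=(6,), dtype=np.float32)
policy = Policy(obs_shape=(24,), action_space=action_space)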
Example No. 6
import unittest

import numpy as np
import numpy.testing as numpy_testing


class TestBernoulli(unittest.TestCase):

    def setUp(self):
        self.distribution = Bernoulli(0.70)

    def test_init_with_errors(self):
        params = (None, '-3', '3', '-0.2', 'string')

        for param in params:
            with self.assertRaises(ValueError):
                Bernoulli(param)

    def test_mean(self):
        mean = self.distribution.mean()
        self.assertAlmostEqual(mean, 0.7)

    def test_variance(self):
        variance = self.distribution.variance()
        self.assertAlmostEqual(variance, 0.21)

    def test_std(self):
        std = self.distribution.std()
        self.assertAlmostEqual(std, 0.21 ** 0.5)

    def test_cdf(self):
        cdf = self.distribution.cdf()
        self.assertEqual(cdf, 1.0)

    def test_pmf(self):
        pmf = self.distribution.pmf()
        self.assertAlmostEqual(pmf, 0.7)

    def test_pmfs(self):
        t_pmfs = np.array([0.3, 0.7])
        pmfs = self.distribution.pmfs()
        self.assertIsNotNone(pmfs)
        numpy_testing.assert_allclose(t_pmfs, pmfs)
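A hedged sketch of the Bernoulli(p) class these tests imply: the constructor rejects non-numeric input and probabilities outside [0, 1] (per test_init_with_errors above), and the method names and return values are inferred from the assertions. This is not the original implementation.

import numpy as np


class Bernoulli:
    """Bernoulli distribution parameterized by success probability p."""

    def __init__(self, p):
        # Reject non-numeric input and probabilities outside [0, 1].
        if not isinstance(p, (int, float)):
            raise ValueError('p must be a number')
        if not 0.0 <= p <= 1.0:
            raise ValueError('p must be in [0, 1]')
        self.p = float(p)

    def mean(self):
        return self.p

    def variance(self):
        return self.p * (1.0 - self.p)

    def std(self):
        return self.variance() ** 0.5

    def pmf(self):
        return self.p

    def pmfs(self):
        # Probability of each outcome, ordered [P(X=0), P(X=1)].
        return np.array([1.0 - self.p, self.p])

    def cdf(self):
        # CDF evaluated at the upper end of the support.
        return 1.0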
Example No. 7
    def __construct__(self, arch):
        """
        Construct the model from the architecture dictionary.

        :param arch: architecture dictionary
        :return: None
        """

        # these are the same across all latent levels
        encoding_form = arch['encoding_form']
        variable_update_form = arch['variable_update_form']
        const_prior_var = arch['constant_prior_variances']
        posterior_form = arch['posterior_form']

        latent_level_type = RecurrentLatentLevel if arch['encoder_type'] == 'recurrent' else DenseLatentLevel

        encoder_arch = None
        if arch['encoder_type'] == 'inference_model':
            encoder_arch = dict()
            encoder_arch['non_linearity'] = arch['non_linearity_enc']
            encoder_arch['connection_type'] = arch['connection_type_enc']
            encoder_arch['batch_norm'] = arch['batch_norm_enc']
            encoder_arch['weight_norm'] = arch['weight_norm_enc']
            encoder_arch['dropout'] = arch['dropout_enc']

        decoder_arch = dict()
        decoder_arch['non_linearity'] = arch['non_linearity_dec']
        decoder_arch['connection_type'] = arch['connection_type_dec']
        decoder_arch['batch_norm'] = arch['batch_norm_dec']
        decoder_arch['weight_norm'] = arch['weight_norm_dec']
        decoder_arch['dropout'] = arch['dropout_dec']

        # construct a latent level (dense or recurrent) for each level of latent variables
        for level in range(len(arch['n_latent'])):
            # get specifications for this level's encoder and decoder

            if arch['encoder_type'] == 'inference_model':
                encoder_arch['n_in'] = self.encoder_input_size(level, arch)
                encoder_arch['n_units'] = arch['n_units_enc'][level]
                encoder_arch['n_layers'] = arch['n_layers_enc'][level]

            decoder_arch['n_in'] = self.decoder_input_size(level, arch)
            decoder_arch['n_units'] = arch['n_units_dec'][level+1]
            decoder_arch['n_layers'] = arch['n_layers_dec'][level+1]

            n_latent = arch['n_latent'][level]
            n_det = [arch['n_det_enc'][level], arch['n_det_dec'][level]]

            # learn the prior at every level except the top one, unless
            # 'learn_top_prior' is set
            learn_prior = True if arch['learn_top_prior'] else (level != len(arch['n_latent']) - 1)

            self.levels[level] = latent_level_type(self.batch_size, encoder_arch, decoder_arch,
                                                   n_latent, n_det, encoding_form, const_prior_var,
                                                   variable_update_form, posterior_form, learn_prior)

        # construct the output decoder
        decoder_arch['n_in'] = self.decoder_input_size(-1, arch)
        decoder_arch['n_units'] = arch['n_units_dec'][0]
        decoder_arch['n_layers'] = arch['n_layers_dec'][0]
        self.output_decoder = MultiLayerPerceptron(**decoder_arch)

        # construct the output distribution
        if self.output_distribution == 'bernoulli':
            self.output_dist = Bernoulli(self.input_size, None)
            self.mean_output = Dense(arch['n_units_dec'][0], self.input_size, non_linearity='sigmoid', weight_norm=arch['weight_norm_dec'])
        elif self.output_distribution == 'multinomial':
            self.output_dist = Multinomial(self.input_size, None)
            self.mean_output = Dense(arch['n_units_dec'][0], self.input_size, non_linearity='linear', weight_norm=arch['weight_norm_dec'])
        elif self.output_distribution == 'gaussian':
            self.output_dist = DiagonalGaussian(self.input_size, None, None)
            self.mean_output = Dense(arch['n_units_dec'][0], self.input_size, non_linearity='sigmoid', weight_norm=arch['weight_norm_dec'])
            if self.constant_variances:
                if arch['single_output_variance']:
                    self.trainable_log_var = Variable(torch.zeros(1), requires_grad=True)
                else:
                    self.trainable_log_var = Variable(torch.normal(torch.zeros(self.input_size), 0.25), requires_grad=True)
            else:
                self.log_var_output = Dense(arch['n_units_dec'][0], self.input_size, weight_norm=arch['weight_norm_dec'])

        # make the state trainable if encoder_type is EM
        if arch['encoder_type'] in ['em', 'EM']:
            self.trainable_state()
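For reference, a hypothetical arch dictionary covering the keys this method reads. The key names come from the code above; every value is illustrative, not taken from the original repository.

# Illustrative configuration for a two-level latent model.
arch = {
    'encoding_form': 'direct',
    'variable_update_form': 'direct',
    'constant_prior_variances': True,
    'posterior_form': 'gaussian',
    'encoder_type': 'inference_model',   # or 'recurrent', 'em'
    'non_linearity_enc': 'elu', 'non_linearity_dec': 'elu',
    'connection_type_enc': 'sequential', 'connection_type_dec': 'sequential',
    'batch_norm_enc': False, 'batch_norm_dec': False,
    'weight_norm_enc': False, 'weight_norm_dec': False,
    'dropout_enc': 0.0, 'dropout_dec': 0.0,
    'n_latent': [64, 32],                        # two latent levels
    'n_units_enc': [512, 256], 'n_layers_enc': [2, 2],
    # decoder lists need len(n_latent) + 1 entries: index 0 is the output decoder
    'n_units_dec': [512, 256, 128], 'n_layers_dec': [2, 2, 2],
    'n_det_enc': [0, 0], 'n_det_dec': [0, 0],
    'learn_top_prior': False,
    'single_output_variance': False,
}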
Example No. 8
    def setUp(self):
        self.bernoulli = Bernoulli('numbers_binomial.txt')
Example No. 9
    def setUp(self):
        self.distribution = Bernoulli(0.70)
Example No. 10
    print('%d: %d' % (d, image_counts[d]))
print()

# Initialize summary image
summary = Image.new('L', (28 * num_components + 65, 28 * len(num_blocks)), 255)

# Do inference for varying numbers of blocks
idxs = np.argsort([np.sum(e) for e in emissions])
reps = []
for block_i, num_block in enumerate(num_blocks):
    # Block data
    blocks = np.array_split(idxs, num_block)

    # Run EM
    results = em(
        emissions,
        [Product([Bernoulli() for i in range(28 * 28)])
         for n in range(num_components)],
        count_restart=3.0,
        blocks=blocks,
        gamma_seed=137,
        init_gamma=(labels if init_to_labels else None))
    dists = results['dists']
    print('Reps: %d' % results['reps'])
    reps.append(results['reps'])

    # Produce summary image
    offset = 0
    im = Image.new('L', (28 * len(dists), 28))
    for d in results['dists']:
        digit = Image.new('L', (28, 28))
Example No. 11
                id, True,
                Gaussian(ldata['new_node_rate_mean'],
                         ldata['new_node_rate_variance']),
                Uniform_Discrete(ldata['time_to_stay_min'],
                                 ldata['time_to_stay_max'])))
    num_left_classes = len(phase_data['left_classes'])
    for (id, ldata) in enumerate(phase_data['right_classes']):
        phase_env_classes.append(
            Class_Env(
                id + num_left_classes, False,
                Gaussian(ldata['new_node_rate_mean'],
                         ldata['new_node_rate_variance']),
                Uniform_Discrete(ldata['time_to_stay_min'],
                                 ldata['time_to_stay_max'])))
    for (ids, edge_data) in phase_data['edge_data'].items():
        class_edge = Class_Env_Edge(Bernoulli(edge_data['mean']),
                                    edge_data['weight'])
        l_class = [c for c in phase_env_classes if c.id == ids[0]][0]
        r_class = [c for c in phase_env_classes
                   if c.id == ids[1] + num_left_classes][0]
        l_class.set_edge_data(r_class, class_edge)
        r_class.set_edge_data(l_class, class_edge)

    env_classes.append(phase_env_classes)

environment = Environment(env_classes)

# ------------------------------------

# Instantiate the main Graph