Example #1
def nist_sampling_format(output, metadata, columns_list, col_maps):
    """
    Output layer format for generator data plus performing random sampling
     from the output softmax and bernoulli distributions.
    """
    with tf.name_scope('nist_sampling_format'):
        output_list = []
        cur_idx = 0
        for k in columns_list:
            v = col_maps[k]
            if isinstance(v, dict):
                if len(v) == 2:
                    output_list.append(
                        tf.cast(
                            tf.expand_dims(
                                Bernoulli(logits=output[:, cur_idx]).sample(), axis=1), tf.float32)
                    )
                    cur_idx += 1
                else:
                    output_list.append(
                        tf.cast(tf.expand_dims(
                            Categorical(logits=output[:, cur_idx: cur_idx+len(v)]).sample(), axis=1), tf.float32))

                    cur_idx += len(v)
            elif v == 'int':
                output_list.append(
                    tf.nn.relu(output[:, cur_idx:cur_idx+1]))
                cur_idx += 1
            elif v == 'int_v':
                output_list.append(tf.nn.sigmoid(output[:, cur_idx:cur_idx+1]))
                output_list.append(tf.nn.relu(output[:, cur_idx+1:cur_idx+2]))
                cur_idx += 2
            elif v == 'void':
                pass
        return tf.concat(output_list, axis=1)
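For context, a minimal call sketch. The structure of `columns_list`/`col_maps` below is an assumption inferred from how the function reads them (a dict marks a binary or categorical column, 'int' a non-negative scalar), the distribution import assumes TF 1.x's tf.distributions, and `metadata` is unused by the snippet above, so None is passed:

import tensorflow as tf
from tensorflow.distributions import Bernoulli, Categorical  # assumed import path (TF 1.x)

# three logical columns: a binary flag (1 logit), a 3-way category (3 logits), a count (1 value)
columns_list = ['flag', 'kind', 'count']                     # hypothetical column names
col_maps = {'flag': {'no': 0, 'yes': 1},
            'kind': {'a': 0, 'b': 1, 'c': 2},
            'count': 'int'}
generator_logits = tf.random_normal([8, 5])                  # batch of 8; 1 + 3 + 1 raw outputs
sampled = nist_sampling_format(generator_logits, None, columns_list, col_maps)
print(sampled.shape)                                         # (8, 3): one sampled value per column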
Example #2
    def _init_ibp_variational_parameters(self, q_tau, q_nu, alpha, num_features, num_objects, rescale=100.):
        """Initialize the IBP variational parameters tau and nu; defaults are drawn when None is passed."""
        # init tau
        if q_tau is None:
            q_tau = np.ones((2, num_features))
            alpha_K = alpha / num_features
            q_tau[0, :] = alpha_K * np.ones(shape=(1, num_features))
            q_tau = q_tau + 1.5 * min(1, alpha_K) * (np.random.rand(2, num_features) - 1.5)
        self._q_tau = Param(q_tau, name="q_tau", transform=transforms.positive)

        # init nu (wrap the user-provided q_nu as well, so self._q_nu is always set)
        in_range = transforms.Chain(transforms.Logistic(a=0., b=1.), transforms.Rescale(rescale))
        if q_nu is None:
            q_nu = np.random.uniform(0., 1., (num_objects, num_features))
        self._q_nu = Param(q_nu, transform=in_range, trainable=True, name="q_nu")

        self.q_nu_un = tf.unstack(self.q_nu)
        self.q_nu_un = [tf.reshape(nu_n, shape=(1, self.num_ibp_features)) for nu_n in self.q_nu_un]
        self.sampler = [Bernoulli(probs=nu_n, validate_args=True) for nu_n in self.q_nu_un]
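For intuition, a NumPy-only sketch of the default q_tau initialization above (alpha and num_features are made-up values): row 0 starts at alpha/K, row 1 at 1, and a random shift is applied before the positive-transformed Param wraps the result:

import numpy as np

alpha, num_features = 2.0, 5                     # hypothetical hyperparameters
alpha_K = alpha / num_features                   # 0.4
q_tau = np.ones((2, num_features))
q_tau[0, :] = alpha_K
q_tau = q_tau + 1.5 * min(1, alpha_K) * (np.random.rand(2, num_features) - 1.5)
print(q_tau.shape)                               # (2, 5): one (tau_1, tau_2) pair per latent feature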
Example #3
    def __init__(self, n_input, n_list, n_y=None,y_weight=100):
        '''
        n_input - number of input neurons
        n_list - numbers of neurons in the hidden layers; the last entry is the number of latent features
        n_y - optional; number of features that will be given as input y during training
        y_weight - relative weight of the regression loss on the y features versus the VAE loss (set by trial and error)
        '''
        # input data
        self.X = tf.placeholder(tf.float32, shape=(None, n_input))
        # input y features
        if (n_y is not None):
            self.y = tf.placeholder(tf.float32, shape=(None, n_y))
        
        # encoder
        self.encoder_layers = []
        # input layer
        previous = n_input
        # in case there is only one hidden layer (the for loop below is skipped)
        current = n_input
        # current is the output of each layer (skip last because there is nothing after it)
        for current in n_list[:-1]:
            h = DenseLayer(previous,current)
            self.encoder_layers.append(h)
            previous = current
        # latent features number
        latent = n_list[-1]
        encoder_output = DenseLayer(current,latent*2,activation='none')
        self.encoder_layers.append(encoder_output)
        
        # feed forward through encoder
        c_X = self.X
        for layer in self.encoder_layers:
            c_X = layer.feed_forward(c_X)
        # c_X now holds the output of the encoder
        # first half are the means
        self.means = c_X[:,:latent]
        # second half are the std; must be positive; +1e-6 for smoothing
        self.std = tf.nn.softplus(c_X[:,latent:]) + 1e-6
        
        # optional loss for steered latent features
        if (n_y is not None):
            self.yhat = self.means[:,:n_y]
            self.error = tf.losses.mean_squared_error(labels=self.y,predictions=self.yhat)
        
        # reparameterization trick
        normal = Normal(loc=self.means,scale=self.std)
        self.Z = normal.sample()

        # decoder
        self.decoder_layers = []
        previous = latent
        for current in reversed(n_list[:-1]):
            h = DenseLayer(previous,current)
            self.decoder_layers.append(h)
            previous = current
        # output is the reconstruction
        decoder_output = DenseLayer(previous,n_input,activation=lambda x:x)
        self.decoder_layers.append(decoder_output)

        #feed forward through decoder, using the sampled 'data'
        c_X = self.Z
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        # use logits for cost function below
        neg_cross_entropy = -tf.nn.sigmoid_cross_entropy_with_logits(labels=self.X,
                    logits=logits)
        neg_cross_entropy = tf.reduce_sum(neg_cross_entropy, 1)
        
        # output
        self.y_prob = Bernoulli(logits=logits)
        
        # sample from output
        self.post_pred = self.y_prob.sample()
        self.post_pred_probs = tf.nn.sigmoid(logits)
        
        # generate 'de-novo' output
        self.gen = tf.Variable(0)
        Z_std = Normal(0.0,1.0).sample([self.gen,latent])
        c_X = Z_std
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        
        prior_pred_dist = Bernoulli(logits=logits)
        self.prior_pred = prior_pred_dist.sample()
        self.prior_pred_probs = tf.nn.sigmoid(logits)
        
        # manually input Z
        self.Z_input = tf.placeholder(np.float32, shape=(None, latent))
        c_X = self.Z_input
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        self.manual_prior_prob = tf.nn.sigmoid(logits)
        
        # cost function
        # Kullback–Leibler divergence
        kl = -tf.log(self.std) + 0.5*(self.std**2 + self.means**2) - 0.5
        kl = tf.reduce_sum(kl, axis=1)
        # ELBO
        self.elbo = tf.reduce_sum(neg_cross_entropy - kl)
        
        if (n_y is None):
            # only ELBO
            self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(-self.elbo)
        else:
            # weighted regression loss and ELBO
            self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(
                tf.reduce_sum(y_weight*self.error-self.elbo))

        self.init = tf.global_variables_initializer()
        self.session = tf.Session()
        self.session.run(self.init)
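The closed-form KL term used above is KL(N(mu, sigma^2) || N(0, 1)) = -log(sigma) + (sigma^2 + mu^2)/2 - 1/2. A quick NumPy sanity check of that formula against a Monte Carlo estimate (illustrative only, not part of the class):

import numpy as np

mu, sigma = 0.7, 1.3                                     # arbitrary example values
closed_form = -np.log(sigma) + 0.5 * (sigma**2 + mu**2) - 0.5
z = np.random.normal(mu, sigma, size=1_000_000)
log_q = -np.log(sigma) - 0.5 * ((z - mu) / sigma) ** 2   # log N(z; mu, sigma) up to a shared constant
log_p = -0.5 * z ** 2                                    # log N(z; 0, 1) up to the same constant
mc_estimate = np.mean(log_q - log_p)                     # KL = E_q[log q - log p]
print(closed_form, mc_estimate)                          # the two values agree to ~2 decimal places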
Example #4
class SteeredVAE:
    
    def __init__(self, n_input, n_list, n_y=None,y_weight=100):
        '''
        n_input - number of input neurons
        n_list - numbers of neurons in the hidden layers; the last entry is the number of latent features
        n_y - optional; number of features that will be given as input y during training
        y_weight - relative weight of the regression loss on the y features versus the VAE loss (set by trial and error)
        '''
        # input data
        self.X = tf.placeholder(tf.float32, shape=(None, n_input))
        # input y features
        if (n_y is not None):
            self.y = tf.placeholder(tf.float32, shape=(None, n_y))
        
        # encoder
        self.encoder_layers = []
        # input layer
        previous = n_input
        # in case there is only one hidden layer (the for loop below is skipped)
        current = n_input
        # current is the output of each layer (skip last because there is nothing after it)
        for current in n_list[:-1]:
            h = DenseLayer(previous,current)
            self.encoder_layers.append(h)
            previous = current
        # latent features number
        latent = n_list[-1]
        encoder_output = DenseLayer(current,latent*2,activation='none')
        self.encoder_layers.append(encoder_output)
        
        # feed forward through encoder
        c_X = self.X
        for layer in self.encoder_layers:
            c_X = layer.feed_forward(c_X)
        # c_X now holds the output of the encoder
        # first half are the means
        self.means = c_X[:,:latent]
        # second half are the std; must be positive; +1e-6 for smoothing
        self.std = tf.nn.softplus(c_X[:,latent:]) + 1e-6
        
        # optional loss for steered latent features
        if (n_y is not None):
            self.yhat = self.means[:,:n_y]
            self.error = tf.losses.mean_squared_error(labels=self.y,predictions=self.yhat)
        
        # reparameterization trick
        normal = Normal(loc=self.means,scale=self.std)
        self.Z = normal.sample()

        # decoder
        self.decoder_layers = []
        previous = latent
        for current in reversed(n_list[:-1]):
            h = DenseLayer(previous,current)
            self.decoder_layers.append(h)
            previous = current
        # output is the reconstruction
        decoder_output = DenseLayer(previous,n_input,activation=lambda x:x)
        self.decoder_layers.append(decoder_output)

        #feed forward through decoder, using the sampled 'data'
        c_X = self.Z
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        # use logits for cost function below
        neg_cross_entropy = -tf.nn.sigmoid_cross_entropy_with_logits(labels=self.X,
                    logits=logits)
        neg_cross_entropy = tf.reduce_sum(neg_cross_entropy, 1)
        
        # output
        self.y_prob = Bernoulli(logits=logits)
        
        # sample from output
        self.post_pred = self.y_prob.sample()
        self.post_pred_probs = tf.nn.sigmoid(logits)
        
        # generate 'de-novo' output
        self.gen = tf.Variable(0)
        Z_std = Normal(0.0,1.0).sample([self.gen,latent])
        c_X = Z_std
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        
        prior_pred_dist = Bernoulli(logits=logits)
        self.prior_pred = prior_pred_dist.sample()
        self.prior_pred_probs = tf.nn.sigmoid(logits)
        
        # manually input Z
        self.Z_input = tf.placeholder(np.float32, shape=(None, latent))
        c_X = self.Z_input
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        self.manual_prior_prob = tf.nn.sigmoid(logits)
        
        # cost function
        # Kullback–Leibler divergence
        kl = -tf.log(self.std) + 0.5*(self.std**2 + self.means**2) - 0.5
        kl = tf.reduce_sum(kl, axis=1)
        # ELBO
        self.elbo = tf.reduce_sum(neg_cross_entropy - kl)
        
        if (n_y is None):
            # only ELBO
            self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(-self.elbo)
        else:
            # weighted regression loss and ELBO
            self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(
                tf.reduce_sum(y_weight*self.error-self.elbo))

        self.init = tf.global_variables_initializer()
        self.session = tf.Session()
        self.session.run(self.init)
    
    def steer(self,X,y,epochs=10,batch=50):
        '''Replaces fit; the user provides the y features used to steer the latent space.'''
        n_batches = len(X) // batch
        for epoch in range(epochs):
            print('Epoch:',epoch+1)
            cost = 0
            e_cost = 0
            for b in range(n_batches):
                c_batch = X[b*batch:(b+1)*batch]
                y_batch = y[b*batch:(b+1)*batch]
                _,c,e, = self.session.run((self.optimizer, self.elbo,self.error),feed_dict={self.X: c_batch,self.y:y_batch})
                # accumulate cost
                cost+=c
                e_cost+=e
            print('Cost:', cost,e_cost)
    
    def fit(self,X,epochs=10,batch=50):
        n_batches = len(X) // batch
        for epoch in range(epochs):
            print('Epoch:',epoch+1)
            cost = 0
            for b in range(n_batches):
                c_batch = X[b*batch:(b+1)*batch]
                _,c, = self.session.run((self.optimizer, self.elbo),feed_dict={self.X: c_batch})
                # accumulate cost
                cost+=c
            print('Cost:', cost)
                       
    def predict(self,X,out='prob'):
        '''
        Pass data through the encoder and decoder and retrieve the reconstructed output.
            By default the probabilities and the latent means are returned;
            the user can specify 'sample' or 'both'.
        '''
        # correct shape if needed
        if (X.ndim==1):
            X = X.reshape([1,-1])
        pred,prob,mm = self.session.run((self.post_pred,self.post_pred_probs,self.means),feed_dict={self.X:X})
        if (out=='prob'):
            return prob,mm
        elif (out=='sample'):
            return pred
        else:
            return pred,prob

    def generate(self,n=1,out='prob'):
        '''
        Generate de-novo output from the prior.
            The user specifies the number of points requested (n);
            by default the probabilities are returned, the user can specify 'sample' or 'both'.
        '''
        pred,prob = self.session.run((self.prior_pred,self.prior_pred_probs),feed_dict={self.gen:n})
        if (out=='prob'):
            return prob
        elif (out=='sample'):
            return pred
        else:
            return pred,prob
    
    def feed(self,Z):
        '''Generate output using provided latent-space input Z'''
        # correct shape if needed
        if (Z.ndim==1):
            Z = Z.reshape([1,-1])
        return self.session.run(self.manual_prior_prob,feed_dict={self.Z_input:Z})
    
    def close(self):
        self.session.close()
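A hypothetical usage sketch for the class above, assuming DenseLayer and the Normal/Bernoulli distributions are imported alongside it; the data is random and serves only to show the calling convention:

import numpy as np

# 784 inputs, two hidden layers, 10 latent features, the first 2 of which are steered by y
vae = SteeredVAE(n_input=784, n_list=[256, 64, 10], n_y=2, y_weight=100)
X = (np.random.rand(1000, 784) > 0.5).astype(np.float32)    # random binary toy data
y = np.random.rand(1000, 2).astype(np.float32)              # targets for the two steered latents
vae.steer(X, y, epochs=2, batch=50)
probs, means = vae.predict(X[:5])                           # reconstruction probabilities and latent means
samples = vae.generate(n=4, out='sample')                   # de-novo samples from the prior
vae.close()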
Example #5
    def __init__(self, n_input, n_list, ent_weight=1, lr=0.001):
        '''
        n_input - number of input neurons
        n_list - numbers of neurons in the encoder hidden layers; the last entry is the number of latent features
        ent_weight - weight on the reconstruction (cross-entropy) term of the ELBO
        lr - learning rate

        The decoder mirrors the encoder architecture.
        Note: the last encoder layer actually has 2x the latent size, for the means and stds.
        '''
        # input data
        self.X = tf.placeholder(tf.float32, shape=(None, n_input))

        # encoder
        self.encoder_layers = []
        # input layer
        previous = n_input
        # in case there is only one hidden layer (for loop will be skipped)
        current = n_input
        # current is the output of each layer (skip last because there is nothing after it)
        for current in n_list[:-1]:
            h = DenseLayer(previous, current)
            self.encoder_layers.append(h)
            previous = current
        # latent features number
        latent = n_list[-1]
        encoder_output = DenseLayer(current, latent * 2, activation='none')
        self.encoder_layers.append(encoder_output)

        # feed forward through encoder
        c_X = self.X
        for layer in self.encoder_layers:
            c_X = layer.feed_forward(c_X)
        # c_X now holds the output of the encoder
        # first half are the means
        self.means = c_X[:, :latent]
        # second half are the std; must be positive; +1e-6 for smoothing
        self.std = tf.nn.softplus(c_X[:, latent:]) + 1e-6

        # reparameterization trick
        normal = Normal(loc=self.means, scale=self.std)
        self.Z = normal.sample()

        # decoder
        self.decoder_layers = []
        previous = latent
        for current in reversed(n_list[:-1]):
            h = DenseLayer(previous, current)
            self.decoder_layers.append(h)
            previous = current
        # output is the reconstruction
        decoder_output = DenseLayer(previous, n_input, activation=lambda x: x)
        self.decoder_layers.append(decoder_output)

        #feed forward through decoder, using the sampled 'data'
        c_X = self.Z
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        # use logits for cost function below
        neg_cross_entropy = -tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.X, logits=logits)
        neg_cross_entropy = tf.reduce_sum(neg_cross_entropy, 1)

        # output
        self.y_prob = Bernoulli(logits=logits)

        # sample from output
        self.post_pred = self.y_prob.sample()
        self.post_pred_probs = tf.nn.sigmoid(logits)

        # generate 'de-novo' output
        self.gen = tf.Variable(0)
        Z_std = Normal(0.0, 1.0).sample([self.gen, latent])
        c_X = Z_std
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X

        prior_pred_dist = Bernoulli(logits=logits)
        self.prior_pred = prior_pred_dist.sample()
        self.prior_pred_probs = tf.nn.sigmoid(logits)

        # manually input Z
        self.Z_input = tf.placeholder(np.float32, shape=(None, latent))
        c_X = self.Z_input
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        self.manual_prior_prob = tf.nn.sigmoid(logits)

        # cost function
        # Kullback–Leibler divergence
        kl = -tf.log(self.std) + 0.5 * (self.std**2 + self.means**2) - 0.5
        kl = tf.reduce_sum(kl, axis=1)
        # ELBO
        self.elbo = tf.reduce_sum(ent_weight * neg_cross_entropy - kl)

        self.optimizer = tf.train.RMSPropOptimizer(
            learning_rate=lr).minimize(-self.elbo)

        self.init = tf.global_variables_initializer()
        self.session = tf.Session()
        self.session.run(self.init)
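The `normal.sample()` call above relies on the reparameterization trick: assuming Normal is TF's (or TFP's) reparameterized Normal distribution, the draw is z = loc + scale * eps with eps ~ N(0, 1), so gradients flow back into `means` and `std` through the sample. Written out explicitly, a drop-in equivalent for the two lines `normal = Normal(...)` / `self.Z = normal.sample()` inside __init__ would be:

        eps = tf.random_normal(tf.shape(self.means))  # standard normal noise, independent of the parameters
        self.Z = self.means + self.std * eps          # equivalent sample, differentiable w.r.t. means and std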
Example #6
class VariationalAutoencoder:
    def __init__(self, n_input, n_list, ent_weight=1, lr=0.001):
        '''
        n_input - number of input neurons
        n_list - numbers of neurons in the encoder hidden layers; the last entry is the number of latent features
        ent_weight - weight on the reconstruction (cross-entropy) term of the ELBO
        lr - learning rate

        The decoder mirrors the encoder architecture.
        Note: the last encoder layer actually has 2x the latent size, for the means and stds.
        '''
        # input data
        self.X = tf.placeholder(tf.float32, shape=(None, n_input))

        # encoder
        self.encoder_layers = []
        # input layer
        previous = n_input
        # in case there is only one hidden layer (for loop will be skipped)
        current = n_input
        # current is the output of each layer (skip last because there is nothing after it)
        for current in n_list[:-1]:
            h = DenseLayer(previous, current)
            self.encoder_layers.append(h)
            previous = current
        # latent features number
        latent = n_list[-1]
        encoder_output = DenseLayer(current, latent * 2, activation='none')
        self.encoder_layers.append(encoder_output)

        # feed forward through encoder
        c_X = self.X
        for layer in self.encoder_layers:
            c_X = layer.feed_forward(c_X)
        # c_X now holds the output of the encoder
        # first half are the means
        self.means = c_X[:, :latent]
        # second half are the std; must be positive; +1e-6 for smoothing
        self.std = tf.nn.softplus(c_X[:, latent:]) + 1e-6

        # reparameterization trick
        normal = Normal(loc=self.means, scale=self.std)
        self.Z = normal.sample()

        # decoder
        self.decoder_layers = []
        previous = latent
        for current in reversed(n_list[:-1]):
            h = DenseLayer(previous, current)
            self.decoder_layers.append(h)
            previous = current
        # output is the reconstruction
        decoder_output = DenseLayer(previous, n_input, activation=lambda x: x)
        self.decoder_layers.append(decoder_output)

        #feed forward through decoder, using the sampled 'data'
        c_X = self.Z
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        # use logits for cost function below
        neg_cross_entropy = -tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.X, logits=logits)
        neg_cross_entropy = tf.reduce_sum(neg_cross_entropy, 1)

        # output
        self.y_prob = Bernoulli(logits=logits)

        # sample from output
        self.post_pred = self.y_prob.sample()
        self.post_pred_probs = tf.nn.sigmoid(logits)

        # generate 'de-novo' output
        self.gen = tf.Variable(0)
        Z_std = Normal(0.0, 1.0).sample([self.gen, latent])
        c_X = Z_std
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X

        prior_pred_dist = Bernoulli(logits=logits)
        self.prior_pred = prior_pred_dist.sample()
        self.prior_pred_probs = tf.nn.sigmoid(logits)

        # manually input Z
        self.Z_input = tf.placeholder(np.float32, shape=(None, latent))
        c_X = self.Z_input
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        self.manual_prior_prob = tf.nn.sigmoid(logits)

        # cost function
        # Kullback–Leibler divergence
        kl = -tf.log(self.std) + 0.5 * (self.std**2 + self.means**2) - 0.5
        kl = tf.reduce_sum(kl, axis=1)
        # ELBO
        self.elbo = tf.reduce_sum(ent_weight * neg_cross_entropy - kl)

        self.optimizer = tf.train.RMSPropOptimizer(
            learning_rate=lr).minimize(-self.elbo)

        self.init = tf.global_variables_initializer()
        self.session = tf.Session()
        self.session.run(self.init)

    def fit(self, X, epochs=10, batch=50):
        n_batches = len(X) // batch
        for epoch in range(epochs):
            print('Epoch:', epoch + 1)
            np.random.shuffle(X)
            cost = 0
            for b in range(n_batches):
                c_batch = X[b * batch:(b + 1) * batch]
                _, c, = self.session.run((self.optimizer, self.elbo),
                                         feed_dict={self.X: c_batch})
                # accumulate cost
                cost += c
            print('Cost:', cost)

    def predict(self, X, out='prob'):
        '''
        Pass data through the encoder and decoder and retrieve the reconstructed output.
            By default the probabilities are returned; the user can specify 'sample' or 'both'.
        '''
        # correct shape if needed
        if (X.ndim == 1):
            X = X.reshape([1, -1])
        pred, prob = self.session.run((self.post_pred, self.post_pred_probs),
                                      feed_dict={self.X: X})
        if (out == 'prob'):
            return prob
        elif (out == 'sample'):
            return pred
        else:
            return pred, prob

    def generate(self, n=1, out='prob'):
        '''
        Generate de-novo output from the prior.
            The user specifies the number of points requested (n);
            by default the probabilities are returned, the user can specify 'sample' or 'both'.
        '''
        pred, prob = self.session.run((self.prior_pred, self.prior_pred_probs),
                                      feed_dict={self.gen: n})
        if (out == 'prob'):
            return prob
        elif (out == 'sample'):
            return pred
        else:
            return pred, prob

    def feed(self, Z):
        '''Generate output using provided latent-space input Z'''
        # correct shape if needed
        if (Z.ndim == 1):
            Z = Z.reshape([1, -1])
        return self.session.run(self.manual_prior_prob,
                                feed_dict={self.Z_input: Z})

    def close(self):
        self.session.close()
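A hypothetical usage sketch for the class above (DenseLayer, Normal and Bernoulli are assumed to be defined or imported alongside it; the binary data is random):

import numpy as np

vae = VariationalAutoencoder(n_input=784, n_list=[256, 32], ent_weight=1, lr=0.001)
X = (np.random.rand(2000, 784) > 0.5).astype(np.float32)    # stand-in for binarized images
vae.fit(X, epochs=2, batch=50)
recon = vae.predict(X[:10])                                 # reconstruction probabilities, shape (10, 784)
novel = vae.generate(n=5, out='both')                       # (samples, probabilities) drawn from the prior
decoded = vae.feed(np.zeros((1, 32), dtype=np.float32))     # decode a hand-picked latent point
vae.close()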
Example #7
    def __init__(self,
                 image_shape=(128, 128, 3),
                 conv_param=(3, 16, True),
                 n_list=[256, 32]):
        '''
        image_shape - (height, width, channels) of the input images
        conv_param - (kernel_size, n_filters, downsample) for the single convolution layer;
            downsample=True halves the spatial dimensions
        n_list - sizes of the fully connected hidden layers; the last entry is the number of latent features
        '''
        # input data
        self.X = tf.placeholder(tf.float32, shape=(None, *image_shape))

        # encoder
        self.encoder_layers = []
        # convolution layer
        h = Conv2DLayer(image_shape[2], conv_param[0], conv_param[1],
                        conv_param[2])
        self.encoder_layers.append(h)
        # flatten layer
        self.encoder_layers.append(FlattenLayer())
        # calculate number of input neurons to the FC layer
        previous = image_shape[0] * image_shape[1] * conv_param[1]
        if conv_param[2]:
            previous = previous // 4
        # save for later
        flat = previous
        # current is the output of each layer (skip last because there is nothing after it)
        for current in n_list[:-1]:
            h = DenseLayer(previous, current)
            self.encoder_layers.append(h)
            previous = current
        # latent features number
        latent = n_list[-1]
        encoder_output = DenseLayer(current, latent * 2, activation='none')
        self.encoder_layers.append(encoder_output)

        # feed forward through encoder
        c_X = self.X
        for layer in self.encoder_layers:
            c_X = layer.feed_forward(c_X)
        # c_X now holds the output of the encoder
        # first half are the means
        self.means = c_X[:, :latent]
        # second half are the std; must be positive; +1e-6 for smoothing
        self.std = tf.nn.softplus(c_X[:, latent:]) + 1e-6

        # reparameterization trick
        normal = Normal(loc=self.means, scale=self.std)
        self.Z = normal.sample()

        # decoder
        self.decoder_layers = []
        previous = latent
        for current in reversed(n_list[:-1]):
            h = DenseLayer(previous, current)
            self.decoder_layers.append(h)
            previous = current
        decoder_output = DenseLayer(previous, flat, activation=lambda x: x)
        self.decoder_layers.append(decoder_output)
        #feed forward through decoder, using the sampled 'data'
        c_X = self.Z
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        # reshape
        if (conv_param[2]):
            shape = [
                -1, image_shape[0] // 2, image_shape[0] // 2, conv_param[1]
            ]
        else:
            shape = [-1, image_shape[0], image_shape[0], conv_param[1]]
        c_X = tf.reshape(c_X, shape)
        # convolution transpose
        self.trans_k = tf.Variable(
            tf.truncated_normal(
                [conv_param[0], conv_param[0], image_shape[2], conv_param[1]],
                stddev=0.1))
        if (conv_param[2]):
            strides = (1, 2, 2, 1)
        else:
            strides = (1, 1, 1, 1)
        c_X = tf.nn.conv2d_transpose(c_X,
                                     self.trans_k,
                                     strides=strides,
                                     padding='SAME',
                                     output_shape=[50, *image_shape])

        # output logit
        logits = c_X
        # use logits for cost function below
        neg_cross_entropy = -tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.X, logits=logits)
        # sum over the pixel and channel axes so the result is one value per example
        neg_cross_entropy = tf.reduce_sum(neg_cross_entropy, axis=[1, 2, 3])

        # output
        self.y_prob = Bernoulli(logits=logits)

        # sample from output
        self.post_pred = self.y_prob.sample()
        self.post_pred_probs = tf.nn.sigmoid(logits)

        # generate 'de-novo' output
        self.gen = tf.Variable(0)
        Z_std = Normal(0.0, 1.0).sample([self.gen, latent])
        c_X = Z_std
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        c_X = tf.reshape(c_X, shape)
        c_X = tf.nn.conv2d_transpose(c_X,
                                     self.trans_k,
                                     strides=strides,
                                     padding='SAME',
                                     output_shape=[50, *image_shape])
        logits = c_X

        prior_pred_dist = Bernoulli(logits=logits)
        self.prior_pred = prior_pred_dist.sample()
        self.prior_pred_probs = tf.nn.sigmoid(logits)

        # manually input Z
        self.Z_input = tf.placeholder(np.float32, shape=(None, latent))
        c_X = self.Z_input
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        self.manual_prior_prob = tf.nn.sigmoid(logits)

        # cost function
        # Kullback–Leibler divergence
        kl = -tf.log(self.std) + 0.5 * (self.std**2 + self.means**2) - 0.5
        kl = tf.reduce_sum(kl, axis=1)
        # ELBO
        self.elbo = tf.reduce_sum(neg_cross_entropy - kl)

        self.optimizer = tf.train.RMSPropOptimizer(
            learning_rate=0.001).minimize(-self.elbo)

        self.init = tf.global_variables_initializer()
        self.session = tf.Session()
        self.session.run(self.init)
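For reference, the flattened size fed to the first dense layer under the default arguments; a quick arithmetic check (not part of the class):

image_shape = (128, 128, 3)
conv_param = (3, 16, True)                                # kernel 3x3, 16 filters, 2x downsampling
flat = image_shape[0] * image_shape[1] * conv_param[1]    # 128 * 128 * 16 = 262144
if conv_param[2]:
    flat //= 4                                            # halved in both spatial dims -> 65536
print(flat)                                               # 65536; the decoder output is reshaped to (-1, 64, 64, 16)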
Example #8
class ConvVAE:
    def __init__(self,
                 image_shape=(128, 128, 3),
                 conv_param=(3, 16, True),
                 n_list=[256, 32]):
        '''
        image_shape - (height, width, channels) of the input images
        conv_param - (kernel_size, n_filters, downsample) for the single convolution layer;
            downsample=True halves the spatial dimensions
        n_list - sizes of the fully connected hidden layers; the last entry is the number of latent features
        '''
        # input data
        self.X = tf.placeholder(tf.float32, shape=(None, *image_shape))

        # encoder
        self.encoder_layers = []
        # convolution layer
        h = Conv2DLayer(image_shape[2], conv_param[0], conv_param[1],
                        conv_param[2])
        self.encoder_layers.append(h)
        # flatten layer
        self.encoder_layers.append(FlattenLayer())
        # calculate number of input neurons to the FC layer
        previous = image_shape[0] * image_shape[1] * conv_param[1]
        if conv_param[2]:
            previous = previous // 4
        # save for later
        flat = previous
        # current is the output of each layer (skip last because there is nothing after it)
        for current in n_list[:-1]:
            h = DenseLayer(previous, current)
            self.encoder_layers.append(h)
            previous = current
        # latent features number
        latent = n_list[-1]
        encoder_output = DenseLayer(current, latent * 2, activation='none')
        self.encoder_layers.append(encoder_output)

        # feed forward through encoder
        c_X = self.X
        for layer in self.encoder_layers:
            c_X = layer.feed_forward(c_X)
        # c_X now holds the output of the encoder
        # first half are the means
        self.means = c_X[:, :latent]
        # second half are the std; must be positive; +1e-6 for smoothing
        self.std = tf.nn.softplus(c_X[:, latent:]) + 1e-6

        # reparameterization trick
        normal = Normal(loc=self.means, scale=self.std)
        self.Z = normal.sample()

        # decoder
        self.decoder_layers = []
        previous = latent
        for current in reversed(n_list[:-1]):
            h = DenseLayer(previous, current)
            self.decoder_layers.append(h)
            previous = current
        decoder_output = DenseLayer(previous, flat, activation=lambda x: x)
        self.decoder_layers.append(decoder_output)
        #feed forward through decoder, using the sampled 'data'
        c_X = self.Z
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        # reshape
        if (conv_param[2]):
            shape = [
                -1, image_shape[0] // 2, image_shape[0] // 2, conv_param[1]
            ]
        else:
            shape = [-1, image_shape[0], image_shape[0], conv_param[1]]
        c_X = tf.reshape(c_X, shape)
        # convolution transpose
        self.trans_k = tf.Variable(
            tf.truncated_normal(
                [conv_param[0], conv_param[0], image_shape[2], conv_param[1]],
                stddev=0.1))
        if (conv_param[2]):
            strides = (1, 2, 2, 1)
        else:
            strides = (1, 1, 1, 1)
        c_X = tf.nn.conv2d_transpose(c_X,
                                     self.trans_k,
                                     strides=strides,
                                     padding='SAME',
                                     output_shape=[50, *image_shape])

        # output logit
        logits = c_X
        # use logits for cost function below
        neg_cross_entropy = -tf.nn.sigmoid_cross_entropy_with_logits(
            labels=self.X, logits=logits)
        # sum over the pixel and channel axes so the result is one value per example
        neg_cross_entropy = tf.reduce_sum(neg_cross_entropy, axis=[1, 2, 3])

        # output
        self.y_prob = Bernoulli(logits=logits)

        # sample from output
        self.post_pred = self.y_prob.sample()
        self.post_pred_probs = tf.nn.sigmoid(logits)

        # generate 'de-novo' output
        self.gen = tf.Variable(0)
        Z_std = Normal(0.0, 1.0).sample([self.gen, latent])
        c_X = Z_std
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        c_X = tf.reshape(c_X, shape)
        c_X = tf.nn.conv2d_transpose(c_X,
                                     self.trans_k,
                                     strides=strides,
                                     padding='SAME',
                                     output_shape=[50, *image_shape])
        logits = c_X

        prior_pred_dist = Bernoulli(logits=logits)
        self.prior_pred = prior_pred_dist.sample()
        self.prior_pred_probs = tf.nn.sigmoid(logits)

        # manually input Z
        self.Z_input = tf.placeholder(np.float32, shape=(None, latent))
        c_X = self.Z_input
        for layer in self.decoder_layers:
            c_X = layer.feed_forward(c_X)
        logits = c_X
        self.manual_prior_prob = tf.nn.sigmoid(logits)

        # cost function
        # Kullback–Leibler divergence
        kl = -tf.log(self.std) + 0.5 * (self.std**2 + self.means**2) - 0.5
        kl = tf.reduce_sum(kl, axis=1)
        # ELBO
        self.elbo = tf.reduce_sum(neg_cross_entropy - kl)

        self.optimizer = tf.train.RMSPropOptimizer(
            learning_rate=0.001).minimize(-self.elbo)

        self.init = tf.global_variables_initializer()
        self.session = tf.Session()
        self.session.run(self.init)

    def fit(self, X, epochs=10, batch=50):
        n_batches = len(X) // batch
        for epoch in range(epochs):
            print('Epoch:', epoch + 1)
            np.random.shuffle(X)
            cost = 0
            for b in range(n_batches):
                c_batch = X[b * batch:(b + 1) * batch]
                _, c, = self.session.run((self.optimizer, self.elbo),
                                         feed_dict={self.X: c_batch})
                # accumulate cost
                cost += c
            print('Cost:', cost)

    def predict(self, X, out='prob'):
        '''
        Pass data through the encoder and decoder and retrieve the reconstructed output.
            By default the probabilities are returned; the user can specify 'sample' or 'both'.
        '''
        # correct shape if needed
        if (X.ndim == 1):
            X = X.reshape([1, -1])
        pred, prob = self.session.run((self.post_pred, self.post_pred_probs),
                                      feed_dict={self.X: X})
        if (out == 'prob'):
            return prob
        elif (out == 'sample'):
            return pred
        else:
            return pred, prob

    def generate(self, n=1, out='prob'):
        '''
        Generate de-novo output from the prior.
            The user specifies the number of points requested (n);
            by default the probabilities are returned, the user can specify 'sample' or 'both'.
        '''
        pred, prob = self.session.run((self.prior_pred, self.prior_pred_probs),
                                      feed_dict={self.gen: n})
        if (out == 'prob'):
            return prob
        elif (out == 'sample'):
            return pred
        else:
            return pred, prob

    def feed(self, Z):
        '''Generate output using provided latent-space input Z'''
        # correct shape if needed
        if (Z.ndim == 1):
            Z = Z.reshape([1, -1])
        return self.session.run(self.manual_prior_prob,
                                feed_dict={self.Z_input: Z})

    def close(self):
        self.session.close()
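A hypothetical usage sketch for the class above (Conv2DLayer, FlattenLayer, DenseLayer and the distribution imports are assumed to come from the same module; the image data is random). Note that the decoder's conv2d_transpose hard-codes `output_shape=[50, ...]`, so predict and generate expect batches of exactly 50:

import numpy as np

cvae = ConvVAE(image_shape=(128, 128, 3), conv_param=(3, 16, True), n_list=[256, 32])
X = np.random.rand(200, 128, 128, 3).astype(np.float32)   # stand-in for images scaled to [0, 1]
cvae.fit(X, epochs=1, batch=50)
probs = cvae.predict(X[:50])                              # reconstruction probabilities, shape (50, 128, 128, 3)
samples = cvae.generate(n=50, out='sample')               # de-novo samples from the prior
cvae.close()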