Code Example #1
    def __init__(self,
                 preds,
                 labels,
                 model,
                 num_nodes,
                 num_features,
                 pos_weight_adj,
                 norm_adj,
                 global_step,
                 ridge=0.):

        preds_adj, preds_express = preds
        labels_adj, labels_express = labels

        self.cost_adj = norm_adj * tf.reduce_mean(
            tf.nn.weighted_cross_entropy_with_logits(
                logits=preds_adj,
                targets=labels_adj,
                pos_weight=pos_weight_adj))
        # expression reconstruction loss: ZINB negative log-likelihood
        zinb = ZINB(model.z_express_pi,
                    theta=model.z_express_disp,
                    ridge_lambda=ridge)
        self.cost_express = zinb.loss(
            tf.reshape(labels_express, [num_features, num_nodes]),
            tf.reshape(preds_express, [num_features, num_nodes]))
        self.log_lik = self.cost_adj + self.cost_express

        # KL divergence
        self.kl_adj = (0.5 / num_nodes) * tf.reduce_mean(
            tf.reduce_sum(
                1 + 2 * model.z_adj_log_std - tf.square(model.z_adj_mean) -
                tf.square(tf.exp(model.z_adj_log_std)), 1))
        self.kl_express = (
            FLAGS.weight_decay * 0.5 / num_features) * tf.reduce_mean(
                tf.square(
                    tf.subtract(
                        tf.reshape(preds_express, [num_features, num_nodes]),
                        tf.reshape(labels_express,
                                   [num_features, num_nodes]))))
        self.kl = self.kl_adj - self.kl_express

        self.cost = self.log_lik - self.kl

        # self.optimizer = tf.train.AdadeltaOptimizer(learning_rate=FLAGS.learning_rate)  # Adam Optimizer
        # self.opt_op = self.optimizer.minimize(self.cost)
        # self.grads_vars = self.optimizer.compute_gradients(self.cost)

        initial_learning_rate = FLAGS.learning_rate
        self.learning_rate = tf.train.exponential_decay(
            initial_learning_rate,
            global_step=global_step,
            decay_steps=50,
            decay_rate=0.9,
            staircase=False)
        self.optimizer = tf.train.AdamOptimizer(
            learning_rate=self.learning_rate)
        self.opt_op = self.optimizer.minimize(self.cost)
        self.grads_vars = self.optimizer.compute_gradients(self.cost)
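
One caveat worth flagging in the example above: tf.train.exponential_decay only takes effect if the global_step passed in actually advances, and minimize(self.cost) as written does not increment it, so the counter presumably has to be advanced elsewhere. A minimal sketch of one common way to wire it, assuming the same TF1-style graph API (variable names are illustrative):

# Hypothetical wiring; only the global_step argument to minimize() differs from the example above.
global_step = tf.Variable(0, trainable=False, name='global_step')
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,
                                           global_step=global_step,
                                           decay_steps=50,
                                           decay_rate=0.9)
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
# Passing global_step here increments it on every update, so the decay actually kicks in.
opt_op = optimizer.minimize(cost, global_step=global_step)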
Code Example #2
    def build_output(self):
        pi = Dense(self.output_size,
                   activation='sigmoid',
                   kernel_initializer=self.init,
                   kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                   name='pi')(self.decoder_output)

        disp = Dense(self.output_size,
                     activation=DispAct,
                     kernel_initializer=self.init,
                     kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                     name='dispersion')(self.decoder_output)

        mean = Dense(self.output_size,
                     activation=MeanAct,
                     kernel_initializer=self.init,
                     kernel_regularizer=l1_l2(self.l1_coef, self.l2_coef),
                     name='mean')(self.decoder_output)
        output = ColwiseMultLayer([mean, self.sf_layer])
        output = SliceLayer(0, name='slice')([output, disp, pi])

        zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
        self.loss = zinb.loss
        self.extra_models['pi'] = Model(inputs=self.input_layer, outputs=pi)
        self.extra_models['dispersion'] = Model(inputs=self.input_layer,
                                                outputs=disp)
        self.extra_models['mean_norm'] = Model(inputs=self.input_layer,
                                               outputs=mean)
        self.extra_models['decoded'] = Model(inputs=self.input_layer,
                                             outputs=self.decoder_output)

        self.model = Model(inputs=[self.input_layer, self.sf_layer],
                           outputs=output)
        self.encoder = self.get_encoder(True)
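
A hedged sketch of how a network built this way is typically trained: the ZINB loss stored on the object is passed to compile(), the raw counts act as both input and target, and the per-cell size factors come in as the second input. The names net, counts and size_factors below are illustrative, not taken from the project:

# Assumed usage (illustrative only).
net.build_output()
net.model.compile(optimizer='adam', loss=net.loss)
# counts: (cells x genes) raw count matrix; size_factors: per-cell library-size factors
net.model.fit([counts, size_factors], counts, batch_size=32, epochs=100)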
Code Example #3
File: ae_v1.4.py  Project: pansword/cs230rseq
def model_1_and_loss():
    input_layer = Input(shape=(num_input, ))
    layer_1 = Dense(
        num_hidden_1,
        activation='relu',
        kernel_initializer=initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=regularizers.l2(l2_parameter))(input_layer)
    layer_1 = Dropout(rate=dropout_prob)(layer_1)
    bottleneck = Dense(num_bottleneck,
                       activation='relu',
                       kernel_initializer=initializer,
                       bias_initializer=bias_initializer)(layer_1)
    layer__1 = Dense(
        num_hidden_1,
        activation='relu',
        kernel_initializer=initializer,
        bias_initializer=bias_initializer,
        kernel_regularizer=regularizers.l2(l2_parameter))(bottleneck)
    layer__1 = Dropout(rate=dropout_prob)(layer__1)
    #output = Dense(num_input)(layer__1)
    pi = Dense(num_input, kernel_initializer=initializer,
               activation='sigmoid')(layer__1)
    mean = Dense(num_input, kernel_initializer=initializer,
                 activation=MeanAct)(layer__1)
    disp = Dense(num_input, kernel_initializer=initializer,
                 activation=DispAct)(layer__1)

    zinb = ZINB(pi, theta=disp)
    #zinb_loss = zinb_model(input_layer, mean, disp, pi)

    ae_model = Model(input_layer, outputs=mean)
    encoder_model = Model(input_layer, outputs=bottleneck)

    return ae_model, zinb, encoder_model
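
Since the ZINB object is returned alongside the models, the caller is presumably responsible for compiling with its loss; a minimal sketch under that assumption (x_train is an illustrative count matrix):

# Assumed usage of the returned objects.
ae_model, zinb, encoder_model = model_1_and_loss()
ae_model.compile(optimizer='adam', loss=zinb.loss)
ae_model.fit(x_train, x_train, epochs=50, batch_size=64)  # reconstruct raw counts
latent = encoder_model.predict(x_train)                   # bottleneck embedding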
Code Example #4
File: ae_v1.4.py  Project: pansword/cs230rseq
def model_and_loss():
    input_layer = Input(shape=(num_input, ))
    layer_1 = Dense(num_hidden_1,
                    activation='relu',
                    kernel_initializer=initializer,
                    bias_initializer=bias_initializer)(input_layer)
    layer_2 = Dense(num_hidden_2,
                    activation='relu',
                    kernel_initializer=initializer,
                    bias_initializer=bias_initializer)(layer_1)
    bottleneck = Dense(num_bottleneck,
                       kernel_initializer=initializer,
                       bias_initializer=bias_initializer)(layer_2)
    layer__2 = Dense(num_hidden_2,
                     activation='relu',
                     kernel_initializer=initializer,
                     bias_initializer=bias_initializer)(bottleneck)
    layer__1 = Dense(num_hidden_1,
                     activation='relu',
                     kernel_initializer=initializer,
                     bias_initializer=bias_initializer)(layer__2)
    #output = Dense(num_input)(layer__1)
    pi = Dense(num_input)(layer__1)
    mean = Dense(num_input, activation=MeanAct)(layer__1)
    disp = Dense(num_input, activation=DispAct)(layer__1)

    zinb = ZINB(pi, theta=disp)

    ae_model = Model(input_layer, outputs=mean)
    encoder_model = Model(input_layer, outputs=bottleneck)

    return ae_model, zinb, encoder_model
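
One difference from Code Example #3: pi here has no sigmoid activation. If the ZINB implementation treats pi as a dropout probability in [0, 1], as the other examples suggest, constraining it may be necessary; a possible adjustment:

# Possible adjustment (assumption: pi is meant to be a probability in [0, 1]).
pi = Dense(num_input, kernel_initializer=initializer,
           activation='sigmoid')(layer__1)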
Code Example #5
    def __init__(self,
                 dims,
                 n_clusters=10,
                 noise_sd=3,
                 alpha=1.0,
                 ridge=0,
                 debug=False):

        super(DCAC, self).__init__()

        self.dims = dims
        self.input_dim = dims[0]
        self.n_stacks = len(self.dims) - 1

        self.n_clusters = n_clusters
        self.noise_sd = noise_sd
        self.alpha = alpha
        self.act = 'relu'
        self.ridge = ridge
        self.debug = debug
        self.autoencoder = autoencoder(self.dims,
                                       noise_sd=self.noise_sd,
                                       act=self.act)

        # prepare clean encode model
        ae_layers = [l for l in self.autoencoder.layers]
        #        print(ae_layers)
        hidden = self.autoencoder.input[0]
        for i in range(1, len(ae_layers)):
            if "noise" in ae_layers[i].name:
                pass  # skip Gaussian-noise layers in the clean encoder
            elif "dropout" in ae_layers[i].name:
                pass  # skip dropout layers in the clean encoder
            else:
                hidden = ae_layers[i](hidden)
            if "encoder_hidden" in ae_layers[i].name:
                break  # stop once the encoder bottleneck layer is reached


#        hidden = self.autoencoder.get_layer(name='encoder_hidden').output
        self.encoder = Model(inputs=self.autoencoder.input[0], outputs=hidden)

        pi = self.autoencoder.get_layer(name='pi').output
        disp = self.autoencoder.get_layer(name='dispersion').output
        mean = self.autoencoder.get_layer(name='mean').output
        zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
        self.loss = zinb.loss

        clustering_layer = ClusteringLayer(self.n_clusters,
                                           alpha=self.alpha,
                                           name='clustering')(hidden)
        self.model = Model(inputs=self.autoencoder.input[0],
                           outputs=clustering_layer)

        self.pretrained = False
        self.centers = []
        self.y_pred = []
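
A sketch of the two-stage training such a class usually implies: pretrain the ZINB autoencoder on raw counts, then fine-tune the clustering head against a self-training target distribution. The dims value and optimizer choices below are illustrative assumptions:

# Assumed two-stage workflow (illustrative only).
dcac = DCAC(dims=[n_genes, 256, 64, 32], n_clusters=10)
# Stage 1: pretrain the autoencoder with the ZINB reconstruction loss.
dcac.autoencoder.compile(optimizer='adam', loss=dcac.loss)
# Stage 2: train the clustering output against a KL-divergence target.
dcac.model.compile(optimizer='adam', loss='kld')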
Code Example #6
    def __init__(self,
                 dims,
                 n_clusters=10,
                 noise_sd=0,
                 alpha=1.0,
                 ridge=0,
                 debug=False,
                 temp=500.0):

        super(SCDeepCluster, self).__init__()

        self.dims = dims
        self.input_dim = dims[0]
        self.n_stacks = len(self.dims) - 1

        self.n_clusters = n_clusters
        self.noise_sd = noise_sd
        self.alpha = alpha
        self.act = 'relu'
        self.ridge = ridge
        self.debug = debug
        self.autoencoder = autoencoder(self.dims,
                                       self.n_clusters,
                                       noise_sd=self.noise_sd,
                                       act=self.act,
                                       temp=temp)

        # prepare clean encode model without Gaussian noise
        ae_layers = [l for l in self.autoencoder.layers]
        hidden = self.autoencoder.input[0]
        for i in range(1, len(ae_layers)):
            if "noise" in ae_layers[i].name:
                pass  # skip Gaussian-noise layers in the clean encoder
            elif "dropout" in ae_layers[i].name:
                pass  # skip dropout layers in the clean encoder
            else:
                hidden = ae_layers[i](hidden)
            #if "encoder_hidden" in ae_layers[i].name:  # only get encoder layers
            if "gumbel_layer" in ae_layers[i].name:
                break  # stop once the encoder's Gumbel layer is reached
        self.encoder = Model(inputs=self.autoencoder.input, outputs=hidden)

        pi = self.autoencoder.get_layer(name='pi').output
        disp = self.autoencoder.get_layer(name='dispersion').output
        mean = self.autoencoder.get_layer(name='mean').output
        zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
        self.loss = zinb.loss

        clustering_layer = ClusteringLayer(self.n_clusters,
                                           alpha=self.alpha,
                                           name='clustering')(hidden)
        self.model = Model(
            inputs=[self.autoencoder.input[0], self.autoencoder.input[1]],
            outputs=[clustering_layer, self.autoencoder.output])

        self.pretrained = False
        self.centers = []
        self.y_pred = []
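
Because this model exposes two outputs (cluster assignments and the reconstructed counts), both losses are usually combined at compile time. A hedged sketch; the loss weight gamma is illustrative, not taken from the class above:

# Assumed joint compile step.
sdc = SCDeepCluster(dims=[n_genes, 256, 64, 32], n_clusters=10)
sdc.model.compile(optimizer='adam',
                  loss=['kld', sdc.loss],     # clustering KL + ZINB reconstruction
                  loss_weights=[gamma, 1.0])  # gamma balances the two objectives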
Code Example #7
File: zinb_AE.py  Project: jhu99/scanalyse
    def build(self, input_size):
        inputs = Input(shape=(input_size, ), name="counts")
        sf = Input(shape=(1, ), name='size_factors')
        Relu = 'relu'

        # Construct network layers
        x = Dense(128,
                  kernel_regularizer=l1_l2(l1=0., l2=0.),
                  name="encoder_layer_1")(inputs)
        x = BatchNormalization(center=True, scale=False)(x)
        x = Activation(Relu, name="activation_el_1")(x)
        x = Dense(64,
                  kernel_regularizer=l1_l2(l1=0., l2=0.),
                  name="encoder_layer_2")(x)
        x = BatchNormalization(center=True, scale=False)(x)
        x = Activation(Relu, name="activation_el_2")(x)
        x = Dense(32,
                  kernel_regularizer=l1_l2(l1=0., l2=0.),
                  name="center_layer")(x)
        x = BatchNormalization(center=True, scale=False)(x)
        c = Activation(Relu, name="activation_cl")(x)
        x = Dense(64,
                  kernel_regularizer=l1_l2(l1=0., l2=0.),
                  name="decoder_layer_1")(c)
        x = BatchNormalization(center=True, scale=False)(x)
        x = Activation(Relu, name="activation_dl_1")(x)
        x = Dense(128,
                  kernel_regularizer=l1_l2(l1=0., l2=0.),
                  name="decoder_layer_2")(x)
        x = BatchNormalization(center=True, scale=False)(x)
        x = Activation(Relu, name="activation_dl_2")(x)
        pi = Dense(input_size,
                   kernel_regularizer=l1_l2(l1=0., l2=0.),
                   activation='sigmoid',
                   name="pi_layer")(x)
        dp = Dense(input_size,
                   kernel_regularizer=l1_l2(l1=0., l2=0.),
                   activation='dispact',
                   name="dispersion_layer")(x)
        mu = Dense(input_size,
                   kernel_regularizer=l1_l2(l1=0., l2=0.),
                   activation='meanact',
                   name="mean_layer")(x)
        ColwiseMultLayer = Lambda(lambda l: l[0] * tf.reshape(l[1], (-1, 1)))
        outputs = ColwiseMultLayer([mu, sf])
        outputs = SliceLayer(0, name='slice')([outputs, dp])

        # Define loss function and callbacks strategies
        zinb = ZINB(pi, theta=dp, ridge_lambda=0, debug=False)
        self.loss = zinb.loss

        # Define models
        self.model = Model(inputs=[inputs, sf], outputs=outputs)
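
The string activations 'dispact' and 'meanact' only resolve if they are registered as Keras custom objects somewhere in the project. A minimal sketch of how that registration could look; the clipped-exponential and clipped-softplus definitions follow the usual DCA-style conventions and are assumptions here:

# Assumed registration of the custom activations referenced by name above.
import tensorflow as tf
from keras.utils.generic_utils import get_custom_objects

MeanAct = lambda x: tf.clip_by_value(tf.exp(x), 1e-5, 1e6)          # strictly positive mean
DispAct = lambda x: tf.clip_by_value(tf.nn.softplus(x), 1e-4, 1e4)  # strictly positive dispersion

get_custom_objects().update({'meanact': MeanAct, 'dispact': DispAct})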
Code Example #8
File: ZINBAE0.py  Project: ttgump/ZINBAE
    def __init__(self, dims, noise_sd=0, ridge=0, debug=False, eps=1e-20):
        self.dims = dims
        self.input_dim = dims[0]
        self.n_stacks = len(self.dims) - 1
        self.noise_sd = noise_sd
        self.act = 'relu'
        self.ridge = ridge
        self.debug = debug
        self.eps = eps

        self.autoencoder = autoencoder(self.dims,
                                       noise_sd=self.noise_sd,
                                       act=self.act)

        self.pi = pi = self.autoencoder.get_layer(name='pi').output
        self.disp = disp = self.autoencoder.get_layer(name='dispersion').output
        zinb = ZINB(pi, theta=disp, ridge_lambda=self.ridge, debug=self.debug)
        self.zinb_loss = zinb.loss

        self.model = Model(
            inputs=[self.autoencoder.input[0], self.autoencoder.input[1]],
            outputs=self.autoencoder.output)
Code Example #9
    x_sd = adata.X.std(0)
    x_sd_median = np.median(x_sd)
    print("median of gene sd: %.5f" % x_sd_median)

    if args.update_interval == 0:  # one epoch
        args.update_interval = int(adata.X.shape[0] / args.batch_size)
    print(args)

    # Define scDeepCluster model
    t0 = time()

    #l_pi = vae.get_layer(name='pi').output
    #l_disp = vae.get_layer(name='dispersion').output
    #l_mean = vae.get_layer(name='mean').output
    zinb = ZINB(pi, theta=disp, ridge_lambda=ridge, debug=debug)
    #reconstruction_loss = zinb.loss(y_true= adata.raw.X, y_pred= output)# tf.get_variable(output, (73909, 17925)))
    #X = tf.Variable([0.0])

    #place = tf.placeholder(tf.float32, shape=(3000000, 300))
    #set_x = X.assign(place)
    """reconstruction_loss = binary_crossentropy(y_true= adata.raw.X, y_pred= outputs)# tf.get_variable(output, (17925, 73909)))

    reconstruction_loss *= dims[0]
    kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
    kl_loss = K.sum(kl_loss, axis=-1)
    kl_loss *= -0.5
    vae_loss = K.mean(reconstruction_loss + kl_loss)
    #vae_loss = K.mean(kl_loss)"""

    print('...Pretraining autoencoder...')
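
For reference, a minimal sketch of what a DCA-style ZINB negative log-likelihood computes; this is an assumption about the ZINB class used throughout these examples, written against the TF1 API the snippets already rely on:

import tensorflow as tf

def zinb_nll(y_true, mu, theta, pi, eps=1e-10):
    """Sketch of a DCA-style ZINB negative log-likelihood (not the project's exact code)."""
    # Negative-binomial term used for non-zero counts.
    nb_nll = (tf.lgamma(theta + eps) + tf.lgamma(y_true + 1.0)
              - tf.lgamma(y_true + theta + eps)
              + (theta + y_true) * tf.log(1.0 + mu / (theta + eps))
              + y_true * (tf.log(theta + eps) - tf.log(mu + eps)))
    nb_case = nb_nll - tf.log(1.0 - pi + eps)

    # A zero count can come from dropout (probability pi) or from the NB itself.
    zero_nb = tf.pow(theta / (theta + mu + eps), theta)
    zero_case = -tf.log(pi + (1.0 - pi) * zero_nb + eps)

    return tf.reduce_mean(tf.where(tf.less(y_true, 1e-8), zero_case, nb_case))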
Code Example #10
File: model.py  Project: adespi/sc_ae_clustering
def create_model(model,
                 dims,
                 act='relu',
                 init='glorot_uniform',
                 ridge=0,
                 debug=False,
                 **kwargs):
    #n_clusters=args.n_clusters
    ##alpha=1.0,
    assert model in ["ae", "vae", "iaf"]
    #if model == "ae":
    noise_sd = kwargs.get('noise_sd', 2.5)
    if model == "iaf":
        num_trans = kwargs.get('num_trans', 6)

    n_stacks = len(dims) - 1

    # input
    counts_input = Input(shape=(dims[0], ), name='counts')
    h = counts_input
    #if model == "ae":
    h = GaussianNoise(noise_sd, name='input_noise')(h)

    # internal layers in encoder
    for i in range(n_stacks - 1):
        h = Dense(dims[i + 1], kernel_initializer=init,
                  name='encoder_%d' % i)(h)
        if model == "ae" or model == "vae":
            h = GaussianNoise(noise_sd,
                              name='noise_%d' % i)(h)  # add Gaussian noise
        h = Activation(act)(h)

    # hidden layer
    if model == "ae":
        # hidden layer; features are extracted from here
        h = Dense(dims[-1], kernel_initializer=init, name='encoder_hidden')(h)
        latent_layer = h
    elif model == "vae":
        z_mean = Dense(dims[-1], name='z_mean')(h)
        z_log_var = Dense(dims[-1], name='z_log_var')(h)
        z = Lambda(sampling, output_shape=(dims[-1], ),
                   name='z')([z_mean, z_log_var])
        h = z
        latent_layer = z_mean
    else:
        z, mu, sig, kl = build_iaf_layer(h,
                                         _name='IAF',
                                         num_trans=num_trans,
                                         latent_dim=dims[-1])
        z = z[-1]
        mu = mu[-1]
        h = z
        latent_layer = mu

    sf_layer = Input(shape=(1, ), name='size_factors')

    # internal layers in decoder
    for i in range(n_stacks - 1, 0, -1):
        h = Dense(dims[i],
                  activation=act,
                  kernel_initializer=init,
                  name='decoder_%d' % i)(h)

    # output

    pi = Dense(dims[0],
               activation='sigmoid',
               kernel_initializer=init,
               name='pi')(h)

    disp = Dense(dims[0],
                 activation=DispAct,
                 kernel_initializer=init,
                 name='dispersion')(h)

    mean = Dense(dims[0],
                 activation=MeanAct,
                 kernel_initializer=init,
                 name='mean')(h)

    adjusted_mean = ColWiseMultLayer(name='output')([mean, sf_layer])
    outputs = SliceLayer(0, name='slice')([adjusted_mean, disp, pi])

    model_network = Model([counts_input, sf_layer],
                          outputs,
                          name=model + '_mlp')
    encoder_network = Model(counts_input, latent_layer, name='encoder')
    imputation_no_zi_network = Model([counts_input, sf_layer],
                                     adjusted_mean,
                                     name=model + '_mlp')

    # loss
    zinb = ZINB(pi, theta=disp, ridge_lambda=ridge, debug=debug)
    if model == "ae":

        def loss(y_true, y_pred):
            return zinb.loss(y_true=y_true, y_pred=y_pred)
    elif model == "vae":

        def loss(y_true, y_pred):
            reconstruction_loss = zinb.loss(
                y_true=y_true,
                y_pred=y_pred)  # tf.get_variable(output, (17925, 73909)))
            reconstruction_loss *= dims[0]
            kl_loss = 1 + z_log_var - K.square(z_mean) - K.exp(z_log_var)
            kl_loss = K.sum(kl_loss, axis=-1)
            #kl_loss = K.mean(kl_loss, axis=-1)
            kl_loss *= -0.5
            vae_loss = K.mean(reconstruction_loss + kl_loss)
            return vae_loss
            #return reconstruction_loss
    else:

        def loss(y_true, y_pred):
            reconstruction_loss = zinb.loss(
                y_true=y_true,
                y_pred=y_pred)  # tf.get_variable(output, (17925, 73909)))
            reconstruction_loss *= dims[0]
            vae_loss = K.mean(reconstruction_loss + kl)
            return vae_loss
            #return reconstruction_loss

    return model_network, encoder_network, imputation_no_zi_network, loss, counts_input, latent_layer
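
A sketch of how the returned pieces would typically be wired together; the dims, optimizer, and fit arguments below are illustrative assumptions:

# Assumed usage of create_model (illustrative only).
model_net, encoder_net, imputation_net, loss, counts_input, latent = create_model(
    'vae', dims=[n_genes, 256, 64, 32])

model_net.compile(optimizer='adam', loss=loss)
model_net.fit([counts, size_factors], counts,  # raw counts are both input and target
              batch_size=256, epochs=300)

embedding = encoder_net.predict(counts)                    # latent features for clustering
denoised = imputation_net.predict([counts, size_factors])  # size-factor-adjusted mean, no zero inflation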