Example #1
def s(data, dp, l_out, l1, l2, dp_type):
	# dp: dropout parameter; l_out: units per hidden layer; l1/l2: L1/L2
	# regularizer scales; dp_type: True uses dropout_selu, False uses tf.nn.dropout.
	# Note that dp acts as a drop rate for dropout_selu but as a keep
	# probability for tf.nn.dropout, so the two branches interpret it differently.
	# data = tf.nn.dropout(data, dp)
	# data = tf.expand_dims(data, axis=2)
	# data = tf.contrib.layers.batch_norm(data,  activation_fn=tf.nn.relu)
	# data = tf.contrib.layers.conv2d(data, 1, 3, 1, 'SAME', activation_fn=None)
	# data = tf.contrib.layers.max_pool2d(data, 3, 2, 'SAME')
	if dp_type:
		# l1 = tf.contrib.layers.batch_norm(data,  activation_fn=tf.nn.relu, is_training=bn)
		layer1 = tf.contrib.layers.fully_connected(data, num_outputs=l_out, activation_fn=se.selu, weights_regularizer=tf.contrib.layers.l1_l2_regularizer(scale_l1=l1, scale_l2=l2), weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN'))
		# l1 = tf.concat([data, l1], axis=1)
		layer1 = se.dropout_selu(layer1, rate=dp, training=(dp < 1.))
		# l2 = tf.contrib.layers.batch_norm(l1,  activation_fn=tf.nn.relu, is_training=bn)
		layer2 = tf.contrib.layers.fully_connected(layer1, num_outputs=l_out, activation_fn=se.selu, weights_regularizer=tf.contrib.layers.l1_l2_regularizer(scale_l1=l1, scale_l2=l2), weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN'))
		# l2 = tf.concat([l1, l2], axis=1)
		layer2 = se.dropout_selu(layer2, rate=dp, training=(dp < 1.))
		# l3 = tf.contrib.layers.batch_norm(l2,  activation_fn=tf.nn.relu, is_training=bn)
		layer3 = tf.contrib.layers.fully_connected(layer2, num_outputs=l_out, activation_fn=se.selu, weights_regularizer=tf.contrib.layers.l1_l2_regularizer(scale_l1=l1, scale_l2=l2), weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN'))
		
	else:
		# l1 = tf.contrib.layers.batch_norm(data,  activation_fn=tf.nn.relu, is_training=bn)
		layer1 = tf.contrib.layers.fully_connected(data, num_outputs=l_out, activation_fn=se.selu, weights_regularizer=tf.contrib.layers.l1_l2_regularizer(scale_l1=l1, scale_l2=l2), weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN'))
		# l1 = tf.concat([data, l1], axis=1)
		# l2 = tf.contrib.layers.batch_norm(l1,  activation_fn=tf.nn.relu, is_training=bn)
		layer2 = tf.contrib.layers.fully_connected(tf.nn.dropout(layer1, dp), num_outputs=l_out, activation_fn=se.selu, weights_regularizer=tf.contrib.layers.l1_l2_regularizer(scale_l1=l1, scale_l2=l2), weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN'))
		# l2 = tf.concat([l1, l2], axis=1)
		# l3 = tf.contrib.layers.batch_norm(l2,  activation_fn=tf.nn.relu, is_training=bn)
		layer3 = tf.contrib.layers.fully_connected(tf.nn.dropout(layer2, dp), num_outputs=l_out, activation_fn=se.selu, weights_regularizer=tf.contrib.layers.l1_l2_regularizer(scale_l1=l1, scale_l2=l2), weights_initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, mode='FAN_IN'))

	return layer3	
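A hypothetical call with illustrative values; `x_ph` and the `se` helper module are assumed to exist in the caller's scope:

hidden = s(x_ph, dp=0.05, l_out=256, l1=0.0, l2=1e-4, dp_type=True)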
Example #2
def mlp_w_selu(x, weights, biases, dropout_rate, is_training):
    layer1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    layer1 = selu(layer1)
    layer1 = dropout_selu(layer1, dropout_rate, training=is_training)

    layer2 = tf.add(tf.matmul(layer1, weights['h2']), biases['b2'])
    layer2 = selu(layer2)
    layer2 = dropout_selu(layer2, dropout_rate, training=is_training)

    out_layer = tf.matmul(layer2, weights['out']) + biases['out']   # with linear activation
    return out_layer
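All of these snippets pull `selu` and `dropout_selu` (alpha dropout, which preserves the self-normalizing property) from a helper module that is not shown here. As a reference point, a minimal sketch of the activation itself, using the fixed-point constants from Klambauer et al.'s "Self-Normalizing Neural Networks" paper, might look like the following; the helper used by these examples may differ in detail:

import tensorflow as tf

def selu(x):
    # Fixed-point constants from "Self-Normalizing Neural Networks" (2017).
    alpha = 1.6732632423543772
    scale = 1.0507009873554805
    # tf.nn.elu(x) == exp(x) - 1 for x < 0, so this matches
    # scale * (x if x > 0 else alpha * (exp(x) - 1)).
    return scale * tf.where(x >= 0.0, x, alpha * tf.nn.elu(x))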
Example #3
def selu_adversary(x,
                   z_pivot,
                   n_layers,
                   hidden_nodes,
                   keep_prob,
                   name,
                   reuse=False,
                   training=True,
                   actv=selu.selu):
    SELU_initializer = tf.contrib.layers.variance_scaling_initializer(
        factor=1.0, mode='FAN_IN')
    xavier = tf.contrib.layers.xavier_initializer(uniform=True,
                                                  seed=None,
                                                  dtype=tf.float32)

    with tf.variable_scope('adversary', reuse=reuse):
        l0 = tf.layers.dense(x,
                             units=hidden_nodes[0],
                             activation=actv,
                             kernel_initializer=SELU_initializer)
        d0 = selu.dropout_selu(l0, rate=1 - keep_prob, training=training)

        l1 = tf.layers.dense(d0,
                             units=hidden_nodes[1],
                             activation=actv,
                             kernel_initializer=SELU_initializer)
        d1 = selu.dropout_selu(l1, rate=1 - keep_prob, training=training)

        fc = tf.layers.dense(d1,
                             units=96,
                             activation=tf.nn.tanh,
                             kernel_initializer=xavier)
        fc_logits, fc_mu, fc_sigma = tf.split(fc, 3, axis=1)
        logits = tf.layers.dense(fc_logits,
                                 units=config.n_gaussians,
                                 activation=tf.identity,
                                 name='mixing_fractions')
        centers = tf.layers.dense(fc_mu,
                                  units=config.n_gaussians,
                                  activation=tf.identity,
                                  name='means')
        variances = tf.layers.dense(fc_sigma,
                                    units=config.n_gaussians,
                                    activation=custom_elu,
                                    name='variances')
        mixing_coeffs = tf.nn.softmax(logits)

        # Mixture log-density exponent: log(pi_k) - 0.5*log(2*pi) - log(sigma_k)
        # - (mu_k - z)^2 / (2*sigma_k^2). 0.5 is written explicitly so the term
        # does not vanish under Python 2 integer division (1 / 2 == 0 there).
        exponent = tf.log(mixing_coeffs) - 0.5 * tf.log(2 * np.pi) - tf.log(
            variances) - tf.square(centers - tf.expand_dims(z_pivot, 1)) / (
                2 * tf.square(variances))

    return log_sum_exp_trick(exponent)
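The adversary returns `log_sum_exp_trick(exponent)` without defining the helper. A numerically stable sketch, assuming the reduction runs over the mixture components on axis 1 (the project's actual helper may differ):

def log_sum_exp_trick(x, axis=1):
    # Subtract the row max before exponentiating to avoid overflow.
    x_max = tf.reduce_max(x, axis=axis, keep_dims=True)
    return tf.squeeze(x_max, axis=[axis]) + tf.log(
        tf.reduce_sum(tf.exp(x - x_max), axis=axis))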
Example #4
def dense_SELU(x,
               n_layers,
               hidden_layer_nodes,
               keep_prob,
               reuse=False,
               training=True,
               actv=selu.selu):
    SELU_initializer = tf.contrib.layers.variance_scaling_initializer(
        factor=1.0, mode='FAN_IN')

    with tf.variable_scope('seluNet', reuse=reuse):
        l0 = tf.layers.dense(x,
                             units=hidden_layer_nodes[0],
                             activation=actv,
                             kernel_initializer=SELU_initializer)
        d0 = selu.dropout_selu(l0, rate=1 - keep_prob, training=training)

        l1 = tf.layers.dense(d0,
                             units=hidden_layer_nodes[1],
                             activation=actv,
                             kernel_initializer=SELU_initializer)
        d1 = selu.dropout_selu(l1, rate=1 - keep_prob, training=training)

        l2 = tf.layers.dense(d1,
                             units=hidden_layer_nodes[2],
                             activation=actv,
                             kernel_initializer=SELU_initializer)
        d2 = selu.dropout_selu(l2, rate=1 - keep_prob, training=training)

        l3 = tf.layers.dense(d2,
                             units=hidden_layer_nodes[3],
                             activation=actv,
                             kernel_initializer=SELU_initializer)
        d3 = selu.dropout_selu(l3, rate=1 - keep_prob, training=training)

        l4 = tf.layers.dense(d3,
                             units=hidden_layer_nodes[4],
                             activation=actv,
                             kernel_initializer=SELU_initializer)
        d4 = selu.dropout_selu(l4, rate=1 - keep_prob, training=training)

        # Readout layer
        readout = tf.layers.dense(d4,
                                  units=config.n_classes,
                                  kernel_initializer=SELU_initializer)

    return readout
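Hypothetical wiring of the function above (shapes and node counts are illustrative; `config.n_classes` is resolved inside the function, and `n_layers` is accepted but the depth here is fixed at five):

x_ph = tf.placeholder(tf.float32, [None, 784])
logits = dense_SELU(x_ph,
                    n_layers=5,
                    hidden_layer_nodes=[512, 256, 128, 64, 32],
                    keep_prob=0.95)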
Example #5
    def __call__(self, x, apply_softmax=True):
        del self.activations[:]
        xp = self.xp
        out = x
        for idx in range(self.num_layers):  # xrange in the original Python 2 code
            layer = getattr(self, "layer_%s" % idx)
            out = selu(layer(out))
            if not chainer.config.train:
                self.activations.append(xp.copy(out.data))
            out = dropout_selu(out, ratio=0.1)
        out = self.logits(out)
        if apply_softmax:
            out = F.softmax(out)
        return out
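A hypothetical inference call, assuming `model` is an instance of the surrounding class; evaluating under chainer.using_config('train', False) is what makes the loop above record per-layer activations:

with chainer.using_config('train', False):
    probs = model(x_batch)        # softmax output
    recorded = model.activations  # copies of each layer's SELU output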
Example #6

    def model(self, points_tensor, input_tensor, is_training=True):
        """
        Arguments:
            points_tensor: [b, n, 3] point cloud
            input_tensor: [b, n, channels] extra data defined for each point
        """
        b = points_tensor.get_shape()[0].value
        n = points_tensor.get_shape()[1].value
        in_channels = input_tensor.get_shape()[2].value

        voxel_size = tf.constant([0.1])
        stride = tf.constant([1, 1, 1])
        filter1_tensor = tf.get_variable("filter1", [3, 3, 3, in_channels, 9])
        conv1 = conv3p(points_tensor, input_tensor, filter1_tensor, stride,
                       voxel_size)
        relu1 = selu.selu(conv1)

        stride = tf.constant([2, 2, 2])
        filter2_tensor = tf.get_variable("filter2", [3, 3, 3, 9, 9])
        conv2 = conv3p(points_tensor, relu1, filter2_tensor, stride,
                       voxel_size)
        relu2 = selu.selu(conv2)

        stride = tf.constant([3, 3, 3])
        filter3_tensor = tf.get_variable("filter3", [3, 3, 3, 9, 9])
        conv3 = conv3p(points_tensor, relu2, filter3_tensor, stride,
                       voxel_size)
        relu3 = selu.selu(conv3)

        stride = tf.constant([4, 4, 4])
        filter4_tensor = tf.get_variable("filter4", [3, 3, 3, 9, 9])
        conv4 = conv3p(points_tensor, relu3, filter4_tensor, stride,
                       voxel_size)
        relu4 = selu.selu(conv4)

        feat = tf.concat([relu1, relu2, relu3, relu4], axis=2)
        view = tf.reshape(feat, [-1, n * 36])
        fc1 = tf.contrib.layers.fully_connected(view,
                                                512,
                                                activation_fn=selu.selu)

        dropout = selu.dropout_selu(x=fc1, rate=0.5, training=is_training)

        fc2 = tf.contrib.layers.fully_connected(dropout,
                                                self.num_class,
                                                activation_fn=selu.selu)

        return fc2
Example #7
def selu_builder(x, shape, name, keep_prob, training=True):
    init = tf.contrib.layers.variance_scaling_initializer(factor=1.0,
                                                          mode='FAN_IN')

    with tf.variable_scope(name) as scope:
        W = tf.get_variable("weights", shape=shape, initializer=init)
        b = tf.get_variable(
            "biases",
            shape=[shape[1]],
            initializer=tf.random_normal_initializer(stddev=0.1))
        actv = selu.selu(tf.add(tf.matmul(x, W), b))
        layer_output = selu.dropout_selu(actv,
                                         rate=1 - keep_prob,
                                         training=training)

    return layer_output
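Hypothetical stacking (sizes illustrative): each call builds one SELU layer plus alpha-dropout under its own variable scope.

h1 = selu_builder(x_ph, shape=[784, 256], name='fc1', keep_prob=0.95)
h2 = selu_builder(h1, shape=[256, 64], name='fc2', keep_prob=0.95)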
Example #8

    def _buildGraph(self):
        with self.g.as_default():
            XPH = tf.placeholder(tf.float32, [
                None, self.inputShape[0], self.inputShape[1],
                self.inputShape[2]
            ],
                                 name='XPH')
            self.XPH = XPH

            YPH = tf.placeholder(tf.float32, [
                None, self.outputShape1[0] + self.outputShape2[0] +
                self.outputShape3[0] + self.outputShape4[0]
            ],
                                 name='YPH')
            self.YPH = YPH

            learningRatePH = tf.placeholder(tf.float32,
                                            shape=[],
                                            name='learningRatePH')
            self.learningRatePH = learningRatePH

            phasePH = tf.placeholder(tf.bool, shape=[], name='phasePH')
            self.phasePH = phasePH

            dropoutRateFC4PH = tf.placeholder(tf.float32,
                                              shape=[],
                                              name='dropoutRateFC4PH')
            self.dropoutRateFC4PH = dropoutRateFC4PH

            dropoutRateFC5PH = tf.placeholder(tf.float32,
                                              shape=[],
                                              name='dropoutRateFC5PH')
            self.dropoutRateFC5PH = dropoutRateFC5PH

            l2RegularizationLambdaPH = tf.placeholder(
                tf.float32,
                shape=[],
                name='l2RegularizationLambdaPH')
            self.l2RegularizationLambdaPH = l2RegularizationLambdaPH

            conv1 = tf.layers.conv2d(inputs=XPH,
                                     filters=self.numFeature1,
                                     kernel_size=self.kernelSize1,
                                     kernel_initializer=tf.contrib.layers.
                                     variance_scaling_initializer(
                                         factor=2.0,
                                         mode='FAN_IN',
                                         uniform=False),
                                     padding="same",
                                     activation=selu.selu,
                                     name='conv1')
            self.conv1 = conv1
            # print(conv1.shape)

            pool1 = tf.layers.max_pooling2d(inputs=conv1,
                                            pool_size=self.pollSize1,
                                            strides=1,
                                            name='pool1')
            self.pool1 = pool1
            # print(pool1.shape)
            conv2 = tf.layers.conv2d(inputs=pool1,
                                     filters=self.numFeature2,
                                     kernel_size=self.kernelSize2,
                                     kernel_initializer=tf.contrib.layers.
                                     variance_scaling_initializer(
                                         factor=2.0,
                                         mode='FAN_IN',
                                         uniform=False),
                                     padding="same",
                                     activation=selu.selu,
                                     name='conv2')
            self.conv2 = conv2
            # print(conv2.shape)

            pool2 = tf.layers.max_pooling2d(inputs=conv2,
                                            pool_size=self.pollSize2,
                                            strides=1,
                                            name='pool2')
            self.pool2 = pool2
            # print(pool2.shape)

            conv3 = tf.layers.conv2d(inputs=pool2,
                                     filters=self.numFeature3,
                                     kernel_size=self.kernelSize3,
                                     kernel_initializer=tf.contrib.layers.
                                     variance_scaling_initializer(
                                         factor=2.0,
                                         mode='FAN_IN',
                                         uniform=False),
                                     padding="same",
                                     activation=selu.selu,
                                     name='conv3')
            self.conv3 = conv3
            # print(conv3.shape)

            pool3 = tf.layers.max_pooling2d(inputs=conv3,
                                            pool_size=self.pollSize3,
                                            strides=1,
                                            name='pool3')
            self.pool3 = pool3
            # print(pool3.shape)

            flat_size = (self.inputShape[0] - (self.pollSize1[0] - 1) -
                         (self.pollSize2[0] - 1) - (self.pollSize3[0] - 1))
            flat_size *= (self.inputShape[1] - (self.pollSize1[1] - 1) -
                          (self.pollSize2[1] - 1) - (self.pollSize3[1] - 1))
            flat_size *= self.numFeature3
            conv3_flat = tf.reshape(pool3, [-1, flat_size])
            # print(conv3_flat.shape)

            fc4 = tf.layers.dense(inputs=conv3_flat,
                                  units=self.hiddenLayerUnits4,
                                  kernel_initializer=tf.contrib.layers.
                                  variance_scaling_initializer(factor=2.0,
                                                               mode='FAN_IN',
                                                               uniform=False),
                                  activation=selu.selu,
                                  name='fc4')
            self.fc4 = fc4
            # print(fc4.shape)

            dropout4 = selu.dropout_selu(fc4,
                                         dropoutRateFC4PH,
                                         training=phasePH,
                                         name='dropout4')
            self.dropout4 = dropout4

            fc5 = tf.layers.dense(inputs=dropout4,
                                  units=self.hiddenLayerUnits5,
                                  kernel_initializer=tf.contrib.layers.
                                  variance_scaling_initializer(factor=2.0,
                                                               mode='FAN_IN',
                                                               uniform=False),
                                  activation=selu.selu,
                                  name='fc5')
            self.fc5 = fc5
            # print(fc5.shape)

            dropout5 = selu.dropout_selu(fc5,
                                         dropoutRateFC5PH,
                                         training=phasePH,
                                         name='dropout5')
            self.dropout5 = dropout5

            epsilon = tf.constant(value=1e-10)

            YBaseChangeSigmoid = tf.layers.dense(inputs=dropout4,
                                                 units=self.outputShape1[0],
                                                 activation=tf.nn.sigmoid,
                                                 name='YBaseChangeSigmoid')
            self.YBaseChangeSigmoid = YBaseChangeSigmoid
            # print(YBaseChangeSigmoid.shape)

            YZygosityFC = tf.layers.dense(inputs=dropout5,
                                          units=self.outputShape2[0],
                                          activation=selu.selu,
                                          name='YZygosityFC')
            YZygosityLogits = tf.add(YZygosityFC,
                                     epsilon,
                                     name='YZygosityLogits')
            YZygositySoftmax = tf.nn.softmax(YZygosityLogits,
                                             name='YZygositySoftmax')
            self.YZygositySoftmax = YZygositySoftmax
            # print(YZygositySoftmax.shape)

            YVarTypeFC = tf.layers.dense(inputs=dropout5,
                                         units=self.outputShape3[0],
                                         activation=selu.selu,
                                         name='YVarTypeFC')
            YVarTypeLogits = tf.add(YVarTypeFC, epsilon, name='YVarTypeLogits')
            YVarTypeSoftmax = tf.nn.softmax(YVarTypeLogits,
                                            name='YVarTypeSoftmax')
            self.YVarTypeSoftmax = YVarTypeSoftmax
            # print(YVarTypeSoftmax.shape)

            YIndelLengthFC = tf.layers.dense(inputs=dropout5,
                                             units=self.outputShape4[0],
                                             activation=selu.selu,
                                             name='YIndelLengthFC')
            YIndelLengthLogits = tf.add(YIndelLengthFC,
                                        epsilon,
                                        name='YIndelLengthLogits')
            YIndelLengthSoftmax = tf.nn.softmax(YIndelLengthLogits,
                                                name='YIndelLengthSoftmax')
            self.YIndelLengthSoftmax = YIndelLengthSoftmax
            # print(YIndelLengthSoftmax.shape)
            # print(YPH.shape)
            #print(YBaseChangeSigmoid)
            #print(tf.slice(YPH,[0,0],[-1,self.outputShape1[0]]))
            loss1 = tf.reduce_sum(
                tf.pow(YBaseChangeSigmoid -
                       tf.slice(YPH, [0, 0], [-1, self.outputShape1[0]],
                                name='YBaseChangeGetTruth'),
                       2,
                       name='YBaseChangeMSE'),
                name='YBaseChangeReduceSum')
            #print("Loss1: "+str(loss1)+"\n")

            #print(YZygosityLogits)
            #print(YZygosityCrossEntropy)
            #print(tf.slice(YPH, [0,self.outputShape1[0]], [-1,self.outputShape2[0]]))
            YZygosityCrossEntropy = tf.nn.log_softmax(YZygosityLogits, name='YZygosityLogSoftmax')\
                                    * -tf.slice(YPH, [0,self.outputShape1[0]], [-1,self.outputShape2[0]], name='YZygosityGetTruth')
            loss2 = tf.reduce_sum(YZygosityCrossEntropy,
                                  name='YZygosityReduceSum')
            #print("Loss2: "+str(loss2)+"\n")

            #print(YVarTypeLogits)
            #print(YVarTypeCrossEntropy)
            #print(tf.slice(YPH, [0,self.outputShape1[0]+self.outputShape2[0]]))
            YVarTypeCrossEntropy = tf.nn.log_softmax(YVarTypeLogits, name='YVarTypeLogSoftmax')\
                                   * -tf.slice(YPH, [0,self.outputShape1[0]+self.outputShape2[0]], [-1,self.outputShape3[0]], name='YVarTypeGetTruth')
            loss3 = tf.reduce_sum(YVarTypeCrossEntropy,
                                  name='YVarTypeReduceSum')
            #print("Loss3: " + str(loss3)+"\n")

            #print(YIndelLengthLogits)
            #print(YIndelLengthCrossEntropy)
            #print(tf.slice(YPH, [0,self.outputShape1[0]+self.outputShape2[0]+self.outputShape3[0]], [-1,self.outputShape4[0]]))
            YIndelLengthCrossEntropy = tf.nn.log_softmax(YIndelLengthLogits, name='YIndelLengthLogSoftmax')\
                                       * -tf.slice(YPH, [0,self.outputShape1[0]+self.outputShape2[0]+self.outputShape3[0]], [-1,self.outputShape4[0]], name='YIndelLengthGetTruth')
            loss4 = tf.reduce_sum(YIndelLengthCrossEntropy,
                                  name='YIndelLengthReduceSum')
            #print("Loss4: " + str(loss4)+"\n")

            lossL2 = tf.add_n([
                tf.nn.l2_loss(v)
                for v in tf.trainable_variables() if 'bias' not in v.name
            ]) * l2RegularizationLambdaPH
            #print("LossL2: " + str(lossL2)+"\n")

            loss = loss1 + loss2 + loss3 + loss4 + lossL2
            self.loss = loss

            # add tensorboard embedding
            self.embedding1 = YBaseChangeSigmoid
            self.embedding2 = YZygosityLogits
            self.embedding3 = YVarTypeLogits
            self.embedding4 = YIndelLengthLogits
            # add summaries
            tf.summary.scalar('learning_rate', learningRatePH)
            tf.summary.scalar('l2Lambda', l2RegularizationLambdaPH)
            tf.summary.scalar("loss1", loss1)
            tf.summary.scalar("loss2", loss2)
            tf.summary.scalar("loss3", loss3)
            tf.summary.scalar("loss4", loss4)
            tf.summary.scalar("lossL2", lossL2)
            tf.summary.scalar("loss", loss)

            # For report or debug. Fetching histogram summary is slow, GPU utilization will be low if enabled.
            #for var in tf.trainable_variables():
            #    tf.summary.histogram(var.op.name, var)
            self.merged_summary_op = tf.summary.merge_all()

            self.training_op = tf.train.AdamOptimizer(
                learning_rate=learningRatePH).minimize(loss)
            self.init_op = tf.global_variables_initializer()
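A hypothetical training step against this graph, assuming `net` is the enclosing object and `batch_x`/`batch_y` are numpy arrays of matching shapes:

with tf.Session(graph=net.g) as sess:
    sess.run(net.init_op)
    _, loss_val = sess.run(
        [net.training_op, net.loss],
        feed_dict={net.XPH: batch_x,
                   net.YPH: batch_y,
                   net.learningRatePH: 1e-3,
                   net.phasePH: True,
                   net.dropoutRateFC4PH: 0.2,
                   net.dropoutRateFC5PH: 0.2,
                   net.l2RegularizationLambdaPH: 1e-4})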
Example #9
#train = split[80:]
#test = split[:20]
split = int(0.8 * sol.shape[0])
#x_train = sol[:split, :]
#y_train = sol_dt[:split, :]
#x_test = sol[split:, :]
#y_test = sol_dt[split:, :]

#print("Training set", x_train.shape[0])
#print("Test set", x_test.shape[0])

model = Sequential()
with tf.device('/gpu:0'):
    model.add(
        Dense(2048, kernel_initializer='glorot_uniform', input_shape=(42, )))
    model.add(Lambda(lambda x: dropout_selu(x, 0.5)))

for i in range(3):
    with tf.device(f'/gpu:{i+1}'):
        model.add(Dense(2048, kernel_initializer='he_uniform'))
        model.add(Lambda(lambda x: dropout_selu(x, 0.5)))

model.add(Dense(41, activation='linear'))

#model = make_parallel(model, 4)

model.summary()
model.compile(loss='mean_squared_error', optimizer=Adam(lr=0.01))

xtr = (sol[i:i + batch_size] for i in range(0, split, batch_size))
ytr = (sol_dt[i:i + batch_size] for i in range(0, split, batch_size))
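The generator pairs above are single-pass; a hypothetical loop consuming them batch by batch:

for x_batch, y_batch in zip(xtr, ytr):
    model.train_on_batch(x_batch, y_batch)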
Example #10
    def model(self, points_tensor, input_tensor, is_training=True):
        """
        Arguments:
            points_tensor: [b, n, 3] point cloud
            input_tensor: [b, n, channels] extra data defined for each point
        """
        b = points_tensor.get_shape()[0].value
        n = points_tensor.get_shape()[1].value
        in_channels = input_tensor.get_shape()[2].value

        voxel_size = tf.constant([0.1])
        stride = tf.constant([1, 1, 1])
        filter1_tensor = tf.get_variable("filter1", [3, 3, 3, in_channels, 9])
        conv1 = conv3p(points_tensor, input_tensor, filter1_tensor, stride,
                       voxel_size)
        relu1 = selu.selu(conv1)

        stride = tf.constant([2, 2, 2])
        filter2_tensor = tf.get_variable("filter2", [3, 3, 3, 9, 9])
        conv2 = conv3p(points_tensor, relu1, filter2_tensor, stride,
                       voxel_size)
        relu2 = selu.selu(conv2)

        stride = tf.constant([3, 3, 3])
        filter3_tensor = tf.get_variable("filter3", [3, 3, 3, 9, 9])
        conv3 = conv3p(points_tensor, relu2, filter3_tensor, stride,
                       voxel_size)
        relu3 = selu.selu(conv3)
        skipCon = tf.add(relu1, relu3, name=None)

        stride = tf.constant([4, 4, 4])
        filter4_tensor = tf.get_variable("filter4", [3, 3, 3, 9, 9])
        conv4 = conv3p(points_tensor, skipCon, filter4_tensor, stride,
                       voxel_size)
        relu4 = selu.selu(conv4)

        stride = tf.constant([5, 5, 5])
        filter5_tensor = tf.get_variable("filter5", [3, 3, 3, 9, 9])
        conv5 = conv3p(points_tensor, relu4, filter5_tensor, stride,
                       voxel_size)
        relu5 = selu.selu(conv5)
        skipCon2 = tf.add(relu3, relu5, name=None)

        stride = tf.constant([6, 6, 6])
        filter6_tensor = tf.get_variable("filter6", [3, 3, 3, 9, 9])
        conv6 = conv3p(points_tensor, skipCon2, filter6_tensor, stride,
                       voxel_size)
        relu6 = selu.selu(conv6)

        stride = tf.constant([7, 7, 7])
        filter7_tensor = tf.get_variable("filter7", [3, 3, 3, 9, 9])
        conv7 = conv3p(points_tensor, relu6, filter7_tensor, stride,
                       voxel_size)
        relu7 = selu.selu(conv7)
        skipCon3 = tf.add(relu5, relu7, name=None)

        stride = tf.constant([8, 8, 8])
        filter8_tensor = tf.get_variable("filter8", [3, 3, 3, 9, 9])
        conv8 = conv3p(points_tensor, skipCon3, filter8_tensor, stride,
                       voxel_size)
        relu8 = selu.selu(conv8)

        feat = tf.concat(
            [relu1, relu2, skipCon, relu4, skipCon2, relu6, skipCon3, relu8],
            axis=2)  #local features
        view = tf.reshape(feat, [-1, n * 72])  #global feature

        fc1 = tf.contrib.layers.fully_connected(view,
                                                512,
                                                activation_fn=selu.selu)

        dropout = selu.dropout_selu(x=fc1, rate=0.5, training=is_training)

        fc2 = tf.contrib.layers.fully_connected(dropout,
                                                self.num_class,
                                                activation_fn=selu.selu)

        return fc2
Example #11
    def _buildGraph(self):
        with self.g.as_default():
            XPH = tf.placeholder(tf.float32, [
                None, self.inputShape[0], self.inputShape[1],
                self.inputShape[2]
            ],
                                 name='XPH')
            self.XPH = XPH

            YPH = tf.placeholder(tf.float32, [
                None, self.outputShape1[0] + self.outputShape2[0] +
                self.outputShape3[0] + self.outputShape4[0]
            ],
                                 name='YPH')
            self.YPH = YPH

            learningRatePH = tf.placeholder(tf.float32,
                                            shape=[],
                                            name='learningRatePH')
            self.learningRatePH = learningRatePH

            phasePH = tf.placeholder(tf.bool, shape=[], name='phasePH')
            self.phasePH = phasePH

            dropoutRatePH = tf.placeholder(tf.float32,
                                           shape=[],
                                           name='dropoutRatePH')
            self.dropoutRatePH = dropoutRatePH

            conv1 = tf.layers.conv2d(
                inputs=XPH,
                filters=self.numFeature1,
                kernel_size=self.kernelSize1,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                padding="same",
                activation=selu.selu,
                name='conv1')
            self.conv1 = conv1

            pool1 = tf.layers.max_pooling2d(inputs=conv1,
                                            pool_size=self.pollSize1,
                                            strides=1,
                                            name='pool1')
            self.pool1 = pool1

            conv2 = tf.layers.conv2d(
                inputs=pool1,
                filters=self.numFeature2,
                kernel_size=self.kernelSize2,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                padding="same",
                activation=selu.selu,
                name='conv2')
            self.conv2 = conv2

            pool2 = tf.layers.max_pooling2d(inputs=conv2,
                                            pool_size=self.pollSize2,
                                            strides=1,
                                            name='pool2')
            self.pool2 = pool2

            conv3 = tf.layers.conv2d(
                inputs=pool2,
                filters=self.numFeature3,
                kernel_size=self.kernelSize3,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                padding="same",
                activation=selu.selu,
                name='conv3')
            self.conv3 = conv3

            pool3 = tf.layers.max_pooling2d(inputs=conv3,
                                            pool_size=self.pollSize3,
                                            strides=1,
                                            name='pool3')
            self.pool3 = pool3

            flat_size = (self.inputShape[0] - (self.pollSize1[0] - 1) -
                         (self.pollSize2[0] - 1) - (self.pollSize3[0] - 1))
            flat_size *= (self.inputShape[1] - (self.pollSize1[1] - 1) -
                          (self.pollSize2[1] - 1) - (self.pollSize3[1] - 1))
            flat_size *= self.numFeature3
            conv3_flat = tf.reshape(pool3, [-1, flat_size])

            fc4 = tf.layers.dense(
                inputs=conv3_flat,
                units=self.hiddenLayerUnits4,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                activation=selu.selu,
                name='fc4')
            self.fc4 = fc4

            dropout4 = selu.dropout_selu(fc4,
                                         dropoutRatePH,
                                         training=phasePH,
                                         name='dropout4')
            self.dropout4 = dropout4

            fc5 = tf.layers.dense(
                inputs=dropout4,
                units=self.hiddenLayerUnits5,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                activation=selu.selu,
                name='fc5')
            self.fc5 = fc5

            dropout5 = selu.dropout_selu(fc5,
                                         dropoutRatePH,
                                         training=phasePH,
                                         name='dropout5')
            self.dropout5 = dropout5

            epsilon = tf.constant(value=1e-10)
            YBaseChangeSigmoid = tf.layers.dense(inputs=dropout5,
                                                 units=self.outputShape1[0],
                                                 activation=tf.nn.sigmoid,
                                                 name='YBaseChangeSigmoid')
            self.YBaseChangeSigmoid = YBaseChangeSigmoid
            YZygosityFC = tf.layers.dense(inputs=dropout5,
                                          units=self.outputShape2[0],
                                          activation=selu.selu,
                                          name='YZygosityFC')
            YZygosityLogits = tf.add(YZygosityFC,
                                     epsilon,
                                     name='YZygosityLogits')
            YZygositySoftmax = tf.nn.softmax(YZygosityLogits,
                                             name='YZygositySoftmax')
            self.YZygositySoftmax = YZygositySoftmax
            YVarTypeFC = tf.layers.dense(inputs=dropout5,
                                         units=self.outputShape3[0],
                                         activation=selu.selu,
                                         name='YVarTypeFC')
            YVarTypeLogits = tf.add(YVarTypeFC, epsilon, name='YVarTypeLogits')
            YVarTypeSoftmax = tf.nn.softmax(YVarTypeLogits,
                                            name='YVarTypeSoftmax')
            self.YVarTypeSoftmax = YVarTypeSoftmax
            YIndelLengthFC = tf.layers.dense(inputs=dropout5,
                                             units=self.outputShape4[0],
                                             activation=selu.selu,
                                             name='YIndelLengthFC')
            YIndelLengthLogits = tf.add(YIndelLengthFC,
                                        epsilon,
                                        name='YIndelLengthLogits')
            YIndelLengthSoftmax = tf.nn.softmax(YIndelLengthLogits,
                                                name='YIndelLengthSoftmax')
            self.YIndelLengthSoftmax = YIndelLengthSoftmax

            loss1 = tf.reduce_sum(
                tf.pow(YBaseChangeSigmoid -
                       tf.slice(YPH, [0, 0], [-1, self.outputShape1[0]],
                                name='YBaseChangeGetTruth'),
                       2,
                       name='YBaseChangeMSE'),
                name='YBaseChangeReduceSum')
            YZygosityCrossEntropy = tf.nn.log_softmax(YZygosityLogits, name='YZygosityLogSoftmax')\
                                    * -tf.slice(YPH, [0,self.outputShape1[0]], [-1,self.outputShape2[0]], name='YZygosityGetTruth')
            loss2 = tf.reduce_sum(YZygosityCrossEntropy,
                                  name='YZygosityReduceSum')
            YVarTypeCrossEntropy = tf.nn.log_softmax(YVarTypeLogits, name='YVarTypeLogSoftmax')\
                                   * -tf.slice(YPH, [0,self.outputShape1[0]+self.outputShape2[0]], [-1,self.outputShape3[0]], name='YVarTypeGetTruth')
            loss3 = tf.reduce_sum(YVarTypeCrossEntropy,
                                  name='YVarTypeReduceSum')
            YIndelLengthCrossEntropy = tf.nn.log_softmax(YIndelLengthLogits, name='YIndelLengthLogSoftmax')\
                                       * -tf.slice(YPH, [0,self.outputShape1[0]+self.outputShape2[0]+self.outputShape3[0]], [-1,self.outputShape4[0]], name='YIndelLengthGetTruth')
            loss4 = tf.reduce_sum(YIndelLengthCrossEntropy,
                                  name='YIndelLengthReduceSum')
            loss = loss1 + loss2 + loss3 + loss4
            self.loss = loss

            # add summaries
            tf.summary.scalar('learning_rate', learningRatePH)
            tf.summary.scalar("loss1", loss1)
            tf.summary.scalar("loss2", loss2)
            tf.summary.scalar("loss3", loss3)
            tf.summary.scalar("loss4", loss4)
            tf.summary.scalar("loss", loss)

            # For report or debug. Fetching histogram summary is slow, GPU utilization will be low if enabled.
            #for var in tf.trainable_variables():
            #    tf.summary.histogram(var.op.name, var)
            self.merged_summary_op = tf.summary.merge_all()

            self.training_op = tf.train.AdamOptimizer(
                learning_rate=learningRatePH).minimize(loss)
            self.init_op = tf.global_variables_initializer()
Example #12
    def _buildGraph(self):
        with self.g.as_default():
            XPH = tf.placeholder(tf.float32, [
                None, self.inputShape[0], self.inputShape[1],
                self.inputShape[2]
            ],
                                 name='XPH')
            self.XPH = XPH

            YPH = tf.placeholder(tf.float32, [
                None, self.outputShape1[0] + self.outputShape2[0] +
                self.outputShape3[0] + self.outputShape4[0]
            ],
                                 name='YPH')
            self.YPH = YPH

            learningRatePH = tf.placeholder(tf.float32,
                                            shape=[],
                                            name='learningRatePH')
            self.learningRatePH = learningRatePH

            phasePH = tf.placeholder(tf.bool, shape=[], name='phasePH')
            self.phasePH = phasePH

            dropoutRatePH = tf.placeholder(tf.float32,
                                           shape=[],
                                           name='dropoutRatePH')
            self.dropoutRatePH = dropoutRatePH

            conv1 = tf.layers.conv2d(
                inputs=XPH,
                filters=self.numFeature1,
                kernel_size=self.kernelSize1,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                padding="same",
                activation=selu.selu,
                name='conv1')
            self.conv1 = conv1

            conv2 = tf.layers.conv2d(
                inputs=conv1,
                filters=self.numFeature2,
                kernel_size=self.kernelSize2,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                padding="same",
                activation=selu.selu,
                name='conv2')
            self.conv2 = conv2

            conv3 = tf.layers.conv2d(
                inputs=conv2,
                filters=self.numFeature3,
                kernel_size=self.kernelSize3,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                padding="same",
                activation=selu.selu,
                name='conv3')
            self.conv3 = conv3

            flat_size = self.inputShape[0] * self.inputShape[
                1] * self.numFeature3
            conv3_flat = tf.reshape(conv3, [-1, flat_size])

            fc4 = tf.layers.dense(
                inputs=conv3_flat,
                units=self.hiddenLayerUnits4,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                activation=selu.selu,
                name='fc4')
            self.fc4 = fc4

            dropout4 = selu.dropout_selu(fc4,
                                         dropoutRatePH,
                                         training=phasePH,
                                         name='dropout4')
            self.dropout4 = dropout4

            fc5 = tf.layers.dense(
                inputs=dropout4,
                units=self.hiddenLayerUnits5,
                kernel_initializer=tf.truncated_normal_initializer(
                    stddev=1e-5, dtype=tf.float32),
                activation=selu.selu,
                name='fc5')
            self.fc5 = fc5

            dropout5 = selu.dropout_selu(fc5,
                                         dropoutRatePH,
                                         training=phasePH,
                                         name='dropout5')
            self.dropout5 = dropout5

            epsilon = tf.constant(value=1e-10)
            YBaseChangeSigmoid = tf.layers.dense(inputs=dropout5,
                                                 units=self.outputShape1[0],
                                                 activation=tf.nn.sigmoid,
                                                 name='YBaseChangeSigmoid')
            self.YBaseChangeSigmoid = YBaseChangeSigmoid
            YZygosityFC = tf.layers.dense(inputs=dropout5,
                                          units=self.outputShape2[0],
                                          activation=selu.selu,
                                          name='YZygosityFC')
            YZygosityLogits = YZygosityFC + epsilon
            YZygositySoftmax = tf.nn.softmax(YZygosityLogits,
                                             name='YZygositySoftmax')
            self.YZygositySoftmax = YZygositySoftmax
            YVarTypeFC = tf.layers.dense(inputs=dropout5,
                                         units=self.outputShape3[0],
                                         activation=selu.selu,
                                         name='YVarTypeFC')
            YVarTypeLogits = YVarTypeFC + epsilon
            YVarTypeSoftmax = tf.nn.softmax(YVarTypeLogits,
                                            name='YVarTypeSoftmax')
            self.YVarTypeSoftmax = YVarTypeSoftmax
            YIndelLengthFC = tf.layers.dense(inputs=dropout5,
                                             units=self.outputShape4[0],
                                             activation=selu.selu,
                                             name='YIndelLengthFC')
            YIndelLengthLogits = YIndelLengthFC + epsilon
            YIndelLengthSoftmax = tf.nn.softmax(YIndelLengthLogits,
                                                name='YIndelLengthSoftmax')
            self.YIndelLengthSoftmax = YIndelLengthSoftmax

            loss1 = tf.reduce_sum(
                tf.pow(
                    YBaseChangeSigmoid -
                    tf.slice(YPH, [0, 0], [-1, self.outputShape1[0]]), 2))
            YZygosityCrossEntropy = tf.nn.log_softmax(YZygosityLogits)\
                                    * -tf.slice(YPH, [0,self.outputShape1[0]], [-1,self.outputShape2[0]])
            loss2 = tf.reduce_sum(YZygosityCrossEntropy)
            YVarTypeCrossEntropy = tf.nn.log_softmax(YVarTypeLogits)\
                                   * -tf.slice(YPH, [0,self.outputShape1[0]+self.outputShape2[0]], [-1,self.outputShape3[0]])
            loss3 = tf.reduce_sum(YVarTypeCrossEntropy)
            YIndelLengthCrossEntropy = tf.nn.log_softmax(YIndelLengthLogits)\
                                       * -tf.slice(YPH, [0,self.outputShape1[0]+self.outputShape2[0]+self.outputShape3[0]], [-1,self.outputShape4[0]])
            loss4 = tf.reduce_sum(YIndelLengthCrossEntropy)
            loss = loss1 + loss2 + loss3 + loss4
            self.loss = loss

            # add summaries
            tf.summary.scalar('learning_rate', learningRatePH)
            tf.summary.scalar("loss1", loss1)
            tf.summary.scalar("loss2", loss2)
            tf.summary.scalar("loss3", loss3)
            tf.summary.scalar("loss4", loss4)
            tf.summary.scalar("loss", loss)
            #for var in tf.trainable_variables():
            #    tf.summary.histogram(var.op.name, var)
            self.merged_summary_op = tf.summary.merge_all()

            self.training_op = tf.train.AdamOptimizer(
                learning_rate=learningRatePH).minimize(loss)
            self.init_op = tf.global_variables_initializer()
Example #13
    def model(self, points_tensor, input_tensor, is_training=True):
        """
        Arguments:
            points_tensor: [b, n, 3] point cloud
            input_tensor: [b, n, channels] extra data defined for each point
        """
        b = points_tensor.get_shape()[0].value
        n = points_tensor.get_shape()[1].value
        in_channels = input_tensor.get_shape()[2].value


        voxel_size = tf.constant([0.1])
        stride = tf.constant([1, 1, 1])
        filter1_tensor = tf.get_variable("filter1", [3, 3, 3, in_channels, 9])
        conv1 = conv3p(points_tensor, input_tensor, filter1_tensor, stride, voxel_size)
        relu1 = selu.selu(conv1)

        stride = tf.constant([2, 2, 2])
        filter2_tensor = tf.get_variable("filter2", [3, 3, 3, 9, 9])
        conv2 = conv3p(points_tensor, relu1, filter2_tensor, stride, voxel_size)
        relu2 = selu.selu(conv2)

        stride = tf.constant([3, 3, 3])
        filter3_tensor = tf.get_variable("filter3", [3, 3, 3, 9, 9])
        conv3 = conv3p(points_tensor, relu2, filter3_tensor, stride, voxel_size)
        skipCon1 = tf.add(relu1, conv3)
        relu3 = selu.selu(skipCon1)

        stride = tf.constant([4, 4, 4])
        filter4_tensor = tf.get_variable("filter4", [3, 3, 3, 9, 9])
        conv4 = conv3p(points_tensor, relu3, filter4_tensor, stride, voxel_size)
        relu4 = selu.selu(conv4)

        stride = tf.constant([5, 5, 5])
        filter5_tensor = tf.get_variable("filter5", [3, 3, 3, 9, 9])
        conv5 = conv3p(points_tensor, relu4, filter5_tensor, stride, voxel_size)
        skipCon2 = tf.add(relu3, conv5)
        relu5 = selu.selu(skipCon2)

        stride = tf.constant([6, 6, 6])
        filter6_tensor = tf.get_variable("filter6", [3, 3, 3, 9, 9])
        conv6 = conv3p(points_tensor, relu5, filter6_tensor, stride, voxel_size)
        relu6 = selu.selu(conv6)

        stride = tf.constant([7, 7, 7])
        filter7_tensor = tf.get_variable("filter7", [3, 3, 3, 9, 9])
        conv7 = conv3p(points_tensor, relu6, filter7_tensor, stride, voxel_size)
        skipCon3 = tf.add(relu5, conv7)
        relu7 = selu.selu(skipCon3)

        stride = tf.constant([8, 8, 8])
        filter8_tensor = tf.get_variable("filter8", [3, 3, 3, 9, 9])
        conv8 = conv3p(points_tensor, relu7, filter8_tensor, stride, voxel_size)
        relu8 = selu.selu(conv8)

        stride = tf.constant([9, 9, 9])
        filter9_tensor = tf.get_variable("filter9", [3, 3, 3, 9, 9])
        conv9 = conv3p(points_tensor, relu8, filter9_tensor, stride, voxel_size)
        skipCon4 = tf.add(relu7, conv9)
        relu9 = selu.selu(skipCon4)

        stride = tf.constant([10, 10, 10])
        filter10_tensor = tf.get_variable("filter10", [3, 3, 3, 9, 9])
        conv10 = conv3p(points_tensor, relu9, filter10_tensor, stride, voxel_size)
        relu10 = selu.selu(conv10)

        stride = tf.constant([11, 11, 11])
        filter11_tensor = tf.get_variable("filter11", [3, 3, 3, 9, 9])
        conv11 = conv3p(points_tensor, relu10, filter11_tensor, stride, voxel_size)
        skipCon5 = tf.add(relu9, conv11)
        relu11 = selu.selu(skipCon5)

        stride = tf.constant([12, 12, 12])
        filter12_tensor = tf.get_variable("filter12", [3, 3, 3, 9, 9])
        conv12 = conv3p(points_tensor, relu11, filter12_tensor, stride, voxel_size)
        relu12 = selu.selu(conv12)

        stride = tf.constant([13, 13, 13])
        filter13_tensor = tf.get_variable("filter13", [3, 3, 3, 9, 9])
        conv13 = conv3p(points_tensor, relu12, filter13_tensor, stride, voxel_size)
        skipCon6 = tf.add(relu11, conv13)
        relu13 = selu.selu(skipCon6)

        stride = tf.constant([14, 14, 14])
        filter14_tensor = tf.get_variable("filter14", [3, 3, 3, 9, 9])
        conv14 = conv3p(points_tensor, relu13, filter14_tensor, stride, voxel_size)
        relu14 = selu.selu(conv14)

        stride = tf.constant([15, 15, 15])
        filter15_tensor = tf.get_variable("filter15", [3, 3, 3, 9, 9])
        conv15 = conv3p(points_tensor, relu14, filter15_tensor, stride, voxel_size)
        skipCon7 = tf.add(relu13, conv15)
        relu15 = selu.selu(skipCon7)

        stride = tf.constant([16, 16, 16])
        filter16_tensor = tf.get_variable("filter16", [3, 3, 3, 9, 9])
        conv16 = conv3p(points_tensor, relu15, filter16_tensor, stride, voxel_size)
        relu16 = selu.selu(conv16)

        feat = tf.concat([relu1, relu2, relu3, relu4, relu5, relu6, relu7, relu8,
                          relu9, relu10, relu11, relu12, relu13, relu14, relu15,
                          relu16], axis=2)  # local features
        view = tf.reshape(feat, [-1, n * 144])  # global feature

        fc1 = tf.contrib.layers.fully_connected(view, 512, activation_fn=selu.selu)
        
        dropout1 = selu.dropout_selu(x=fc1, rate=0.5, training=is_training)

        fc2 = tf.contrib.layers.fully_connected(dropout1, self.num_class, activation_fn=selu.selu)

        return fc2
Example #14
    def _build_graph(self):
        """
        Build the computation graph for the model
        """

        self.graph = self.g
        self.layers = []  # A list used to contain meaningful intermediate layers
        with self.graph.as_default():
            tf.set_random_seed(param.RANDOM_SEED)

            # Conversion to tensors for some values
            self.epsilon = tf.constant(value=1e-10, dtype=self.float_type)
            self.input_shape_tf = (None, self.input_shape[0],
                                   self.input_shape[1], self.input_shape[2])

            # Place holders
            self.X_placeholder = tf.placeholder(self.float_type,
                                                self.input_shape_tf,
                                                name='X_placeholder')
            self.Y_placeholder = tf.placeholder(self.float_type, [
                None,
                self.output_base_change_shape + self.output_zygosity_shape +
                self.output_variant_type_shape + self.output_indel_length_shape
            ],
                                                name='Y_placeholder')
            self.layers.append(self.X_placeholder)

            self.learning_rate_placeholder = tf.placeholder(
                self.float_type, shape=[], name='learning_rate_placeholder')
            self.phase_placeholder = tf.placeholder(tf.bool,
                                                    shape=[],
                                                    name='phase_placeholder')
            self.regularization_L2_lambda_placeholder = tf.placeholder(
                self.float_type,
                shape=[],
                name='regularization_L2_lambda_placeholder')
            self.task_loss_weights_placeholder = tf.placeholder(
                self.float_type,
                shape=self.task_loss_weights.shape,
                name='task_loss_weights_placeholder')
            self.output_base_change_entropy_weights_placeholder = tf.placeholder(
                self.float_type,
                shape=self.output_base_change_entropy_weights.shape,
                name='output_base_change_entropy_weights_placeholder')
            self.output_zygosity_entropy_weights_placeholder = tf.placeholder(
                self.float_type,
                shape=self.output_zygosity_entropy_weights.shape,
                name='output_zygosity_entropy_weights_placeholder')
            self.output_variant_type_entropy_weights_placeholder = tf.placeholder(
                self.float_type,
                shape=self.output_variant_type_entropy_weights.shape,
                name='output_variant_type_entropy_weights_placeholder')
            self.output_indel_length_entropy_weights_placeholder = tf.placeholder(
                self.float_type,
                shape=self.output_indel_length_entropy_weights.shape,
                name='output_indel_length_entropy_weights_placeholder')

            # variance_scaling_initializer with factor=1.0 and mode='FAN_IN' is
            # LeCun normal initialization, the scheme recommended for SELU
            # networks (despite the variable's "he_initializer" name).
            he_initializer = tf.contrib.layers.variance_scaling_initializer(
                factor=1.0, mode='FAN_IN', seed=param.OPERATION_SEED)

            if self.structure == "2BiLSTM":
                self.L3_dropout_rate_placeholder = tf.placeholder(
                    self.float_type,
                    shape=[],
                    name='L3_dropout_rate_placeholder')

                # Flatten the 2nd (ACGT) and 3rd (Ref Ins Del SNP) dimension
                self.X_flattened_2D = tf.reshape(
                    self.X_placeholder,
                    shape=(tf.shape(self.X_placeholder)[0],
                           self.input_shape_tf[1],
                           self.input_shape_tf[2] * self.input_shape_tf[3]),
                    name="X_flattened_2D")
                self.layers.append(self.X_flattened_2D)

                self.X_flattened_2D_transposed = tf.transpose(
                    self.X_flattened_2D, [1, 0, 2],
                    name="X_flattened_2D_transposed")

                is_gpu_available = len(Clair.get_available_gpus()) > 0
                # print("is_gpu_available:", is_gpu_available)
                self.LSTM1, self.LSTM1_state = Clair.adaptive_LSTM_layer(
                    inputs=self.X_flattened_2D_transposed,
                    num_units=self.LSTM1_num_units,
                    name="LSTM1",
                    direction="bidirectional",
                    num_layers=1,
                    cudnn_gpu_available=is_gpu_available)
                self.layers.append(self.LSTM1)
                # print(self.LSTM1, self.LSTM1_state)
                self.LSTM1_dropout = tf.layers.dropout(
                    inputs=self.LSTM1,
                    rate=self.LSTM1_dropout_rate,
                    training=self.phase_placeholder,
                    name="LSTM1_dropout",
                    seed=param.OPERATION_SEED)
                self.LSTM2, _ = Clair.adaptive_LSTM_layer(
                    inputs=self.LSTM1_dropout,
                    num_units=self.LSTM2_num_units,
                    name="LSTM2",
                    direction="bidirectional",
                    num_layers=1,
                    cudnn_gpu_available=is_gpu_available)
                self.layers.append(self.LSTM2)
                self.LSTM2_dropout = tf.layers.dropout(
                    inputs=self.LSTM2,
                    rate=self.LSTM2_dropout_rate,
                    training=self.phase_placeholder,
                    name="LSTM2_dropout",
                    seed=param.OPERATION_SEED)
                self.LSTM2_transposed = tf.transpose(self.LSTM2_dropout,
                                                     [1, 0, 2],
                                                     name="LSTM2_transposed")

                # Slice dense layer 2
                self.L2 = Clair.slice_dense_layer(
                    inputs=self.LSTM2_transposed,
                    units=self.L2_num_units,
                    slice_dimension=2,
                    name="L2",
                    activation=selu.selu,
                    kernel_initializer=he_initializer)
                self.layers.append(self.L2)

                self.L2_flattened = tf.reshape(
                    self.L2,
                    shape=(tf.shape(self.L2)[0],
                           self.L2_num_units * self.LSTM2_num_units * 2),
                    name="L2_flattened")
                self.layers.append(self.L2_flattened)

                # Dense layer 3
                self.L3 = tf.layers.dense(inputs=self.L2_flattened,
                                          units=self.L3_num_units,
                                          name="L3",
                                          activation=selu.selu,
                                          kernel_initializer=he_initializer)
                self.layers.append(self.L3)

                self.L3_dropout = selu.dropout_selu(
                    self.L3,
                    self.L3_dropout_rate_placeholder,
                    training=self.phase_placeholder,
                    name='L3_dropout',
                    seed=param.OPERATION_SEED)
                self.layers.append(self.L3_dropout)

                self.core_final_layer = self.L3_dropout

            # Output layer
            with tf.variable_scope("Prediction"):
                self.Y_base_change_logits = tf.layers.dense(
                    inputs=self.core_final_layer,
                    units=self.output_base_change_shape,
                    kernel_initializer=he_initializer,
                    activation=selu.selu,
                    name='Y_base_change_logits')
                self.Y_base_change = tf.nn.softmax(self.Y_base_change_logits,
                                                   name='Y_base_change')
                self.layers.append(self.Y_base_change)

                self.Y_zygosity_logits = tf.layers.dense(
                    inputs=self.core_final_layer,
                    units=self.output_zygosity_shape,
                    kernel_initializer=he_initializer,
                    activation=selu.selu,
                    name='Y_zygosity_logits')
                self.Y_zygosity = tf.nn.softmax(self.Y_zygosity_logits,
                                                name='Y_zygosity')
                self.layers.append(self.Y_zygosity)

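                # The "legacy_0.1" structure keeps the shorter tensor names
                # ("Y_variant_logits"/"Y_variant"), presumably so that checkpoints
                # trained with the old naming can still be restored.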
                if "legacy_0.1" in self.structure:
                    self.Y_variant_type_logits = tf.layers.dense(
                        inputs=self.core_final_layer,
                        units=self.output_variant_type_shape,
                        kernel_initializer=he_initializer,
                        activation=selu.selu,
                        name='Y_variant_logits')
                    self.Y_variant_type = tf.nn.softmax(
                        self.Y_variant_type_logits, name='Y_variant')
                else:
                    self.Y_variant_type_logits = tf.layers.dense(
                        inputs=self.core_final_layer,
                        units=self.output_variant_type_shape,
                        kernel_initializer=he_initializer,
                        activation=selu.selu,
                        name='Y_variant_type_logits')
                    self.Y_variant_type = tf.nn.softmax(
                        self.Y_variant_type_logits, name='Y_variant_type')
                self.layers.append(self.Y_variant_type)

                self.Y_indel_length_logits = tf.layers.dense(
                    inputs=self.core_final_layer,
                    units=self.output_indel_length_shape,
                    kernel_initializer=he_initializer,
                    activation=selu.selu,
                    name='Y_indel_length_logits')
                self.Y_indel_length = tf.nn.softmax(self.Y_indel_length_logits,
                                                    name='Y_indel_length')
                self.layers.append(self.Y_indel_length)

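                # Collected in the same order as the label split in the Loss scope.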
                self.Y = [
                    self.Y_base_change, self.Y_zygosity, self.Y_variant_type,
                    self.Y_indel_length
                ]

            # Split the concatenated label tensor into the four task labels,
            # with per-task widths given by output_label_split
            with tf.variable_scope("Loss"):
                Y_base_change_label, Y_zygosity_label, Y_variant_type_label, Y_indel_length_label = tf.split(
                    self.Y_placeholder,
                    self.output_label_split,
                    axis=1,
                    name="label_split")

                # Cross Entropy loss
                Y_variant_type_str = "Y_variant_type"

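                # Clair.weighted_cross_entropy is defined elsewhere in the class.
                # From its arguments, it presumably computes a class-weighted cross
                # entropy along the lines of
                #   -sum(weights * labels * log(softmax_prediction + epsilon)),
                # with epsilon guarding against log(0).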
                self.Y_base_change_cross_entropy = Clair.weighted_cross_entropy(
                    softmax_prediction=self.Y_base_change,
                    labels=Y_base_change_label,
                    weights=self.output_base_change_entropy_weights_placeholder,
                    epsilon=self.epsilon,
                    name="Y_base_change_cross_entropy")
                self.Y_zygosity_cross_entropy = Clair.weighted_cross_entropy(
                    softmax_prediction=self.Y_zygosity,
                    labels=Y_zygosity_label,
                    weights=self.output_zygosity_entropy_weights_placeholder,
                    epsilon=self.epsilon,
                    name="Y_zygosity_cross_entropy")
                self.Y_variant_type_cross_entropy = Clair.weighted_cross_entropy(
                    softmax_prediction=self.Y_variant_type,
                    labels=Y_variant_type_label,
                    weights=self.output_variant_type_entropy_weights_placeholder,
                    epsilon=self.epsilon,
                    name=Y_variant_type_str + "_cross_entropy")
                self.Y_indel_length_entropy = Clair.weighted_cross_entropy(
                    softmax_prediction=self.Y_indel_length,
                    labels=Y_indel_length_label,
                    weights=self.output_indel_length_entropy_weights_placeholder,
                    epsilon=self.epsilon,
                    name="Y_indel_length_entropy")

                self.Y_base_change_loss = tf.reduce_sum(
                    self.Y_base_change_cross_entropy,
                    name="Y_base_change_loss")
                self.Y_zygosity_loss = tf.reduce_sum(
                    self.Y_zygosity_cross_entropy, name="Y_zygosity_loss")
                self.Y_variant_type_loss = tf.reduce_sum(
                    self.Y_variant_type_cross_entropy,
                    name=Y_variant_type_str + "_loss")
                self.Y_indel_length_loss = tf.reduce_sum(
                    self.Y_indel_length_entropy, name="Y_indel_length_loss")

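                # L2 regularization: sum of squared magnitudes of all non-bias
                # trainable variables, scaled by the lambda placeholder.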
                self.regularization_L2_loss_without_lambda = tf.add_n([
                    tf.nn.l2_loss(v) for v in tf.trainable_variables()
                    if 'bias' not in v.name
                ])
                self.regularization_L2_loss = (
                    self.regularization_L2_loss_without_lambda *
                    self.regularization_L2_lambda_placeholder)

                # Weighted sum of the task losses and the L2 term, with weights
                # specified by loss_ratio via task_loss_weights_placeholder
                self.total_loss = tf.reduce_sum(
                    tf.multiply(
                        self.task_loss_weights_placeholder,
                        tf.stack([
                            self.Y_base_change_loss, self.Y_zygosity_loss,
                            self.Y_variant_type_loss, self.Y_indel_length_loss,
                            self.regularization_L2_loss
                        ])),
                    name="Total_loss")

            # Create the saver for the model
            self.saver = tf.train.Saver(max_to_keep=1000000)

            # Include gradient clipping if RNN architectures are used
            if "RNN" in self.structure or "LSTM" in self.structure:
                with tf.variable_scope("Training_Operation"):
                    self.optimizer = tf.train.AdamOptimizer(
                        learning_rate=self.learning_rate_placeholder)
                    gradients, variables = zip(
                        *self.optimizer.compute_gradients(self.total_loss))
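                    # Rescale all gradients jointly so their global norm is at most
                    # 5.0, mitigating exploding gradients in the recurrent layers.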
                    gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
                    self.training_op = self.optimizer.apply_gradients(
                        zip(gradients, variables))
            else:
                self.training_op = tf.train.AdamOptimizer(
                    learning_rate=self.learning_rate_placeholder).minimize(
                        self.total_loss)

            self.init_op = tf.global_variables_initializer()

            # Summary logging
            self.training_summary_op = tf.summary.merge([
                tf.summary.scalar('learning_rate',
                                  self.learning_rate_placeholder),
                tf.summary.scalar('l2_Lambda',
                                  self.regularization_L2_lambda_placeholder),
                tf.summary.scalar("Y_base_change_loss",
                                  self.Y_base_change_loss),
                tf.summary.scalar("Y_zygosity_loss", self.Y_zygosity_loss),
                tf.summary.scalar("Y_variant_type_loss",
                                  self.Y_variant_type_loss),
                tf.summary.scalar("Y_indel_length_loss",
                                  self.Y_indel_length_loss),
                tf.summary.scalar("Regularization_loss",
                                  self.regularization_L2_loss),
                tf.summary.scalar("Total_loss", self.total_loss)
            ])

            # For reporting or debugging only. Fetching histogram summaries is slow;
            # GPU utilization will drop if they are enabled.
            # for var in tf.trainable_variables():
            #    tf.summary.histogram(var.op.name, var)
            # self.merged_summary_op = tf.summary.merge_all()

            # Aliasing
            self.loss = self.total_loss