import numpy as np
import tensorflow as tf


def LSTM_Sentiment(input_tensor):
    # Reference paper: https://www.bioinf.jku.at/publications/older/2604.pdf
    # Single-layer LSTM over the input sequence, followed by a fully
    # connected layer mapping the last hidden state to two class logits.
    lstmCell = tf.compat.v1.nn.rnn_cell.BasicLSTMCell(1024)
    output_rnn, _ = tf.compat.v1.nn.dynamic_rnn(lstmCell, input_tensor,
                                                dtype=tf.float32)
    W_fc = tf.Variable(tf.random.truncated_normal([1024, 2]))
    b_fc = tf.Variable(tf.constant(0.1, shape=[2]))
    # [batch, time, 1024] -> [time, batch, 1024], then take the last step.
    output_transposed = tf.transpose(output_rnn, perm=[1, 0, 2])
    output = tf.gather(output_transposed,
                       int(output_transposed.get_shape()[0]) - 1)
    return tf.identity(tf.matmul(output, W_fc) + b_fc, name="output")
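# Usage sketch (assumptions: TF1 graph mode and pre-embedded input shaped
# [batch, max_steps, embed_dim]; the 250-step, 300-dim shapes below are
# illustrative, not from the original code). Note the time dimension must be
# static, since the last-step gather above reads it via get_shape().
input_ph = tf.compat.v1.placeholder(tf.float32, [None, 250, 300], name="input")
logits = LSTM_Sentiment(input_ph)  # [batch, 2] unnormalized class scores
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    scores = sess.run(logits,
                      feed_dict={input_ph: np.zeros((4, 250, 300), np.float32)})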
def build_fcn_net(self, inp, use_dice=False):
    with self.graph.as_default():
        self.saver = tf.train.Saver(max_to_keep=1)

        with tf.name_scope("Out"):
            # Three-layer fully connected head; the activation after the
            # first two layers is either Dice or PReLU.
            bn1 = tf.layers.batch_normalization(inputs=inp, name='bn1')
            dnn1 = tf.layers.dense(bn1, 200, activation=None, name='f1')
            if use_dice:
                dnn1 = dice(dnn1, name='dice_1')
            else:
                dnn1 = prelu(dnn1, 'prelu1')
            dnn2 = tf.layers.dense(dnn1, 80, activation=None, name='f2')
            if use_dice:
                dnn2 = dice(dnn2, name='dice_2')
            else:
                dnn2 = prelu(dnn2, 'prelu2')
            dnn3 = tf.layers.dense(dnn2, 2, activation=None, name='f3')
            # Small epsilon keeps the log in the loss away from log(0).
            self.y_hat = tf.nn.softmax(dnn3) + 1e-8

        with tf.name_scope('Metrics'):
            # Cross-entropy loss and optimizer initialization.
            # Re-weight targets where core_type == 1 (the re-weighted tensor
            # is kept on self but not used by the loss below).
            coe = tf.constant([1.2, 1.2])
            coe_mask = tf.equal(self.core_type_ph, 1)
            coe_mask2 = tf.concat(
                [tf.expand_dims(coe_mask, -1) for i in range(2)], -1)
            self.target_ph_coe = tf.where(coe_mask2, self.target_ph * coe,
                                          self.target_ph)
            ctr_loss = -tf.reduce_mean(tf.log(self.y_hat) * self.target_ph)
            self.loss = ctr_loss
            # tf.summary.scalar('loss', self.loss)
            self.optimizer = tf.train.AdamOptimizer(
                learning_rate=self.lr_ph).minimize(self.loss)
            # self.optimizer = tf.train.GradientDescentOptimizer(learning_rate=self.lr_ph).minimize(self.loss)

            # Accuracy metric
            self.accuracy = tf.reduce_mean(
                tf.cast(tf.equal(tf.round(self.y_hat), self.target_ph),
                        tf.float32))
            # tf.summary.scalar('accuracy', self.accuracy)

        self.merged = tf.summary.merge_all()
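# Minimal sketch of the PReLU helper that build_fcn_net expects (assumption:
# the original `prelu` and `dice` activations are defined elsewhere in this
# codebase; this is a standard PReLU, and `dice` is not reproduced here).
def prelu(x, scope=''):
    # PReLU: max(0, x) + alpha * min(0, x) with a learnable per-channel slope.
    with tf.variable_scope('prelu_' + scope):
        alpha = tf.get_variable('alpha', x.get_shape()[-1],
                                initializer=tf.constant_initializer(0.1),
                                dtype=tf.float32)
        return tf.maximum(0.0, x) + alpha * tf.minimum(0.0, x)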
def conv2d(inputs, num_outputs, kernel_shape, mask_type, scope="conv2d"):
    # Masked convolution: mask type 'a' also hides the center pixel
    # (first layer), type 'b' keeps it (subsequent layers).
    with tf.compat.v1.variable_scope(scope):
        WEIGHT_INITIALIZER = tf.compat.v1.keras.initializers.VarianceScaling(
            scale=1.0, mode="fan_avg", distribution="uniform")
        batch_size, height, width, channel = inputs.get_shape().as_list()
        kernel_h, kernel_w = kernel_shape
        center_h = kernel_h // 2
        center_w = kernel_w // 2

        weights_shape = [kernel_h, kernel_w, channel, num_outputs]
        weights = tf.compat.v1.get_variable("weights", weights_shape,
                                            tf.float32, WEIGHT_INITIALIZER,
                                            None)

        # Zero out everything to the right of the center in the center row
        # and every row below the center, so a pixel only sees its
        # already-generated neighbors.
        mask = np.ones((kernel_h, kernel_w, channel, num_outputs),
                       dtype=np.float32)
        mask[center_h, center_w + 1:, :, :] = 0.0
        mask[center_h + 1:, :, :, :] = 0.0
        if mask_type == 'a':
            mask[center_h, center_w, :, :] = 0.0
        weights = weights * tf.constant(mask, dtype=tf.float32)

        outputs = tf.nn.conv2d(input=inputs, filters=weights,
                               strides=[1, 1, 1, 1], padding="SAME",
                               name='outputs')
        biases = tf.compat.v1.get_variable("biases", [num_outputs],
                                           tf.float32,
                                           tf.compat.v1.zeros_initializer(),
                                           None)
        outputs = tf.nn.bias_add(outputs, biases, name='outputs_plus_b')
        return outputs
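# Usage sketch (assumption: PixelCNN-style stacking, where only the first
# layer uses mask type 'a' so a pixel never conditions on itself; the input
# shape and layer widths below are illustrative).
images = tf.compat.v1.placeholder(tf.float32, [None, 28, 28, 1], name='images')
h = tf.nn.relu(conv2d(images, 64, [7, 7], mask_type='a', scope='conv_in'))
h = tf.nn.relu(conv2d(h, 64, [3, 3], mask_type='b', scope='conv_hidden'))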
def __init__(self, dat, dim_rec, dim_z, dim_gen, scope='vae'):
    assert 2 == dat.ndim
    assert isinstance(dim_rec, tuple)
    assert isinstance(dim_z, int)
    assert isinstance(dim_gen, tuple)
    init_w = tf.variance_scaling_initializer(scale=2.0, mode='fan_in',
                                             distribution='uniform')
    init_b = tf.constant_initializer(0.01)
    init_z = tf.zeros_initializer()
    with tf.variable_scope(scope):
        # The whole dataset lives in the graph; each step samples a random
        # batch of `bs_` rows from it.
        dat = self.dat = tf.constant(name='dat', value=dat)
        bs_ = self.bs_ = tf.placeholder(name='bs_', dtype=tf.int32, shape=())
        bat = self.bat = tf.random_uniform(name='bat', shape=(bs_,),
                                           minval=0, maxval=dat.shape[0],
                                           dtype=tf.int32)
        h = x = self.x = tf.nn.embedding_lookup(name='x', params=dat, ids=bat)

        # Recognition (encoder) network.
        for i, dim in enumerate(dim_rec, 1):
            name = "hr{}".format(i)
            h = tf.layers.dense(name=name, inputs=h, units=dim,
                                activation=tf.nn.relu,
                                kernel_initializer=init_w,
                                bias_initializer=init_b)
            setattr(self, name, h)
        mu = self.mu = tf.layers.dense(name='mu', inputs=h, units=dim_z,
                                       kernel_initializer=init_w,
                                       bias_initializer=init_z)
        lv = self.lv = tf.layers.dense(name='lv', inputs=h, units=dim_z,
                                       kernel_initializer=init_w,
                                       bias_initializer=init_z)
        with tf.name_scope('z'):
            # Reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I).
            h = z = self.z = mu + tf.exp(0.5 * lv) * tf.random_normal(
                shape=tf.shape(lv))

        # Generative (decoder) network.
        for i, dim in enumerate(dim_gen, 1):
            name = "hg{}".format(i)
            h = tf.layers.dense(name=name, inputs=h, units=dim,
                                activation=tf.nn.relu,
                                kernel_initializer=init_w,
                                bias_initializer=init_b)
            setattr(self, name, h)
        logits = tf.layers.dense(name='logits', inputs=h, units=dat.shape[1],
                                 # activation=tf.nn.sigmoid,
                                 kernel_initializer=init_w,
                                 bias_initializer=init_z)
        g = self.g = tf.sigmoid(logits)

        with tf.name_scope('loss_recons'):
            # Squared-error reconstruction; a sigmoid cross-entropy variant
            # is kept commented out below.
            # loss_recons = self.loss_recons = tf.reduce_mean(
            #     tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
            #         labels=x, logits=logits), axis=1))
            loss_recons = self.loss_recons = tf.reduce_mean(
                tf.reduce_sum(tf.square(x - g), axis=1))
        with tf.name_scope('loss_relent'):
            # KL divergence to the standard-normal prior (this variant omits
            # the usual 1/2 factor kept in the commented version).
            # loss_relent = self.loss_relent = tf.reduce_mean(
            #     0.5 * tf.reduce_sum((-1.0 - lv + tf.exp(lv) + tf.square(mu)), axis=1))
            loss_relent = self.loss_relent = tf.reduce_mean(
                tf.reduce_sum((-1.0 - lv + tf.exp(lv) + tf.square(mu)),
                              axis=1))
        with tf.name_scope('loss'):
            loss = self.loss = loss_relent + loss_recons
        up = self.up = tf.train.AdamOptimizer().minimize(loss)
        self.step = 0
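# Usage sketch (assumptions: the __init__ above belongs to a class named VAE,
# `dat` holds float rows scaled to [0, 1], and TF1 graph mode is active; the
# data and training loop below are illustrative, not from the original code).
dat = np.random.rand(1000, 784).astype(np.float32)  # e.g. flattened images
vae = VAE(dat, dim_rec=(256, 128), dim_z=32, dim_gen=(128, 256))
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for _ in range(100):
        _, loss = sess.run([vae.up, vae.loss], feed_dict={vae.bs_: 64})
        vae.step += 1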