from tensorflow.keras.layers import Flatten


def flatten(inputs):
    """Flattens the inputs.

    Parameters
    ----------
    inputs : tf.Tensor
        Input tensor.
    """
    return Flatten()(inputs)
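# Usage sketch (an assumption, not from the original source): Flatten keeps
# the batch axis and collapses everything else, so a (None, 28, 28, 1) image
# batch flattens to (None, 784).
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 28, 28, 1], name="images")
flat = flatten(images)  # -> shape (None, 784)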
def build_net1(inputs, output_size):
    """Four conv/max-pool stages followed by a dense read-out layer."""
    convolved1 = tf.layers.conv1d(inputs=inputs, filters=20, strides=1,
                                  kernel_size=5, padding="SAME",
                                  name="convolution-1")
    pooled1 = tf.layers.max_pooling1d(inputs=convolved1, pool_size=2,
                                      strides=2, name="max_pool-1")
    convolved2 = tf.layers.conv1d(inputs=pooled1, filters=30, strides=1,
                                  kernel_size=3, padding="SAME",
                                  name="convolution-2")
    pooled2 = tf.layers.max_pooling1d(inputs=convolved2, pool_size=2,
                                      strides=2, name="max_pool-2")
    convolved3 = tf.layers.conv1d(inputs=pooled2, filters=40, strides=1,
                                  kernel_size=3, padding="SAME",
                                  name="convolution-3")
    pooled3 = tf.layers.max_pooling1d(inputs=convolved3, pool_size=2,
                                      strides=2, name="max_pool-3")
    convolved4 = tf.layers.conv1d(inputs=pooled3, filters=50, strides=1,
                                  kernel_size=3, padding="SAME",
                                  name="convolution-4")
    pooled4 = tf.layers.max_pooling1d(inputs=convolved4, pool_size=4,
                                      strides=4, name="max_pool-4")
    flat_layer = Flatten()(pooled4)
    predict_logits = tf.layers.dense(inputs=flat_layer, units=output_size,
                                     name="predict_logits")
    return predict_logits
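# Shape sketch (the 256-sample length and 10 classes are assumptions for
# illustration): the three stride-2 pools and one stride-4 pool downsample
# the time axis by 2 * 2 * 2 * 4 = 32, so a (None, 256, 1) input reaches
# Flatten as (None, 8, 50), i.e. 8 * 50 = 400 features per example.
signal = tf.placeholder(tf.float32, [None, 256, 1], name="signal")
signal_logits = build_net1(signal, output_size=10)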
def _fully_connected_2d(input, name):
    """Bottlenecks a 4-D tensor through a dense layer, then restores its shape."""
    previous_size = input.shape
    with tf.variable_scope(name):
        input = Flatten()(input)
        input = Dense(1000)(input)
        # Project back up to the original number of elements ...
        input = Dense(int(previous_size[1] * previous_size[2] *
                          previous_size[3]))(input)
        # ... and restore the (batch, height, width, channels) layout.
        input = tf.reshape(input, (-1, int(previous_size[1]),
                                   int(previous_size[2]),
                                   int(previous_size[3])))
    return input
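# Usage sketch (the shapes are assumptions): a (None, 8, 8, 64) feature map
# is squeezed through the 1000-unit bottleneck and comes back out with the
# same (None, 8, 8, 64) layout.
feature_map = tf.placeholder(tf.float32, [None, 8, 8, 64], name="feature_map")
restored = _fully_connected_2d(feature_map, name="bottleneck")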
def _build(self):
    # Placeholders for states, one-hot actions, and discounted returns.
    self.s_input = tf.placeholder(tf.float32, [None] + list(self.state_shape),
                                  name='States')
    self.a_input = tf.placeholder(tf.float32, [None, self.n_actions],
                                  name='Actions')
    self.r_input = tf.placeholder(tf.float32, [None], name='Rewards')

    # Shared convolutional torso.
    conv1 = Conv2D(32, 8, strides=(6, 6), activation=tf.nn.relu,
                   kernel_initializer=tf.initializers.glorot_normal(),
                   bias_initializer=tf.initializers.glorot_normal(),
                   name='conv1')(self.s_input)
    conv2 = Conv2D(48, 6, strides=(3, 3), activation=tf.nn.relu,
                   kernel_initializer=tf.initializers.glorot_normal(),
                   bias_initializer=tf.initializers.glorot_normal(),
                   name='conv2')(conv1)
    f = Flatten()(conv2)
    dense1 = Dense(512, activation=tf.nn.relu,
                   kernel_initializer=tf.initializers.glorot_normal(),
                   bias_initializer=tf.initializers.glorot_normal(),
                   name='dense1')(f)

    # Policy head: softmax over actions, clipped away from zero so the
    # logs below stay finite.
    dense_pi = Dense(128, activation=tf.nn.relu,
                     kernel_initializer=tf.initializers.glorot_normal(),
                     bias_initializer=tf.initializers.glorot_normal(),
                     name='dense_pi')(dense1)
    self.pi = Dense(self.n_actions, activation=tf.nn.softmax,
                    kernel_initializer=tf.initializers.glorot_normal(),
                    bias_initializer=tf.initializers.glorot_normal(),
                    name='Pi')(dense_pi)
    self.pi = tf.clip_by_value(self.pi, 1e-8, 1.)

    # Value head: scalar state value.
    dense_v = Dense(32, activation=tf.nn.relu,
                    kernel_initializer=tf.initializers.glorot_normal(),
                    bias_initializer=tf.initializers.glorot_normal(),
                    name='dense_v')(dense1)
    self.v = Dense(1, activation=None,
                   kernel_initializer=tf.initializers.glorot_normal(),
                   name='V')(dense_v)
    self.v = tf.squeeze(self.v, axis=1)

    # Actor-critic loss: policy gradient weighted by the (stopped) advantage,
    # a squared-error value loss, and an entropy bonus with weight 0.05.
    log_policy = tf.log(tf.reduce_sum(self.pi * self.a_input, axis=1) + 1e-10)
    self.advantage = self.r_input - self.v
    loss_pi = -tf.multiply(log_policy, tf.stop_gradient(self.advantage))
    loss_v = 0.5 * tf.square(self.advantage)
    entropy_pi = -0.05 * tf.reduce_sum(self.pi * tf.log(self.pi), axis=1)
    self.loss = tf.reduce_mean(loss_pi + loss_v - entropy_pi)

    opt = tf.train.AdamOptimizer(self.lr)
    self.train_op = opt.minimize(self.loss)
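# Usage sketch (an assumption; `sess`, `agent`, and the batch arrays are
# hypothetical): one synchronous actor-critic update, feeding states,
# one-hot actions, and discounted returns into the placeholders above.
# sess.run(agent.train_op, feed_dict={agent.s_input: states,
#                                     agent.a_input: actions_one_hot,
#                                     agent.r_input: returns})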
def initialize_model(config, num_people, current_layer, is_training):
    for count, layer_conf in enumerate(config.model.layers):
        name = get_or_none(layer_conf, "name")
        with tf.variable_scope(layer_conf.scope, reuse=tf.AUTO_REUSE):
            if layer_conf.HasField("convolutional"):
                current_layer = relu(
                    conv2d(current_layer,
                           layer_conf.convolutional.filters,
                           max(layer_conf.convolutional.kernel_size.width,
                               layer_conf.convolutional.kernel_size.height),
                           # data_format='channels_last',
                           padding="same",
                           scope="conv"))
            elif layer_conf.HasField("pool"):
                if layer_conf.pool.type == "max":
                    current_layer = MaxPooling2D(
                        (layer_conf.pool.size.width,
                         layer_conf.pool.size.height),
                        strides=(layer_conf.pool.size.width,
                                 layer_conf.pool.size.height),
                        name=name)(current_layer)
                else:
                    raise ValueError("Unsupported pool type: " +
                                     layer_conf.pool.type)
            elif layer_conf.HasField("dense"):
                current_layer = Dense(
                    layer_conf.dense.units,
                    activation=str_to_activation(layer_conf.dense.activation),
                    name=name)(current_layer)
            elif layer_conf.HasField("flatten"):
                current_layer = Flatten(name=name)(current_layer)
            elif layer_conf.HasField("dropout"):
                # Scaling the rate by is_training (expected to be 0 or 1)
                # disables dropout at evaluation time.
                current_layer = Dropout(layer_conf.dropout.rate * is_training,
                                        name=name)(current_layer)
            elif layer_conf.HasField("transfer"):
                # Transfer layers are handled outside this function now.
                if count != 0:
                    raise ValueError("Transfer layer must occur first.")
            else:
                raise ValueError("Unsupported layer.")
    return current_layer
def _fully_connected(input, name):
    with tf.variable_scope(name):
        input = Flatten()(input)
        input = Dense(1)(input)
    return input
import tensorflow as tf
from tensorflow.keras.datasets.mnist import load_data
from tensorflow.keras.layers import Flatten

(train_X, train_y), (test_X, test_y) = load_data()

# mlp for Multi Layer Perceptron
with tf.variable_scope("mlp") as mlp_scope:
    x = tf.placeholder(tf.float32, shape=(None, 28, 28), name="x")
    y_target = tf.placeholder(tf.uint8, shape=(None,), name="y_target")

    # Preprocess x: flatten the 28x28 images and scale pixels to [0, 1].
    x_flattened = Flatten()(x)
    x_normalized = x_flattened / 255.0

    w1 = tf.Variable(tf.random_normal(shape=(784, 100)), name="w1")
    b1 = tf.Variable(tf.zeros(shape=(100)), name="b1")
    # h1 = tf.nn.relu(tf.add(tf.matmul(x, w1), b1))
    h1 = tf.nn.relu((x_normalized @ w1) + b1)

    w2 = tf.Variable(tf.random_normal(shape=(100, 10)), name="w2")
    b2 = tf.Variable(tf.zeros(shape=(10)), name="b2")
    # logits = tf.add(tf.matmul(h1, w2), b2)
    logits = (h1 @ w2) + b2
    y_pred = tf.nn.softmax(logits)

    learn_rate = tf.placeholder(tf.float32, shape=None, name="learn_rate")
    # One-hot encoding
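    # Continuation sketch (an assumption; the original snippet breaks off at
    # the one-hot step): encode the labels, apply cross-entropy, and minimize
    # with plain gradient descent via the learn_rate placeholder above.
    y_one_hot = tf.one_hot(y_target, depth=10)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_one_hot,
                                                   logits=logits))
    train_op = tf.train.GradientDescentOptimizer(learn_rate).minimize(loss)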
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras import preprocessing
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Flatten, Dense

max_features = 10000  # vocabulary size
max_len = 20          # keep only the first 20 words of each review

(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
x_train = preprocessing.sequence.pad_sequences(x_train, maxlen=max_len)
x_test = preprocessing.sequence.pad_sequences(x_test, maxlen=max_len)

model = Sequential()
# The embedding vocabulary matches the num_words cap used when loading.
model.add(Embedding(max_features, 8, input_length=max_len))
model.add(Flatten())
model.add(Dense(1, activation='sigmoid'))
model.compile(optimizer='rmsprop', loss='binary_crossentropy',
              metrics=['acc'])
model.summary()

history = model.fit(x_train, y_train, epochs=10, batch_size=32,
                    validation_split=0.2)
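# Evaluation sketch (an assumption; not in the original snippet): score the
# held-out IMDB split with the same loss and accuracy metric used above.
test_loss, test_acc = model.evaluate(x_test, y_test)
print('test accuracy: %.3f' % test_acc)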
def __init__(self):
    super(Discriminator, self).__init__()
    arg = {'activation': tf.nn.relu, 'padding': 'same'}

    # Block 1: 64-filter convolutions.
    self.conv_11 = Conv2D(name='di_conv_11', filters=64, kernel_size=(5, 5),
                          strides=(2, 2), **arg)
    self.conv_12 = Conv2D(name='di_conv_12', filters=64, kernel_size=(3, 3),
                          strides=(1, 1), **arg)
    self.conv_13 = Conv2D(name='di_conv_13', filters=64, kernel_size=(3, 3),
                          strides=(1, 1), **arg)
    self.pool_1 = MaxPooling2D(name='di_pool_1', pool_size=(5, 5),
                               strides=(2, 2), padding='same')
    self.drop_1 = Dropout(0.5)

    # Block 2: 128-filter convolutions.
    self.conv_21 = Conv2D(name='di_conv_21', filters=128, kernel_size=(3, 3),
                          strides=(1, 1), **arg)
    self.conv_22 = Conv2D(name='di_conv_22', filters=128, kernel_size=(3, 3),
                          strides=(1, 1), **arg)
    self.conv_23 = Conv2D(name='di_conv_23', filters=128, kernel_size=(3, 3),
                          strides=(1, 1), **arg)
    self.pool_2 = MaxPooling2D(name='di_pool_2', pool_size=(3, 3),
                               strides=(2, 2), padding='same')
    self.drop_2 = Dropout(0.5)

    # Block 3: 256-filter convolutions.
    self.conv_31 = Conv2D(name='di_conv_31', filters=256, kernel_size=(3, 3),
                          strides=(2, 2), **arg)
    self.conv_32 = Conv2D(name='di_conv_32', filters=256, kernel_size=(3, 3),
                          strides=(1, 1), **arg)
    self.conv_33 = Conv2D(name='di_conv_33', filters=256, kernel_size=(3, 3),
                          strides=(1, 1), **arg)
    self.pool_3 = MaxPooling2D(name='di_pool_3', pool_size=(3, 3),
                               strides=(2, 2), padding='same')
    self.drop_3 = Dropout(0.5)

    # Classifier head.
    self.flattener = Flatten()
    self.drop_4 = Dropout(0.5)
    self.classifier_1 = Dense(name='di_cls_1', units=512,
                              activation=tf.nn.relu, use_bias=True)
    self.drop_5 = Dropout(0.5)
    self.classifier_2 = Dense(name='di_cls_2', units=256,
                              activation=tf.nn.relu, use_bias=True)
    self.classifier_3 = Dense(name='di_cls_3', units=2,
                              activation=None, use_bias=True)
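# Forward-pass sketch (an assumption: the original snippet only defines the
# layers, so this `call` simply wires them in naming order and returns
# two-class logits).
def call(self, x, training=False):
    x = self.drop_1(self.pool_1(self.conv_13(self.conv_12(self.conv_11(x)))),
                    training=training)
    x = self.drop_2(self.pool_2(self.conv_23(self.conv_22(self.conv_21(x)))),
                    training=training)
    x = self.drop_3(self.pool_3(self.conv_33(self.conv_32(self.conv_31(x)))),
                    training=training)
    x = self.drop_4(self.flattener(x), training=training)
    x = self.drop_5(self.classifier_1(x), training=training)
    return self.classifier_3(self.classifier_2(x))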
def __init__(self):
    super(Encoder, self).__init__()
    arg = {'activation': tf.nn.relu, 'padding': 'same'}

    # Block 1: convolutions plus a pooled, flattened 128-d compression branch.
    self.conv_11 = Conv2D(name='e_conv_11', filters=64, kernel_size=7,
                          strides=(2, 2), **arg)
    self.conv_12 = Conv2D(name='e_conv_12', filters=64, kernel_size=7,
                          strides=(2, 2), **arg)
    self.pool_1 = MaxPooling2D(name='e_pool_1', pool_size=4, strides=(2, 2),
                               padding='same')
    self.compress_11 = AveragePooling2D(name='e_comp_11', pool_size=5,
                                        strides=(3, 3), padding='same')
    self.compress_12 = Flatten()
    self.compress_13 = Dense(name='e_comp_13', units=128, activation=None,
                             use_bias=False)
    # activity_regularizer=tf.keras.regularizers.l2(l=0.01))
    self.batch_norm_1 = BatchNormalization(name='e_bn_1')
    self.drop_1 = Dropout(name='e_drop_1', rate=0.5)

    # Block 2: 128-filter convolutions with the same compression pattern.
    self.conv_21 = Conv2D(name='e_conv_21', filters=128, kernel_size=5,
                          strides=(1, 1), **arg)
    self.conv_22 = Conv2D(name='e_conv_22', filters=128, kernel_size=5,
                          strides=(1, 1), **arg)
    self.pool_2 = MaxPooling2D(name='e_pool_2', pool_size=4, strides=(2, 2),
                               padding='same')
    self.compress_21 = AveragePooling2D(name='e_comp_21', pool_size=5,
                                        strides=(3, 3), padding='same')
    self.compress_22 = Flatten()
    self.compress_23 = Dense(name='e_comp_23', units=128, activation=None,
                             use_bias=False)
    # activity_regularizer=tf.keras.regularizers.l2(l=0.01))
    self.batch_norm_2 = BatchNormalization(name='e_bn_2')
    self.drop_2 = Dropout(name='e_drop_2', rate=0.5)

    # Block 3: 256-filter convolutions with the same compression pattern.
    self.conv_31 = Conv2D(name='e_conv_31', filters=256, kernel_size=3,
                          strides=(1, 1), **arg)
    self.conv_32 = Conv2D(name='e_conv_32', filters=256, kernel_size=3,
                          strides=(1, 1), **arg)
    self.pool_3 = MaxPooling2D(name='e_pool_3', pool_size=2, strides=(2, 2),
                               padding='same')
    self.compress_31 = AveragePooling2D(name='e_comp_31', pool_size=3,
                                        strides=(1, 1), padding='same')
    self.compress_32 = Flatten()
    self.compress_33 = Dense(name='e_comp_33', units=128, activation=None,
                             use_bias=False)
    # activity_regularizer=tf.keras.regularizers.l2(l=0.01))
    self.batch_norm_3 = BatchNormalization(name='e_bn_3')
    self.drop_3 = Dropout(name='e_drop_3', rate=0.5)