def __init__(self, num_filters, kernel_size):
    # inputs
    self.inputs = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
    # first convolutional layer, default values: strides=1, use_bias=True
    conv1 = layers.Conv2D(filters=num_filters, kernel_size=kernel_size,
                          padding="same", activation="relu")
    # max pooling has no weights, so the same layer object can be reused
    pooling = layers.MaxPooling2D(pool_size=2, strides=2)
    conv2 = layers.Conv2D(filters=num_filters, kernel_size=kernel_size,
                          padding="same", activation="relu")
    # flatten layer before the dense layers
    flatten = layers.Flatten()
    # first dense layer with ReLU activation
    linear1 = layers.Dense(units=128, activation="relu")
    # second dense layer only computes logits
    linear2 = layers.Dense(units=10, activation=None)
    # define the graph
    self.logits = conv1(self.inputs)
    for layer in [pooling, conv2, pooling, flatten, linear1, linear2]:
        self.logits = layer(self.logits)
    self.out_soft = tf.nn.softmax(self.logits)
    # initialize the variables
    init = tf.global_variables_initializer()
    self.sess = tf.Session()
    self.sess.run(init)
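# A minimal usage sketch for the classifier above. The class name `SimpleCNN`
# and the import are assumptions for illustration, not part of the original.
import numpy as np

model = SimpleCNN(num_filters=32, kernel_size=3)  # hypothetical wrapper class
# a dummy batch of four 28x28 grayscale images
batch = np.random.rand(4, 28, 28, 1).astype(np.float32)
probs = model.sess.run(model.out_soft, feed_dict={model.inputs: batch})
print(probs.shape)  # (4, 10): one probability distribution per image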
def __call__(self, inp):
    with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE) as model_scope:
        x = tfl.Dense(11 * 11 * 16)(inp)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = tf.reshape(x, [-1, 11, 11, 16])
        x = tfl.Conv2D(32, 3, padding="valid")(x)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = UpSampling2D()(x)
        x = tfl.Conv2D(128, 5, padding="valid")(x)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = self.pad()(x)
        x = tfl.Conv2D(64, 3, padding="valid")(x)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = UpSampling2D()(x)
        x = self.pad()(x)
        x = tfl.Conv2D(64, 3, padding="valid")(x)
        x = tf.nn.leaky_relu(x)
        x = self.pad()(x)
        x = tfl.Conv2D(1, 3, padding="valid")(x)
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=model_scope.name)
        self.post_call(model_scope)
        return x
def __call__(self, inp):
    with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE) as model_scope:
        x = tfl.Dense(9 * 9 * 8)(inp)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = tf.reshape(x, [-1, 9, 9, 8])
        x = tfl.Conv2D(16, 3, padding="valid")(x)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = tfl.Conv2DTranspose(128, 5, 2, padding="same")(x)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = self.pad()(x)
        x = tfl.Conv2D(64, 3, padding="valid")(x)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = tfl.Conv2DTranspose(64, 3, 2, padding="same")(x)
        x = tf.nn.leaky_relu(x)
        x = self.pad()(x)
        x = tfl.Conv2D(1, 3, padding="valid")(x)
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=model_scope.name)
        self.post_call(model_scope)
        return x
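# A hedged sketch of driving either decoder above: both map a latent vector to
# a single-channel 28x28 image. The class name `Decoder`, the latent size of
# 100, and the assumption that the surrounding class provides the batchnorm/
# pad/post_call helpers are all for illustration only.
import numpy as np
import tensorflow as tf

decoder = Decoder()  # hypothetical class wrapping one of the __call__s above
z = tf.placeholder(tf.float32, shape=[None, 100], name="latent")
generated = decoder(z)  # __call__ builds the graph under the "decoder" scope

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    images = sess.run(generated, feed_dict={z: np.random.randn(8, 100)})
    print(images.shape)  # (8, 28, 28, 1)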
def __init__(self):
    self.conv_1 = layers.Conv2D(filters=512, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="mln_conv_1")
    self.conv_2 = layers.Conv2D(filters=256, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="mln_conv_2")
def Conv2D(self, filters, kernel_size, strides=1, padding='same', use_bias=False):
    return tl.Conv2D(filters, kernel_size, strides, padding=padding,
                     data_format=self.layer_data_format, use_bias=use_bias,
                     kernel_initializer=self.w_initer)
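# A hedged usage sketch for the Conv2D factory above: it exists so every
# convolution in the model shares one data format and one weight initializer.
# The host class `Net` and its attribute values are assumptions for illustration.
import tensorflow as tf
import tensorflow.contrib as tc
from tensorflow import layers as tl

class Net:
    def __init__(self):
        self.layer_data_format = 'channels_last'
        self.w_initer = tc.layers.xavier_initializer(tf.float32)

    def Conv2D(self, filters, kernel_size, strides=1, padding='same', use_bias=False):
        return tl.Conv2D(filters, kernel_size, strides, padding=padding,
                         data_format=self.layer_data_format, use_bias=use_bias,
                         kernel_initializer=self.w_initer)

x = tf.placeholder(tf.float32, [None, 32, 32, 3])
y = Net().Conv2D(64, 3)(x)  # same-padded conv: y has shape [None, 32, 32, 64]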
def __init__(self):
    self.conv_1 = layers.Conv2D(filters=512, kernel_size=[3, 3], strides=[2, 2],
                                padding='same', activation=nn.sigmoid,
                                name="gfn_conv_1")
    self.conv_2 = layers.Conv2D(filters=512, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="gfn_conv_2")
    self.conv_3 = layers.Conv2D(filters=512, kernel_size=[3, 3], strides=[2, 2],
                                padding='same', activation=nn.sigmoid,
                                name="gfn_conv_3")
    self.conv_4 = layers.Conv2D(filters=512, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="gfn_conv_4")
    self.flatten = layers.Flatten(name="flatten")
    self.dense_5 = layers.Dense(1024, activation=nn.sigmoid, name="gfn_dense_1")
    self.dense_6 = layers.Dense(512, activation=nn.sigmoid, name="gfn_dense_2")
    self.dense_7 = layers.Dense(256, activation=nn.sigmoid, name="gfn_dense_3")
def __init__(self):
    self.conv_1 = layers.Conv2D(filters=128, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="colorize_conv_1")
    self.upsample_1 = tf.image.resize_images
    self.conv_2 = layers.Conv2D(filters=64, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="colorize_conv_2")
    self.conv_3 = layers.Conv2D(filters=64, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="colorize_conv_3")
    self.upsample_2 = tf.image.resize_images
    self.conv_4 = layers.Conv2D(filters=256, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="colorize_conv_4")
    self.conv_5 = layers.Conv2D(filters=2, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="colorize_conv_5")
    self.upsample_3 = tf.image.resize_images
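# The upsample_* attributes above are bare aliases of tf.image.resize_images,
# so a target size must be supplied at call time. A hedged sketch of the
# forward pass (the method name, call order, and 2x upsampling factor are
# assumptions for illustration):
def colorize_forward(self, x):
    x = self.conv_1(x)
    x = self.upsample_1(x, tf.shape(x)[1:3] * 2)  # double the spatial resolution
    x = self.conv_2(x)
    x = self.conv_3(x)
    x = self.upsample_2(x, tf.shape(x)[1:3] * 2)
    x = self.conv_4(x)
    x = self.conv_5(x)  # 2 output channels: the predicted chrominance planes
    x = self.upsample_3(x, tf.shape(x)[1:3] * 2)
    return x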
def __call__(self, inp):
    with tf.variable_scope("discriminator", reuse=tf.AUTO_REUSE) as model_scope:
        x = tfl.Conv2D(32, 3, 2)(inp)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = tfl.Conv2D(128, 3, 2)(x)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = self.dropout(0.25)(x)
        x = tfl.Conv2D(256, 3, 2)(x)
        x = self.batchnorm()(x)
        x = tf.nn.leaky_relu(x)
        x = self.dropout(0.25)(x)
        x = tfl.Flatten()(x)
        x = tfl.Dense(1, activation=None)(x)
        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=model_scope.name)
        self.post_call(model_scope)
        return x
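# A hedged sketch of wiring the decoder (as generator) and the discriminator
# above into a GAN loss. The class names `Decoder`/`Discriminator` and the
# latent size are assumptions; the logits come from the final
# Dense(1, activation=None) layer.
import tensorflow as tf

generator = Decoder()           # hypothetical wrappers around the __call__s above
discriminator = Discriminator()

z = tf.placeholder(tf.float32, [None, 100])
real_images = tf.placeholder(tf.float32, [None, 28, 28, 1])

fake_images = generator(z)
real_logits = discriminator(real_images)
fake_logits = discriminator(fake_images)  # AUTO_REUSE shares the weights

d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(real_logits),
                                            logits=real_logits) +
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(fake_logits),
                                            logits=fake_logits))
g_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(fake_logits),
                                            logits=fake_logits))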
def __init__(self):
    self.conv_1 = layers.Conv2D(filters=64, kernel_size=[3, 3], strides=[2, 2],
                                padding='same', activation=nn.sigmoid,
                                name="lln_conv_1")
    self.conv_2 = layers.Conv2D(filters=128, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="lln_conv_2")
    self.conv_3 = layers.Conv2D(filters=128, kernel_size=[3, 3], strides=[2, 2],
                                padding='same', activation=nn.sigmoid,
                                name="lln_conv_3")
    self.conv_4 = layers.Conv2D(filters=256, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="lln_conv_4")
    self.conv_5 = layers.Conv2D(filters=256, kernel_size=[3, 3], strides=[2, 2],
                                padding='same', activation=nn.sigmoid,
                                name="lln_conv_5")
    self.conv_6 = layers.Conv2D(filters=512, kernel_size=[3, 3], strides=[1, 1],
                                padding='same', activation=nn.sigmoid,
                                name="lln_conv_6")
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.preprocessing import OneHotEncoder

X = []
y = []
for feature, label in training_dataset():
    X.append(feature)
    y.append(label)

X_train = np.array(X).reshape(-1, 150, 150, 1)
train_label = np.array(y).reshape(-1, 1)
label_encoded = OneHotEncoder().fit_transform(train_label)
y_train = label_encoded.A  # .A converts the sparse one-hot matrix to a dense array
X_train = X_train / 255.0  # scale pixel values to [0, 1]

model = keras.Sequential([
    layers.Conv2D(64, (3, 3), activation=tf.nn.relu, input_shape=X_train.shape[1:]),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1)),
    layers.Conv2D(64, (3, 3), activation=tf.nn.relu),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1)),
    layers.Flatten(),
    layers.Dense(64, activation=tf.nn.relu),
    layers.Dense(8, activation=tf.nn.softmax)
])
model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])
model.fit(X_train, y_train, batch_size=4, validation_split=0.1, epochs=5)
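# After training, inference is a single predict call; np.argmax recovers the
# class index from the softmax output (the index-to-label mapping follows the
# column order chosen by OneHotEncoder).
predictions = model.predict(X_train[:1])          # shape (1, 8), softmax output
predicted_class = np.argmax(predictions, axis=1)  # index of the most likely label
print(predicted_class)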
def __init__(self, name, state_size, action_size, opt,
             feature_layers=None, critic_layers=None, actor_layers=None):
    self.name = name
    self.state_size = state_size
    self.action_size = action_size
    self.optimizer = opt
    # default feature extractor: two convolutions, then a dense embedding
    self.feature_layers = [
        layers.Conv2D(filters=16, kernel_size=(8, 8), strides=(4, 4),
                      activation=tf.nn.leaky_relu),
        layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2),
                      activation=tf.nn.leaky_relu),
        layers.Flatten(),
        layers.Dense(256, activation=tf.nn.leaky_relu, name="features"),
    ] if (feature_layers is None
          or not isinstance(feature_layers, Iterable)) else feature_layers
    critic_layers = [layers.Dense(1, name='value')] if (
        critic_layers is None or not isinstance(critic_layers, Iterable)) else critic_layers
    actor_layers = [layers.Dense(action_size, name='logits')] if (
        actor_layers is None or not isinstance(actor_layers, Iterable)) else actor_layers

    self.selected_action = tf.placeholder(tf.uint8, [None], name="labels")
    self.actions_onehot = tf.one_hot(self.selected_action, self.action_size,
                                     dtype=tf.float32)
    self.advantages = tf.placeholder(tf.float32, [None])
    self.discounted_reward = tf.placeholder(tf.float32, [None])
    self.state = tf.placeholder(tf.float32, shape=[None, *state_size], name="states")

    with tf.variable_scope(self.name):
        self.feature = self._layers_output(self.feature_layers, self.state)
        self.value = self._layers_output(critic_layers, self.feature)
        self.logits = self._layers_output(actor_layers, self.feature)
        self.policy = tf.nn.softmax(self.logits, name='policy')

        # probability the policy assigned to the action that was actually taken
        responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, 1)
        # entropy bonus to encourage exploration
        self.entropy = 0.005 * tf.reduce_sum(
            -self.policy * tf.log(self.policy + 1e-7), 1)
        self.policy_loss = -tf.reduce_mean(
            tf.log(responsible_outputs + 1e-7) * self.advantages + self.entropy)
        self.value_loss = tf.losses.mean_squared_error(
            self.advantages, tf.squeeze(self.value))
        self.total_loss = 0.5 * self.value_loss + self.policy_loss

        self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.01, decay=.99)
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope=self.name)
        self.gradients = self.optimizer.compute_gradients(self.total_loss, var_list)
        # placeholders so gradients computed elsewhere (e.g. by a worker)
        # can be fed in and applied to this network's variables
        self.gradients_placeholders = []
        for grad, var in self.gradients:
            self.gradients_placeholders.append(
                (tf.placeholder(var.dtype, shape=var.get_shape()), var))
        self.apply_gradients = self.optimizer.apply_gradients(
            self.gradients_placeholders)

        self.reward_summary_ph = tf.placeholder(tf.float32, name="reward_summary")
        self.reward_summary = tf.summary.scalar(name='reward_summary',
                                                tensor=self.reward_summary_ph)
        self.merged_summary_op = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter('./graphs', tf.get_default_graph())
        self.test = tf.get_default_graph().get_tensor_by_name(
            os.path.split(self.value.name)[0] + '/kernel:0')
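# A hedged sketch of the A3C-style gradient exchange the placeholders above are
# built for: a worker computes gradients on its own copy of the network, then
# applies them to the 'global' copy. `ACNetwork` as the class name, the
# state/action sizes, and the random rollout data are assumptions.
import numpy as np
import tensorflow as tf

opt = tf.train.RMSPropOptimizer(learning_rate=0.01, decay=.99)
global_net = ACNetwork('global', state_size=(84, 84, 4), action_size=4, opt=opt)
worker_net = ACNetwork('worker_0', state_size=(84, 84, 4), action_size=4, opt=opt)

states = np.random.rand(5, 84, 84, 4).astype(np.float32)  # fake 5-step rollout
actions = np.random.randint(0, 4, size=5)
advantages = np.random.randn(5).astype(np.float32)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    grads = sess.run([g for g, _ in worker_net.gradients],
                     feed_dict={worker_net.state: states,
                                worker_net.selected_action: actions,
                                worker_net.advantages: advantages})
    # feed the worker's gradients into the global network's placeholders
    feed = {ph: g for (ph, _), g in zip(global_net.gradients_placeholders, grads)}
    sess.run(global_net.apply_gradients, feed_dict=feed)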
def __init__(self, name, state_size, action_size, opt,
             feature_layers=None, critic_layers=None, actor_layers=None):
    self.name = name
    self.state_size = state_size
    self.action_size = action_size
    self.optimizer = opt
    self.feature_layers = [
        layers.Conv2D(filters=16, kernel_size=(8, 8), strides=(4, 4),
                      activation=tf.nn.leaky_relu),
        layers.Conv2D(filters=32, kernel_size=(4, 4), strides=(2, 2),
                      activation=tf.nn.leaky_relu),
        layers.Flatten(),
        layers.Dense(256, activation=tf.nn.leaky_relu, name="features"),
    ] if (feature_layers is None
          or not isinstance(feature_layers, Iterable)) else feature_layers
    critic_layers = [layers.Dense(1, name='value')] if (
        critic_layers is None or not isinstance(critic_layers, Iterable)) else critic_layers
    # the actor branch (logits/policy and its loss terms) is disabled in this
    # critic-only variant; actor_layers is accepted but unused

    self.state = tf.placeholder(tf.float32, shape=[None, *state_size], name="states")
    with tf.variable_scope(self.name):
        self.feature = self._layers_output(self.feature_layers, self.state)
        self.value = self._layers_output(critic_layers, self.feature)
        self.advantages = tf.placeholder(tf.float32, [None])
        if name != 'global':
            # squared error between the target returns and the predicted values
            self.value_loss = (self.advantages - self.value) ** 2
            self.total_loss = tf.reduce_mean(0.5 * self.value_loss)
            self.gradients = tf.gradients(
                self.total_loss,
                tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope=self.name))
def __init__(self, inplanes, planes, stride=1, downsample=None, n_s=0,
             depth_3d=47, ST_struc=('A', 'B', 'C')):
    self.w_initer = tc.layers.xavier_initializer(tf.float32)
    self.downsample = downsample
    self.depth_3d = depth_3d
    self.ST_struc = ST_struc
    self.len_ST = len(self.ST_struc)

    stride_p = stride
    if self.downsample is not None:
        stride_p = (1, 2, 2)
    if n_s < self.depth_3d:
        # 3D part of the network: 1x1x1 bottleneck convolution
        if n_s == 0:
            stride_p = 1
        self.conv1 = tl.Conv3D(filters=planes, kernel_size=1, strides=stride_p,
                               padding='same', use_bias=False,
                               kernel_initializer=self.w_initer)
        # axis=-1 assumes channels_last; the training flag is passed at call time
        self.bn1 = tl.BatchNormalization(axis=-1, scale=False, fused=True)
    else:
        # 2D part of the network (after depth_3d residual units)
        stride_p = 2 if n_s == self.depth_3d else 1
        self.conv1 = tl.Conv2D(filters=planes, kernel_size=1, strides=stride_p,
                               padding='same', use_bias=False,
                               kernel_initializer=self.w_initer)
        self.bn1 = tl.BatchNormalization(axis=-1)

    self.id = n_s
    self.ST = list(self.ST_struc)[self.id % self.len_ST]
    if self.id < self.depth_3d:
        # spatial (1x3x3) and temporal (3x1x1) convolutions of the P3D block
        self.conv2 = conv_S(planes, planes, stride=1, padding=(0, 1, 1))
        self.bn2 = tl.BatchNormalization(axis=-1)
        self.conv3 = conv_T(planes, planes, stride=1, padding=(1, 0, 0))
        self.bn3 = tl.BatchNormalization(axis=-1)
    else:
        self.conv_normal = tl.Conv2D(filters=planes, kernel_size=3, strides=1,
                                     padding='same', use_bias=False,
                                     kernel_initializer=self.w_initer)
        self.bn_normal = tl.BatchNormalization(axis=-1)

    # expansion convolution back to planes * 4 channels
    if n_s < self.depth_3d:
        self.conv4 = tl.Conv3D(filters=planes * 4, kernel_size=1, use_bias=False,
                               kernel_initializer=self.w_initer)
        self.bn4 = tl.BatchNormalization(axis=-1)
    else:
        self.conv4 = tl.Conv2D(filters=planes * 4, kernel_size=1, use_bias=False,
                               kernel_initializer=self.w_initer)
        self.bn4 = tl.BatchNormalization(axis=-1)

    self.relu = tf.nn.relu
    self.stride = stride
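# conv_S and conv_T are referenced above but not defined in this snippet. A
# hedged sketch of what they typically are in P3D: a spatial 1x3x3 and a
# temporal 3x1x1 convolution. The tf.layers translation, the initializer, and
# the use of 'same' padding in place of the PyTorch-style padding tuples are
# assumptions for illustration.
import tensorflow as tf
import tensorflow.contrib as tc
from tensorflow import layers as tl

def conv_S(in_planes, out_planes, stride=1, padding=(0, 1, 1)):
    # spatial convolution: 1x3x3 kernel over (depth, height, width);
    # the padding tuple is approximated here with 'same' padding
    return tl.Conv3D(filters=out_planes, kernel_size=(1, 3, 3), strides=stride,
                     padding='same', use_bias=False,
                     kernel_initializer=tc.layers.xavier_initializer(tf.float32))

def conv_T(in_planes, out_planes, stride=1, padding=(1, 0, 0)):
    # temporal convolution: 3x1x1 kernel along the depth (time) axis
    return tl.Conv3D(filters=out_planes, kernel_size=(3, 1, 1), strides=stride,
                     padding='same', use_bias=False,
                     kernel_initializer=tc.layers.xavier_initializer(tf.float32))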