Code example #1
    def __call__(self, inp):
        with tf.variable_scope("discriminator",
                               reuse=tf.AUTO_REUSE) as model_scope:
            x = tfl.Flatten()(inp)

            x = tfl.Dense(256)(x)
            x = self.batchnorm()(x)
            x = tf.nn.leaky_relu(x)
            x = self.dropout(0.25)(x)

            x = tfl.Dense(256)(x)
            x = self.batchnorm()(x)
            x = tf.nn.leaky_relu(x)
            x = self.dropout(0.25)(x)

            x = tfl.Dense(64)(x)
            x = self.batchnorm()(x)
            x = tf.nn.leaky_relu(x)
            x = self.dropout(0.25)(x)

            x = tfl.Dense(1, activation=None)(x)

        self.variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope=model_scope.name)
        self.post_call(model_scope)

        return x
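
Here `tfl` is the tf.layers module, and `self.batchnorm()` / `self.dropout()` are factory methods on the discriminator class itself rather than TensorFlow API. A minimal sketch of what those helpers might look like (an assumption; the rest of the class is not shown in the source):

    def batchnorm(self):
        # hypothetical helper: a fresh batch-normalization layer per call
        return tfl.BatchNormalization()

    def dropout(self, rate):
        # hypothetical helper: a fresh dropout layer with the given rate
        return tfl.Dropout(rate)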
Code example #2
    def __init__(self, num_filters, kernel_size):
        # input placeholder for 28x28 grayscale images
        self.inputs = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])
        # first convolutional layer, default values: strides=1, use_bias=True
        conv1 = layers.Conv2D(filters=num_filters,
                              kernel_size=kernel_size,
                              padding="same",
                              activation="relu")
        # pooling carries no weights, so one instance is safely reused below
        pooling = layers.MaxPooling2D(pool_size=2, strides=2)
        conv2 = layers.Conv2D(filters=num_filters,
                              kernel_size=kernel_size,
                              padding="same",
                              activation="relu")
        # flatten layer before the dense layers
        flatten = layers.Flatten()
        # dense layers: the first one uses a ReLU activation
        linear1 = layers.Dense(units=128, activation="relu")
        # second dense layer only computes logits
        linear2 = layers.Dense(units=10, activation=None)

        # define the graph
        self.logits = conv1(self.inputs)
        for layer in [pooling, conv2, pooling, flatten, linear1, linear2]:
            self.logits = layer(self.logits)

        self.out_soft = tf.nn.softmax(self.logits)

        # initialize the variables
        init = tf.global_variables_initializer()
        self.sess = tf.Session()
        self.sess.run(init)
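
A minimal usage sketch for the class above (the class name `SimpleCNN` is assumed for illustration; the input is random MNIST-shaped data):

import numpy as np

model = SimpleCNN(num_filters=32, kernel_size=3)
batch = np.random.rand(8, 28, 28, 1).astype(np.float32)
probs = model.sess.run(model.out_soft, feed_dict={model.inputs: batch})
print(probs.shape)  # (8, 10)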
Code example #3
File: model.py Project: anasvaf/HCDVAE
def conv_net(inputs, conv1_kernels, kernel_size1, pool_size1, conv2_kernels,
             kernel_size2, pool_size2):
    # *** WARNING: HARDCODED SHAPES, CHANGE ACCORDINGLY ***
    # NOTE: the pooling layers below are commented out, so pool_size1 and
    # pool_size2 are currently unused
    in_layer = tf.reshape(inputs, [-1, 28, 28, 1])

    # 1st conv layer
    conv1 = layers.conv2d(inputs=in_layer,
                          filters=conv1_kernels,
                          kernel_size=kernel_size1,
                          padding='same',
                          activation=tf.nn.relu)

    # # 1st pooling layer
    # pool1 = layers.max_pooling2d(inputs=conv1,
    #                              pool_size=pool_size1,
    #                              strides=2)

    # 2nd conv layer
    conv2 = layers.conv2d(inputs=conv1,
                          filters=conv2_kernels,
                          kernel_size=kernel_size2,
                          padding='same',
                          activation=tf.nn.relu)

    # # 2nd pooling layer
    # pool2 = layers.max_pooling2d(inputs=conv2,
    #                              pool_size=pool_size2,
    #                              strides=2)
    fl = layers.Flatten()(conv2)
    return fl
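
With 'same' padding and the pooling layers disabled, both convolutions preserve the 28x28 spatial size, so the flattened output has 28 * 28 * conv2_kernels features. A quick shape check (a sketch, assuming `layers` is tf.layers as in the snippet):

inputs = tf.placeholder(tf.float32, [None, 784])
features = conv_net(inputs, conv1_kernels=32, kernel_size1=5, pool_size1=2,
                    conv2_kernels=64, kernel_size2=5, pool_size2=2)
print(features.shape)  # (?, 50176), i.e. 28 * 28 * 64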
Code example #4
    def __init__(self):
        self.conv_1 = layers.Conv2D(filters=512,
                                    kernel_size=[3, 3],
                                    strides=[2, 2],
                                    padding='same',
                                    activation=nn.sigmoid,
                                    name="gfn_conv_1")
        self.conv_2 = layers.Conv2D(filters=512,
                                    kernel_size=[3, 3],
                                    strides=[1, 1],
                                    padding='same',
                                    activation=nn.sigmoid,
                                    name="gfn_conv_2")
        self.conv_3 = layers.Conv2D(filters=512,
                                    kernel_size=[3, 3],
                                    strides=[2, 2],
                                    padding='same',
                                    activation=nn.sigmoid,
                                    name="gfn_conv_3")
        self.conv_4 = layers.Conv2D(filters=512,
                                    kernel_size=[3, 3],
                                    strides=[1, 1],
                                    padding='same',
                                    activation=nn.sigmoid,
                                    name="gfn_conv_4")

        self.flatten = layers.Flatten(name="flatten")

        self.dense_5 = layers.Dense(1024,
                                    activation=nn.sigmoid,
                                    name="gfn_dense_1")
        self.dense_6 = layers.Dense(512,
                                    activation=nn.sigmoid,
                                    name="gfn_dense_2")
        self.dense_7 = layers.Dense(256,
                                    activation=nn.sigmoid,
                                    name="gfn_dense_3")

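
The constructor above only builds layer objects; a forward pass still has to chain them. A hypothetical sketch (the method name and call order are assumptions, not from the source):

    def __call__(self, x):
        # assumed forward pass: the two stride-2 convs quarter the
        # spatial resolution before the dense head
        x = self.conv_1(x)
        x = self.conv_2(x)
        x = self.conv_3(x)
        x = self.conv_4(x)
        x = self.flatten(x)
        x = self.dense_5(x)
        x = self.dense_6(x)
        return self.dense_7(x)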
Code example #5
def get_model(feature, label):
    # Reshape the input vector into a 28x28 image
    input_layer = tf.reshape(feature, [-1, 28, 28, 1])

    conv1 = tf.layers.conv2d(inputs=input_layer,
                             filters=32,
                             kernel_size=[5, 5],
                             padding="same",
                             activation=tf.nn.relu,
                             name="conv1")

    pool1 = tf.layers.max_pooling2d(inputs=conv1,
                                    pool_size=[2, 2],
                                    strides=2,
                                    name="pool1")

    conv2 = tf.layers.conv2d(inputs=pool1,
                             filters=64,
                             kernel_size=[5, 5],
                             padding="same",
                             activation=tf.nn.relu,
                             name="conv2")

    pool2 = tf.layers.max_pooling2d(inputs=conv2,
                                    pool_size=[2, 2],
                                    strides=2,
                                    name="pool2")

    # Flatten
    pool2_flat = layers.Flatten()(pool2)

    # Add a Dense layer
    dense = tf.layers.dense(inputs=pool2_flat,
                            units=1024,
                            activation=tf.nn.relu,
                            name="Dense1")

    # Add a dropout layer. NOTE: training=True keeps dropout active even at
    # evaluation time; in practice this flag should depend on the mode.
    dropout = tf.layers.dropout(
        inputs=dense,
        rate=0.4,  # fraction of units to drop
        training=True,
        name="dropout")

    # Logits Layer
    logits = tf.layers.dense(inputs=dropout, units=10, name="Logits_layer")

    onehot_labels = tf.one_hot(tf.cast(label, tf.int32), 10, 1, 0)

    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels,
                                           logits=logits)

    # label is assumed to be float32, so the predicted class is cast to match
    correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.float32),
                                  label)
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    """
    Print out TensorBoard Summaries
    """
    tf.summary.scalar("loss", loss)
    tf.summary.histogram("loss", loss)
    tf.summary.scalar("training_accuracy", accuracy)
    tf.summary.image("images", input_layer, max_outputs=3)
    # summary_op = tf.summary.merge_all()

    return tf.argmax(logits, 1), loss, accuracy
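
A sketch of how get_model could be wired into a training step (the optimizer choice and session loop are assumptions, not part of the original file):

feature = tf.placeholder(tf.float32, [None, 784])
label = tf.placeholder(tf.float32, [None])
predictions, loss, accuracy = get_model(feature, label)
train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # per batch: sess.run([train_op, loss, accuracy],
    #                     feed_dict={feature: batch_x, label: batch_y})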
Code example #6
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.preprocessing import OneHotEncoder

# training_dataset() is assumed to be defined elsewhere and to yield
# (feature, label) pairs of 150x150 grayscale images and class ids
X = []
y = []
for feature, label in training_dataset():
    X.append(feature)
    y.append(label)

X_train = np.array(X).reshape(-1, 150, 150, 1)
train_label = np.array(y).reshape(-1, 1)
label_encoded = OneHotEncoder().fit_transform(train_label)
y_train = label_encoded.A  # .A densifies the scipy sparse matrix
X_train = X_train / 255.0

model = keras.Sequential([
    layers.Conv2D(64, (3, 3),
                  activation=tf.nn.relu,
                  input_shape=X_train.shape[1:]),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1)),
    layers.Conv2D(64, (3, 3), activation=tf.nn.relu),
    layers.MaxPooling2D(pool_size=(2, 2), strides=(1, 1)),
    layers.Flatten(),
    layers.Dense(64, activation=tf.nn.relu),
    layers.Dense(8, activation=tf.nn.softmax)
])

model.compile(loss='categorical_crossentropy',
              optimizer=tf.keras.optimizers.Adam(),
              metrics=['accuracy'])

model.fit(X_train, y_train, batch_size=4, validation_split=0.1, epochs=5)
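
For reference, if the collected labels are integer class ids 0..7, an equivalent one-hot encoding to the OneHotEncoder path above is:

# alternative (equivalent) one-hot encoding via Keras utilities
y_train_alt = tf.keras.utils.to_categorical(np.array(y), num_classes=8)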
Code example #7
    def __init__(self,
                 name,
                 state_size,
                 action_size,
                 opt,
                 feature_layers=None,
                 critic_layers=None,
                 actor_layers=None):
        self.name = name
        self.state_size = state_size
        self.action_size = action_size
        self.optimizer = opt
        self.feature_layers = [
            layers.Conv2D(filters=16,
                          kernel_size=(8, 8),
                          strides=(4, 4),
                          activation=tf.nn.leaky_relu),
            layers.Conv2D(filters=32,
                          kernel_size=(4, 4),
                          strides=(2, 2),
                          activation=tf.nn.leaky_relu),
            layers.Flatten(),
            layers.Dense(256, activation=tf.nn.leaky_relu, name="features"),
        ] if (feature_layers is None
              or not isinstance(feature_layers, Iterable)) else feature_layers
        critic_layers = [layers.Dense(1, name='value')] if (
            critic_layers is None
            or not isinstance(critic_layers, Iterable)) else critic_layers
        actor_layers = [layers.Dense(action_size, name='logits')] if (
            actor_layers is None
            or not isinstance(actor_layers, Iterable)) else actor_layers

        self.selected_action = tf.placeholder(tf.uint8, [None], name="labels")
        self.actions_onehot = tf.one_hot(self.selected_action,
                                         self.action_size,
                                         dtype=tf.float32)
        self.advantages = tf.placeholder(tf.float32, [None])
        self.discounted_reward = tf.placeholder(tf.float32, [None])

        self.state = tf.placeholder(tf.float32,
                                    shape=[None, *state_size],
                                    name="states")
        with tf.variable_scope(self.name):
            self.feature = self._layers_output(self.feature_layers, self.state)
            self.value = self._layers_output(critic_layers, self.feature)
            self.logits = self._layers_output(actor_layers, self.feature)
            self.policy = tf.nn.softmax(self.logits, name='policy')
            # self.value_loss, self.policy_loss, self.entropy, self.total_loss = self._compute_loss()
            # self.target = tf.placeholder(tf.float32, [None])

            responsible_outputs = tf.reduce_sum(
                self.policy * self.actions_onehot, 1)
            self.entropy = 0.005 * tf.reduce_sum(
                -self.policy * tf.log(self.policy + 1e-7), 1)
            self.policy_loss = -tf.reduce_mean(
                (tf.log(responsible_outputs + 1e-7)) * self.advantages +
                self.entropy)

            # NOTE: self.discounted_reward is defined above but never used;
            # it, rather than the advantages, is the usual value target here
            self.value_loss = tf.losses.mean_squared_error(
                self.advantages, tf.squeeze(self.value))

            self.total_loss = 0.5 * self.value_loss + self.policy_loss  # - entropy * 0.005

            # NOTE: this silently replaces the `opt` passed to __init__
            self.optimizer = tf.train.RMSPropOptimizer(learning_rate=0.01,
                                                       decay=.99)

        # if name != 'global':
        var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                     scope=self.name)
        self.gradients = self.optimizer.compute_gradients(
            self.total_loss, var_list)

        self.gradients_placeholders = []
        for grad, var in self.gradients:
            self.gradients_placeholders.append(
                (tf.placeholder(var.dtype, shape=var.get_shape()), var))
        self.apply_gradients = self.optimizer.apply_gradients(
            self.gradients_placeholders)
        # self.gradients = tf.gradients(self.total_loss,
        #                               tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.name))
        # self.grads, self.grad_norms = tf.clip_by_global_norm(self.gradients, 40.0)
        # self.apply_grads = opt.apply_gradients(
        #     zip(self.grads, tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')))
        # else:

        self.reward_summary_ph = tf.placeholder(tf.float32,
                                                name="reward_summary")
        self.reward_summary = tf.summary.scalar(name='reward_summary',
                                                tensor=self.reward_summary_ph)

        self.merged_summary_op = tf.summary.merge_all()
        self.writer = tf.summary.FileWriter('./graphs', tf.get_default_graph())

        self.test = tf.get_default_graph().get_tensor_by_name(
            os.path.split(self.value.name)[0] + '/kernel:0')
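
Both this class and the variant in code example #9 rely on a `_layers_output` helper that is not shown; a plausible minimal implementation (an assumption) simply chains the given layers:

    def _layers_output(self, layer_list, x):
        # assumed helper: feed x through each layer in order
        for layer in layer_list:
            x = layer(x)
        return x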
Code example #8
    def __call__(self, inputs, castFromUint8=True):
        # self.is_training
        # debug helper: prints a tensor's static shape at graph-build time
        pr_shape = lambda var: print(var.shape)

        # CNN
        # with tf.name_scope('')
        if self.data_format == 'NCHW':
            inputs = tf.transpose(inputs, [0, 3, 1, 2])
        # print(inputs.shape.dims)
        if castFromUint8:
            inputs = tf.cast(inputs, self.dtype)

        with tf.variable_scope("init_conv"):
            out = self.Conv2D(32, 3, strides=2)(inputs)
            # pr_shape(out)
            out = self.BN()(out, training=self.is_training)
            out = tf.nn.relu6(out)
        with tf.variable_scope("body"):
            out = self._inverted_bottleneck(out, 6, 16, stride=1)
            # pr_shape(out)
            # out = tf.nn.relu6(out)
            out = self._inverted_bottleneck(out, 6, 24, stride=2)
            # pr_shape(out)
            # out = tf.nn.relu6(out)
            out = self._inverted_bottleneck(out, 6, 32, stride=2)
            # out = tf.nn.relu6(out)
            out = self._inverted_bottleneck(out, 6, 64, stride=2)
            # out = tf.nn.relu6(out)
        out = self.Pool2D(4, 3, 'max')(out)
        pr_shape(out)

        # residualParam = []
        # param = {'filters': 32, 'kernel_sz': 5, 'strides': 2}
        # residualParam.append(param)
        # param = {'filters': 48, 'kernel_sz': 3, 'strides': 1}
        # residualParam.append(param)
        # with tf.variable_scope("res1"):
        #   out = self.residual(inputs, residualParam)
        # out = tf.nn.relu6(out)

        # with tf.variable_scope("conv1_relu"):
        #   out0 = self.Conv2D(48, 5)(inputs)
        #   out0 = self.BN()(out0, training=self.is_training)
        #   out0 = tf.nn.relu6(out0)

        # with tf.variable_scope("conv1_relu"):
        # with tf.variable_scope("pool1"):
        #   out = self.Pool2D(3, 2, 'max')(out)

        # with tf.variable_scope("conv2_relu"):
        #   out = self.Conv2D(48, 5)(out)
        #   out = self.BN()(out, training=self.is_training)
        #   out = tf.nn.relu6(out)
        # with tf.variable_scope("pool2"):
        #   out = self.Pool2D(3, 3, 'max')(out)

        # with tf.variable_scope("conv3_relu"):
        #   out = self.Conv2D(48, 5)(out)
        #   out = self.BN()(out, training=self.is_training)
        #   out = tf.nn.relu6(out)
        # with tf.variable_scope("pool3"):
        #   out = self.Pool2D(3, 3, 'max')(out)

        with tf.variable_scope('fc1'):
            out = tl.Flatten()(out)
            out = self.FC(128)(out)

        self.feature2rnn = out

        with tf.variable_scope("dropout"):
            out = tl.Dropout(0.5)(out, training=self.is_training)
        with tf.variable_scope('fc2'):
            out = self.FC(2)(out)
        # pr_shape(out)

        return out
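
The snippet relies on layer factories defined on the class (`self.Conv2D`, `self.BN`, `self.Pool2D`, `self.FC`); `tl` is the tf.layers module. A minimal sketch of what those factories might wrap (hypothetical; the real project may also handle data_format and dtype):

    def Conv2D(self, filters, kernel_sz, strides=1):
        # assumed factory: 'same'-padded convolution, no activation
        return tl.Conv2D(filters, kernel_sz, strides=strides, padding='same')

    def BN(self):
        return tl.BatchNormalization()

    def Pool2D(self, pool_sz, strides, mode='max'):
        if mode == 'max':
            return tl.MaxPooling2D(pool_sz, strides)
        return tl.AveragePooling2D(pool_sz, strides)

    def FC(self, units):
        return tl.Dense(units)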
Code example #9
    def __init__(self,
                 name,
                 state_size,
                 action_size,
                 opt,
                 feature_layers=None,
                 critic_layers=None,
                 actor_layers=None):
        self.name = name
        self.state_size = state_size
        self.action_size = action_size
        self.optimizer = opt
        self.feature_layers = [
            # layers.Dense(100, activation='relu', name="features"),
            layers.Conv2D(filters=16,
                          kernel_size=(8, 8),
                          strides=(4, 4),
                          activation=tf.nn.leaky_relu),
            layers.Conv2D(filters=32,
                          kernel_size=(4, 4),
                          strides=(2, 2),
                          activation=tf.nn.leaky_relu),
            layers.Flatten(),
            layers.Dense(256, activation=tf.nn.leaky_relu, name="features"),
        ] if (feature_layers is None
              or not isinstance(feature_layers, Iterable)) else feature_layers
        critic_layers = [layers.Dense(1, name='value')] if (
            critic_layers is None
            or not isinstance(critic_layers, Iterable)) else critic_layers
        # actor_layers = [
        #     layers.Dense(action_size, activation='sigmoid', name='logits')
        # ] if (actor_layers is None or not isinstance(actor_layers, Iterable)) else actor_layers

        self.state = tf.placeholder(tf.float32,
                                    shape=[None, *state_size],
                                    name="states")
        with tf.variable_scope(self.name):
            self.feature = self._layers_output(self.feature_layers, self.state)
            self.value = self._layers_output(critic_layers, self.feature)
            # self.logits = self._layers_output(actor_layers, self.feature)
            # self.policy = tf.nn.softmax(self.logits, name='policy')
            # self.selected_action = tf.placeholder(tf.float32, [None], name="labels")
            # self.actions_onehot = tf.one_hot(tf.cast(self.selected_action, tf.int32), self.action_size, dtype=tf.float32)
            self.advantages = tf.placeholder(tf.float32, [None])

        if name != 'global':
            # self.value_loss, self.policy_loss, self.entropy_loss, self.total_loss = self._compute_loss()

            # self.responsible_outputs = tf.reduce_sum(self.policy * self.actions_onehot, 1)
            # responsible_outputs = tf.reduce_sum(self.logits * actions_onehot, 1)
            self.value_loss = (self.advantages - self.value)**2

            # self.entropy = -tf.reduce_sum(self.policy * tf.log(self.policy), 1)
            # self.policy_loss = -tf.reduce_sum(tf.log(self.responsible_outputs)*self.advantages)

            self.total_loss = tf.reduce_mean(
                0.5 * self.value_loss)  # + self.policy_loss)

            self.gradients = tf.gradients(
                self.total_loss,
                tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope=self.name))
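
            # A sketch (assumption, not in the source): in the usual A3C
            # pattern these worker gradients are then applied to the shared
            # 'global' network's variables:
            global_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                            scope='global')
            self.apply_grads = opt.apply_gradients(
                zip(self.gradients, global_vars))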
Code example #10
File: layers.py Project: tvelagapudi/deblender
def flatten(x):
    return ly.Flatten()(x)
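
For reference, a quick usage sketch (assuming `ly` aliases a layers module such as tf.keras.layers, as in the project):

x = tf.placeholder(tf.float32, [None, 8, 8, 32])
flat = flatten(x)
print(flat.shape)  # (?, 2048)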