Example #1
    def fully_connected_flow(self, x, DEBUG=True):
        # Stack the configured number of fully connected residual blocks,
        # forwarding the caller's DEBUG flag instead of hard-coding it.
        for _ in range(self.fully_connected_flow_layers):
            x = self.fully_connected_res_flow(x, DEBUG=DEBUG)

        # Single-unit sigmoid head for a binary-classification output.
        x = Dense(1, activation='sigmoid')(x)

        if DEBUG:
            print(x.get_shape().as_list())

        return x
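
The loop above relies on a fully_connected_res_flow helper whose definition is not shown. A minimal sketch of such a residual dense block, assuming Keras functional-API layers with the width read from the incoming tensor (the names and structure are assumptions, not the original implementation):

from tensorflow.keras.layers import Activation, Add, Dense

def fully_connected_res_flow(self, x, DEBUG=False):
    # Hypothetical residual block: two Dense layers plus an identity skip.
    # The width matches the incoming tensor so the Add() shapes agree.
    units = x.get_shape().as_list()[-1]
    h = Dense(units, activation='relu')(x)
    h = Dense(units, activation=None)(h)
    x = Activation('relu')(Add()([x, h]))
    if DEBUG:
        print(x.get_shape().as_list())
    return x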
Example #2
    def fully_connected_flow(self, x, outputsize):
        print(" shape after all conv layers ", x.get_shape().as_list())

        # Stack the configured number of fully connected residual blocks.
        for i in range(self.fully_connected_flow_layers):
            x = self.fully_connected_res_flow(x)
            print(" fully residual block at ", i, "  ",
                  x.get_shape().as_list())

        x = Dense(outputsize, activation='linear')(x)

        print(" final shape ", x.get_shape().as_list())

        return x
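
Compared with Example #1, this variant ends in a linear head of configurable width, so it suits regression targets or pre-softmax logits. A hypothetical call site (the `net` instance and the feature-map shape are assumptions, not taken from the original project):

from tensorflow.keras.layers import Flatten, Input
from tensorflow.keras.models import Model

inputs = Input(shape=(7, 7, 64))   # assumed conv feature-map shape
features = Flatten()(inputs)
outputs = net.fully_connected_flow(features, outputsize=10)  # `net` owns the method above
model = Model(inputs, outputs)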
Example #3
File: mnist.py  Project: moritzwinger/SoK
def mnist_mlp_model(input):
    # Using the Keras model API with Flatten() results in a split ngraph at the
    # Flatten() or Reshape() op, so use tf.reshape instead.
    known_shape = input.get_shape()[1:]
    size = np.prod(known_shape)
    print('size', size)
    y = tf.reshape(input, [-1, size])
    # y = Flatten()(input)
    # input_shape is only used when the layer starts a Sequential model; it is
    # effectively ignored when the layer is called on an existing tensor.
    y = Dense(input_shape=[1, 784], units=30, use_bias=True)(y)
    y = PolyAct()(y)  # project-defined custom activation layer (see sketch below)
    y = Dense(units=10, use_bias=True, name="output")(y)
    known_shape = y.get_shape()[1:]
    size = np.prod(known_shape)
    print('size', size)
    return y
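
PolyAct is defined elsewhere in the SoK project and is not shown here. The name suggests a polynomial activation (a common ReLU substitute where only additions and multiplications are available). A plausible minimal sketch, inferred from the name only and not the project's actual code:

import tensorflow as tf
from tensorflow.keras.layers import Layer

class PolyAct(Layer):
    """Hypothetical polynomial activation a*x^2 + b*x with trainable a, b.

    Sketch inferred from the layer name; the real PolyAct may differ.
    """

    def build(self, input_shape):
        self.a = self.add_weight(name='a', shape=(), initializer='ones')
        self.b = self.add_weight(name='b', shape=(), initializer='ones')

    def call(self, inputs):
        return self.a * tf.square(inputs) + self.b * inputs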
Example #4
def build_decoder(input_shape,
                  latent_dim,
                  hidden_dim,
                  filters,
                  kernels,
                  conv_activation=None,
                  dense_activation=None):
    """
    Return decoder as model
    input_shape: shape of the input data
    latent_dim : dimension of the latent variable
    hidden_dim : dimension of the dense hidden layer
    filters: list of the sizes of the filters used for this model
    list of the size of the kernels used for each filter of this model
    conv_activation: type of activation layer used after the convolutional layers
    dense_activation: type of activation layer used after the dense layers
    """
    input_layer = Input(shape=(latent_dim, ))
    h = Dense(hidden_dim, activation=dense_activation)(input_layer)
    h = PReLU()(h)
    w = int(np.ceil(input_shape[0] / 2**(len(filters))))
    h = Dense(w * w * filters[-1], activation=dense_activation)(h)
    h = PReLU()(h)
    h = Reshape((w, w, filters[-1]))(h)
    for i in range(len(filters) - 1, -1, -1):
        h = Conv2DTranspose(filters[i], (kernels[i], kernels[i]),
                            activation=conv_activation,
                            padding='same',
                            strides=(2, 2))(h)
        h = PReLU()(h)
        h = Conv2DTranspose(filters[i], (kernels[i], kernels[i]),
                            activation=conv_activation,
                            padding='same')(h)
        h = PReLU()(h)
    h = Conv2D(input_shape[-1], (3, 3), activation='sigmoid',
               padding='same')(h)
    # Crop back to the requested spatial size if the upsampling overshot it.
    cropping = int(h.get_shape()[1] - input_shape[0])
    if cropping > 0:
        print('in cropping')
        if cropping % 2 == 0:
            h = Cropping2D(cropping // 2)(h)
        else:
            h = Cropping2D(((cropping // 2, cropping // 2 + 1),
                            (cropping // 2, cropping // 2 + 1)))(h)

    return Model(input_layer, h)
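
A quick way to sanity-check the decoder's output shape is to instantiate it with concrete arguments; the values below are illustrative only, not from the original project:

# Illustrative instantiation: 64x64 RGB output, 32-dim latent code, two
# transposed-conv stages (so the dense grid starts at ceil(64 / 2**2) = 16).
decoder = build_decoder(input_shape=(64, 64, 3),
                        latent_dim=32,
                        hidden_dim=256,
                        filters=[32, 64],
                        kernels=[3, 3],
                        conv_activation=None,
                        dense_activation=None)
decoder.summary()  # final layer should report (None, 64, 64, 3)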
Example #5
# Fragment: sequence_input, l_cov1, and l_cov2 are defined earlier in the notebook.
print(l_cov1.get_shape())

l_pool2 = MaxPooling1D(5)(l_cov2)
print(l_pool2.get_shape())

l_cov3 = Conv1D(128, 5, activation='relu')(l_pool2)
print(l_cov3.get_shape())

l_pool3 = MaxPooling1D(35)(l_cov3)  # global max pooling
print(l_pool3.get_shape())

l_flat = Flatten()(l_pool3)
print(l_flat.get_shape())

l_dense = Dense(128, activation='relu')(l_flat)
print(l_dense.get_shape())

preds = Dense(len(macronum), activation='softmax')(l_dense)
print(preds.get_shape())


# #### Here we attempt to train the neural network, but the kernel dies every time we call model.fit().
# It reports that training would take too much memory. Similar cases are described online, and the advice was to run this on a personal computer rather than on SageMaker.

# In[17]:


model = Model(sequence_input, preds)
model.compile(loss='categorical_crossentropy',
              optimizer='rmsprop',
              metrics=['acc'])
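
One common workaround when fit() exhausts memory is to train with a smaller batch size, which lowers peak memory at the cost of more update steps. A hedged sketch; x_train, y_train, and the hyperparameter values are placeholders, not taken from the original notebook:

# Hypothetical mitigation: smaller batches reduce peak memory during training.
model.fit(x_train, y_train,
          validation_split=0.1,
          epochs=10,
          batch_size=32)  # try 32 or lower if the kernel still dies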