Example #1
    def __init__(self, wv_matrix, lstm_hidden_dim, nn_hidden_dim, num_classes,
                 mean_pool=False):
        print('Initializing embedding model...')
        self.mean_pool = mean_pool
        input_dim = wv_matrix.shape[0]
        self.embeddingLayer = layers.embeddingLayer(wv_matrix)
        self.LSTMLayer = layers.LSTMLayer(input_dim, lstm_hidden_dim)
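        # The classifier head takes 2 * lstm_hidden_dim features, presumably two concatenated LSTM encodings.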
        nn_input_dim = 2 * lstm_hidden_dim

        self.fc1 = layers.FullyConnected(input_dim=nn_input_dim,
                                         output_dim=nn_hidden_dim,
                                         activation='relu')

        self.fc2 = layers.FullyConnected(input_dim=nn_hidden_dim,
                                         output_dim=nn_hidden_dim,
                                         activation='relu')

        self.linear_layer = layers.FullyConnected(input_dim=nn_hidden_dim,
                                                  output_dim=num_classes,
                                                  activation=None)

        self.layers = {'embeddingLayer': self.embeddingLayer, 'lstm': self.LSTMLayer,
                       'fc1': self.fc1, 'fc2': self.fc2, 'linear': self.linear_layer}

        self.params = self.embeddingLayer.params + self.LSTMLayer.params + \
            self.fc1.params + self.fc2.params + self.linear_layer.params
Example #2
 def __init__(self, name_scope, input_nc, normalize_encoder):
   self.blocks = []
   activation = functools.partial(tf.nn.leaky_relu, alpha=0.2)
   conv2d = functools.partial(layers.LayerConv, use_scaling=opts.use_scaling,
                              relu_slope=0.2, padding='SAME')
   with tf.variable_scope(name_scope, reuse=tf.AUTO_REUSE):
     # Conv stem plus three residual blocks; pixel-normalize after each block
     # only when normalize_encoder is set.
     pipe = [conv2d('conv0', w=4, n=[input_nc, 64], stride=2)]
     for i, n in enumerate([[64, 128], [128, 192], [192, 256]]):
       pipe.append(layers.BasicBlock('BB%d' % i, n=n, use_scaling=opts.use_scaling))
       if normalize_encoder:
         pipe.append(layers.pixel_norm)
     pipe += [activation, layers.global_avg_pooling]
     self.blocks.append(layers.LayerPipe(pipe))
     # FC layers to get the mean and logvar
     self.fc_mean = layers.FullyConnected(opts.app_vector_size, 'FC_mean')
     self.fc_logvar = layers.FullyConnected(opts.app_vector_size, 'FC_logvar')
Example #3
 def __init__(self, inputSize, outputSize):
     super().__init__()
     self.outputSize = outputSize
     self.conv1 = layers.Convolution(5, 1)
     self.Pool1 = layers.Pool(5, stride=2)
     self.Flatten = layers.Flatten()
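     # fc1 consumes the flattened conv/pool output: 5 feature maps of 14x14.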
     self.fc1 = layers.FullyConnected(14 * 14 * 5,
                                      50,
                                      activation=activate.Relu())
     self.fc2 = layers.FullyConnected(50,
                                      outputSize,
                                      activation=activate.SoftMax())
     self.inputLayer = self.conv1
     self.outputLayer = self.fc2
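     # Wire the layer graph: conv1 -> Pool1 -> Flatten -> fc1 -> fc2.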
     self.conv1.addSon(self.Pool1)
     self.Pool1.addSon(self.Flatten)
     self.Flatten.addSon(self.fc1)
     self.fc1.addSon(self.fc2)
Example #4
 def __init__(self, inputSize, outputSize):
     super().__init__()
     self.outputSize = outputSize
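     # Softmax regression: flatten the input, then a single fully connected layer.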
     self.Flatten = layers.Flatten()
     self.fc1 = layers.FullyConnected(inputSize,
                                      10,
                                      activation=activate.SoftMax())
     self.inputLayer = self.Flatten
     self.outputLayer = self.fc1
     self.Flatten.addSon(self.fc1)
Example #5
    def __init__(self, embeddings, lstm_hidden_dim):
        print('Initializing attention model...')
        # self.reverse = reverse
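        # Twice the embedding size, presumably a concatenated pair of word vectors per step.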
        input_dim = 2 * embeddings.shape[0]
        self.embeddingLayer = layers.embeddingLayer(embeddings)
        self.LSTMLayer = layers.LSTMLayer(input_dim, lstm_hidden_dim)
        self.linear_layer = layers.FullyConnected(input_dim=lstm_hidden_dim,
                                                  output_dim=2,
                                                  activation=None)
        self.layers = {'lstm': self.LSTMLayer, 'linear': self.linear_layer,
                       'embeddings': self.embeddingLayer}

        self.params = self.LSTMLayer.params + self.linear_layer.params + self.embeddingLayer.params
Example #6
    def __init__(self, wv_matrix, hidden_dim, num_classes):
        print('Initializing averaging model...')
        # just concatenate vector averages
        input_dim = 2 * wv_matrix.shape[0]

        # initialize layers
        self.embeddingLayer = layers.wordVectorLayer(wv_matrix)

        self.fc1 = layers.FullyConnected(input_dim=input_dim,
                                         output_dim=hidden_dim,
                                         activation='relu')

        self.fc2 = layers.FullyConnected(input_dim=hidden_dim,
                                         output_dim=hidden_dim,
                                         activation='relu')

        self.linear_layer = layers.FullyConnected(input_dim=hidden_dim,
                                                  output_dim=num_classes,
                                                  activation=None)

        self.layers = {'embeddingLayer': self.embeddingLayer, 'fc1': self.fc1,
                       'fc2': self.fc2, 'linear': self.linear_layer}
        self.params = self.embeddingLayer.params + self.fc1.params + self.fc2.params + self.linear_layer.params
Example #7
pool1 = layers.Maxpool(conv1.out_dim, size=2, stride=2)
# Conv layer with 2 filters, a 3x3 kernel, stride 1, and no padding
conv2 = layers.Conv(pool1.out_dim,
                    n_filter=2,
                    h_filter=3,
                    w_filter=3,
                    stride=1,
                    padding=0)
# Activation for layer 2: rectified linear (ReLU)
relu = mf.ReLU()
# MaxPool layer 2x2 stride 1
pool2 = layers.Maxpool(conv2.out_dim, size=2, stride=1)
# Flatten the matrix
flat = layers.Flatten()
# Fully connected layer with 50 neurons
fc1 = layers.FullyConnected(np.prod(pool2.out_dim), 50)
# Tanh activation for the 50-neuron fully connected layer
tanh = mf.TanH()

# Fully connected output layer with num_classes neurons
out = layers.FullyConnected(50, num_classes)

cnn = layers.CNN([conv1, sig, pool1, conv2, relu, pool2, flat, fc1, tanh, out])

mf.model_summary(cnn, 'cnn_model_plot.png', f)

e_nnet, e_accuracy, e_validate, e_loss, e_loss_val = mf.sgd(cnn,
                                                            x_train,
                                                            y_train,
                                                            f,
                                                            minibatch_size=200,
Example #8
                       kernel_channels=384,
                       n_kernels=256,
                       stride=1,
                       same_padding=True,
                       logging=False))
graph.add_layer(layers.Activation(activation="relu", logging=False))

graph.add_layer(
    layers.MaxPooling(window_size=3,
                      window_channels=256,
                      stride=2,
                      logging=False))

graph.add_layer(layers.Flatten((9216, 1), logging=False))
graph.add_layer(
    layers.FullyConnected("fc1", 9216, 4096, activation="relu", logging=False))
graph.add_layer(
    layers.FullyConnected("fc2", 4096, 4096, activation="relu", logging=False))
graph.add_layer(
    layers.FullyConnected("fc3",
                          4096,
                          n_classes,
                          activation="softmax",
                          logging=False))

try:
    print()
    print("[+] N_EXAMPLES:", n_examples)
    for e in range(epochs):
        images_shuffled, targets_shuffled = shuffle(images, targets)
        total_predicted_ok = 0
Example #9
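# Shape smoke test: forward one batch through max-pooling, flattening, and two FC layers, then the loss.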
maxpool = layers.MaxPooling(window_size=2, window_channels=10, stride=2)
X = maxpool.forward(X)
print("[+] MAXPOOLED_shape:", X.shape)
dMaxpool = maxpool.backward(np.ones_like(X))
print("dMaxpool_SHAPE:", dMaxpool.shape)

N, C, H, W = X.shape

flatten = layers.Flatten((C * H * W, N))
X = flatten.forward(X)
print(X.shape)

I, N = X.shape

fc1 = layers.FullyConnected("fc1", I, 1000, activation="relu")
X = fc1.forward(X)
print(X.shape)

fc2 = layers.FullyConnected("fc2", 1000, 10, activation="softmax")
Y = fc2.forward(X)
print(Y.shape)

cross_entropy = layers.CrossEntropy()
loss = cross_entropy.forward(Y, T)
print(loss)
######################################################################################################################
'''
print()

print("XSHAPE:", X.shape)