Code example #1
    def _inference(self, X, keep_prob, is_train):
        dropout_rate = [0.9, 0.8, 0.7, 0.6, 0.5]
        layers = [64, 128, 256, 512, 512]
        iters = [2, 2, 3, 3]
        h = X

        # VGG Network Layer
        for i in range(4):
            for j in range(iters[i]):
                with tf.variable_scope('layers%s_%s' % (i, j)) as scope:
                    h = F.conv(h, layers[i])
                    h = F.batch_norm(h, is_train)
                    h = F.activation(h)
                    h = F.dropout(h, dropout_rate[i], is_train)
            h = F.max_pool(h)

        # Fully Connected Layer
        # (i is 3 after the loop above, so layers[i + 1] and
        #  dropout_rate[i + 1] pick the last entries: 512 and 0.5)
        with tf.variable_scope('fully_connected_layer') as scope:
            h = F.dense(h, layers[i + 1])
            h = F.batch_norm(h, is_train)
            h = F.activation(h)
            h = F.dropout(h, dropout_rate[i + 1], is_train)

        # Softmax Layer
        with tf.variable_scope('softmax_layer') as scope:
            h = F.dense(h, self._num_classes)

        return h
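
Throughout these examples, `F` is a project-local helper module whose source is not shown. The following is a minimal sketch of what thin TensorFlow 1.x wrappers with these names might look like; the kernel sizes, initializers, and argument orders are assumptions for illustration, not the projects' actual code (some examples also pass `self` and a scope name to `batch_norm`, a variant not modeled here):

import numpy as np
import tensorflow as tf

def conv(x, out_channels, strides=1, ksize=3, bias_term=True):
    # 3x3 convolution with SAME padding; input channels inferred from x (NHWC)
    in_channels = x.get_shape().as_list()[-1]
    w = tf.get_variable('conv_weight', [ksize, ksize, in_channels, out_channels],
                        initializer=tf.truncated_normal_initializer(stddev=0.1))
    h = tf.nn.conv2d(x, w, strides=[1, strides, strides, 1], padding='SAME')
    if bias_term:
        b = tf.get_variable('conv_bias', [out_channels],
                            initializer=tf.zeros_initializer())
        h = tf.nn.bias_add(h, b)
    return h

def batch_norm(x, is_train):
    return tf.layers.batch_normalization(x, training=is_train)

def activation(x):
    return tf.nn.relu(x)

def max_pool(x):
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')

def dropout(x, keep_prob, is_train):
    # drop units only at training time (is_train assumed to be a boolean tensor)
    return tf.cond(is_train,
                   lambda: tf.nn.dropout(x, keep_prob),
                   lambda: x)

def dense(x, units):
    # flatten any trailing spatial dimensions, then apply a linear layer
    if x.get_shape().ndims > 2:
        x = tf.reshape(x, [-1, int(np.prod(x.get_shape().as_list()[1:]))])
    in_dim = x.get_shape().as_list()[-1]
    w = tf.get_variable('dense_weight', [in_dim, units],
                        initializer=tf.truncated_normal_initializer(stddev=0.1))
    b = tf.get_variable('dense_bias', [units], initializer=tf.zeros_initializer())
    return tf.matmul(x, w) + b

Each call to conv or dense in this sketch creates variables with fixed names, so in practice every call would need its own tf.variable_scope, exactly as the scoped examples on this page do.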
Code example #2
    def _inference(self, CC, MLO, keep_prob, is_train):
        layers = [3, 16, 32, 64, 64]

        cc = CC
        mlo = MLO

        for i in range(4):
            with tf.variable_scope('CC_layers_%s' % i) as scope:
                cc = F.conv(cc, layers[i])
                cc = F.batch_norm(cc, is_train)
                cc = F.activation(cc)
            cc = F.max_pool(cc)
        with tf.variable_scope('CC_features') as scope:
            cc = F.dense(cc, layers[i + 1])
            cc = F.batch_norm(cc, is_train)
            cc = F.activation(cc)

        for j in range(4):
            with tf.variable_scope('MLO_layers_%s' % j) as scope:
                mlo = F.conv(mlo, layers[j])
                mlo = F.batch_norm(mlo, is_train)
                mlo = F.activation(mlo)
            mlo = F.max_pool(mlo)
        with tf.variable_scope('MLO_features') as scope:
            mlo = F.dense(mlo, layers[j + 1])
            mlo = F.batch_norm(mlo, is_train)
            mlo = F.activation(mlo)

        with tf.variable_scope('softmax') as scope:
            concat = tf.concat([cc, mlo], axis=1)  # TF >= 1.0 argument order
            h = F.dense(concat, self._num_classes)

        return h
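
Code examples #1 and #2 use the same building blocks, but #2 runs two separate convolutional branches, one per input view (CC and MLO, presumably the two standard mammography views), under distinct variable scopes so the branches do not share weights. Each branch ends in a dense feature vector (here 64-dimensional, since layers[i + 1] is 64), and the two vectors are concatenated along the feature axis, giving a (batch, 128) tensor, before the final dense layer produces the class scores.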
Code example #3
File: network.py  Project: muojp/cifar10-tensorflow
 def _inference(self, X, keep_prob):
     h = F.max_pool(F.activation(F.conv(X, 64)))
     h = F.max_pool(F.activation(F.conv(h, 128)))
     h = F.max_pool(F.activation(F.conv(h, 256)))
     h = F.activation(F.dense(F.flatten(h), 1024))
     h = F.dense(h, self._num_classes)
     return tf.nn.softmax(h)
Code example #4
 def _inference(self, X, keep_prob):
     h = F.max_pool(F.activation(F.conv(X, 64)))
     h = F.max_pool(F.activation(F.conv(h, 128)))
     h = F.max_pool(F.activation(F.conv(h, 256)))
     h = F.activation(F.dense(F.flatten(h), 1024))
     h = F.dense(h, self._num_classes)
     return h
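
The only difference between examples #3 and #4 is the last line: #3 applies tf.nn.softmax inside _inference, while #4 returns the raw logits. Returning logits is the usual pattern when the loss is computed with tf.nn.softmax_cross_entropy_with_logits, which expects unscaled logits and applies the softmax internally for numerical stability. A rough usage sketch (the labels tensor and variable names here are illustrative, not taken from either project):

# logits from example #4's _inference; labels is a one-hot (batch, num_classes) tensor
logits = self._inference(X, keep_prob)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))
probs = tf.nn.softmax(logits)  # apply softmax only where probabilities are needed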
Code example #5
File: network.py  Project: meliketoy/cnn.tensorflow
 def _residual(self, h, channels, strides, keep_prob, is_train):
     h0 = h
     h1 = F.conv(F.activation(F.batch_norm(self, 'bn1', h0, is_train)), channels, strides)
     h1 = F.dropout(h1, keep_prob, is_train)
     h2 = F.conv(F.activation(F.batch_norm(self, 'bn2', h1, is_train)), channels)
     if F.volume(h0) == F.volume(h2):
         h = h0 + h2
     else:
         h4 = F.conv(h0, channels, strides)
         h = h2 + h4
     return h
Code example #6
 def _residual(self, h, channels, strides, keep_prob, is_train):
     h0 = h
     with tf.variable_scope('residual_first'):
         h1 = F.conv(F.activation(F.batch_norm(h0, is_train)), channels, strides)
         h1 = F.dropout(h1, keep_prob, is_train)
     with tf.variable_scope('residual_second'):
         h2 = F.conv(F.activation(F.batch_norm(h1, is_train)), channels)
     if F.volume(h0) == F.volume(h2):
         h = h0 + h2
     else:
         h4 = F.conv(h0, channels, strides)
         h = h2 + h4
     return h
Code example #7
File: network.py  Project: meliketoy/cnn.tensorflow
 def _residual(self, h, channels, strides, keep_prob):
     h0 = h
     h1 = F.dropout(
         F.conv(F.activation(F.batch_normalization(h0)), channels, strides),
         keep_prob)
     h2 = F.conv(F.activation(F.batch_normalization(h1)), channels)
     # c.f. http://gitxiv.com/comments/7rffyqcPLirEEsmpX
     if F.volume(h0) == F.volume(h2):
         h = h2 + h0
     else:
         h4 = F.conv(h0, channels, strides)
         h = h2 + h4
     return h
Code example #8
 def _residual(self, h, channels, strides):
     h0 = h
     h1 = F.batch_normalization(
         F.conv(F.activation(h0), channels, strides, bias_term=False))
     h2 = F.batch_normalization(
         F.conv(F.activation(h1), channels, bias_term=False))
     if F.volume(h0) == F.volume(h2):
         h = h2 + h0
     else:
         h3 = F.avg_pool(h0)
         h4 = tf.pad(h3,
                     [[0, 0], [0, 0], [0, 0], [channels // 4, channels // 4]])
         h = h2 + h4
     return h
Code example #9
 def _residual(self, h, channels, strides):
     h0 = h
     h1 = F.activation(
         F.batch_normalization(
             F.conv(h0, channels, strides, bias_term=False)))
     h2 = F.batch_normalization(F.conv(h1, channels, bias_term=False))
     # c.f. http://gitxiv.com/comments/7rffyqcPLirEEsmpX
     if F.volume(h0) == F.volume(h2):
         h = h2 + h0
     else:
         h3 = F.avg_pool(h0)
         h4 = tf.pad(h3,
                     [[0, 0], [0, 0], [0, 0], [channels // 4, channels // 4]])
         h = h2 + h4
     return F.activation(h)
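
Examples #8 and #9 use a parameter-free shortcut for dimension-changing blocks instead of the strided 1x1/3x3 convolution used in #5-#7: the input is average-pooled to halve its spatial size and then zero-padded along the channel axis. Since a dimension-changing block doubles the channel count, the incoming tensor carries channels // 2 channels, so padding channels // 4 zeros on each side of the channel axis yields exactly channels. A small NumPy sketch of that arithmetic (shapes are illustrative, and strided slicing stands in for the average pooling):

import numpy as np

channels = 32                    # output channels of the residual branch
h0 = np.zeros((8, 16, 16, 16))   # NHWC input with channels // 2 = 16 channels
h3 = h0[:, ::2, ::2, :]          # stand-in for 2x2 average pooling, stride 2
pad = channels // 4              # 8 zeros on each side of the channel axis
h4 = np.pad(h3, [(0, 0), (0, 0), (0, 0), (pad, pad)], mode="constant")
print(h4.shape)                  # (8, 8, 8, 32), matching the residual branch h2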
Code example #10
File: dnn.py  Project: trygvels/FYS-STK4155
def forward(conf, X_batch, params, is_training):
    """
    Forward propagation through fully connected network.

    X_batch:
        (batch_size, channels * height * width)
    """
    n = conf["layer_dimensions"]
    L = len(n) - 1

    # Saves the input
    A = X_batch
    features = {}
    features["A_0"] = A

    # Loop over each layer in network
    for l in range(1, L + 1):
        A_prev = A.copy()
        Z = np.dot(params["W_" + str(l)].T, A_prev) + params["b_" + str(l)]

        # Calculates activation (ReLU, or softmax for the output layer)
        if l < L:
            A = activation(Z.copy(), "relu")
        else:
            A = softmax(Z.copy())
        if is_training:
            # Save activations if training
            features["Z_" + str(l)] = Z.copy()
            features["A_" + str(l)] = A.copy()

    # Y_proposed is the probabilities returned by passing
    # activations through the softmax function.
    Y_proposed = A
    return Y_proposed, features
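
The activation and softmax helpers called in example #10 are not shown. Below is a minimal sketch consistent with how forward uses them; note that np.dot(W.T, A_prev) implies activations are stored feature-major, i.e. with shape (units, batch_size), so the softmax is taken over axis 0. The project's actual helpers may differ:

import numpy as np

def activation(Z, function="relu"):
    # elementwise nonlinearity; only ReLU is sketched here
    if function == "relu":
        return np.maximum(Z, 0)
    raise ValueError("unknown activation function: %s" % function)

def softmax(Z):
    # softmax over the class axis (axis 0 for feature-major activations),
    # shifted by the per-column maximum for numerical stability
    Z_shifted = Z - Z.max(axis=0, keepdims=True)
    expZ = np.exp(Z_shifted)
    return expZ / expZ.sum(axis=0, keepdims=True)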
Code example #11
def forward(conf, input_layer, params, is_training=False):
    """
    Forward propagation through the Convolutional layer.

    input_layer:
        (batch_size, channels_x, height_x, width_x)
    """

    # Get weights and bias
    weight = params["W_1"]
    bias = params["b_1"]
    # Get convolution hyperparameters
    stride = conf["stride"]
    pad_size = conf["pad_size"]

    # Padding width and height
    (batch_size, channels_x, height_x, width_x) = input_layer.shape
    input_padded = np.pad(input_layer,
                          ((0, ), (0, ), (pad_size, ), (pad_size, )),
                          mode="constant")
    (num_filters, channels_w, height_w, width_w) = weight.shape

    # Calculate dimensions of output layer and initialize
    height_y = 1 + (height_x + 2 * pad_size - height_w) // stride
    width_y = 1 + (width_x + 2 * pad_size - width_w) // stride
    output_layer = np.zeros((batch_size, num_filters, height_y, width_y))

    # Save input layer
    A = input_layer
    features = {}
    features["A_0"] = A

    # Forward pass loop in numba
    output_layer = forwardloop(
        batch_size,
        num_filters,
        output_layer,
        weight,
        bias,
        input_padded,
        channels_x,
        width_y,
        height_y,
        width_w,
        height_w,
        stride,
    )

    # Save output to Z
    Z = output_layer.copy()
    A = functions.activation(Z.copy(), "relu")

    if is_training:
        # If training, save outputs
        features["Z_1"] = Z.copy()
        features["A_1"] = A.copy()

    return A, features
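
forwardloop is a project-local helper (presumably numba-compiled, per the comment) whose body is not shown here. A plain-Python sketch that matches the call signature above and computes a standard strided convolution over the padded input might look like the following; the bias is assumed to hold one value per filter:

def forwardloop(batch_size, num_filters, output_layer, weight, bias,
                input_padded, channels_x, width_y, height_y,
                width_w, height_w, stride):
    # for every image, filter and output position, accumulate the weighted
    # sum over input channels and the kernel window, then add the filter bias
    for i in range(batch_size):
        for j in range(num_filters):
            for p in range(height_y):
                for q in range(width_y):
                    total = 0.0
                    for c in range(channels_x):
                        for r in range(height_w):
                            for s in range(width_w):
                                total += (weight[j, c, r, s] *
                                          input_padded[i, c,
                                                       p * stride + r,
                                                       q * stride + s])
                    output_layer[i, j, p, q] = total + bias[j]
    return output_layer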
Code example #12
 def _feedforward(self, x):
     x_in = x
     for layer in self.net:
         x_out = []
         for node in layer:
             active = activation(node['func'])
             node["a"] = active(dotprod(node['weights'], x_in))
             x_out.append(node["a"])
         x_in = x_out # set output as next input
     return x_in
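
In example #12 the network is a list of layers, each a list of node dicts. activation(node['func']) is assumed to return a callable, and dotprod an ordinary dot product between the node's weight vector and the incoming values. A minimal sketch under those assumptions (any bias handling in the real project is not modeled here):

import math

def activation(name):
    # map the activation name stored on a node to a callable
    if name == "sigmoid":
        return lambda z: 1.0 / (1.0 + math.exp(-z))
    if name == "relu":
        return lambda z: max(0.0, z)
    raise ValueError("unknown activation: %s" % name)

def dotprod(weights, values):
    # plain dot product of the node's weights with the incoming values
    return sum(w * v for w, v in zip(weights, values))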
Code example #13
 def _inference(self, X, keep_prob):
     h = X
     h = F.activation(F.batch_normalization(F.conv(h, 16, bias_term=False)))
     for i in range(self._layers):
         h = self._residual(h, channels=16, strides=1)
     for channels in [32, 64]:
         for i in range(self._layers):
             strides = 2 if i == 0 else 1
             h = self._residual(h, channels, strides)
     h = tf.reduce_mean(h, reduction_indices=[1, 2])  # Global Average Pooling
     h = F.dense(h, self._num_classes)
     return h
Code example #14
File: network.py  Project: meliketoy/cnn.tensorflow
    def _inference(self, X, keep_prob, is_train):
        h = F.conv(X, 16)
        for i in range(self._layers):
            with tf.variable_scope(str(16*self._k)+'_layers_%s' %i):
                h = self._residual(h, channels=16*self._k, strides=1, keep_prob=keep_prob, is_train=is_train)
        for channels in [32*self._k, 64*self._k]:
            for i in range(self._layers):
                with tf.variable_scope(str(channels)+'_layers_%s' %i):
                    strides = 2 if i == 0 else 1
                    h = self._residual(h, channels, strides, keep_prob, is_train)
        h = F.activation(F.batch_norm(self, 'bn', h, is_train))
        h = tf.reduce_mean(h, reduction_indices=[1,2])
        h = F.dense(h, self._num_classes)

        return h
Code example #15
File: network.py  Project: meliketoy/cnn.tensorflow
    def _inference(self, X, keep_prob, is_train):
        # Conv_layer 1
        conv = F.conv(X, 192)
        batch_norm = F._batch_norm(self, 'bn1', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.9, is_train)

        conv = F.conv(dropout, 192)
        batch_norm = F._batch_norm(self, 'bn2', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.9, is_train)

        max_pool = F.max_pool(dropout) # 16 x 16

        # Conv_layer 2
        conv = F.conv(max_pool, 192)
        batch_norm = F._batch_norm(self, 'bn3', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.8, is_train)

        conv = F.conv(dropout, 192)
        batch_norm = F._batch_norm(self, 'bn4', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.8, is_train)

        max_pool = F.max_pool(dropout) # 8 x 8

        # Conv_layer 3
        conv = F.conv(max_pool, 256)
        batch_norm = F._batch_norm(self, 'bn5', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        conv = F.conv(dropout, 256)
        batch_norm = F._batch_norm(self, 'bn6', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        conv = F.conv(dropout, 256)
        batch_norm = F._batch_norm(self, 'bn7', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        max_pool = F.max_pool(dropout) # 4 x 4

        # Conv_layer 4
        conv = F.conv(max_pool, 512)
        batch_norm = F._batch_norm(self, 'bn8', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        conv = F.conv(dropout, 512)
        batch_norm = F._batch_norm(self, 'bn9', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        conv = F.conv(dropout, 512)
        batch_norm = F._batch_norm(self, 'bn10', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        max_pool = F.max_pool(dropout) # 2 x 2

        # Fully Connected Layer
        h = tf.reduce_mean(max_pool, reduction_indices=[1,2])
        h = F.dropout(h, 0.5, is_train)
        h = F.dense(h, 512)
        h = F._batch_norm(self, 'bn11', h, is_train)
        h = F.activation(h)
        h = F.dropout(h, 0.5, is_train)
        h = F.dense(h, self._num_classes)

        return h
Code example #16
    weights += [random.uniform(0, 1)]

# Initialize arrays to contain historical loss and accuracy for plotting
train_loss = np.zeros(args.numepoch + 1)
valid_loss = np.zeros(args.numepoch + 1)
train_acc = np.zeros(args.numepoch + 1)
valid_acc = np.zeros(args.numepoch + 1)

# Gradient descent for the specified number of epochs
for e in range(args.numepoch):
    # Get list of sum (Z) values for training and validation data
    sums = F.sum_function(weights, train_data, bias)
    valid_sums = F.sum_function(weights, valid_data, bias)

    # Run summations (Z) through activation function to get array Y
    train_act = F.activation(args.actfunction, sums)
    valid_act = F.activation(args.actfunction, valid_sums)

    # Record loss and accuracy at end of each epoch on training and validation data
    train_loss[e] = F.loss(train_act, train_label)
    valid_loss[e] = F.loss(valid_act, valid_label)

    train_acc[e] = F.accuracy(train_label, train_act)
    valid_acc[e] = F.accuracy(valid_label, valid_act)

    # Calculate weights gradient for each of 9 weights
    weight_grad = []
    for w in range(9):
        weight_grad += F.weights_gradient(args.actfunction, train_act,
                                          train_data, train_label, w)