Example #1
def body2(i, x, out):
    # Convolution Layer
    inputx = x.read(index=i)
    conv1 = func.conv2d(inputx, weights['wc1'], biases['bc1'])
    # Pooling (down-sampling)
    p1 = func.extract_patches(conv1, 'SAME', 2, 2)
    f1 = func.majority_frequency(p1)
    # max pooling
    pool1 = func.max_pool(p=p1)

    # Convolution Layer
    conv2 = func.conv2d(pool1, weights['wc2'], biases['bc2'])
    # Pooling (down-sampling)
    p2 = func.extract_patches(conv2, 'SAME', 2, 2)
    f2 = func.majority_frequency(p2)
    # max pooling
    pool2 = func.max_pool(p=p2)

    # Fully connected layer
    # Reshape conv2 output to fit fully connected layer input
    fc = tf.reshape(pool2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)
    # Output, class prediction
    out = out.write(index=i,
                    value=tf.add(tf.matmul(fc1, weights['out']),
                                 biases['out']))
    i += 1
    return i, x, out
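A minimal sketch of how a loop body like this is typically driven with tf.while_loop and TensorArrays in TF 1.x; `images`, `batch_size` and the weight/bias dictionaries are assumed to exist and are not part of the example:

import tensorflow as tf

# Assumed: `images` is a [batch_size, height, width, channels] tensor.
x = tf.TensorArray(dtype=tf.float32, size=batch_size).unstack(images)
out = tf.TensorArray(dtype=tf.float32, size=batch_size)
i0 = tf.constant(0)

# Run body2 once per image; the TensorArrays flow through the loop variables.
_, _, out_final = tf.while_loop(cond=lambda i, x, out: i < batch_size,
                                body=body2,
                                loop_vars=[i0, x, out])
logits = out_final.stack()  # per-example class scores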
Example #2
 def _inference(self, X, keep_prob):
     h = F.max_pool(F.activation(F.conv(X, 64)))
     h = F.max_pool(F.activation(F.conv(h, 128)))
     h = F.max_pool(F.activation(F.conv(h, 256)))
     h = F.activation(F.dense(F.flatten(h), 1024))
     h = F.dense(h, self._num_classes)
     return tf.nn.softmax(h)
Example #3
 def _inference(self, X, keep_prob):
     h = F.max_pool(F.activation(F.conv(X, 64)))
     h = F.max_pool(F.activation(F.conv(h, 128)))
     h = F.max_pool(F.activation(F.conv(h, 256)))
     h = F.activation(F.dense(F.flatten(h), 1024))
     h = F.dense(h, self._num_classes)
     return h
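Examples #2 and #3 differ only in the last line: #2 applies tf.nn.softmax inside _inference, while #3 returns the raw logits. Returning logits is the usual choice when the loss is computed with TensorFlow's fused cross-entropy op; a minimal sketch, assuming a one-hot `labels` tensor:

logits = self._inference(X, keep_prob)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits))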
Example #4
    def _inference(self, CC, MLO, keep_prob, is_train):
        layers = [3, 16, 32, 64, 64]

        cc = CC
        mlo = MLO

        for i in range(4):
            with tf.variable_scope('CC_layers_%s' % i) as scope:
                cc = F.conv(cc, layers[i])
                cc = F.batch_norm(cc, is_train)
                cc = F.activation(cc)
            cc = F.max_pool(cc)
        with tf.variable_scope('CC_features') as scope:
            cc = F.dense(cc, layers[i + 1])
            cc = F.batch_norm(cc, is_train)
            cc = F.activation(cc)

        for j in range(4):
            with tf.variable_scope('MLO_layers_%s' % j) as scope:
                mlo = F.conv(mlo, layers[j])
                mlo = F.batch_norm(mlo, is_train)
                mlo = F.activation(mlo)
            mlo = F.max_pool(mlo)
        with tf.variable_scope('MLO_features') as scope:
            mlo = F.dense(mlo, layers[j + 1])
            mlo = F.batch_norm(mlo, is_train)
            mlo = F.activation(mlo)

        with tf.variable_scope('softmax') as scope:
            concat = tf.concat(1, [cc, mlo])
            h = F.dense(concat, self._num_classes)

        return h
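Note that tf.concat(1, [cc, mlo]) uses the argument order of TensorFlow releases before 1.0 (axis first). On TF 1.0 and later the tensor list comes first, so the same concatenation would be written as:

concat = tf.concat([cc, mlo], axis=1)  # TF >= 1.0 argument order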
Example #5
    def _inference(self, X, keep_prob, is_train):
        dropout_rate = [0.9, 0.8, 0.7, 0.6, 0.5]
        layers = [64, 128, 256, 512, 512]
        iters = [2, 2, 3, 3]
        h = X

        # VGG Network Layer
        for i in range(4):
            for j in range(iters[i]):
                with tf.variable_scope('layers%s_%s' % (i, j)) as scope:
                    h = F.conv(h, layers[i])
                    h = F.batch_norm(h, is_train)
                    h = F.activation(h)
                    h = F.dropout(h, dropout_rate[i], is_train)
            h = F.max_pool(h)

        # Fully Connected Layer
        with tf.variable_scope('fully_connected_layer') as scope:
            h = F.dense(h, layers[i + 1])
            h = F.batch_norm(h, is_train)
            h = F.activation(h)
            h = F.dropout(h, dropout_rate[i + 1], is_train)

        # Softmax Layer
        with tf.variable_scope('softmax_layer') as scope:
            h = F.dense(h, self._num_classes)

        return h
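A minimal sketch, with assumed placeholder and op names, of how the is_train flag that drives batch norm and dropout here is usually toggled between training and evaluation runs:

is_train = tf.placeholder(tf.bool, name='is_train')
# ... build the graph, train_op and accuracy with this placeholder ...
sess.run(train_op, feed_dict={X: batch_x, y: batch_y, is_train: True})
sess.run(accuracy, feed_dict={X: val_x, y: val_y, is_train: False})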
Example #6
import numpy as np
import functions as fun

test_image = np.array([
    list(range(1, 7)),   # first row assumed from the 5-step pattern of the rows below
    list(range(6, 12)),
    list(range(11, 17)),
    list(range(16, 22)),
    list(range(21, 27)),
    list(range(26, 32))
])

test_image.shape
test_filter1 = np.array([[0, 0, 0], [1, 2, 1], [0, 0, 0]])
test_filter2 = test_filter1.T

test_filter = np.array([test_filter1, test_filter2])
test_conv, filter_mat, inter_conv = fun.convolute(image=test_image,
                                                  filter_matrix=test_filter)

out_maxpool, index_maxpool = fun.max_pool(test_conv)

ind = np.where(index_maxpool[0])  # for these indices we need gradients with respect to the image

test_conv[index_maxpool]  # these are the indices we want to combine with deltaL * image[]

test_conv[0, ind[0], ind[1]]  # if this works for image, wooooow!

test_conv.shape
ind[0]
ind[1]

test_image.shape
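A quick, independent cross-check of the hand-rolled convolution is possible with SciPy; this assumes fun.convolute performs a 'valid' cross-correlation per filter, which the listing does not guarantee:

import numpy as np
from scipy.signal import correlate2d

# Reference result: cross-correlate the test image with each 3x3 filter ('valid' padding).
reference = np.array([correlate2d(test_image, f, mode='valid') for f in test_filter])
print(np.allclose(test_conv, reference))  # expected True only if the assumption above holds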
Example #7
    def _inference(self, X, keep_prob, is_train):
        # Conv_layer 1
        conv = F.conv(X, 192)
        batch_norm = F._batch_norm(self, 'bn1', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.9, is_train)

        conv = F.conv(dropout, 192)
        batch_norm = F._batch_norm(self, 'bn2', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.9, is_train)

        max_pool = F.max_pool(dropout) # 16 x 16

        # Conv_layer 2
        conv = F.conv(max_pool, 192)
        batch_norm = F._batch_norm(self, 'bn3', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.8, is_train)

        conv = F.conv(dropout, 192)
        batch_norm = F._batch_norm(self, 'bn4', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.8, is_train)

        max_pool = F.max_pool(dropout) # 8 x 8

        # Conv_layer 3
        conv = F.conv(max_pool, 256)
        batch_norm = F._batch_norm(self, 'bn5', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        conv = F.conv(dropout, 256)
        batch_norm = F._batch_norm(self, 'bn6', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        conv = F.conv(dropout, 256)
        batch_norm = F._batch_norm(self, 'bn7', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.7, is_train)

        max_pool = F.max_pool(dropout) # 4 x 4

        # Conv_layer 4
        conv = F.conv(max_pool, 512)
        batch_norm = F._batch_norm(self, 'bn8', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        conv = F.conv(dropout, 512)
        batch_norm = F._batch_norm(self, 'bn9', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        conv = F.conv(dropout, 512)
        batch_norm = F._batch_norm(self, 'bn10', conv, is_train)
        relu = F.activation(batch_norm)
        dropout = F.dropout(relu, 0.6, is_train)

        max_pool = F.max_pool(dropout) # 2 x 2

        # Fully Connected Layer
        h = tf.reduce_mean(max_pool, reduction_indices=[1,2])
        h = F.dropout(h, 0.5, is_train)
        h = F.dense(h, 512)
        h = F._batch_norm(self, 'bn11', h, is_train)
        h = F.activation(h)
        h = F.dropout(h, 0.5, is_train)
        h = F.dense(h, self._num_classes)

        return h
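The global-average-pooling line above uses reduction_indices, which in TensorFlow 1.x is only a deprecated alias for axis; the equivalent current call would be:

h = tf.reduce_mean(max_pool, axis=[1, 2])  # average over the spatial dimensions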
Example #8
import numpy as np
import functions as fun

np.random.seed(seed=666); image = np.random.randn(6, 6) * 3
image = np.round(image)
label = 1

num_filters = 2
np.random.seed(seed=666); filter_conv = np.random.randn(num_filters, 3, 3) / 9

## after transposing the input to softmax, feedforward agrees,
# but maybe it would agree in backprop, because I put the num filters first,
# he put them last

out_ownconv, filter_conv, inter = fun.convolute((image / 255) - 0.5, filter_conv)
out_ownconv.T
out_maxown, index_maxown = fun.max_pool(out_ownconv)

np.random.seed(seed=666); weight_soft = np.random.randn(8, 2) / 8
np.random.seed(seed=666); bias_soft = np.zeros(2)

probabilities, intermediates = fun.softmax(out_maxown.T, weight_soft, bias_soft)

out_maxown[0]
out_maxown.T[:, :, 0]

################################## backprop ######################

weight_soft.shape

### agrees, after Transposing input!
# needed to transpose weight matrix as well, now gradients from softmax roughly agree
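For reference, and assuming fun.softmax is a plain affine layer followed by a softmax over the flattened (transposed) max-pool output, the probabilities above can be cross-checked in a few lines of NumPy; all names besides numpy come from the snippet above:

flat = out_maxown.T.flatten()                  # assumed flattening order
z = flat @ weight_soft + bias_soft             # affine scores, shape (2,)
check = np.exp(z - z.max()) / np.exp(z - z.max()).sum()   # numerically stable softmax
print(np.allclose(probabilities, check))       # expected True only under the assumptions above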
Example #9
### forward pass is the same!
out_conv = conv.forward((image / 255) - 0.5)
out_convown, filter_conv, inter = fun.convolute((image / 255) - 0.5,
                                                filter_conv)

for i in range(num_filters):
    if np.sum(out_conv[:, :, i] == out_convown[i],
              axis=(0, 1)) == np.prod(out_conv[:, :, 0].shape):
        print("Yeah, it works!")

out_conv.shape
out_convown.shape
# maxpool as well
out_max = pool.forward(out_conv)
out_maxown, index_maxown = fun.max_pool(out_convown)

for i in range(num_filters):
    if np.sum(out_max[:, :, i] == out_maxown[i],
              axis=(0, 1)) == np.prod(out_max[:, :, 0].shape):
        print("Yeah, it works!")

## output of softmax is different, maybe because the input (out_max) is transposed
size = np.prod(out_max.shape)
np.random.seed(seed=666)
weight_soft = np.random.randn(size, 2) / size
bias_soft = np.zeros(2)

out_soft = softmax.forward(out_max)
# hmmmm not quite the same, maybe take exactly the same input?
# update: exactly the same, the transposing of the tensor was just wrong
###############################################################################
## pass in net from blog and own net
###############################################################################

########################### feedforward

# feedforward conv layer
out_conv = conv.forward(image)
out_convown, filter_conv, inter = fun.convolute(image, filter_conv)

if np.sum(out_conv == out_convown) == np.prod(out_convown.shape):
    print("Yeaaaah!")
# feedforward maxpool layer

out_max = pool.forward(out_conv)
out_maxown, index_maxown = fun.max_pool(out_convown)

if np.sum(out_max == out_maxown) == np.prod(out_maxown.shape):
    print("Yeaaaah!")

# feedforward softmax layer
out_soft, weights, summe = softmax.forward(out_max)  #
np.random.seed(seed=666)
dim_maxpool = np.prod(out_max.shape)  # flattened max-pool size fed into the softmax layer
weight_soft = (np.random.randn(dim_maxpool, 10) / dim_maxpool) * 10
bias_soft = np.zeros(10)
#bias_soft = np.array([-5, -1, 4, 5, 6, 7] )
probabilities, inter_soft = fun.softmax(output_maxpool=out_max,
                                        weight_matrix=weight_soft,
                                        bias_vector=bias_soft)

if np.sum(out_soft == probabilities) == np.prod(out_soft.shape):
    print("Yeaaaah!")