Example 1
def body2(i, x, out):
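    """Loop body (for tf.while_loop): forward pass on sample i.

    Reads sample i from the TensorArray x, applies two conv + majority-pool
    blocks and a fully connected head, and writes the class logits to the
    TensorArray out.
    """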
    # Convolution Layer
    inputx = x.read(index=i)
    conv1 = func.conv2d(inputx, weights['wc1'], biases['bc1'])
    # Pooling (down-sampling)
    p1 = func.extract_patches(conv1, 'SAME', 2, 2)
    f1 = func.majority_frequency(p1)
    # majority pooling
    pool1 = func.majority_pool(p=p1, f=f1)

    # Convolution Layer
    conv2 = func.conv2d(pool1, weights['wc2'], biases['bc2'])
    # Pooling (down-sampling)
    p2 = func.extract_patches(conv2, 'SAME', 2, 2)
    f2 = func.majority_frequency(p2)
    # majority pooling
    pool2 = func.majority_pool(p=p2, f=f2)

    # Fully connected layer
    # Reshape the pooled output to fit the fully connected layer input
    fc = tf.reshape(pool2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)
    # Output, class prediction
    out = out.write(index=i,
                    value=tf.add(tf.matmul(fc1, weights['out']),
                                 biases['out']))
    i += 1
    return i, x, out
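
This function follows the TF 1.x tf.while_loop pattern: read from a TensorArray, write to a TensorArray, return the counter incremented. A minimal driver sketch, assuming a batch tensor named `batch` of shape [N, 28, 28, 1] (the name and shape are assumptions, not from the original):

import tensorflow as tf

n = tf.shape(batch)[0]
x_ta = tf.TensorArray(dtype=tf.float32, size=n).unstack(batch)
out_ta = tf.TensorArray(dtype=tf.float32, size=n)

# run body2 over every sample; the loop stops once i reaches n
_, _, out_ta = tf.while_loop(
    cond=lambda i, x, out: i < n,
    body=body2,
    loop_vars=[tf.constant(0), x_ta, out_ta])
logits = out_ta.stack()  # the per-sample logits written by body2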
Example 2
def body(i, x, y, grads):
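    """Loop body (for tf.while_loop): forward pass plus manual backprop for sample i.

    Reads sample i and its label from the TensorArrays x and y, runs the same
    forward pass as body2 (with mask-returning pooling), then backpropagates
    the error by hand, writing per-sample weight gradients (grads[0..3]),
    bias gradients (grads[4..7]), and the prediction (grads[8]) into the
    list of TensorArrays grads.
    """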
    # Convolution Layer
    inputx = x.read(index=i)
    conv1 = func.conv2d(inputx, weights['wc1'], biases['bc1'])
    # Pooling (down-sampling)
    p1 = func.extract_patches(conv1, 'SAME', 2, 2)
    f1 = func.majority_frequency(p1)
    # pooling with mask: the returned mask records which positions were pooled,
    # so the error can be routed back through them during backprop
    # (f1 from majority_frequency above is unused by this pooling variant)
    pool1, mask1 = func.pca_pool_with_mask(temp=p1)

    # Convolution Layer
    conv2 = func.conv2d(pool1, weights['wc2'], biases['bc2'])
    # Pooling (down-sampling)
    p2 = func.extract_patches(conv2, 'SAME', 2, 2)
    f2 = func.majority_frequency(p2)
    # pooling with mask (see pool1 above); f2 is likewise unused
    pool2, mask2 = func.pca_pool_with_mask(temp=p2)

    # Fully connected layer
    # Reshape the pooled output to fit the fully connected layer input
    fc = tf.reshape(pool2, [-1, weights['wd1'].get_shape().as_list()[0]])
    fc1 = tf.add(tf.matmul(fc, weights['wd1']), biases['bd1'])
    fc1 = tf.nn.relu(fc1)
    # Apply Dropout
    fc1 = tf.nn.dropout(fc1, dropout)

    # Output, class prediction
    yi = y.read(index=i)
    temp_pred = tf.add(tf.matmul(fc1, weights['out']), biases['out'])
    grads[8] = grads[8].write(index=i, value=temp_pred)

    # ------------------------------ end of graph definition ------------------------------

    # ------------------------ manual gradient computation (backprop) ------------------------

    # gradient at the last fully connected (output) layer
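    # Assuming a cross-entropy loss on softmax outputs: with logits z and
    # one-hot target yi, L = -sum(yi * log(softmax(z))) has the derivative
    # dL/dz = softmax(z) - yi, which is exactly the error term formed below.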
    e = tf.nn.softmax(temp_pred) - yi
    grads[3] = grads[3].write(index=i, value=tf.transpose(fc1) @ e)
    grads[7] = grads[7].write(index=i, value=tf.reduce_sum(e, axis=0))

    # the second-to-last fc layer
    # dropout was applied at this layer, so only the active units receive
    # gradient: tf.greater(fc1, 0) doubles as the ReLU derivative and the
    # dropout mask (dropped units are exactly zero), and the / dropout factor
    # mirrors the 1/keep_prob scaling tf.nn.dropout applies in the forward pass
    e = tf.multiply(e @ tf.transpose(weights['out']), tf.cast(tf.greater(fc1, 0), dtype=tf.float32)) / dropout
    grads[2] = grads[2].write(index=i, value=tf.transpose(fc) @ e)
    grads[6] = grads[6].write(index=i, value=tf.reduce_sum(e, axis=0))

    # the last pooling layer: push the error through wd1 and restore the
    # pooled feature-map shape
    e = e @ tf.transpose(weights['wd1'])
    e = tf.reshape(e, pool2.get_shape().as_list())

    # the last conv layer
    # unpooling: scatter the error from the pooled map back to the input
    # positions recorded in mask2
    e = func.error_pooling2conv(e, mask2)

    # multiply by the derivative of the activation function of the conv layer;
    # for ReLU that derivative is 1 where conv2 > 0 and 0 elsewhere
    e = tf.multiply(e, tf.cast(tf.greater(conv2, 0), dtype=tf.float32))
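    # Assumption (from the name and how its results are used): filter_gradient
    # returns the gradients w.r.t. this layer's conv filters and biases, given
    # the error, the layer input, and the pre-activation.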
    temp1, temp2 = func.filter_gradient(e, pool1, conv2)
    grads[1] = grads[1].write(index=i, value=temp1)
    grads[5] = grads[5].write(index=i, value=temp2)

    # propagate the error from the conv layer back to the previous pooling layer
    e = func.error_conv2pooling(e, weights['wc2'])

    # route the error through the first pooling mask back to the first conv layer
    e = func.error_pooling2conv(e, mask1)
    e = tf.multiply(e, tf.cast(tf.greater(conv1, 0), dtype=tf.float32))
    temp1, temp2 = func.filter_gradient(e, inputx, conv1)
    grads[0] = grads[0].write(index=i, value=temp1)
    grads[4] = grads[4].write(index=i, value=temp2)
    i += 1
    return i, x, y, grads
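
A matching driver for the gradient version stacks the per-sample gradients and applies a plain SGD update. The sketch below is an assumption about the surrounding plumbing; the names `batch`, `labels`, and `learning_rate` are not from the original:

import tensorflow as tf

n = tf.shape(batch)[0]
x_ta = tf.TensorArray(dtype=tf.float32, size=n).unstack(batch)
y_ta = tf.TensorArray(dtype=tf.float32, size=n).unstack(labels)
grads0 = [tf.TensorArray(dtype=tf.float32, size=n) for _ in range(9)]

_, _, _, grads_out = tf.while_loop(
    cond=lambda i, x, y, g: i < n,
    body=body,
    loop_vars=[tf.constant(0), x_ta, y_ta, grads0])

# grads_out[0..3]: weight gradients (wc1, wc2, wd1, out)
# grads_out[4..7]: bias gradients (bc1, bc2, bd1, out)
# grads_out[8]:    the per-sample predictions
dwc1 = tf.reduce_mean(grads_out[0].stack(), axis=0)  # average over the batch
train_wc1 = tf.assign_sub(weights['wc1'], learning_rate * dwc1)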