Example 1
def testConvolution(conv_images,W,b,zca_white,patch_mean):
    """
    Purpose: test whether the convolution is implemented correctly.
    """
    # Step 1: run the convolution implementation being tested
    convolved_features = cnn.cnn_convolve(patch_dim, hidden_size, conv_images,
                        W, b, zca_white, patch_mean)
    for i in range(1000):
        # Randomly pick a feature, an image, and a patch starting position
        feature_num = np.random.randint(0,hidden_size)
        image_num = np.random.randint(0,8)
        image_row = np.random.randint(0,image_dim - patch_dim + 1)
        image_col = np.random.randint(0,image_dim - patch_dim + 1)
        # Extract a patch of the image at that starting position
        patch = conv_images[image_row:image_row + patch_dim,
                            image_col:image_col + patch_dim,
                            :,image_num]
        # Flatten the patch channel by channel into a single column vector
        patch = np.concatenate((patch[:,:,0].flatten(),patch[:,:,1].flatten(),patch[:,:,2].flatten()))
        patch = np.reshape(patch,(patch.size,1))
        # Zero-mean the patch (subtract the patch mean)
        patch = patch - np.tile(patch_mean, (patch.shape[1], 1)).transpose()
        # Apply ZCA whitening
        patch = np.dot(zca_white,patch)
        # Map the patch into feature (hidden-layer) space with the encoder
        W1 = W
        b1 = b.reshape(hidden_size,1)
        features = sparse_autoencoder.sigmoid(np.dot(W1,patch) + b1)
        # Check that the convolved feature matches the encoder activation within a small tolerance
        if abs(features[feature_num,0] - convolved_features[feature_num,image_num,image_row,image_col]) > 1e-9:
            print('Convolved feature does not match activation from autoencoder')
            print('Feature Number      :', feature_num)
            print('Image Number        :', image_num)
            print('Image Row           :', image_row)
            print('Image Column        :', image_col)
            print('Convolved feature   :', convolved_features[feature_num, image_num, image_row, image_col])
            print('Sparse AE feature   :', features[feature_num, 0])
            sys.exit("Convolved feature does not match activation from autoencoder. Exiting...")
    print("Congratulations! Your convolution code passed the test.")
Example 2
"""
  STEP 2a: Implement convolution
  Implement convolution in the function cnnConvolve in cnnConvolve.m
"""
# Note that we have to preprocess the images in the exact same way
# we preprocessed the patches before we can obtain the feature activations.
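
# ---------------------------------------------------------------------------
# A minimal sketch of what cnn_convolve could look like under the usual UFLDL
# approach: fold the mean subtraction and ZCA whitening into the encoder
# weights once, then cross-correlate each color channel with the matching
# slice of the transformed filter.  The name cnn_convolve_sketch and the
# layout assumptions (patches flattened channel by channel, output shaped
# (hidden_size, num_images, conv_dim, conv_dim)) are illustrative, not the
# exercise's reference implementation.
# ---------------------------------------------------------------------------
import numpy as np
from scipy.signal import correlate2d

def cnn_convolve_sketch(patch_dim, hidden_size, images, W, b, zca_white, patch_mean):
    image_dim = images.shape[0]
    num_channels = images.shape[2]
    num_images = images.shape[3]
    conv_dim = image_dim - patch_dim + 1

    # Fold the preprocessing into the weights:
    #   sigmoid(W @ zca_white @ (p - mean) + b) == sigmoid(WT @ p + bT)
    WT = W.dot(zca_white)            # (hidden_size, patch_dim^2 * num_channels)
    bT = b - WT.dot(patch_mean)      # (hidden_size,)

    convolved = np.zeros((hidden_size, num_images, conv_dim, conv_dim))
    for image_num in range(num_images):
        for feature_num in range(hidden_size):
            conv = np.zeros((conv_dim, conv_dim))
            for channel in range(num_channels):
                # Slice of WT that multiplies this channel of a flattened patch
                offset = channel * patch_dim * patch_dim
                filt = WT[feature_num, offset:offset + patch_dim * patch_dim]
                filt = filt.reshape(patch_dim, patch_dim)
                conv += correlate2d(images[:, :, channel, image_num], filt, mode='valid')
            # Sigmoid nonlinearity gives the hidden-layer activation map
            convolved[feature_num, image_num] = 1.0 / (1.0 + np.exp(-(conv + bT[feature_num])))
    return convolved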

# Load training images and labels
train_subset = scipy.io.loadmat('data/stlTrainSubset.mat')
n_train_images = train_subset['numTrainImages'][0, 0]
train_images   = train_subset['trainImages'] # shape (rows, cols, channels, n_train_images)
train_labels   = train_subset['trainLabels'][:, 0]

# Use only the first 8 images for testing
conv_images = train_images[:, :, :, :8]

# NOTE: Implement cnn_convolve first!
convolved_features = cnn_convolve(patch_dim, hidden_size, conv_images, W, b, zca_white, mean_patch)


"""
  STEP 2b: Checking your convolution
  To ensure that you have convolved the features correctly, we have
  provided some code to compare the results of your convolution with
  activations from the sparse autoencoder
"""

# For 1000 random points
for i in range(1000):
    feature_num = np.random.randint(hidden_size)
    image_num = np.random.randint(8)
    image_row = np.random.randint(image_dim - patch_dim + 1)
    image_col = np.random.randint(image_dim - patch_dim + 1)
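
    # (The body of this loop is truncated in this snippet.  A hedged sketch of
    # the remainder, mirroring the testConvolution check in Example 1: extract
    # the patch, subtract the mean, whiten, run the encoder, and compare.)
    patch = conv_images[image_row:image_row + patch_dim,
                        image_col:image_col + patch_dim, :, image_num]
    patch = np.concatenate((patch[:, :, 0].flatten(),
                            patch[:, :, 1].flatten(),
                            patch[:, :, 2].flatten()))
    patch = np.reshape(patch, (patch.size, 1))
    patch = patch - np.reshape(mean_patch, (mean_patch.size, 1))
    patch = zca_white.dot(patch)
    features = 1.0 / (1.0 + np.exp(-(W.dot(patch) + np.reshape(b, (hidden_size, 1)))))
    assert abs(features[feature_num, 0] -
               convolved_features[feature_num, image_num, image_row, image_col]) < 1e-9, \
        'Convolved feature does not match activation from autoencoder'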
Example 3
b = np.random.randn(num_filters)

##======================================================================
## STEP 1: Implement and test convolution
#  In this step, you will implement the convolution and test it on
#  a small part of the data set to ensure that you have implemented
#  this step correctly.

## STEP 1a: Implement convolution
#  Implement convolution in the function cnnConvolve in cnnConvolve.m
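
# ---------------------------------------------------------------------------
# A minimal sketch of the plain (no whitening) convolution used in this
# variant.  It assumes W stores one filter per row, i.e. shape
# (num_filters, filter_dim * filter_dim), and that a sigmoid nonlinearity is
# applied; the exercise's own cnnConvolve may lay W out differently, e.g. as
# (filter_dim, filter_dim, num_filters).  Illustrative only.
# ---------------------------------------------------------------------------
import numpy as np
from scipy.signal import convolve2d

def cnn_convolve_sketch(filter_dim, num_filters, images, W, b):
    image_dim = images.shape[0]
    num_images = images.shape[2]
    conv_dim = image_dim - filter_dim + 1
    convolved = np.zeros((num_filters, num_images, conv_dim, conv_dim))
    for image_num in range(num_images):
        for filter_num in range(num_filters):
            filt = W[filter_num].reshape(filter_dim, filter_dim)
            # convolve2d flips the kernel, so pre-flip it to get the
            # cross-correlation checked patch by patch in STEP 1b below
            filt = np.rot90(filt, 2)
            conv = convolve2d(images[:, :, image_num], filt, mode='valid')
            # Sigmoid gives the activation map for this filter and image
            convolved[filter_num, image_num] = 1.0 / (1.0 + np.exp(-(conv + b[filter_num])))
    return convolved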

## Use only the first 8 images for testing
conv_images = images[:, :, 0:8]

# NOTE: Implement cnnConvolve in cnnConvolve.m first!
convolved_features = cnn.cnn_convolve(filter_dim,num_filters,conv_images,W,b)

## STEP 1b: Checking your convolution
#  To ensure that you have convolved the features correctly, we have
#  provided some code to compare the results of your convolution with
#  activations from the sparse autoencoder
# NOTE (SR-71): the mention of the sparse autoencoder appears to be a holdover from an earlier iteration of this exercise.

for _ in range(1000):
    filter_num = np.random.randint(0,num_filters)
    image_num = np.random.randint(0,8)
    image_row = np.random.randint(0,image_dim - filter_dim + 1)
    image_col = np.random.randint(0,image_dim - filter_dim + 1)

    patch = conv_images[image_row:image_row+filter_dim,image_col:image_col+filter_dim,image_num]
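
    # (Truncated here in this snippet.  A hedged sketch of the rest of the
    # check, reusing the W layout assumed in the sketch above: compute the
    # activation for this single patch directly and compare.)
    filt = W[filter_num].reshape(filter_dim, filter_dim)
    feature = 1.0 / (1.0 + np.exp(-(np.sum(patch * filt) + b[filter_num])))
    assert abs(feature - convolved_features[filter_num, image_num, image_row, image_col]) < 1e-9, \
        'Convolved feature does not match the directly computed activation'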
Example 4
def trainFeatures():
    """
    Purpose: convolve and pool the raw training and test data to obtain hidden-layer feature maps.
    """
    # Load the linear encoder parameters
    encoderFile = 'stl10_features.pickle'
    W,b,zca_white,patch_mean = loadEncoder(encoderFile)
    # Load the training data
    trainFile = 'stlTrainSubset.mat'
    trainParams = ['trainImages','trainLabels','numTrainImages']
    train_images,train_labels,n_train_images = loadDate(trainFile,trainParams)
    # Load the test data
    testFile = 'stlTestSubset.mat'
    testParams = ['testImages','testLabels','numTestImages']
    test_image,test_labels,n_test_images = loadDate(testFile,testParams)
    # Check that the convolution and pooling implementations are correct
    if debug:
        # Use only the first 8 images to test the convolution
        conv_images = train_images[:,:,:,0:8]
        testConvolution(conv_images,W,b,zca_white,patch_mean)
        testPooling()
    
    # Convolve and pool the hidden-layer features over the training data, one block of features at a time
    pooled_features_train = np.zeros(shape = (hidden_size,n_train_images,
                                    int(np.floor((image_dim - patch_dim + 1) / pool_dim)),
                                    int(np.floor((image_dim - patch_dim + 1) / pool_dim))),
                                    dtype = np.float64)
    pooled_features_test = np.zeros(shape = (hidden_size,n_test_images,
                                    int(np.floor((image_dim - patch_dim + 1) / pool_dim)),
                                    int(np.floor((image_dim - patch_dim + 1) / pool_dim))),
                                    dtype = np.float64)
    # Number of features processed per block (step size)
    step_size = 25
    # Make sure the number of features is divisible by the step size
    assert hidden_size % step_size == 0,"step_size should divide hidden_size"
    feature_part_num = int(hidden_size / step_size)
    start_time = time.time()
    for conv_part in range(feature_part_num):
        feature_start = conv_part * step_size
        feature_end = (conv_part + 1) * step_size
        print('Step:',conv_part,'\nfeatures',feature_start,'to',feature_end)
        # Select this block's encoder parameters, used to convolve these features out of the images
        Wt = W[feature_start:feature_end,:]
        bt = b[feature_start:feature_end]
        # Convolve and pool over the training images
        print('Convolving and pooling train_images')
        convolved_features = cnn.cnn_convolve(patch_dim, step_size, train_images,
                                              Wt, bt, zca_white, patch_mean)
        pooled_features = cnn.cnn_pool(pool_dim, convolved_features)
        pooled_features_train[feature_start:feature_end,:,:,:] = pooled_features
        print('Time elapsed:',str(datetime.timedelta(seconds = time.time() - start_time)))
        # Convolve and pool over the test images
        print('Convolving and pooling test_images')
        convolved_features = cnn.cnn_convolve(patch_dim, step_size, test_image,
                                              Wt, bt, zca_white, patch_mean)
        pooled_features = cnn.cnn_pool(pool_dim, convolved_features)
        pooled_features_test[feature_start:feature_end,:,:,:] = pooled_features
        print('Time elapsed:',str(datetime.timedelta(seconds = time.time() - start_time)))
    # Save the pooled features
    print('Saving pooled features...')
    with open(p.join(PATH,'cnn_pooled_features.pickle'),'wb') as f:
        pickle.dump(pooled_features_train,f)
        pickle.dump(pooled_features_test,f)
        pickle.dump(train_labels,f)
        pickle.dump(test_labels,f)
    print('Saved')
    print('Time elapsed:',str(datetime.timedelta(seconds=time.time() - start_time)))
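
# ---------------------------------------------------------------------------
# cnn.cnn_pool is used above but not shown in this snippet.  A minimal sketch
# of mean pooling over disjoint pool_dim x pool_dim regions, assuming the
# (num_features, num_images, conv_dim, conv_dim) layout produced by
# cnn_convolve; the name cnn_pool_sketch is illustrative only.
# ---------------------------------------------------------------------------
import numpy as np

def cnn_pool_sketch(pool_dim, convolved_features):
    num_features, num_images, conv_dim, _ = convolved_features.shape
    pooled_dim = conv_dim // pool_dim
    pooled = np.zeros((num_features, num_images, pooled_dim, pooled_dim))
    for row in range(pooled_dim):
        for col in range(pooled_dim):
            region = convolved_features[:, :,
                                        row * pool_dim:(row + 1) * pool_dim,
                                        col * pool_dim:(col + 1) * pool_dim]
            # Mean-pool each disjoint region over its two spatial axes
            pooled[:, :, row, col] = region.mean(axis=(2, 3))
    return pooled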
Example 5
## STEP 2a: Implement convolution
#  Implement convolution in the function cnnConvolve in cnnConvolve.m

# Note that we have to preprocess the images in the exact same way
# we preprocessed the patches before we can obtain the feature activations.

stl_train = scipy.io.loadmat('data/stlTrainSubset.mat')
train_images = stl_train['trainImages']
train_labels = stl_train['trainLabels']
num_train_images = stl_train['numTrainImages'][0][0]

## Use only the first 8 images for testing
conv_images = train_images[:, :, :, 0:8]

convolved_features = cnn.cnn_convolve(patch_dim, hidden_size, conv_images,
                                      W, b, zca_white, patch_mean)

## STEP 2b: Checking your convolution
#  To ensure that you have convolved the features correctly, we have
#  provided some code to compare the results of your convolution with
#  activations from the sparse autoencoder

# For 1000 random points
for i in range(1000):
    feature_num = np.random.randint(0, hidden_size)
    image_num = np.random.randint(0, 8)
    image_row = np.random.randint(0, image_dim - patch_dim + 1)
    image_col = np.random.randint(0, image_dim - patch_dim + 1)

    patch = conv_images[image_row:image_row + patch_dim, image_col:image_col + patch_dim, :, image_num]