import numpy as np
import matplotlib.pyplot as plt


def plot_histogram(image):
    """Plot the intensity histogram of an image together with its (scaled) CDF."""
    hist, bins = np.histogram(image.flatten(), 256, [0, 256])
    cdf = hist.cumsum()
    # Scale the CDF so it fits on the same axis as the histogram counts
    cdf_normalized = cdf * hist.max() / cdf.max()
    plt.plot(cdf_normalized, color='b')
    plt.hist(image.flatten(), 256, [0, 256], color='r')
    plt.xlim([0, 256])
    plt.legend(('cdf', 'histogram'), loc='upper left')
    plt.show()
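# Usage sketch (added for illustration, not from the original snippet; assumes
# OpenCV is available and "sample.png" is a placeholder grayscale image path).
import cv2

sample = cv2.imread("sample.png", cv2.IMREAD_GRAYSCALE)
if sample is not None:
    plot_histogram(sample)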
import os

import numpy as np
from PIL import Image


def load_data(path):
    """Load product images from path/<product>/<image> into flat feature vectors.

    :param path: directory with one sub-folder of images per product (class)
    :return: (X, y, labels) where X holds the flattened thumbnails, y the class
             indices and labels the product (folder) names
    """
    labels = []
    X = []
    y = []
    products = os.listdir(path)
    for i, product in enumerate(products):
        print("Product {} [{}/{}] loaded...".format(product, i + 1, len(products)))
        labels.append(product)
        images = os.listdir(os.path.join(path, product))
        for filename in images:
            img = Image.open(os.path.join(path, product, filename))
            # ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter
            img.thumbnail((50, 50), Image.LANCZOS)
            image = np.array(img).flatten()
            X.append(image)
            y.append(i)
    X = np.array(X)
    y = np.array(y)
    return X, y, labels
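# Usage sketch (added for illustration; "data/products" is a hypothetical
# directory with one sub-folder of images per product, as load_data expects).
from sklearn.model_selection import train_test_split

X, y, labels = load_data("data/products")
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
print("train:", X_train.shape, "test:", X_test.shape, "classes:", labels)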
def prepare_image(img):
    from keras.preprocessing.image import img_to_array

    image = img_to_array(img)
    # Invert the pixel values (MNIST-style: white digit on black background);
    # note this inverts rather than scales -- divide by 255 to normalise instead
    image = 255 - image
    # Flatten into a 1 x 784 row vector
    image = image.flatten().reshape(-1, 28 * 28)
    return image
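# Usage sketch (added for illustration; "digit.png" is a hypothetical file, and
# the 28x28 grayscale resize matches the MNIST-style reshape in prepare_image).
from PIL import Image

digit = Image.open("digit.png").convert("L").resize((28, 28))
features = prepare_image(digit)   # shape (1, 784), inverted pixel values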
def get_next_batch(batch_size=64):
    """Build one batch of flattened captcha images and their one-hot label vectors."""
    batch_x = np.zeros([batch_size, IMAGE_HEIGHT * IMAGE_WIDTH])
    batch_y = np.zeros([batch_size, MAX_CAPTCHA * CHAR_SET_LEN])
    for i in range(batch_size):
        name, image = get_name_and_image()
        # Multiplying by 1 casts a boolean image to 0/1 values before storing it
        batch_x[i, :] = 1 * image.flatten()
        batch_y[i, :] = name2vec(name)
    return batch_x, batch_y
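# Usage sketch (added for illustration; IMAGE_HEIGHT, IMAGE_WIDTH, MAX_CAPTCHA,
# CHAR_SET_LEN, get_name_and_image and name2vec are defined elsewhere in the
# original captcha code, so this only shows the expected call and shapes):
#
#     batch_x, batch_y = get_next_batch(64)
#     # batch_x: (64, IMAGE_HEIGHT * IMAGE_WIDTH)
#     # batch_y: (64, MAX_CAPTCHA * CHAR_SET_LEN)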
from keras.preprocessing.image import img_to_array


def prepare_image(image):
    if image.mode != "RGB":
        image = image.convert("RGB")
    image_size = (28, 28)
    image = image.resize(image_size)
    # image.save("fromform.png")
    # Keep a single channel, scale to [0, 1] and invert (white digit on black)
    image = img_to_array(image)[:, :, 0]
    image /= 255
    image = 1 - image
    return image.flatten().reshape(-1, 28 * 28)
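# Usage sketch (added for illustration; "upload.png" is a placeholder path and
# "model" would be a Keras classifier with a 784-dimensional input, not defined here).
from PIL import Image

form_img = Image.open("upload.png")
form_features = prepare_image(form_img)     # shape (1, 784), values in [0, 1]
# prediction = model.predict(form_features)
# digit = prediction.argmax(axis=1)[0]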
import os

import cv2
import pandas as pd
import matplotlib.pyplot as pl
from keras.preprocessing.image import img_to_array
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA


def FrameClustering(FramePath):
    """Cluster video frames (.jpg files under FramePath) into 12 pose clusters with K-means."""
    # Collect all .jpg frames below FramePath
    trainfileList = []
    for root, dirs, files in os.walk(FramePath):
        for file in files:
            if file.endswith(".jpg"):
                trainfileList.append(os.path.join(root, file))

    # Load each frame as a flattened 28x28 vector scaled to [0, 1]
    train_data = []
    for imagePath in trainfileList:
        image = cv2.imread(imagePath)
        image = cv2.resize(image, (28, 28))
        image = img_to_array(image).flatten()
        image /= 255
        train_data.append(image)

    dfFrame = pd.DataFrame.from_records(train_data)

    # K-means to label poses
    kmeans = KMeans(n_clusters=12)
    kmeansoutput = kmeans.fit(dfFrame)
    clusLabel = kmeans.predict(dfFrame)
    cluCenter = kmeans.cluster_centers_
    dfFrame['Cluster'] = clusLabel

    # PCA for visualisation -- fit on the pixel columns only, not the cluster label
    pca = PCA(n_components=2)
    principalComponents = pca.fit_transform(dfFrame.drop(columns=['Cluster']))
    principalDf = pd.DataFrame(data=principalComponents, columns=['PCA1', 'PCA2'])
    pl.scatter(principalDf['PCA1'], principalDf['PCA2'], c=kmeansoutput.labels_, s=100)

    return dfFrame
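# Usage sketch (added for illustration; "frames/" is a hypothetical directory of
# extracted .jpg video frames). The returned DataFrame has one row per frame
# plus a 'Cluster' column holding the K-means label.
clustered = FrameClustering("frames/")
print(clustered['Cluster'].value_counts())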