import pickle

import torch
from PIL import Image
from torchvision.transforms import Compose  # Compose assumed to come from torchvision

import transforms_nyu
from den import DEN  # project-local modules; import paths assumed
from fdc import FDC


class FDCPredictor:

    def __init__(self, den_path, fdc_path):
        self.den_model_pth = den_path
        self.den = DEN()
        self.den.load_state_dict(torch.load(self.den_model_pth), strict=False)
        self.fdc = FDC(self.den)
        self.fdc.load_weights(fdc_path)
        self.crop_ratios = [0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95, 1]
        self.transform = Compose([
            transforms_nyu.Normalize(),
            transforms_nyu.FDCPreprocess(self.crop_ratios)
        ])

    def prediction(self, img_path):
        # The pickled file holds a raw RGB array; appending "ng" turns a
        # path ending in ".p" into a ".png" copy saved for inspection.
        with open(img_path, 'rb') as f_img:
            image = pickle.load(f_img)
        img = Image.fromarray(image, 'RGB')
        img.save(img_path + "ng")
        print(img_path + "ng saved!")

        # The transform only reads 'image'; 'depth' is a placeholder here.
        nyu_dict = {'image': image, 'depth': image}
        cropped = self.transform(nyu_dict)['stacked_images']
        cropped = cropped.unsqueeze(0)  # add a batch dimension

        bsize, crops, c, h, w = cropped.size()
        print(bsize, crops, c, h, w)

        return self.fdc(cropped)[0]

    def save(self, img, des_path):
        # Depth maps are written as TIFF to preserve precision.
        img = Image.fromarray(img.numpy())
        out_path = "/root/DEN/images/depth_img/" + des_path.split(".")[-2] + ".tiff"
        img.save(out_path)
        print(out_path + " saved!")
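# ---------------------------------------------------------------------------
# Minimal usage sketch. The checkpoint and image paths below are hypothetical,
# and the pickle is assumed to hold an RGB uint8 array, which is what
# prediction() expects.
# ---------------------------------------------------------------------------

if __name__ == '__main__':
    predictor = FDCPredictor("/root/DEN/models/den.pth",   # hypothetical DEN checkpoint
                             "/root/DEN/models/fdc.pth")   # hypothetical FDC weights

    depth = predictor.prediction("/root/DEN/images/rgb_img/sample.p")
    predictor.save(depth.detach().cpu(), "sample.p")  # writes .../depth_img/sample.tiff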
import numpy as np
from copy import deepcopy


def compute_linkage_matrix(model):
    """Wrapper for constructing the final linkage matrix via the
    build_dendrogram function.

    Parameters
    ----------
    model : FDC class object
        Contains the coarse-graining information determined by fitting
        and coarse graining the data.

    Returns
    -------
    Z : linkage matrix, shape (n_coarse_grain, 4)
        From scipy's definition: "An (n-1) by 4 matrix Z is returned.
        At the i-th iteration, clusters with indices Z[i, 0] and Z[i, 1]
        are combined to form cluster n+i. A cluster with an index less
        than n corresponds to one of the n original observations. The
        distance between clusters Z[i, 0] and Z[i, 1] is given by
        Z[i, 2]. The fourth value Z[i, 3] represents the number of
        original observations in the newly formed cluster."
    """
    from fdc import FDC

    assert isinstance(model, FDC), 'wrong type !'

    hierarchy = deepcopy(model.hierarchy)
    noise_range = deepcopy(model.noise_range)

    # ---- PADDING trick, for plotting purposes ----
    # Append an artificial terminal merge so the dendrogram has a single root.
    n_elem = len(hierarchy[-1]['cluster_labels'])
    terminal_cluster = hierarchy[-1]['idx_centers'][0]
    hierarchy.append({'idx_centers': [terminal_cluster],
                      'cluster_labels': np.zeros(n_elem, dtype=int)})
    noise_range.append(1.5 * model.max_noise)
    # ----------------------------------------------

    # build_dendrogram is assumed to be defined alongside this wrapper
    Z = build_dendrogram(hierarchy, noise_range)

    return Z
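# ---------------------------------------------------------------------------
# Minimal usage sketch: Z follows scipy's linkage-matrix convention, so it can
# be passed directly to scipy.cluster.hierarchy.dendrogram. This assumes the
# FDC model has been fitted (and coarse-grained) so that model.hierarchy and
# model.noise_range are populated; the toy data below is an assumption.
# ---------------------------------------------------------------------------

if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from scipy.cluster.hierarchy import dendrogram
    from sklearn.datasets import make_blobs
    from fdc import FDC

    X, _ = make_blobs(1000, 2, centers=5, random_state=0)

    model = FDC(noise_threshold=0.05, nh_size=40)
    model.fit(X)  # fitting must populate model.hierarchy / model.noise_range

    Z = compute_linkage_matrix(model)
    dendrogram(Z)
    plt.show()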
'''
Created on Feb 1, 2017

@author: Alexandre Day

Purpose:
    Perform density clustering on a Gaussian mixture
'''

from fdc import FDC
from fdc import plotting
from sklearn.datasets import make_blobs
import numpy as np

n_true_center = 15

np.random.seed(0)

print("------> Example with %i true cluster centers <-------" % n_true_center)

X, y = make_blobs(10000, 2, n_true_center)  # generating a random Gaussian mixture

model = FDC(noise_threshold=0.05, nh_size=40)  # specifying density clustering parameters
model.fit(X)  # performing the clustering

plotting.set_nice_font()  # nicer plotting font!
plotting.summary_model(model, ytrue=y, show=True, savefile="result.png")
from fdc import FDC
from fdc import plotting
from sklearn.datasets import make_blobs
from sklearn.preprocessing import StandardScaler
import numpy as np
from matplotlib import pyplot as plt

n_true_center = 15

np.random.seed(0)

print("------> Example with %i true cluster centers <-------" % n_true_center)

X, y = make_blobs(10000, 2, n_true_center)  # generating a random Gaussian mixture
X = StandardScaler().fit_transform(X)  # always normalize your data :)

# set eta=0.0 if you have an excellent density profile fit (lots of data, say)
model = FDC(eta=0.01)  # , atol=0.0001, rtol=0.0001)
model.fit(X)  # performing the clustering

# a straight line in the plane (note this rebinds y), and the perpendicular
# line passing through the point X_2[65]
x = np.linspace(-0.5, 0.6, 200)
y = 1.5 * x + 0.15
X_2 = np.vstack([x, y]).T
xy2 = X_2[65]
b = xy2[0] / 1.5 + xy2[1]
y2 = -x / 1.5 + b

# rho = np.exp(model.density_model.evaluate_density(X_2))
# plt.plot(rho)
# plt.show()

plt.scatter(x, y, c="green", zorder=2)
""" Setting FDC parameters (note these are the same across all datasets) """ noise_threshold = 1.0 datasets = [noisy_circles, noisy_moons, varied, aniso, blobs, no_structure] for i_dataset, dataset in enumerate(datasets): X, y = dataset # normalize dataset for easier parameter selection X = StandardScaler().fit_transform(X) # create clustering estimators model = FDC(noise_threshold=noise_threshold) s=time.time() model.fit(X) dt=time.time()-s n_center=len(model.idx_centers) plt.subplot(3,2,plot_num) plt.scatter(X[:, 0], X[:, 1], color=colors[model.cluster_label].tolist(), s=10,zorder=1) plt.text(.99, .07, ('%.2fs' % (dt)).lstrip('0'), transform=plt.gca().transAxes, size=15, horizontalalignment='right',zorder=2,
#################################
#################################
#################################

"""Setting FDC parameters (note these are the same across all datasets)"""

datasets = [noisy_circles, noisy_moons, varied, aniso, blobs, no_structure]

for i_dataset, dataset in enumerate(datasets):
    X, y = dataset

    # normalize dataset for easier parameter selection
    X = StandardScaler().fit_transform(X)

    # create clustering estimator
    # atol and rtol set the precision of the density map; higher values
    # improve performance but reduce accuracy
    model = FDC(eta=0.4)

    s = time.time()
    model.fit(X)
    dt = time.time() - s

    n_center = len(model.idx_centers)

    plt.subplot(3, 2, plot_num)
    plt.scatter(X[:, 0], X[:, 1],
                color=colors[model.cluster_label].tolist(), s=10, zorder=1)
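# ---------------------------------------------------------------------------
# Both benchmark loops above rely on names defined elsewhere in the script
# (the toy datasets, colors, plot_num, time, plt, StandardScaler, FDC). A
# minimal setup sketch, modeled on scikit-learn's cluster-comparison example,
# is given below; every concrete value here is an assumption, not the
# repository's actual configuration.
# ---------------------------------------------------------------------------

import time

import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets as skdatasets
from sklearn.preprocessing import StandardScaler

from fdc import FDC

np.random.seed(0)
n_samples = 1500

noisy_circles = skdatasets.make_circles(n_samples=n_samples, factor=0.5, noise=0.05)
noisy_moons = skdatasets.make_moons(n_samples=n_samples, noise=0.05)
blobs = skdatasets.make_blobs(n_samples=n_samples, random_state=8)
no_structure = (np.random.rand(n_samples, 2), None)

# anisotropically distributed blobs
X_aniso, y_aniso = skdatasets.make_blobs(n_samples=n_samples, random_state=170)
aniso = (np.dot(X_aniso, [[0.6, -0.6], [-0.4, 0.8]]), y_aniso)

# blobs with varied variances
varied = skdatasets.make_blobs(n_samples=n_samples,
                               cluster_std=[1.0, 2.5, 0.5],
                               random_state=170)

# one color per possible cluster label (extend if more clusters are found)
colors = np.array(['#377eb8', '#ff7f00', '#4daf4a', '#f781bf', '#a65628',
                   '#984ea3', '#999999', '#e41a1c', '#dede00'])

plot_num = 1  # increment (plot_num += 1) at the end of each loop body,
              # and finish the script with plt.show()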