Example #1
 def forward(self, img_path):
     # Preprocess a single image and load it into the network's input blob
     im = process_image(img_path)
     self.net.blobs['data'].data[...] = im
     # Run the forward pass; 'View_1' holds the feature for this image
     out = self.net.forward()
     feature = out['View_1'][0]
     # Apply the stored batch-norm statistics, then normalize the embedding
     feature = bn(feature, ReidModel.bn_mean, ReidModel.bn_var, ReidModel.bn_weight)
     feature = normalize(feature)
     return feature
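
The bn and normalize helpers used above are not shown in the snippet. A minimal sketch of what they might look like, assuming bn applies stored batch-norm statistics at inference time (the signature is taken from the call above) and normalize is L2 normalization:

import numpy as np

def bn(feature, mean, var, weight, eps=1e-5):
    # Inference-time batch norm: standardize with the stored running mean and
    # variance, then scale by the learned weight (no bias term in this sketch).
    return weight * (feature - mean) / np.sqrt(var + eps)

def normalize(feature):
    # L2-normalize so features can be compared by cosine or Euclidean distance,
    # a common convention for re-identification embeddings.
    return feature / np.linalg.norm(feature)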
Example #2
 def forward(self, img_paths):
     # Load and preprocess a batch of images
     ims = get_batch_images(img_paths)
     # Resize the input blob to the batch: (N, channels=3, height=160, width=80)
     self.net.blobs['data'].reshape(len(ims), 3, 160, 80)
     self.net.blobs['data'].data[...] = ims
     # Forward pass; 'View_1' now holds one feature vector per image
     out = self.net.forward()
     feature = out['View_1']
     # Apply the stored batch-norm statistics, then normalize each feature
     feature = bn(feature, ReidModel.bn_mean, ReidModel.bn_var, ReidModel.bn_weight)
     feature = [normalize(a) for a in feature]
     return feature
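
get_batch_images is also external to the snippet. One possible implementation, assuming OpenCV is available and that images are resized to the 80x160 (width x height) input implied by the reshape call above; any mean subtraction performed by process_image in Example #1 is omitted here:

import numpy as np
import cv2

def get_batch_images(img_paths):
    # Hypothetical helper: load each image, resize to the network input size,
    # convert HWC -> CHW, and stack into an (N, 3, 160, 80) batch.
    ims = []
    for path in img_paths:
        im = cv2.imread(path).astype(np.float32)
        im = cv2.resize(im, (80, 160))      # cv2.resize takes (width, height)
        ims.append(im.transpose(2, 0, 1))   # HWC -> CHW
    return np.stack(ims)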
Example #3
import numpy
import pandas
import seaborn
from matplotlib import pyplot

from ny_felony_analysis.clusterization._utils import plot_regression
from utils.common import factorize, correlate_sort, drop_infrequent, normalize

seaborn.set(color_codes=True)

# Load the felony data set and drop the complaint "to" date/time columns,
# rows with missing values, and infrequent entries
data = pandas.read_csv('../../NYPD_Felony_Data.csv')
data = data.drop(columns=['CMPLNT_TO_DT', 'CMPLNT_TO_TM'])
data = data.dropna()
data = drop_infrequent(data)
# Encode categorical columns as integer codes, then normalize
data_factorized = data.apply(factorize)
data_norm = normalize(data_factorized)

print("Mode and mean of violation code grouped by suspect race:")
for name, cluster in data_norm.groupby('SUSP_RACE'):
    print("CLUSTER_" + str(name))
    print("mean: " + str(cluster['PD_CD'].mean()))
    print("mode: " + str(cluster['PD_CD'].mode()))
    print("\n")

print("Mode and mean of violation code grouped by victim race:")
for name, cluster in data_norm.groupby('VIC_RACE'):
    print("CLUSTER_" + str(name))
    print("mean: " + str(cluster['PD_CD'].mean()))
    print("mode: " + str(cluster['PD_CD'].mode()))
    print("\n")
Example #4
import pandas
import seaborn

from ny_felony_analysis.clusterization.clusterers.agglomerative import Agglomerative
from ny_felony_analysis.clusterization.clusterers.density_based import DensityBased
from ny_felony_analysis.clusterization.clusterers.expectation_maximization import ExpectationMaximization
from ny_felony_analysis.clusterization.clusterers.k_means import Kmeans
from ny_felony_analysis.clusterization.clusterers.optic import Optic
from utils.common import factorize, pca, normalize, correlate_sort, get_linear_regression_values, LABEL_UNIQUES, \
    drop_infrequent

seaborn.set(color_codes=True)

# Load the felony data, drop the complaint "to" date/time columns, rows with
# missing values, and infrequent entries, then factorize and normalize
data = pandas.read_csv('../NYPD_Felony_Data.csv')
data = data.drop(columns=['CMPLNT_TO_DT', 'CMPLNT_TO_TM'])
data = data.dropna()
data = drop_infrequent(data)
data = data.apply(factorize)
data_norm = normalize(data)
# Project the normalized data onto two principal components
data_pca = pca(data_norm, n_components=2)

susp_info = pca(data_norm[['SUSP_SEX', 'SUSP_RACE', 'SUSP_AGE_GROUP']],
                n_components=1)
misc_info = pca(data_norm[['PREM_TYP_DESC', 'VIC_RACE', 'PD_CD']],
                n_components=1)

data_chosen_pca = pandas.DataFrame()
data_chosen_pca['SUSP_INFO'] = susp_info[0]
data_chosen_pca['VIC_INFO'] = data_norm['VIC_RACE']

plot_clustermap(
    data_norm.drop(columns=['Longitude', 'Latitude']).sample(n=13_000,
                                                             random_state=666),
    method='ward',