# Example #1 (score: 0)
from features_cooccurance_one_user import UserFeaturesCooccurences as UFC
from pprint import *
import sys
sys.path.insert(0, "/home/dehajjik/workspace/src/utils")
from numpy_utils import Numpy as n


# Compute the feature co-occurrence statistics for one user's merged log
# and dump each matrix to stdout under its heading.
log_path = '/speech/dbwork/mul/reco1/AppPrediction/SonyLogging/Logs/from_TKY/pulled_from_TKY/mixs_launcher_logs/json/352136065015162/all/all_in_one_log.json'
ufc = UFC(log_path)
print("features")
print(ufc.features)
# (heading, matrix) pairs, printed in the same order as the original dump.
for heading, matrix in (("\n\nco occurences rates", ufc.cooccurences_rates),
                        ("\n\nco occurences numbers", ufc.cooccurences_number),
                        ("\n\noccurences rates", ufc.occurences_number)):
    print(heading)
    print(n.str(matrix))
# Example #2 (score: 0)
# Aggregate feature (co-)occurrence counts over every user's log and build
# a human-readable report string.
feature_count = len(UserFeaturesCooccurences.features)
# Both running totals must exist before the loop: the co-occurrence total
# was previously used at its first '+=' without ever being initialized,
# which raised a NameError on the first user.
total_features_cooccurences_number = np.zeros((feature_count, feature_count))
total_features_occurences_number = np.zeros((feature_count, feature_count))
result = "\n\n\nThe features ids follows the order below : \n" + pp.pformat(
    UserFeaturesCooccurences.features)
user_number = 1
for json_file in users_json_files_array:
    if os.path.isfile(json_file):
        ufc = UserFeaturesCooccurences(json_file)
        # Plain '+=' instead of explicit __add__ dunder calls.
        total_features_cooccurences_number += ufc.cooccurences_number
        total_features_occurences_number += ufc.occurences_number
        result = (result + "\n \n \n user " + str(user_number) + "\n" +
                  "co-occurrences number matrix:\n" +
                  n.str(ufc.cooccurences_number) +
                  "\n\nco-occurrences rate matrix:\n" +
                  n.str(ufc.cooccurences_rates))
    print("user " + str(user_number) + " extracted")
    user_number += 1

#compute the overall rate and add it to the result
# 0/0 cells (features that never occurred) come out as NaN; map them to 0.
total_features_cooccurences_rate = np.nan_to_num(
    (total_features_cooccurences_number * 100) /
    total_features_occurences_number)
result = (result + "\n \n \n overall users\n" +
          "co-occurrences number matrix:\n" +
          n.str(total_features_cooccurences_number) +
          "\n\nco-occurrences rate matrix:\n" +
          n.str(total_features_cooccurences_rate))
def transform_to_matrix_one_user(user_id):
	"""Build and persist the (features x time) observation matrix for one user.

	Loads the user's categorized JSON data, orders features alphabetically,
	determines the overall observation window, turns each feature into a
	per-timeslot sub-matrix via the Matrix*FeatureTransformer classes,
	concatenates those sub-matrices, drops timeslots with no records at
	all, and finally saves the matrix together with its labels vector,
	time vector and per-dimension importance scores via MDataExtractor.

	user_id -- identifier understood by DataExtractor / MDataExtractor.
	"""
		
	print "loading data for user "+str(user_id)
	categorized_data = DataExtractor.load_json_data(user_id)
	data = DataExtractor.complete_data(categorized_data)
	metadata = DataExtractor.complete_metadata(categorized_data)
	
	#order the data by the alphabetic name of the features
	print "ordering data "+str(user_id)
	data = collections.OrderedDict(sorted(data.items()))
	
	#get the first date and the last date
	print "getting first date and last date "
	# Seed with extreme values so the first comparison in the loop always
	# replaces them (start_date shrinks, end_date grows).
	end_date = date_min
	start_date = datetime.now()
	for feature, feature_data in data.iteritems():
		# Sort each feature's realizations by key so the first/last keys
		# bound that feature's observation period.
		feature_data = collections.OrderedDict(sorted(feature_data.items()))
		begin_date = DataExtractor.start_date_of_realization(feature_data.keys()[0])
		if begin_date < start_date:
			start_date = begin_date
			
		last_date = DataExtractor.start_date_of_realization(feature_data.keys()[len(feature_data.keys())-1])
		if last_date > end_date:
			end_date = last_date
		
		data[feature] = feature_data
	
	#construct the data matrix
	#I- construct the matrices of all the features
	print "constructing the matrixes "
	rows = 0
	
	# One transformer per feature: location and bluetooth features use
	# specialized transformers, everything else the generic one.
	transformers = {} 
	for feature, feature_date in data.iteritems():
		if feature == "location":
			transformers[feature] = MatrixLocationFeatureTransformer(feature, data[feature], metadata[feature], start_date, end_date, coocurring_precision)
		elif feature == "bluetoothSeen" or feature == "bluetoothPaired":
			transformers[feature] = MatrixBleutoothFeatureTransformer(feature, data[feature], metadata[feature], start_date, end_date, coocurring_precision)
		else :
			transformers[feature] = MatrixFeatureTransformer(feature, data[feature], metadata[feature], start_date, end_date, coocurring_precision)
			
		# Some features get a flat importance score of 1 (configured set).
		if feature in features_importance_score_one:
			transformers[feature].let_importance_scores_to_1 = True
		
		transformers[feature].transform()
		rows += transformers[feature].nbdimentions
	
	#construct the time feature
	transformers[MatrixTimeFeatureTransformer.feature_name] = MatrixTimeFeatureTransformer(start_date, end_date, coocurring_precision)
	transformers[MatrixTimeFeatureTransformer.feature_name].transform()
	rows +=  transformers[MatrixTimeFeatureTransformer.feature_name].nbdimentions
	columns = transformers[MatrixTimeFeatureTransformer.feature_name].nbtimeslots
	
	#II-concatenate all the matrices of each feature into one big matrix (do the same for the labels vector)
	print "regrouping the matrixes "
	# Built as (timeslots x dimensions) here; transposed after filtering.
	data_matrix = np.zeros((columns, rows))
	labels_vector = [""]* rows
	dimentions_importance_score = np.zeros(rows)
	# Alphabetical order of transformers fixes the column layout.
	transformers = collections.OrderedDict(sorted(transformers.items()))
	
	# Copy each feature's sub-matrix into its column slice [begin, end).
	begin_row_idex = 0
	end_row_index = 0
	for feature, feature_transformer in transformers.iteritems():
		end_row_index = begin_row_idex + feature_transformer.nbdimentions
		data_matrix[:, begin_row_idex:end_row_index] =  feature_transformer.matrix_data
		labels_vector[begin_row_idex:end_row_index] = feature_transformer.labels_vector
		dimentions_importance_score[begin_row_idex:end_row_index]=feature_transformer.realization_importance_score
		begin_row_idex = end_row_index
	
	'''
	The matrix contains a lot of feature vectors that contains 0 in all the features except the time features.
	Those vectors corresponds to the times where any record has been done.
	We want to eliminate those timestamps and their corresponding times
	'''
	# All transformers share the same time axis, so any transformer's
	# time_vector will do — presumably; verify against the transformer
	# classes.
	time_vector = transformers.values()[0].time_vector
	[data_matrix, time_vector] = eliminate_empty_records(data_matrix, time_vector)
	data_matrix = np.transpose(data_matrix)
	
	print "the labels are : "
	print JsonUtils.dict_as_json_str(labels_vector)
	
	
	print "first date of observation "+str(start_date)
	# NOTE(review): label says "first" but this prints end_date — the
	# message was probably meant to read "last date of observation".
	print "first date of observation "+str(end_date)
	print "dimension of the labels (features) vector : "+str(len(labels_vector))
	print "dimension of the time vector : "+str(len(time_vector))
	print "dimension of the resulted matrix (features, time) "+str(data_matrix.shape)
	print "the number of non zeros values is : "+str(np.count_nonzero(data_matrix))+"/"+str(np.size(data_matrix))
	# Negative-value count: mask everything >= 0 and count what remains.
	print "the number of negative values in the matrix is : "+str(np.size(ma.masked_array(data_matrix, mask=(data_matrix>=0)).compressed()))
	print "the data matrix printed : "
	print Numpy.str(data_matrix)
	
	#write the matrix data
	MDataExtractor.save_matrix(user_id, data_matrix)
	
	#write the labels vector, then the time vector and the importance scores
	MDataExtractor.save_labels_vector(user_id, labels_vector)
	MDataExtractor.save_time_vector(user_id, time_vector)
	MDataExtractor.save_importance_scores(user_id, dimentions_importance_score)
#array containing the path to the validated json data for each user
users_json_files_array = [json_data_dir+x+"/all/all_in_one_validated_log.json" for x in os.listdir(json_data_dir)]

pp.pprint(users_json_files_array)
# Running totals across all users, shaped (features x features).
feature_count = len(UserFeaturesCooccurences.features)
total_features_cooccurences_number = np.zeros((feature_count, feature_count))
total_features_occurences_number = np.zeros((feature_count, feature_count))
result = "\n\n\nThe features ids follows the order below : \n"+pp.pformat(UserFeaturesCooccurences.features)
user_number = 1
for json_file in users_json_files_array:
	if os.path.isfile(json_file):
		ufc = UserFeaturesCooccurences(json_file)
		# Plain '+=' instead of explicit __add__ dunder calls.
		total_features_cooccurences_number += ufc.cooccurences_number
		total_features_occurences_number += ufc.occurences_number
		result = (result+"\n \n \n user "+str(user_number)+"\n"+ "co-occurrences number matrix:\n"+
		n.str(ufc.cooccurences_number)+"\n\nco-occurrences rate matrix:\n"+n.str(ufc.cooccurences_rates))
	print("user "+str(user_number)+" extracted")
	user_number+=1

#compute the overall rate and add it to the result
# 0/0 cells (features that never occurred) come out as NaN; map them to 0.
# '/' replaces the explicit __div__ call (same true division on these
# float arrays).
total_features_cooccurences_rate = np.nan_to_num((total_features_cooccurences_number*100) / total_features_occurences_number)
result = (result + "\n \n \n overall users\n"+ "co-occurrences number matrix:\n"+
		n.str(total_features_cooccurences_number)+"\n\nco-occurrences rate matrix:\n"+
		n.str(total_features_cooccurences_rate))

#write an explanation about the results
comment = ("This file represents the co-occurrences of the different features. \n"+  
"For each user, 2 matrices are shown:\n"+ 
	"- cooccurences_number: each cell (i,j) represents the number of co-occurrences \n"+ 
	"that feature i(row) and j(column) has. This matrix is thus diagonal.\n"+ 
	"- cooccurences_rates: each cell (i,j) represents the percentage over the number of appearence\n"+