def Initialization(filename):

	# Load the JSON configuration that points at the initial model's files.
	FILE = open(filename, 'rU')
	rawdata = FILE.read()
	FILE.close()
	decoded = json.loads(rawdata)
	print "Initial_Instance_filename = ", decoded['Initial_Instance_filename']
	print "Initial_Clustering_filename = ", decoded['Initial_Clustering_filename']
	print "metric_filename = ", decoded['metric_filename']

	# Read the instances of the initial model.
	Instance = read_dataset(decoded['Initial_Instance_filename'], '\t')
	Instance = Convert2FloatArray(Instance)

	# Read the clustering result for the initial instances, dropping trailing empty lines.
	FILE = open(decoded['Initial_Clustering_filename'], 'rU')
	Clustering = FILE.read()
	Clustering = Clustering.split('\n')
	while Clustering and len(Clustering[-1]) == 0:
		Clustering = Clustering[:-1]
	FILE.close()
	# Cluster labels are used as integers downstream (max(), relabeling).
	Clustering = [int(label) for label in Clustering]

	# Read the clustering metrics for the instances from an external file.
	Clustering_Metric = read_dataset(decoded['metric_filename'], '\t')
	Clustering_Metric = Convert2FloatArray(Clustering_Metric)

	return decoded["log_filename"], decoded["WL_filename"], decoded["Semantic_filename"], Instance, Clustering, Clustering_Metric
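# read_dataset and Convert2FloatArray are defined elsewhere in the project; the
# sketch below is a hypothetical stand-in that only illustrates the behavior the
# calls above rely on (a delimited file read into rows, then cast to floats):
def read_dataset(filename, separator):
	# Read a delimited text file into a list of rows (lists of strings).
	FILE = open(filename, 'rU')
	lines = FILE.read().split('\n')
	FILE.close()
	while lines and len(lines[-1]) == 0:
		lines = lines[:-1]
	return [line.split(separator) for line in lines]

def Convert2FloatArray(dataset):
	# Cast every field of a row-major string dataset to float.
	return [[float(field) for field in row] for row in dataset]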
def AddModelInstance(instance_filename, clustering_filename, metric_filename, Instance, Clustering, Clustering_Metric):

	# Read the additional instances and convert them to floats, as in Initialization.
	AdditionInstance = read_dataset(instance_filename, '\t')
	AdditionInstance = Convert2FloatArray(AdditionInstance)

	FILE = open(clustering_filename, 'rU')
	AdditionClustering = FILE.read()
	AdditionClustering = AdditionClustering.split('\n')
	while AdditionClustering and len(AdditionClustering[-1]) == 0:
		AdditionClustering = AdditionClustering[:-1]
	FILE.close()
	AdditionClustering = [int(label) for label in AdditionClustering]

	Instance = Instance + AdditionInstance
	if min(AdditionClustering) == 0:
		# If AdditionClustering starts from 0, relabel it so its labels continue
		# after the existing ones (offset by the current maximum plus one).
		bench = max(Clustering) + 1
		for i in range(len(AdditionClustering)):
			AdditionClustering[i] += bench

	Clustering = Clustering + AdditionClustering

	# Read the additional metrics the same way the initial metrics are read.
	AdditionMetric = read_dataset(metric_filename, '\t')
	AdditionMetric = Convert2FloatArray(AdditionMetric)
	Clustering_Metric = Clustering_Metric + AdditionMetric

	return Instance, Clustering, Clustering_Metric
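# Usage sketch (filenames are hypothetical): load the initial model, then append
# a second batch of instances, labels and metrics to it.
# log_f, wl_f, sem_f, Instance, Clustering, Clustering_Metric = Initialization('config.json')
# Instance, Clustering, Clustering_Metric = AddModelInstance(
# 	'batch2_instances.txt', 'batch2_clustering.txt', 'batch2_metric.txt',
# 	Instance, Clustering, Clustering_Metric)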
def InputData(filename):
	FILE = open(filename, 'rU')
	rawdata = FILE.read()
	decoded = json.loads(rawdata)
	FILE.close()
	print decoded["totaldata_filename"]
	print decoded["totalclustering_filename"]
	print decoded["configure_filename"]
	print decoded["ARFF_output_filename"]
	print decoded["ARFF_header_filename"]
	# Read the data.
	totaldata = read_dataset(decoded["totaldata_filename"], "\t")
	# Read the labels, dropping trailing empty lines.
	FILE_label = open(decoded["totalclustering_filename"], 'rU')
	totallabel = FILE_label.read()
	totallabel = totallabel.split('\n')
	while totallabel and len(totallabel[-1]) == 0:
		totallabel = totallabel[:-1]
	FILE_label.close()
	# Read the configuration, one tab-separated record per line.
	FILE_configure = open(decoded["configure_filename"], 'rU')
	configure = FILE_configure.read()
	configure = configure.split('\n')
	while configure and len(configure[-1]) == 0:
		configure = configure[:-1]
	for i in range(len(configure)):
		configure[i] = configure[i].split('\t')
	FILE_configure.close()
	return 0, decoded["ARFF_output_filename"], decoded["ARFF_header_filename"], totaldata, totallabel, configure
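# InputData expects a JSON configuration along these lines (paths are hypothetical):
# {
# 	"totaldata_filename": "total_data.txt",
# 	"totalclustering_filename": "total_clustering.txt",
# 	"configure_filename": "configure.txt",
# 	"ARFF_output_filename": "output.arff",
# 	"ARFF_header_filename": "header.arff"
# }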
def main(decoded):

	print decoded["clustering_filename"]
	print decoded["dataset_filename"]
	print decoded["metric_filename"]

	instance = read_dataset(decoded["dataset_filename"], '\t')

	FILE = open(decoded["clustering_filename"], 'rU')
	clustering = FILE.read()
	clustering = clustering.split('\n')
	# Drop trailing empty lines.
	while clustering and len(clustering[-1]) == 0:
		clustering = clustering[:-1]
	FILE.close()

	instance = Convert2FloatArray(instance)

	for i in range(len(clustering)):
		clustering[i] = int(clustering[i])

	max_no = max(clustering)

	# Compute the centroid (per-dimension mean) of every cluster.
	clustering_head = []
	for cluster_item in range(max_no + 1):
		item = [0.0] * len(instance[0])
		count = 0
		for i in range(len(clustering)):
			if clustering[i] == cluster_item:
				count += 1
				for j in range(len(instance[i])):
					item[j] += instance[i][j]
		if count == 0:
			# Guard against cluster ids with no members; keep a zero centroid.
			count = 1
		for i in range(len(item)):
			item[i] = round(item[i] / float(count), 2)
		clustering_head.append(item)

	# Write one tab-separated centroid row per cluster.
	FILE = open(decoded["metric_filename"], 'w')
	print clustering_head
	for i in range(len(clustering_head)):
		if i != 0:
			FILE.write('\n')
		for j in range(len(clustering_head[i])):
			if j != 0:
				FILE.write('\t')
			FILE.write(str(clustering_head[i][j]))
	FILE.close()

######################################

	# One label per centroid row, 0..max_no.
	clustering_head_label = range(len(clustering_head))

	return clustering_head, clustering_head_label
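# Usage sketch (paths are hypothetical): main computes one centroid per cluster
# from a tab-separated dataset and its label file, and writes them to metric_filename.
# decoded = {"dataset_filename": "instances.txt",
#            "clustering_filename": "clustering.txt",
#            "metric_filename": "centroids.txt"}
# clustering_head, clustering_head_label = main(decoded)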
def ActivityRecognition(AR_filename, WL_filename, Semantic_filename, Instance, Clustering, Clustering_Metric):
	# Read the instances to recognize from AR_filename.
	AR_instance = read_dataset(AR_filename, '\t')
	AR_instance = Convert2FloatArray(AR_instance)
	AR_instance_ARFF = ConvertInstance2ARFF(AR_instance, Clustering)
	# Read the semantic meanings from an external file.
	Semantic_Meaning = read_json(Semantic_filename)

	# Build a classifier for the next step's processing.
	clf = BuildClassifier(Instance, Clustering, Clustering_Metric)
	print "type of Semantic_Meaning = ", type(Semantic_Meaning)
	new_semantic_meaning = False
	#for index, inst in enumerate(AR_instance_ARFF):
	for index, inst in enumerate(AR_instance):
		Distribution = ModelPossibilityDistribution(clf, inst)
		is_familar_pattern = isFamilarPattern(Distribution, Semantic_Meaning)
		print "is_familar_pattern = ", is_familar_pattern
		if is_familar_pattern < 0:
			print "Add a new instance into WaitingList..."
			PrintInstanceWL(AR_instance[index], WL_filename)
		else:
			if Semantic_Meaning.has_key(str(is_familar_pattern)):
				# Found a probable semantic meaning.
				print "AR Result: " + Semantic_Meaning[str(is_familar_pattern)]
			else:
				# No proper semantic meaning yet; ask the user for one.
				new_semantic_meaning = True
				semantic_label = raw_input('please enter the Semantic Meaning for the context')
				Semantic_Meaning[str(is_familar_pattern)] = semantic_label

	if new_semantic_meaning:
		print_json_to_file(Semantic_filename, Semantic_Meaning)

	return 0
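# BuildClassifier, ModelPossibilityDistribution and isFamilarPattern are defined
# elsewhere in the project; the sketch below is a hypothetical stand-in assuming
# scikit-learn, not the original implementation:
def BuildClassifier(Instance, Clustering, Clustering_Metric):
	# Fit a simple k-NN model on the labeled instances (Clustering_Metric unused here).
	from sklearn.neighbors import KNeighborsClassifier
	clf = KNeighborsClassifier(n_neighbors=3)
	clf.fit(Instance, Clustering)
	return clf

def ModelPossibilityDistribution(clf, inst):
	# Class-probability distribution for a single instance.
	return clf.predict_proba([inst])[0]

def isFamilarPattern(Distribution, Semantic_Meaning, threshold=0.5):
	# Return the dominant class when confident enough, else -1.
	# (Assumes cluster labels are the contiguous ints 0..K-1, so the index into
	# the distribution equals the label.)
	best = max(range(len(Distribution)), key=lambda k: Distribution[k])
	if Distribution[best] >= threshold:
		return best
	return -1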
Example #6
def make_cls(path, n):
    # Build a classifier from the n-gram representation of the dataset at `path`.
    dataset = tools.read_dataset(path)
    ngram_dict, hist_dataset = ngrams.dataset2ngrams(dataset, n)
    print(len(ngram_dict))
    return Cls(ngram_dict, hist_dataset)
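# Usage sketch (path and n-gram order are hypothetical):
# cls = make_cls('corpus.txt', 3)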
Example #8
import numpy as np
import matplotlib.pyplot as plt

def test(sys, weight_scale, weight_inp, weight_fb, alpha, initial_washout, padding_s, fback):

	units = 28*28
	indim = 6
	outdim = 6
	

	esn = ESN(
		units, indim, outdim, weight_scale, weight_inp, weight_fb, alpha, fback
	)
	# Load previously trained weights and build the stepping function.
	esn.load("trainied.pickle")
	stepper = esn.step_taped()
	

	# Dataset paths come from the command line.
	dtsets = read_dataset(sys.argv[1], sys.argv[2])
	# import pdb;pdb.set_trace()
	inputs, outputs, padIdxs, idxs = dtsets[0]

	
	plot_output = []
	plot_state = []

	import time
	start = time.time()
	###########TRAIN
	# all_states = []
	# all_this = []
	# all_states, all_this= train(idxs, padIdxs, esn, stepper, inputs, outputs)

	# M_tonos = np.linalg.pinv(all_states)
	# # import pdb; pdb.set_trace()
	# all_this = np.arctanh(all_this)
	# W_trans = np.dot(M_tonos,all_this)
	# esn.W_out.set_value(W_trans)
	# print W_trans
	
	###########END TRAIN
	print "Time taken ", time.time() - start
	#########TESTING#############
	
	# Targets delayed by one step (currently unused).
	outputs1 = np.zeros(outputs.shape)
	outputs1[1:] = outputs[:-1]

	state, output, this = stepper(
		inputs, outputs, 0.)

	print output.shape
	plot_state.extend(state[:,:units])
	plot_output.extend(output)

	
	#########TESTING#############
	# The third command-line argument toggles plotting.
	if int(sys.argv[3]) == 1:
		f, axarr = plt.subplots(4, sharex=True)
		for oid, tpt in enumerate(np.array(plot_output).transpose()):
			try:
				axarr[0].plot(tpt, label="output" + str(oid))
			except Exception:
				pass
		axarr[0].set_title('output')
		# axarr[0].legend()
		axarr[1].plot(outputs,label="outputs")
		axarr[2].plot(plot_state,label="state")
		axarr[2].set_title('state')
		# axarr[1].legend()
		axarr[3].plot(inputs,label="inputs")
		axarr[3].set_title('inputs')
		# axarr[2].legend()
		# plt.draw()
		# plt.figure()
		# plt.plot(inputs)
		# # plt.figure()
		# plt.plot(outputs)
		plt.show()
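# Usage sketch (hyperparameters are hypothetical): the dataset paths and the
# plot flag come from sys.argv[1:4], and the sys module itself is passed in
# because the function shadows it with a parameter.
# import sys
# test(sys, 0.9, 0.5, 0.1, 0.3, 100, 10, 0.0)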