Code Example #1
import time

import FetchFile  # project-local data-loading module (import form assumed)

def extract_feature_by_kmeans(class_num, subsample_size, window_size, cluster_num, max_iter, rnd_number):
	# gen_feature_fname, learnvocabulary, mynormalize_multi and write_features
	# are helpers defined elsewhere in this project
	file_name = gen_feature_fname(class_num, subsample_size, window_size, cluster_num)
	start = time.time()
	train_X, test_X, train_y, test_y = FetchFile.gen_data(class_num, subsample_size, window_size, rnd_number)
	print("Generate Data Time: ", time.time() - start)
	# Learn the vocabulary with k-means, then normalize the resulting features
	features = learnvocabulary(train_X, cluster_num, max_iter)
	features = mynormalize_multi(features)
	write_features(file_name, features)
	print("=== Feature Extraction Finish ===")
Code Example #2
import multiprocessing

import Classification  # project-local classifier module (import form assumed)
import FetchFile       # project-local data-loading module (import form assumed)

def grid_search_for_neighbor_multiprocess(class_num, subsample_size, window_size, cluster_num, max_iter, rnd_number, neighbor_num_seq):
	# Generate the train/test split once and pass it to every worker process
	train_X, test_X, train_y, test_y = FetchFile.gen_data(class_num, subsample_size, window_size, rnd_number)
	jobs = []
	# One classification run per neighbor count in the search grid
	# (an earlier version searched neighbor_num in [2**i for i in range(neighbor_log2_num)])
	for neighbor_num in neighbor_num_seq:
		p = multiprocessing.Process(target=Classification.classifiy,
			args=(class_num, subsample_size, window_size, cluster_num, max_iter,
				rnd_number, neighbor_num, train_X, train_y, test_X, test_y))
		jobs.append(p)
		p.start()
	# Wait for every worker to finish before returning
	for p in jobs:
		p.join()
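
A minimal driver sketch for the multiprocess grid search, with hypothetical parameter values and a powers-of-two neighbor grid mirroring the commented-out grid above. The __main__ guard matters when multiprocessing uses the spawn start method (the default on Windows and recent macOS), because each worker re-imports the module:

neighbor_num_seq = [2 ** i for i in range(6)]  # 1, 2, 4, 8, 16, 32

if __name__ == "__main__":
	grid_search_for_neighbor_multiprocess(class_num=10, subsample_size=1000,
		window_size=32, cluster_num=64, max_iter=300, rnd_number=42,
		neighbor_num_seq=neighbor_num_seq)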
Code Example #3
def grid_search_for_neighbor(class_num, subsample_size, window_size, cluster_num, max_iter, rnd_number, neighbor_num_seq):
	# Sequential variant: relies on the same FetchFile / Classification modules
	# as above and runs one classification per neighbor count in the current process
	train_X, test_X, train_y, test_y = FetchFile.gen_data(class_num, subsample_size, window_size, rnd_number)
	for neighbor_num in neighbor_num_seq:
		Classification.classifiy(class_num, subsample_size, window_size, cluster_num, max_iter,
			rnd_number, neighbor_num, train_X, train_y, test_X, test_y)
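
The sequential variant takes the same arguments and needs no __main__ guard; a minimal sketch with the same hypothetical values:

grid_search_for_neighbor(class_num=10, subsample_size=1000, window_size=32,
	cluster_num=64, max_iter=300, rnd_number=42,
	neighbor_num_seq=[2 ** i for i in range(6)])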