def parameter_tuning_f1(dataset_path):
    """Grid-search parameters for feature 1 (horizontal edge projection).

    Tries every (gauss_sigma, total_blocks) combination, builds per-family
    feature sets from the first ``num_samples`` images of each family, scores
    each combination with ``fitting_scoring``, and prints the combinations
    ranked by accuracy (best first).

    Args:
        dataset_path: Root directory of the dataset; assumed to end with a
            path separator, since family/image names are appended with ``+``.

    Returns:
        None. Results are printed; returns early if the dataset is empty or
        any family has fewer than ``num_samples`` images.
    """
    image_list = prefetch_images(dataset_path)
    if not image_list:
        return
    param1 = (3, 5, 7, 9)              # candidate Gaussian sigmas
    param2 = (16, 32, 64, 128, 256)    # candidate block counts / vector lengths
    output = {}
    num_samples = 40
    for p1 in param1:
        for p2 in param2:
            print("*****" * 2, p1, p2, "*****" * 2)
            features = {}  # family name -> list of feature vectors
            for family, images in image_list:
                # print("fetching...", family)
                if len(images) < num_samples:
                    print("Insufficient samples")
                    return
                for j in range(num_samples):
                    image_path = dataset_path + family + '/' + images[j]
                    # print(image_path, j + 1, "out of", num_samples)
                    img = io.imread(image_path)
                    # Feature 1: horizontal edge projection histogram.
                    hist = project_h_edge(img, gauss_sigma=p1,
                                          print_img=False, total_blocks=p2)
                    # Resize the histogram to the desired fixed length p2.
                    hist_arr = np.array(hist).reshape(1, len(hist))
                    feature_vec = transform.resize(
                        hist_arr, (1, p2), mode='reflect').tolist()[0]
                    features.setdefault(family, []).append(feature_vec)
            output[(p1, p2)] = fitting_scoring(features)
    # Plain dicts preserve insertion order (3.7+), so sorting the items is
    # enough; the OrderedDict wrapper the file used elsewhere is redundant.
    output = dict(sorted(output.items(), key=lambda t: t[1], reverse=True))
    print(output)
    best_key = next(iter(output))
    best_val = output[best_key]
    print("-" * 20)
    print("Feature1: param set", best_key, "accuracy", best_val)
    print("-" * 20)
def parameter_tuning_f2(dataset_path):
    """Grid-search parameters for feature 2 (HOG descriptor).

    Tries every (blocks, buckets) combination, builds per-family feature sets
    from the first ``num_samples`` images of each family, scores each
    combination with ``fitting_scoring``, and prints the combinations ranked
    by accuracy (best first).

    Args:
        dataset_path: Root directory of the dataset; assumed to end with a
            path separator, since family/image names are appended with ``+``.

    Returns:
        None. Results are printed; returns early if the dataset is empty or
        any family has fewer than ``num_samples`` images.
    """
    image_list = prefetch_images(dataset_path)
    if not image_list:
        return
    param1 = ((4, 4), (6, 6), (8, 8), (10, 10), (12, 12))  # block grids
    param2 = (16, 32, 64, 128, 256)                        # orientation buckets
    output = {}
    num_samples = 40
    for p1 in param1:
        for p2 in param2:
            print("*****" * 2, p1, p2, "*****" * 2)
            features = {}  # family name -> list of feature vectors
            for family, images in image_list:
                print("fetching...", family)
                if len(images) < num_samples:
                    print("Insufficient samples")
                    return
                for j in range(num_samples):
                    image_path = dataset_path + family + '/' + images[j]
                    # print(image_path, j + 1, "out of", num_samples)
                    img = io.imread(image_path)
                    # Feature 2: Histogram of Oriented Gradients (HOG).
                    feature_vec = extract_HOG(img, blocks=p1,
                                              buckets=p2).tolist()
                    features.setdefault(family, []).append(feature_vec)
            output[(p1, p2)] = fitting_scoring(features)
    # Plain dicts preserve insertion order (3.7+); no OrderedDict needed.
    output = dict(sorted(output.items(), key=lambda t: t[1], reverse=True))
    print(output)
    best_key = next(iter(output))
    best_val = output[best_key]
    print("-" * 20)
    print("Feature 2: param set", best_key, "accuracy", best_val)
    print("-" * 20)
def parameter_tuning_f3(dataset_path):
    """Grid-search parameters for feature 3 (per-grid mean intensity).

    Tries each grid layout, builds per-family feature sets from the first
    ``num_samples`` images of each family, scores each layout with
    ``fitting_scoring``, and prints the layouts ranked by accuracy
    (best first).

    Args:
        dataset_path: Root directory of the dataset; assumed to end with a
            path separator, since family/image names are appended with ``+``.

    Returns:
        None. Results are printed; returns early if the dataset is empty or
        any family has fewer than ``num_samples`` images.
    """
    image_list = prefetch_images(dataset_path)
    if not image_list:
        return
    param1 = ((1, 1), (2, 2), (4, 4), (8, 8), (16, 16))  # grid layouts
    output = {}
    num_samples = 40
    for p1 in param1:
        print("*****" * 2, p1, "*****" * 2)
        features = {}  # family name -> list of feature vectors
        for family, images in image_list:
            print("fetching...", family)
            if len(images) < num_samples:
                print("Insufficient samples")
                return
            for j in range(num_samples):
                image_path = dataset_path + family + '/' + images[j]
                # print(image_path, j + 1, "out of", num_samples)
                img = io.imread(image_path)
                # Feature 3: mean intensity of each grid cell.
                grids = extract_grid_blocks(img, blocks_per_image=p1)
                feature_vec = means_feature(grids)
                features.setdefault(family, []).append(feature_vec)
        output[p1] = fitting_scoring(features)
    # Plain dicts preserve insertion order (3.7+); no OrderedDict needed.
    output = dict(sorted(output.items(), key=lambda t: t[1], reverse=True))
    print(output)
    best_key = next(iter(output))
    best_val = output[best_key]
    print("-" * 20)
    print("Feature 3: param set", best_key, "accuracy", best_val)
    print("-" * 20)
def parameter_tuning_f6(dataset_path):
    """Grid-search parameters for feature 6 (flattened median image).

    Tries each resize shape, builds per-family feature sets from the first
    ``num_samples`` images of each family, scores each shape with
    ``fitting_scoring``, and prints the shapes ranked by accuracy
    (best first).

    Args:
        dataset_path: Root directory of the dataset; assumed to end with a
            path separator, since family/image names are appended with ``+``.

    Returns:
        None. Results are printed; returns early if the dataset is empty or
        any family has fewer than ``num_samples`` images.
    """
    image_list = prefetch_images(dataset_path)
    if not image_list:
        return
    param1 = ((4, 4), (8, 8), (16, 16), (32, 32), (64, 64), (128, 128))
    output = {}
    num_samples = 40
    for p1 in param1:
        print("*****" * 2, p1, "*****" * 2)
        features = {}  # family name -> list of feature vectors
        for family, images in image_list:
            print("fetching...", family)
            if len(images) < num_samples:
                print("Insufficient samples")
                return
            for j in range(num_samples):
                image_path = dataset_path + family + '/' + images[j]
                # print(image_path, j + 1, "out of", num_samples)
                img = io.imread(image_path)
                # Feature 6: median image, flattened to a vector.
                feature_vec = median_feature(img, resize_shape=p1)
                features.setdefault(family, []).append(feature_vec)
        output[p1] = fitting_scoring(features)
    # Plain dicts preserve insertion order (3.7+); no OrderedDict needed.
    output = dict(sorted(output.items(), key=lambda t: t[1], reverse=True))
    print(output)
    best_key = next(iter(output))
    best_val = output[best_key]
    print("-" * 20)
    print("Feature 6: param set", best_key, "accuracy", best_val)
    print("-" * 20)
def parameter_tuning_f5(dataset_path):
    """Evaluate feature 5 (contrast histogram) — a single configuration.

    Unlike the other tuning functions there is no parameter grid here: the
    histogram feature is computed once per image and scored once. The result
    is keyed under 256 (presumably the histogram bin count — TODO confirm
    against ``histogram_feature``) so the output format matches the other
    tuning functions.

    Args:
        dataset_path: Root directory of the dataset; assumed to end with a
            path separator, since family/image names are appended with ``+``.

    Returns:
        None. Results are printed; returns early if the dataset is empty or
        any family has fewer than ``num_samples`` images.
    """
    image_list = prefetch_images(dataset_path)
    if not image_list:
        return
    output = {}
    num_samples = 40
    print("*****" * 2, "*****" * 2)
    features = {}  # family name -> list of feature vectors
    for family, images in image_list:
        print("fetching...", family)
        if len(images) < num_samples:
            print("Insufficient samples")
            return
        for j in range(num_samples):
            image_path = dataset_path + family + '/' + images[j]
            # print(image_path, j + 1, "out of", num_samples)
            img = io.imread(image_path)
            # Feature 5: histogram (contrast) of the image.
            feature_vec = histogram_feature(img)
            features.setdefault(family, []).append(feature_vec)
    output[256] = fitting_scoring(features)
    # Plain dicts preserve insertion order (3.7+); no OrderedDict needed.
    output = dict(sorted(output.items(), key=lambda t: t[1], reverse=True))
    print(output)
    best_key = next(iter(output))
    best_val = output[best_key]
    print("-" * 20)
    print("Feature 5: param set", best_key, "accuracy", best_val)
    print("-" * 20)