Example 1
0
def AnalyseInputImage(queryDir, maxNo, model_name, img_canvas, root):
	"""Rank database images by similarity to a query image and display thumbnails.

	Args:
		queryDir: path of the query image fed to the VGG feature extractor.
		maxNo: maximum number of matches to display (anything int() accepts).
		model_name: path of the HDF5 feature database; 'dataset_1' holds the
			feature matrix, 'dataset_2' the utf-8 encoded image paths.
		img_canvas: tkinter Canvas the result thumbnails are drawn onto.
		root: tkinter root window, used to force a geometry update.

	Side effects: clears and redraws img_canvas, grids a scrollbar into the
	module-level frame_canvas, and persists the ranked path list via FileIO
	under the key "imlist".
	"""
	# Load the precomputed features and the image paths they were built from.
	with h5py.File(model_name, 'r') as h5f:
		feats = h5f['dataset_1'][:]
		imgNames = [raw.decode('utf-8') for raw in h5f['dataset_2'][:]]

	model = VGGNet()
	queryVec = model.extract_feat(queryDir)
	# Similarity ranking via dot product against every stored feature vector.
	scores = np.dot(queryVec, feats.T)
	rank_ID = np.argsort(scores)[::-1]  # indices, best match first
	maxres = int(maxNo)
	imlist = [imgNames[index] for index in rank_ID[:maxres]]
	FileIO().save_obj(imlist, "imlist")

	img_canvas.delete('all')
	# NOTE(review): a fresh Scrollbar is gridded on every call and old ones are
	# never destroyed — presumably harmless since they stack in the same cell,
	# but consider creating it once outside this function.
	vsbar = Scrollbar(frame_canvas, orient=VERTICAL, command=img_canvas.yview)
	vsbar.grid(row=0, column=1, sticky=NS)
	img_canvas.configure(yscrollcommand=vsbar.set)
	frame_images = Frame(img_canvas, bg="grey")
	img_canvas.create_window((0, 0), window=frame_images, anchor='nw')

	img_no = 0
	max_in_row = 0  # tallest thumbnail seen in the current row of three
	height_total = 0
	basewidth = 300  # fixed thumbnail width; height preserves aspect ratio
	for path in imlist:
		img = Image.open(path)
		wpercent = basewidth / float(img.size[0])
		hsize = int(float(img.size[1]) * wpercent)
		max_in_row = max(max_in_row, hsize)
		# Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter.
		img = img.resize((basewidth, hsize), Image.LANCZOS)
		render = ImageTk.PhotoImage(img)
		img_show = Label(frame_images, image=render, name=str(img_no))
		img_show.bind("<Button-1>", ShowOriginalImage)
		# Keep a reference on the widget so Tk does not garbage-collect the image.
		img_show.image = render
		img_show.grid(row=img_no // 3, column=img_no % 3)
		img_no += 1
		if img_no % 3 == 0:  # a row of three is complete: count its tallest image
			height_total += max_in_row
			max_in_row = 0
	# Include a trailing partial row (the original dropped it from the height).
	height_total += max_in_row
	frame_canvas.config(height=height_total)
	root.update()
	img_canvas.config(scrollregion=img_canvas.bbox("all"))
Example 2
0
def GenerateFeatureDatabase(fd_entry, ft_label, pgbar, pglabel, root):
	"""Extract VGG features for every jpg/jpeg under the chosen folder into HDF5.

	Args:
		fd_entry: tkinter Combobox holding the folder path; its value list is
			updated with the processed folder (most-recently-used last).
		ft_label: tkinter Label that receives the output .h5 path.
		pgbar: tkinter Progressbar updated to percent complete.
		pglabel: tkinter Label showing the percentage as text.
		root: tkinter root window, updated so the UI repaints during the loop.

	Side effects: writes "feature_<md5>.h5" next to the executable/script,
	appends failures to error.log, and persists the folder MRU list via FileIO
	under the key "save".
	"""
	ft_name = "feature"
	fd_name = fd_entry.get()
	root.update()
	# Guard clause; os.path.isdir already implies existence.
	if not os.path.isdir(fd_name):
		messagebox.showerror(title="文件夹读取错误", message="无法读取文件或文件夹不存在")
		return

	# Recursively collect images; lower() also matches .JPG / .JPEG etc.,
	# which the original case-sensitive comparison silently skipped.
	img_list = [
		os.path.join(dp, f)
		for dp, _, filenames in os.walk(fd_name)
		for f in filenames
		if os.path.splitext(f)[1].lower() in ('.jpg', '.jpeg')
	]
	print("--------------------------------------------------")
	print("         feature extraction starts")
	print("--------------------------------------------------")

	# Directory of the frozen executable, or of this script otherwise. The
	# original 'elif __file__:' left application_path unbound when __file__
	# was falsy, crashing later with NameError.
	if getattr(sys, 'frozen', False):
		application_path = os.path.dirname(sys.executable)
	else:
		application_path = os.path.dirname(os.path.abspath(__file__))

	feats = []
	names = []
	model = VGGNet()
	ite_no = 0
	ite_to = len(img_list)
	error_occured = False
	for i, img_path in enumerate(img_list):
		try:
			ite_no += 1
			norm_feat = model.extract_feat(img_path)
			feats.append(norm_feat)
			names.append(img_path.encode('utf-8'))
			print("extracting feature from image No. %d , %d images in total" % ((i + 1), len(img_list)))
			pgbar['value'] = (100 * ite_no) / ite_to
			print(pgbar['value'])
			pglabel.configure(text="{0:0.2f}%".format(pgbar['value']))
			root.update()
		except Exception as e:
			# Best-effort: log the failing image and keep extracting the rest.
			error_occured = True
			logger_fname = os.path.join(application_path, 'error.log')
			dt_string = datetime.now().strftime("%d/%m/%Y %H:%M:%S")
			with open(logger_fname, "a+", encoding='utf-8') as f:
				f.write('[Error] ' + dt_string + "\n")
				f.write('[Trace Back] ' + str(e) + "\n")
				f.write('[Detail] ' + img_path + "\n")
	feats = np.array(feats)
	print("--------------------------------------------------")
	print("      writing feature extraction results ...")
	print("--------------------------------------------------")

	if error_occured:
		messagebox.showerror(title="提取特征时发生错误", message="提取特征时发生错误,程序跳过了错误文件完成特征提取,请将error.log文件,以及后台截图发给开发者。")

	# Output file name is keyed on the folder path so each folder gets one db.
	output = ft_name + "_" + hashlib.md5(fd_name.encode()).hexdigest() + ".h5"
	output = os.path.join(application_path, output)
	# np.string_ was removed in NumPy 2.0; names already hold utf-8 bytes, so
	# asarray produces the same fixed-width byte-string ('S') dataset.
	with h5py.File(output, 'w') as h5f:
		h5f.create_dataset('dataset_1', data=feats)
		h5f.create_dataset('dataset_2', data=np.asarray(names))
	ft_label.configure(text=os.path.abspath(output))

	# Maintain the MRU folder list: move fd_name to the end, or append if new.
	file_io = FileIO()
	if file_io.check_obj("save"):
		pic_dataset_paths = file_io.load_obj("save")
		try:
			idx = pic_dataset_paths.index(fd_name)
			pic_dataset_paths[idx], pic_dataset_paths[-1] = (
				pic_dataset_paths[-1], pic_dataset_paths[idx])
		except ValueError:
			pic_dataset_paths.append(fd_name)
		print(pic_dataset_paths)
	else:
		pic_dataset_paths = [fd_name]
	file_io.save_obj(pic_dataset_paths, "save")
	fd_entry["values"] = pic_dataset_paths
	fd_entry.current(len(pic_dataset_paths) - 1)