def image_styletransfer():
    """Handle the style-transfer page.

    GET renders the upload form; POST saves the content and style images,
    runs the style transfer and redirects to the shared result page.
    """
    if request.method == 'POST':
        if request.form.get('filename') is None:
            # First submission: receive and save both images.
            im = request.files['image']
            filename = save_file(im)
            im2 = request.files['style_image']
            filename2 = save_file(im2)
            # Style label is the style file's base name (without extension).
            style_name = im2.filename.split('.')[0]
            if filename is None:
                return render_template('error.html', msg='不支持的文件格式!')
        else:
            # Re-submission with already-saved files.  The original code left
            # filename2/style_name undefined on this branch (NameError at the
            # img_style_transfer call below); recover them from the form.
            filename = request.form.get('filename')
            filename2 = request.form.get('filename2')
            # NOTE(review): assumes the result page posts these fields back —
            # verify against the style_transfer/filter_result templates.
            style_name = request.form.get('style_name', '')
        # Run the style transfer.
        res_name = img_style_transfer(filename, filename2)
        # Redirect to the result page.
        return redirect(
            url_for('filter_bp.filter_result',
                    mode=style_name,
                    back='style_transfer',
                    filename=filename,
                    res_name=res_name,
                    flag='style_transfer'))
    else:
        return render_template('filter/style_transfer.html')
def create_triples(relations, house_objects, visualgenome_path):
    """Create RDF triples for the given relations in two formats (labels and
    URIs), keep only those whose object is a known house object, then write
    both triple files and the dataset statistics."""
    known_uris = set(house_objects.values())
    known_names = set(house_objects.keys())
    lemmadb = map_lemmadb(lowercase=True)
    selected = []
    label_triples = []
    uri_triples = []
    for object_name, category, attribute in relations:
        # Both the object and the attribute must resolve to a lemma/URI.
        if object_name not in lemmadb or attribute not in lemmadb:
            continue
        object_uri = create_uri(lemmadb[object_name])
        object_name = object_name.split('.')[0]
        # Keep the relation only when the object is a known house object,
        # matched either by URI or by plain name.
        if object_uri not in known_uris and object_name not in known_names:
            continue
        selected.append((object_name, category, attribute))
        label_triples.append(
            f"<{object_name}> <{category}> <{attribute.split('.')[0]}>")
        category_uri = 'http://ns.inria.fr/deko/ontology/deko.owl#' + category
        uri_triples.append(
            f'<{object_uri}> <{category_uri}> <{create_uri(lemmadb[attribute])}>')
    calculate_statistics(selected, visualgenome_path)
    save_file(join(visualgenome_path, 'selected_triples.nt'), uri_triples)
    save_file(join(visualgenome_path, 'selected_triples_label.nt'), label_triples)
    logging.info('Total valid relations with URIs: %s' % len(selected))
def calculate_statistics(object_relations, concepnet_parsed_path):
    """Print and persist counts of objects and per-relation frequencies for
    the (object, relation, object) dataset."""
    relation_counts = {}
    seen_objects = set()
    for subj, relation, obj in object_relations:
        relation_counts[relation] = relation_counts.get(relation, 0) + 1
        seen_objects.add(subj)
        seen_objects.add(obj)
    table = PrettyTable()
    table.field_names = ['Item', 'Amount']
    table.align['Item'] = 'l'
    table.align['Amount'] = 'r'
    table.add_row(['Objects', len(seen_objects)])
    table.add_row(
        ['Relations (object-object)', sum(relation_counts.values())])
    for relation, amount in relation_counts.items():
        table.add_row(['Relation "%s"' % relation, amount])
    print(table)
    save_file(join(concepnet_parsed_path, 'statistics_dataset.txt'),
              [table.get_string()])
def calculate_statistics(object_relations, visualgenome_path):
    """Compute and save statistics for the (object, category, attribute)
    dataset: overall counts, per-category counts, per-object averages and an
    attribute frequency table.

    Fixes: guards the per-object averages against an empty dataset (the
    original divided by zero), renames the local that shadowed the builtin
    ``object`` and fixes the ``attributte`` typo.
    """
    category_counter = {}
    object_names = set()
    attributes = []
    for obj_name, category, attribute in object_relations:
        category_counter[category] = category_counter.get(category, 0) + 1
        object_names.add(obj_name)
        attributes.append(attribute)
    table = PrettyTable()
    table.field_names = ['Item', 'Amount']
    table.align['Item'] = 'l'
    table.align['Amount'] = 'r'
    total_objects = len(object_names)
    table.add_row(['Objects', total_objects])
    table.add_row(['Attributes', len(set(attributes))])
    table.add_row(['Relations (object-attribute)', len(object_relations)])
    total_objects = float(total_objects)
    for category in sorted(category_counter.keys()):
        table.add_row(
            ['Relations "%s"' % category, category_counter[category]])
    # Averages are only defined when there is at least one object; the
    # original raised ZeroDivisionError on an empty dataset.
    if total_objects:
        table.add_row([
            'Avg. attributes by object',
            round(len(attributes) / total_objects, 2)
        ])
        for category in sorted(category_counter.keys()):
            table.add_row([
                'Avg. relations "%s" by object' % category,
                round(category_counter[category] / total_objects, 2)
            ])
    print(table)
    save_file(join(visualgenome_path, 'statistics_dataset.txt'),
              [table.get_string()])
    # Reuse the same table object for the attribute frequency listing.
    table.clear_rows()
    table.field_names = ['Attribute', 'Frequency']
    for attribute, frequency in sorted(Counter(attributes).items(),
                                       key=lambda x: x[1],
                                       reverse=True):
        table.add_row([attribute, frequency])
    save_file(join(visualgenome_path, 'statistics_attributes.txt'),
              [table.get_string()])
def init_train(self, train_path, data_path="store/train_data.pkl"):
    """Load the cached training data, rebuilding it from the raw file on failure.

    @param train_path: raw tab-separated file, one "user_id<TAB>item_id" per line
    @param data_path: pickle cache of the parsed data
    @return: dict mapping int user id to a set of int item ids
    """
    try:
        print("开始载入初始化数据....", file=sys.stderr)
        train_data = load_file(data_path)
    except Exception:
        # Was `except BaseException`, which also swallowed KeyboardInterrupt
        # and SystemExit; Exception keeps the rebuild-on-failure behaviour
        # without masking interpreter-exit signals.
        train_data = dict()
        for line in process_data.read_file(filename=train_path):
            arr = line.split('\t')
            train_data.setdefault(int(arr[0]), set())
            train_data[int(arr[0])].add(int(arr[1]))
        save_file(data_path, train_data)
    return train_data
def train(self, sim_matrix_path="store/buy_user_sim.pkl"):
    """Load the cached user similarity matrix; recompute and save it on failure.

    @param sim_matrix_path: pickle cache of the user-user similarity matrix
    """
    print("开始训练模型", file=sys.stderr)
    try:
        print("开始载入用户协同矩阵....", file=sys.stderr)
        self.user_sim_matrix = load_file(sim_matrix_path)
        print("载入协同过滤矩阵完成", file=sys.stderr)
    except Exception:
        # Narrowed from BaseException so Ctrl-C / SystemExit still propagate.
        print("载入用户协同过滤矩阵失败,重新计算协同过滤矩阵", file=sys.stderr)
        # Recompute the user similarity matrix from the training data.
        self.user_sim_matrix = self.user_similarity()
        print("开始保存协同过滤矩阵", file=sys.stderr)
        save_file(sim_matrix_path, self.user_sim_matrix)
        print("保存协同过滤矩阵完成", file=sys.stderr)
def image_filter():
    """Handle the image-filter page.

    GET renders the upload form; POST saves the uploaded image (or reuses a
    previously saved one), applies the selected filter and redirects to the
    result page.
    """
    if request.method == 'POST':
        if request.form.get('filename') is None:
            # First submission: receive and save the image.
            im = request.files['image']
            filename = save_file(im)
            if filename is None:
                return render_template('error.html', msg='不支持的文件格式!')
        else:
            filename = request.form.get('filename')
        # Selected filter mode.
        mode = request.form.get('mode', None)
        # Apply the filter.  (Removed a leftover debug print of the filename.)
        res_name, olddata = img_filter(request.form, filename, mode=mode)
        return redirect(
            url_for('filter_bp.filter_result',
                    mode=mode,
                    back='filter',
                    filename=filename,
                    res_name=res_name,
                    flag='filter',
                    olddata=olddata))
    else:
        return render_template('filter/filter.html')
def image_enhance():
    """Handle the image-enhance page.

    GET renders the form; POST saves the image (or reuses a saved one),
    applies the chosen enhancement with the given factor and redirects to the
    result page.
    """
    if request.method == 'POST':
        if request.form.get('filename') is None:
            im = request.files['image']
            # Save the uploaded image.
            filename = save_file(im)
            if filename is None:
                return render_template('error.html', msg='不支持的文件格式!')
        else:
            filename = request.form.get('filename')
        enhance_type = request.form.get('enhance', None)
        factor_select = request.form.get('factor-select', None)
        factor_custom = request.form.get('factor-custom', None)
        # Prefer the custom factor when present.  The original tested
        # `factor_custom != ''`, which crashed with float(None) when the
        # field was missing from the form; truthiness covers both the
        # missing (None) and empty-string cases.
        if factor_custom:
            factor = float(factor_custom)
        else:
            factor = float(factor_select)
        # Apply the enhancement.
        res_name = img_enhance(filename, enhance_type, factor)
        return redirect(url_for('filter_bp.filter_result',
                                mode=enhance_type,
                                back='enhance',
                                filename=filename,
                                res_name=res_name,
                                flag='enhance',
                                olddata=factor))
    else:
        return render_template('filter/enhance.html')
def image_kernel():
    """Apply a user-defined convolution kernel to an uploaded image.

    GET renders the kernel form; POST saves the image (or reuses a saved
    one), builds the kernel from the form and redirects to the result page.
    """
    if request.method != 'POST':
        return render_template('filter/kernel.html')
    filename = request.form.get('filename')
    if filename is None:
        # First submission: save the uploaded image.
        upload = request.files.get('image', None)
        filename = save_file(upload)
        if filename is None:
            return render_template('error.html', msg='不支持的文件格式!')
    # Kernel definition and post-processing parameters from the form.
    size = request.form.get('size', None)
    kernel_size, kernel = get_kernel(size)
    scale = request.form.get('scale', None)
    offset = request.form.get('offset', None)
    if kernel is None:
        # Invalid kernel: send the user back to the form.
        return redirect(url_for('filter_bp.image_kernel'))
    # Convolve the image with the custom kernel.
    res_name = img_kernel(filename, kernel_size, kernel, scale, offset)
    olddata = '#'.join(map(str, kernel)) + '#' + scale + '#' + offset
    return redirect(url_for('filter_bp.filter_result',
                            mode='Custom Kernel',
                            back='kernel',
                            filename=filename,
                            res_name=res_name,
                            olddata=olddata,
                            flag='kernel-' + size))
def train(self, origin_data, sim_matrix_path="store/user_sim.pkl"):
    """Train the model: initialise the training set, then load the cached
    user similarity matrix, recomputing and saving it on failure.

    @param origin_data: raw interaction data
    @param sim_matrix_path: pickle cache of the user-user similarity matrix
    """
    self.origin_data = origin_data
    # Build the training set from the raw data.
    self._init_train(origin_data)
    print("开始训练模型", file=sys.stderr)
    try:
        print("开始载入用户协同矩阵....", file=sys.stderr)
        self.user_sim_matrix = load_file(sim_matrix_path)
        print("载入协同过滤矩阵完成", file=sys.stderr)
    except Exception:
        # Narrowed from BaseException so Ctrl-C / SystemExit still propagate.
        print("载入用户协同过滤矩阵失败,重新计算协同过滤矩阵", file=sys.stderr)
        # Recompute the user similarity matrix.
        self.user_sim_matrix = self.user_similarity()
        print("开始保存协同过滤矩阵", file=sys.stderr)
        save_file(sim_matrix_path, self.user_sim_matrix)
        print("保存协同过滤矩阵完成", file=sys.stderr)
def water_mask_img():
    """Add a logo or text watermark to an uploaded image.

    GET renders the form; POST applies the watermark (logo image or text,
    depending on the 'type' field) and redirects to the result page.
    """
    if request.method != 'POST':
        return render_template('tools/water_mask.html')
    mask_type = request.form.get('type')
    file1 = request.form.get('file1')
    file2 = request.form.get("file2")
    if file1 is None:
        # First submission: save the base image.
        file1 = save_file(request.files['image1'])
    olddata = ''
    if mask_type == 'logo':
        # Logo watermark.
        if file2 is None:
            file2 = save_file(request.files['image2'])
        res_filename = water_mask(file1, file2)
    else:
        # Text watermark: read the text settings from the form.
        text = request.form.get('text')
        color = request.form.get('color')
        font_size = request.form.get('font_size')
        x = request.form.get('x')
        y = request.form.get('y')
        res_filename, olddata = text_mask(file1, text, color, font_size,
                                          (x, y))
    return redirect(url_for('tools_bp.water_mask_result',
                            back='tools_bp.water_mask_img',
                            res_filename=res_filename,
                            file1=file1,
                            file2=file2,
                            time=time(),
                            olddata=olddata))
def calculate_statistics(object_relations, output_path):
    """Print and persist counts of distinct frame types, frame elements and
    fillers for a list of '<filler> <element> <type>' triples."""
    # Hoist the compiled pattern out of the loop.
    triple_re = re.compile('<(.+)> <(.+)> <(.+)>')
    ftypes = set()
    felements = set()
    ffillers = set()
    for triple in object_relations:
        ffiller, felement, ftype = triple_re.match(triple).groups()
        ftypes.add(ftype)
        felements.add(felement)
        ffillers.add(ffiller)
    table = PrettyTable()
    table.field_names = ['Item', 'Amount']
    table.align['Item'] = 'l'
    table.align['Amount'] = 'r'
    table.add_row(['Frame types', len(ftypes)])
    table.add_row(['Frame elements', len(felements)])
    table.add_row(['Frame elements fillers', len(ffillers)])
    table.add_row(['Relations', len(object_relations)])
    print(table)
    save_file(join(output_path, 'statistics_dataset.txt'),
              [table.get_string()])
def blend_img():
    """Blend two uploaded images with an alpha factor.

    GET renders the form; POST saves the two images (or reuses saved ones),
    overlays them and redirects to the result page.
    """
    if request.method == "POST":
        file1 = request.form.get('file1', None)
        file2 = request.form.get('file2', None)
        if file1 is None or file2 is None:
            # First submission: save both images.
            file1 = save_file(request.files['image1'])
            file2 = save_file(request.files['image2'])
        # Blending weight.
        alpha = request.form['alpha']
        res_filename = overlay(file1, file2, float(alpha))
        return redirect(url_for('tools_bp.blend_result',
                                back='tools_bp.blend_img',
                                file1=file1,
                                file2=file2,
                                res_filename=res_filename,
                                time=time(),
                                olddata=alpha))
    else:
        return render_template('tools/blend.html')
def select_relations(conceptnet_raw_path, concepnet_parsed_path,
                     house_objects_path):
    ''' Select some relations from Conceptnet JSON files '''
    validated_relations = validate_relations(conceptnet_raw_path)
    wn31db = map_wn31db()
    # Known house objects: normalised name -> DBpedia URI.
    objects_with_uris = {
        v.replace(' ', '_'): k['dbpedia_uri']
        for v, k in load_json(house_objects_path).items()
    }
    relations_with_uris = []
    triple_labels = []
    triple_uris = []
    for object1, relation, object2 in validated_relations:
        # Resolve object2 lazily through ConceptNet/DBpedia; object1 is
        # assumed to already have a URI (it comes from the house objects).
        if object2 not in objects_with_uris:
            objects_with_uris[object2] = to_dbpedia(
                get_uri('/c/en/' + object2, 10), wn31db)
        if not objects_with_uris[object2]:
            # object2 could not be mapped to a URI: skip the relation.
            continue
        relation_uri = 'http://ns.inria.fr/deko/ontology/deko.owl#' + relation
        relations_with_uris.append((objects_with_uris[object1], relation,
                                    objects_with_uris[object2]))
        triple_uris.append('<%s> <%s> <%s>' % (objects_with_uris[object1],
                                               relation_uri,
                                               objects_with_uris[object2]))
        triple_labels.append('<%s> <%s> <%s>' % (object1, relation, object2))
    calculate_statistics(relations_with_uris, concepnet_parsed_path)
    save_file(join(concepnet_parsed_path, 'selected_triples.nt'), triple_uris)
    save_file(join(concepnet_parsed_path, 'selected_triples_label.nt'),
              triple_labels)
    logging.info('Total valid relations with URIs: %s' %
                 len(relations_with_uris))
def resize_img():
    """Resize an uploaded image by a proportion factor.

    GET renders the form; POST saves the image (or reuses a saved one),
    resizes it and redirects to the result page.

    Raises:
        ValueError: if the proportion factor is below 0.1.  (The original
        raised a bare ``Exception``; ValueError is more specific and remains
        catchable by existing ``except Exception`` handlers.)
    """
    if request.method == 'POST':
        filename = request.form.get('filename')
        if filename is None:
            img = request.files['image']
            filename = save_file(img)
        prop = float(request.form.get('proportion'))
        if prop < 0.1:
            raise ValueError('比例系数不合法!')
        res_filename = resize(filename, proportion=prop)
        args = {
            'back': 'tools_bp.resize_img',
            'filename': filename,
            'res_filename': res_filename,
            'olddata': prop
        }
        return redirect(url_for('tools_bp.tools_result', **args))
    else:
        return render_template('tools/resize.html')
def mosaic_img():
    """Pixelate a rectangular region of an uploaded image.

    GET renders the form; POST saves the image (or reuses a saved one),
    applies the mosaic over the given rectangle and redirects to the result
    page.
    """
    if request.method != 'POST':
        return render_template('tools/mosaic.html')
    # Reuse a previously saved file when available.
    filename = request.form.get('filename')
    if filename is None:
        filename = save_file(request.files['image'])
    # Rectangle corners and mosaic granularity from the form.
    startx = request.form.get('startx')
    starty = request.form.get('starty')
    endx = request.form.get('endx')
    endy = request.form.get('endy')
    granularity = int(request.form.get('g'))
    res_filename, olddata = mosaic(filename, [startx, starty], [endx, endy],
                                   granularity)
    return redirect(url_for('tools_bp.tools_result',
                            back='tools_bp.mosaic_img',
                            filename=filename,
                            res_filename=res_filename,
                            olddata=olddata))
def select_relations(frame_parsed_path, house_objects_path):
    """Select unique relations of frames about house's objects.

    Keeps only frame triples whose first slot matches a known house object
    (by URI or by label), drops frames that contribute no new triple, and
    writes the selected triples, their labels, verbalisations and statistics.

    Fixes: the original executed ``del frame_instances[frame_id]`` while
    iterating the dict directly, which raises RuntimeError in Python 3; we
    iterate a snapshot of the keys.  Duplicate detection now uses a set
    mirroring ``triple_uris`` instead of O(n) list membership.
    """
    house_objects = {
        v.replace(' ', '_'): k['dbpedia_uri']
        for v, k in load_json(house_objects_path).items()
    }
    house_object_uris = set(house_objects.values())
    house_object_names = set(house_objects.keys())
    frame_instances = load_json(join(frame_parsed_path,
                                     'frame_instances.json'))
    netlemma = map_netlemma()
    wn31db = map_wn31db()
    triple_uris = []
    seen_uris = set()  # mirrors triple_uris for O(1) duplicate checks
    triple_labels = []
    triple_pattern = re.compile('<(.+)> <(.+)> <(.+)>')
    # Iterate over a snapshot so entries can be deleted during the loop.
    for frame_id in list(frame_instances.keys()):
        valid_frame = False
        frame = frame_instances[frame_id]
        frame_uris, frame_labels = create_triples(frame['type'],
                                                  frame['elements'], wn31db,
                                                  netlemma)
        for uri_triple, label_triple in zip(frame_uris, frame_labels):
            if uri_triple in seen_uris:
                continue
            object_uri = triple_pattern.match(uri_triple).group(1)
            object_name = triple_pattern.match(label_triple).group(1)
            # Keep the triple when its subject is a known house object,
            # matched either by URI or by label.
            if (object_uri in house_object_uris
                    or object_name in house_object_names):
                triple_uris.append(uri_triple)
                seen_uris.add(uri_triple)
                triple_labels.append(label_triple)
                valid_frame = True
        if not valid_frame:
            # Frame contributed no new house-object triple: drop it so it is
            # excluded from the verbalised output below.
            del frame_instances[frame_id]
    calculate_statistics(triple_uris, frame_parsed_path)
    save_file(join(frame_parsed_path, 'selected_triples.nt'), triple_uris)
    save_file(join(frame_parsed_path, 'selected_triples_label.nt'),
              triple_labels)
    save_file(join(frame_parsed_path, 'selected_verbalized.txt'), [
        verbalize_frame(f['type'], f['elements'].items(), netlemma)
        for f in frame_instances.values()
    ])
    logging.info('Total valid relations with URIs: %s' % len(triple_uris))
# NOTE(review): this block appears corrupted or redacted at the source level:
# the fragment `print('user:'******'item:' + str(item))` is not valid Python,
# and the names N, N_path, user_sim_matrix_path and user_sim_matrix are used
# but never defined in the visible code.  Presumably the censored section
# built the item->users inverted index, the per-user counts N, and the path
# constants -- TODO recover the original before touching this.  Left
# byte-identical below; the `except BaseException` clauses should also be
# narrowed to `except Exception` once the block is restored.
def user_similarity(self): """建立用户的协同过滤矩阵""" user_list = self.train_data.keys() for user in user_list: items = self.train_data.get(user) return # 建立用户倒排表 item_user = dict() item_user_path = "store/item_user.pkl" try: print("开始载入用户倒排表....", file=sys.stderr) item_user = load_file(item_user_path) except BaseException: for user, items in self.train_data.items(): print('user:'******'item:' + str(item)) for u in users: N[u] += 1 save_file(N_path, N) try: print("开始载入用户协同过滤矩阵....", file=sys.stderr) user_sim_matrix = load_file(user_sim_matrix_path) except BaseException: i = 1 print(len(item_user)) for item, users in item_user.items(): print(i) i += 1 # print('item:' + str(item)) for u in users: for v in users: if u == v: continue # user_sim_matrix.setdefault(u, defaultdict(int)) # user_sim_matrix[u][v] += 1 save_file(user_sim_matrix_path, user_sim_matrix) # print(N) # # 计算相关度 # for u, related_users in user_sim_matrix.items(): # for v, con_items_count in related_users.items(): # user_sim_matrix[u][v] = con_items_count / math.sqrt(N[u] * N[v]) return user_sim_matrix