def text_to_image(title):
    """Render *title* as a 224x224 phoneme-trace image at image/<title>.png.

    The romanized title is mapped character-by-character through
    ``phone_dict`` to 2-D coordinates; consecutive coordinates become line
    segments whose alpha and width fade along the word.  The saved PNG is
    re-read as grayscale, inverted, rescaled to [0, 1], and written back.
    Titles whose sanitized romanization reaches MAX_LENGTH characters are
    silently skipped (original behaviour).
    """
    title_roman = romanize(title).lower()
    # '&' -> ' and ', punctuation/digits -> spaces, then spaces -> '_-'.
    title_roman_wo_space = re.sub(
        r" ", r"_-",
        re.sub(r"[`;<>~0-9\[\]|:+-/.,!?@#$%^*()\'\"]", r" ",
               re.sub(r"&", r" and ", title_roman)))
    NPOINTS = MAX_LENGTH + 1
    len_title_roman = len(title_roman_wo_space)
    if len_title_roman >= MAX_LENGTH:
        return  # too long to fit the fixed-size trace; nothing is drawn

    # Pad with a '-' prefix and '_' suffix so every title yields the same
    # number of coordinates regardless of its length.
    spare_space = MAX_LENGTH - len_title_roman
    coords = []
    for ch in "-" + title_roman_wo_space + "_" * (spare_space + 2):
        try:
            coords.append(phone_dict[ch])
        except KeyError:  # was a bare except; only missing keys are expected
            pass
    # Consecutive coordinate pairs: segment i runs (coords[i] -> coords[i+1]).
    x = coords[:-1]
    y = coords[1:]

    plt.figure(figsize=(2.24, 2.24))
    for i in range(NPOINTS - 1):
        fade = float(NPOINTS - 1 - i) / (NPOINTS - 1)  # 1.0 -> 0.0 along the word
        plt.plot(x[i:i + 2], y[i:i + 2],
                 alpha=fade,
                 linewidth=5 * math.log10(1000. / len_title_roman) * fade,
                 color='black')
    # Invisible anchor point — presumably pins the axis limits so all images
    # share one coordinate frame; TODO confirm against phone_dict's range.
    plt.plot(47, 47, alpha=0.00000000001)
    plt.axis('off')
    filename = "image/" + title + ".png"
    plt.savefig(filename)
    plt.close()  # was missing: repeated calls leaked matplotlib figures

    img = cv2.imread(filename, 0)
    img = (img * -1 + 255.) / 255.  # invert and rescale to [0, 1]
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2; consider
    # migrating to imageio.imwrite.
    scipy.misc.imsave(filename, img)
from tflearn.layers.core import input_data, dropout, fully_connected
from tflearn.layers.estimator import regression
import tensorflow as tf
from data.text_to_img_2 import save_pair_image
from converter.romanize import romanize
import re

MAX_LENGTH = 20
LR = 0.001
IMG_SIZE = 224
MODEL_NAME = 'trademarks-{}-{}.model'.format(LR, '2conv-basic')

# Punctuation/digit stripper used on both titles.  Compiled once instead of
# duplicating the pattern inline.  The hyphen is escaped: the original class
# contained the accidental range `+-/` (codepoints 43-47), which happened to
# match only characters that were already listed individually, so the match
# set is unchanged.
_REFINE_RE = re.compile(r"[`;&<>~0-9\[\]|:+\-/.,!?@#$%^*()\'\"]")

# Interactively collect the two trademark titles and romanize them.
title1 = input("Title1 EN limit 20, KR limit 7:")
title1_roman = romanize(title1)
title1_input = title1_roman
title2 = input("Title2 EN limit 20, KR limit 7:")
title2_roman = romanize(title2)
title2_input = title2_roman

# Render the pair image with similarity label 0.
save_pair_image(title1_input, title2_input, 0)

title1_refine = _REFINE_RE.sub("", title1_input)
title2_refine = _REFINE_RE.sub("", title2_input)

# Path of the test image — presumably the file save_pair_image just wrote;
# verify the naming scheme against data.text_to_img_2.
path = 'image_test/' + str(title1_refine[:MAX_LENGTH]) + "." + \
    str(title2_refine[:MAX_LENGTH]) + '.' + str(0) + '.png'
def _phoneme_trace(roman_padded):
    """Map each character to its phone_dict coordinate, skipping unknown
    characters, and return (xs, ys): the start/end values of consecutive
    coordinate pairs."""
    coords = []
    for ch in roman_padded:
        try:
            coords.append(phone_dict[ch])
        except KeyError:  # was a bare except; only missing keys are expected
            pass
    return coords[:-1], coords[1:]


def _draw_title_channel(title, npoints, roman_len, xs, ys):
    """Plot one title's fading phoneme trace, save it as
    image/<sanitized-title>.png, and return the re-loaded grayscale image
    inverted and rescaled to [0, 1]."""
    plt.figure(figsize=(2.24, 2.24))
    for i in range(npoints - 1):
        fade = float(npoints - 1 - i) / (npoints - 1)  # 1.0 -> 0.0 along the word
        plt.plot(xs[i:i + 2], ys[i:i + 2],
                 alpha=fade,
                 linewidth=5 * math.log10(1000. / roman_len) * fade,
                 color='black')
    # Invisible anchor point — presumably pins axis limits so every image
    # shares one coordinate frame; TODO confirm.
    plt.plot(47, 47, alpha=0.00000000001)
    plt.axis('off')
    refined = re.sub(r"[`;&<>~0-9\[\]|:+-/.,!?@#$%^*()\'\"]", r"", title)
    filename = "image/" + str(refined) + ".png"
    plt.savefig(filename)
    img = cv2.imread(filename, 0)
    img = (img * -1 + 255.) / 255.  # invert and rescale to [0, 1]
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2.
    scipy.misc.imsave(filename, img)
    plt.close()
    return img


def textpair_to_image(title1, title2):
    """Render two titles as a stacked 224x224x3 float image.

    Channels 1 and 2 hold each title's phoneme trace; channel 3 is all
    zeros.  Returns None when either sanitized romanization reaches
    MAX_LENGTH (the original fell through an `else: pass` with no value).
    """
    def _sanitize(title_roman):
        # '&' -> ' and ', strip punctuation/digits, then spaces -> '_-'.
        return re.sub(r" ", r"_-",
                      re.sub(r"[`;<>~0-9\[\]|:+-/.,!?@#$%^*()\'\"]", r"",
                             re.sub(r"&", r" and ", title_roman)))

    roman1 = _sanitize(romanize(title1).lower())
    roman2 = _sanitize(romanize(title2).lower())
    len1 = len(roman1)
    len2 = len(roman2)
    if len1 >= MAX_LENGTH or len2 >= MAX_LENGTH:
        return None  # too long to fit the fixed-size trace

    npoints = MAX_LENGTH + 1
    # '-' prefix / '_' suffix padding gives every title the same point count.
    pad1 = "-" + roman1 + "_" * (MAX_LENGTH - len1 + 2)
    pad2 = "-" + roman2 + "_" * (MAX_LENGTH - len2 + 2)
    x1, y1 = _phoneme_trace(pad1)
    x2, y2 = _phoneme_trace(pad2)

    img1 = _draw_title_channel(title1, npoints, len1, x1, y1)
    img2 = _draw_title_channel(title2, npoints, len2, x2, y2)
    img3 = np.zeros([224, 224], dtype=np.float64)
    return np.dstack((img1, img2, img3))
def textpair_to_array(title1, title2, similarity=0):
    """Rasterize two titles as phoneme traces into a (96, 96, 2) float array.

    Each title is romanized, sanitized, mapped through ``phone_dict`` to 2-D
    coordinates, and drawn with cv2.line into its own channel; stroke
    intensity fades along the word (255/(i+1)) and width shrinks with total
    path length, clamped to [2, 10].  Values are scaled to [0, 1].
    ``similarity`` is accepted for interface compatibility but unused here.
    """
    def _sanitize(title):
        # '&' -> ' and ', punctuation/digits -> spaces, spaces -> '_-'.
        return re.sub(r" ", r"_-",
                      re.sub(r"[`;<>~0-9\[\]|:+-/.,!?@#$%^*()\'\"]", r" ",
                             re.sub(r"&", r" and ", romanize(title).lower())))

    def _coords(roman):
        # Map characters to phoneme coordinates; log and skip unknowns.
        pts = []
        for ch in roman:
            try:
                pts.append(phone_dict[ch])
            except Exception as e:
                print(e)
        # Start/end values of consecutive coordinate pairs.
        return pts[:-1], pts[1:]

    def _path_length(xs, ys):
        # Total Euclidean length of the polyline through the coordinates.
        total = 0
        for i in range(len(xs) - 1):
            total += np.linalg.norm([xs[i + 1] - xs[i], ys[i + 1] - ys[i]])
        return total

    def _stroke_width(path_len):
        # Longer paths get thinner strokes; clamp to [2, 10].  A zero-length
        # path (all points identical) gets the maximum width instead of the
        # ZeroDivisionError the original would raise.
        if path_len == 0:
            return 10
        return min(10, max(2, int(2000 / path_len)))

    roman1 = _sanitize(title1)
    roman2 = _sanitize(title2)
    x1, y1 = _coords(roman1)
    x2, y2 = _coords(roman2)
    length_line1 = _path_length(x1, y1)
    length_line2 = _path_length(x2, y2)

    img1 = np.zeros([96, 96], dtype=np.float64)
    img2 = np.zeros([96, 96], dtype=np.float64)
    # BUG FIX: the original passed the same scratch array `img` to every
    # cv2.line call, so img1 and img2 both aliased one combined drawing and
    # the returned stack held both titles in both channels.  Each title is
    # now drawn into its own channel.  Loop bounds follow len(x) rather than
    # the raw string length, so characters missing from phone_dict no longer
    # cause an IndexError.
    w1 = _stroke_width(length_line1)
    for i in range(len(x1) - 1):
        cv2.line(img1, (x1[i], y1[i]), (x1[i + 1], y1[i + 1]),
                 (255. / (i + 1)), w1)
    w2 = _stroke_width(length_line2)
    for j in range(len(x2) - 1):
        cv2.line(img2, (x2[j], y2[j]), (x2[j + 1], y2[j + 1]),
                 (255. / (j + 1)), w2)

    print(length_line1, length_line2)
    img1 = img1 / 255.
    img2 = img2 / 255.
    # For visual inspection, stack a zero third channel and cv2.imshow.
    return np.dstack((img1, img2))
import pandas as pd from data.text_to_img import save_pair_image, text_to_image, textpair_to_image from converter.romanize import romanize import re data = pd.read_csv('sample_trademark_data.csv') num_data = len(data) print(num_data) MAX_LENGTH = 20 for i in range(num_data): title1_roman = romanize(data['title1'][i]).lower() title2_roman = romanize(data['title2'][i]).lower() title1_roman_wo_space = re.sub( r" ", r"_-", re.sub(r"[`;<>~\[\]|:+-/.,!?@#$%^*()\'\"]", r"", re.sub(r"&", r" and ", title1_roman))) title2_roman_wo_space = re.sub( r" ", r"_-", re.sub(r"[`;<>~\[\]|:+-/.,!?@#$%^*()\'\"]", r"", re.sub(r"&", r" and ", title2_roman))) if len(title1_roman_wo_space) < MAX_LENGTH and len( title2_roman_wo_space) < MAX_LENGTH: if bool(re.search(r'\d', data['title1'][i])) == False and bool( re.search(r'\d', data['title2'][i])) == False: try: save_pair_image(data['title1'][i], data['title2'][i], data['similarity'][i]) except: