def extractWords(wordfile):
    lines, _ = wordio.read(wordfile)
    assert pth.exists('../crops/')
    word_array = []
    for line_idx, line in enumerate(lines):
        for word_idx, word in enumerate(line):
            temp_word = ""
            for char in word.characters:
                if re.match("^[a-zA-Z]", char.text):
                    char.text = char.text.decode('utf-8', 'ignore').encode("utf-8").lower()
                    temp_word += char.text
            word_array.append(temp_word)
    return word_array

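# A minimal usage sketch for extractWords(): build a vocabulary over a whole
# directory of annotation files. The '../words/' location and the '.words'
# extension are assumptions made for illustration, not paths this repo defines.
def _demo_build_vocabulary(word_dir='../words'):
    import glob
    vocab = set()
    for wf in glob.glob(pth.join(word_dir, '*.words')):
        vocab.update(extractWords(wf))
    return vocab
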
def extractImages(wordfile, imgfile, l_file, monogram, bigram, trigram):
    print wordfile
    lines, _ = wordio.read(wordfile)
    img = Image.open(imgfile)  # img = pamImage.PamImage(imgfile)
    assert pth.exists('../crops/')
    for line_idx, line in enumerate(lines):
        for word_idx, word in enumerate(line):
            previous = ""
            second_previous = ""
            for idx, char in enumerate(word.characters):
                char.text = char.text.decode('utf-8', 'ignore').encode("utf-8").lower()
                if re.match("^[a-zA-Z]", char.text):
                    monogram[char.text] = monogram.get(char.text, 0) + 1
                    if idx > 0 and previous != "":
                        # keys are stored most-recent-character-first
                        key = char.text + previous
                        bigram[key] = bigram.get(key, 0) + 1
                        if idx > 1 and second_previous != "":
                            key = char.text + previous + second_previous
                            trigram[key] = trigram.get(key, 0) + 1
                    # advance the history window after every matched character
                    second_previous = previous
                    previous = char.text
                else:
                    previous = ""
                    second_previous = ""
    return monogram, bigram, trigram

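# Hedged follow-up sketch: normalise the raw counts returned by the function
# above into relative frequencies, e.g. as a character-level language-model
# prior during decoding. Plain maximum-likelihood estimates with no smoothing
# -- an assumption; add-one smoothing may suit a corpus this small better.
def _demo_ngram_probabilities(monogram, bigram, trigram):
    def normalise(counts):
        total = float(sum(counts.values())) or 1.0  # guard against empty dicts
        return dict((k, v / total) for k, v in counts.items())
    return normalise(monogram), normalise(bigram), normalise(trigram)
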
def extractImages(wordfile, imgfile, l_file):
    print wordfile
    lines, _ = wordio.read(wordfile)
    img = Image.open(imgfile)  # img = pamImage.PamImage(imgfile)
    assert pth.exists('../crops/')
    out_str = '../crops/' + pth.basename(imgfile)
    out_str = out_str.replace(IM_EXT, '')
    for line_idx, line in enumerate(lines):
        for word_idx, word in enumerate(line):
            cropped = img.crop((word.left, word.top, word.right, word.bottom))
            out_path = out_str + '_l' + str(line_idx) + '_w' + str(word_idx) + '_t_' + word.text + IM_EXT
            cropped.save(out_path.replace('/crops', '/crops/words'))
            for idx, char in enumerate(word.characters):
                cropped = img.crop((char.left, char.top, char.right, char.bottom))
                if 0 in cropped.size:
                    continue
                otsu_img = img_as_ubyte(np.copy(np.asarray(cropped.convert('L'))))
                try:
                    threshold_global_otsu = threshold_otsu(otsu_img)
                except TypeError:
                    print 'Something weird happened'
                    continue
                global_otsu = otsu_img >= threshold_global_otsu
                l_otsu_im = 1 - global_otsu  # 1 where there is ink
                # centre of mass and spread of the ink, used to trim the crop
                # to mean +/- 3 standard deviations in each direction
                index_row = np.tile(np.arange(otsu_img.shape[0]).reshape(otsu_img.shape[0], 1),
                                    (1, otsu_img.shape[1]))
                index_col = np.tile(np.arange(otsu_img.shape[1]), (otsu_img.shape[0], 1))
                non0 = l_otsu_im.nonzero()
                Wrow = np.multiply(l_otsu_im[non0], index_row[non0])
                Wcol = np.multiply(l_otsu_im[non0], index_col[non0])
                Mrow, Mcol = np.mean(Wrow), np.mean(Wcol)
                Std_row, Std_col = np.std(Wrow), np.std(Wcol)
                top = max(0, int(Mrow - 3 * Std_row))
                bottom = min(cropped.size[1], int(Mrow + 3 * Std_row))
                left = max(0, int(Mcol - 3 * Std_col))
                right = min(cropped.size[0], int(Mcol + 3 * Std_col))
                sub_cropped = cropped.crop((left, top, right, bottom))
                char_string = char.text
                # substring membership test; char_string is a single character
                if char_string == '' or char_string not in 'abcdefghijklmnopqrstuvwxyz&#*\\ABCDEFGHJIKLMNOPQRSTUVWXYZ':
                    continue
                out_path = (out_str + '_l' + str(line_idx) + '_w' + str(word_idx) +
                            '_c' + str(idx) + '_t_' + char_string + IM_EXT)
                try:
                    sub_cropped.save(out_path.replace('/crops', '/crops/letters'))
                except SystemError:
                    print 'bad annotation...'
                if char_string in word_log:
                    word_log[char_string] += 1
                else:
                    word_log[char_string] = 1
                l_file.write(out_path + ' ' + str(label_mapping.index(char_string)) + '\n')

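# Hedged sketch: read back the '<crop path> <label index>' lines that
# extractImages() writes to l_file, e.g. when assembling a training set.
# The default path is an assumption; pass whatever file the caller opened.
def _demo_read_label_file(label_path='../crops/labels.txt'):
    samples = []
    with open(label_path) as f:
        for ln in f:
            parts = ln.split()
            if len(parts) == 2:
                samples.append((parts[0], int(parts[1])))
    return samples
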
def extractImages(wordfile, imgfile):
    print wordfile
    lines, _ = wordio.read(wordfile)
    img = Image.open(imgfile)  # img = pamImage.PamImage(imgfile)
    assert pth.exists('../crops/')
    out_str = '../crops/' + pth.basename(imgfile)
    out_str = out_str.replace(IM_EXT, '')
    train_y = []
    for line_idx, line in enumerate(lines):
        for word_idx, word in enumerate(line):
            cropped = img.crop((word.left, word.top, word.right, word.bottom))
            dict_name = out_str + '_l' + str(line_idx) + '_w' + str(word_idx) + '_t_' + word.text
            out_path = dict_name + IM_EXT
            # one slot per pixel column of the word crop; boundary columns get a 1
            word_array = np.zeros(word.right - word.left)
            gaussian_array = {}
            cropped.save(out_path)
            for idx, char in enumerate(word.characters):
                # clamp boundaries the annotation places outside the crop
                if char.left - word.left < 0:
                    word_array[0] = 1
                elif (char.left - word.left) >= (word.right - word.left):
                    word_array[(char.left - word.left) - 1] = 1
                else:
                    word_array[char.left - word.left] = 1
            word_array[(word.right - word.left) - 1] = 1  # right edge is always a boundary
            # smooth the binary indicators into soft segmentation targets
            gaussian_array[dict_name] = gaussian_filter1d(word_array, 0.5)
            train_y.append(gaussian_array)
    return train_y

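# Hedged sketch: inspect one of the soft boundary targets produced by
# extractImages() above. The matplotlib import name is an assumption; this
# module does not import pyplot itself.
def _demo_plot_boundary_targets(train_y):
    import matplotlib.pyplot as plt
    name, target = train_y[0].items()[0]
    plt.plot(target)
    plt.title(name)
    plt.show()
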
def __init__(self, word_file, img_file, out_file):
    self.out_file = out_file
    win = gtk.Window(gtk.WINDOW_TOPLEVEL)
    win.set_size_request(500, 250)
    win.connect('destroy', lambda w: gtk.main_quit())
    vbox = gtk.VBox()
    self.new_lines = [[]]  # store the annotated words + chars here
    self.lines, _ = wordio.read(word_file)
    self.img = pamImage.PamImage(img_file)
    # Keep track of where we are in the file
    self.line_iter = iter(self.lines)
    self.cur_line = self.line_iter.next()
    self.word_iter = iter(self.cur_line)
    self.pb = None
    self.word = None
    self.cropped = None
    # Cursor and annotation points (x-coordinates)
    self.current_x = 0
    self.points = []
    # Drawing area to draw the lines on
    self.drawing_area = gtk.DrawingArea()
    self.drawing_area.connect("expose-event", self.on_expose)
    self.drawing_area.connect("motion_notify_event", self.on_motion)
    self.drawing_area.connect("button_press_event", self.on_mouse_button_press)
    self.drawing_area.set_events(gtk.gdk.EXPOSURE_MASK |
                                 gtk.gdk.LEAVE_NOTIFY_MASK |
                                 gtk.gdk.BUTTON_PRESS_MASK |
                                 gtk.gdk.POINTER_MOTION_MASK |
                                 gtk.gdk.POINTER_MOTION_HINT_MASK)
    vbox.pack_start(self.drawing_area)
    hbox = gtk.HBox()
    vbox.pack_start(hbox)
    # Label row
    label = gtk.Label()
    label.set_text("Characters labeled above:")
    hbox.pack_start(label)
    self.entry = gtk.Entry()
    hbox.pack_start(self.entry)
    # Control-button row
    self.reset_button = gtk.Button("Reset")
    self.next_button = gtk.Button("Next")
    self.reset_button.connect("clicked", self.on_reset)
    self.next_button.connect("clicked", self.on_next)
    hbox = gtk.HBox()
    hbox.pack_start(self.reset_button)
    hbox.pack_start(self.next_button)
    vbox.pack_start(hbox)
    self.next_word()
    win.add(vbox)
    win.show_all()

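# Hedged launch sketch: the __init__ above belongs to a GTK annotator class
# whose name is not shown in this excerpt, so it is passed in as
# annotator_cls. Construction builds and shows the window; gtk.main() then
# runs the event loop until the window is destroyed.
def _demo_launch_annotator(annotator_cls, word_file, img_file, out_file):
    annotator_cls(word_file, img_file, out_file)
    gtk.main()
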
def process_file(wordfile, imgfile):
    n_words = matches = 0
    lines, _ = wordio.read(wordfile)
    img = Image.open(imgfile)
    for line_idx, line in enumerate(lines):
        for word_idx, word in enumerate(line):
            n_words += 1
            cropped = img.crop((word.left, word.top, word.right, word.bottom))
            color = np.copy(np.asarray(cropped))
            monograms, start, end = get_monograms_otsu(color, cropped)
            if monograms is None:
                continue
            for mg in monograms:
                if model == 'VGG':
                    # VGG preprocessing: BGR order, mean subtraction, CHW layout
                    im = 255 - mg.im[:, :, ::-1]
                    im[:, :, 0] -= 103.939
                    im[:, :, 1] -= 116.779
                    im[:, :, 2] -= 123.68
                    im = im.transpose((2, 0, 1))
                    convnet_output = conv_model.predict(im.reshape((1, im.shape[0], im.shape[1], im.shape[2])))
                    # max-pool the upper and lower halves of the feature map, then stack
                    features = np.concatenate(
                        (np.max(convnet_output[:, :, :convnet_output.shape[2] / 2, :], axis=2),
                         np.max(convnet_output[:, :, convnet_output.shape[2] / 2:, :], axis=2)),
                        axis=1).transpose((0, 2, 1))
                    feature_seq, _ = pad_sequences([features], maxlen=31, dim=1024)
                    lstm_output = lstm_model.predict_classes([feature_seq, feature_seq], batch_size=1, verbose=0)
                    letter = chr(lstm_output[0] + ord('a'))
                    mg.set_prediction(letter)
                else:
                    width, height = 30, 55
                    im = np.asarray(255 - mg.im, dtype=np.float32)
                    im /= 255.
                    im = scipy.misc.imresize(im, (height, im.shape[1], im.shape[2]))
                    im = scipy.misc.imresize(im, (im.shape[0], width, im.shape[2]))
                    im = np.asarray(im, dtype=np.float32)
                    im /= 255.
                    im = im.transpose((2, 0, 1))
                    conv_output = conv_model.predict_classes(im.reshape((1, im.shape[0], im.shape[1], im.shape[2])),
                                                             batch_size=1, verbose=0)
                    conv_confidences = conv_model.predict(im.reshape((1, im.shape[0], im.shape[1], im.shape[2])),
                                                          batch_size=1, verbose=0)
                    letter = label_mapping[conv_output[0]]
                    mg.set_prediction(letter)
                    mg.set_confidence(conv_confidences[0][conv_output[0]])
                    if len(monograms) < 50:  # so the search below doesn't freeze
                        threshold = 0.1
                        confident_idcs = conv_confidences > threshold
                        char_idxs = (confident_idcs * range(len(label_mapping)))[confident_idcs]
                        print char_idxs
                        chars = np.asarray(label_mapping)[char_idxs]
                        confs = conv_confidences[confident_idcs]
                        mg.set_options(zip(chars, confs))
                    else:
                        mg.set_options([(letter, mg.get_confidence())])
            N_grams = monograms
            words = []
            word_prediction = ''
            print 'Target: ', word.text,

            def build_words2(wrd, N_grams, start, end):
                # depth-first assembly of candidate words from adjacent segment
                # hypotheses, branching over every confident label per segment
                if wrd is None:
                    for g in N_grams:
                        if g.start == start:
                            for char, conf in g.get_options():
                                cpy = N_gram(g.im, g.start, g.end, char, conf)
                                build_words2(cpy, N_grams, start, end)
                    return
                if wrd.end == end:
                    words.append(wrd)
                    return
                for idx, g in enumerate(N_grams):
                    if wrd.followed_by(g):
                        for char, conf in g.get_options():
                            cpy = N_gram(g.im, g.start, g.end, char, conf)
                            build_words2(wrd.combine(cpy), N_grams[idx:], start, end)

            build_words2(None, N_grams, start, end)
            words = sorted(words, key=lambda w: w.get_confidence())[::-1]
            if len(words) == 0:
                print ''
                continue
            word_strings = [wrd.prediction for wrd in words]
            for wrd in word_strings:
                if wrd in vocabulary:
                    word_prediction = wrd
                    break
            if word_prediction == '':
                words = []

                def build_words(wrd, N_grams, start, end):
                    # same search, but using only each segment's single best label
                    if wrd is None:
                        for g in N_grams:
                            if g.start == start:
                                build_words(g, N_grams, start, end)
                        return
                    if wrd.end == end:
                        words.append(wrd)
                        return
                    for idx, g in enumerate(N_grams):
                        if wrd.followed_by(g):
                            build_words(wrd.combine(g), N_grams[idx:], start, end)

                build_words(None, N_grams, start, end)
                words = sorted(words, key=lambda w: w.get_confidence())[::-1]
                word_strings = [wrd.prediction for wrd in words[:10]]
                word_exists = checkWordInNgrams(word_strings, ngram_voc)
                if len(word_exists) == 1:
                    word_prediction = word_exists[0]
                else:
                    lev = calculateDistance(word_strings, vocabulary)
                    lev = sorted(lev.items(), key=operator.itemgetter(1))
                    if not lev:
                        if len(word_exists) >= 1:
                            word_prediction = word_exists[0]
                        else:
                            word_prediction = words[0].prediction
                    else:
                        closest = []
                        minDist = min([l[1] for l in lev])
                        for k, v in lev:
                            if v == minDist:
                                closest.append(k)
                        if len(closest) == 1:
                            word_prediction = closest[0]
                        else:
                            maxConf = 0
                            final_prediction = ''
                            for c in closest:
                                for w in words:
                                    if w.prediction == c and maxConf < w.get_confidence():
                                        maxConf = w.get_confidence()
                                        final_prediction = w.prediction
                            if final_prediction == '':
                                if len(word_exists) >= 1:
                                    word_prediction = word_exists[0]
                                else:
                                    word_prediction = words[0].prediction
                            else:
                                llev = calculateDistance([final_prediction], vocabulary)
                                llev = sorted(llev.items(), key=operator.itemgetter(1))
                                word_prediction = llev[0][0]
            print ', prediction: ', word_prediction
            # strip annotation markup from the target before comparing
            target = word.text.replace(';', '').replace('~', '').replace('valde_', '') \
                .replace('Ottonenem', '').replace('ducem', '').replace('@ampersand', '&') \
                .replace('@', '').replace('^o', '').replace(']', '')
            matches += 1 if target == word_prediction else 0
    return matches, n_words

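# Hedged driver sketch: run process_file() over (wordfile, imgfile) pairs and
# report word-level accuracy. How annotation files are paired with page
# images is left to the caller; nothing below is prescribed by this repo.
def _demo_evaluate(pairs):
    total_matches = total_words = 0
    for wf, imf in pairs:
        m, n = process_file(wf, imf)
        total_matches += m
        total_words += n
    if total_words:
        print 'accuracy: %.3f (%d/%d)' % (float(total_matches) / total_words,
                                          total_matches, total_words)
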
def show_hists(wordfile, imgfile):
    lines, _ = wordio.read(wordfile)
    img = Image.open(imgfile)
    for line_idx, line in enumerate(lines):
        for word_idx, word in enumerate(line):
            cropped = img.crop((word.left, word.top, word.right, word.bottom))
            color = np.copy(np.asarray(cropped))
            print color.shape
            # column profile: max of the mean intensity over several horizontal bands
            hist = np.mean(color, axis=(0, 2))
            hist = np.maximum(np.mean(color[color.shape[0] / 2:], axis=(0, 2)), hist)
            hist = np.maximum(np.mean(color[:color.shape[0] / 2], axis=(0, 2)), hist)
            hist = np.maximum(np.mean(color[color.shape[0] / 4:-color.shape[0] / 4], axis=(0, 2)), hist)
            hist = np.maximum(np.mean(color[color.shape[0] / 8:-3 * color.shape[0] / 8], axis=(0, 2)), hist)
            hist = np.maximum(np.mean(color[3 * color.shape[0] / 8:-color.shape[0] / 8], axis=(0, 2)), hist)
            print hist.shape
            otsu_img = img_as_ubyte(np.copy(np.asarray(cropped.convert('L'))))
            try:
                threshold_global_otsu = threshold_otsu(otsu_img)
            except TypeError:
                print 'Something weird happened'
                continue
            global_otsu = np.array(otsu_img >= threshold_global_otsu).astype(np.int64)
            hist2 = np.zeros(global_otsu.shape[1])
            for col in range(global_otsu.shape[1]):
                # running count of white pixels per column ('white' is never
                # reset, so this is the column total rather than the longest run)
                max_white = 0
                white = 0
                for row in range(global_otsu.shape[0]):
                    white += 1 if global_otsu[row, col] == 1 else 0
                    max_white = max(white, max_white)
                hist2[col] = max_white
            plt.imshow(color)
            hist = hist2
            hist = np.convolve(hist, 5 * [1 / 5.], 'same') / 25  # 5-tap box smoothing, scaled down
            plt.bar(np.arange(color.shape[1]), hist)
            plt.title(word.text)
            # flatten plateaus so argrelextrema finds a single peak per hill
            for idx in range(1, len(hist) - 1):
                if hist[idx] == hist[idx + 1] and hist[idx - 1] < hist[idx]:
                    hist[idx] = (hist[idx + 1] + hist[idx - 1]) / 2
            maxes = argrelextrema(hist, np.greater)
            cuts = maxes[0]
            plt.bar(cuts, len(cuts) * [color.shape[0]], color='r')
            print hist
            plt.xticks([])
            plt.yticks([])
            plt.tight_layout()
            plt.show()
            # candidate character segments: every pair of cuts between 10 and
            # 50 pixels apart becomes a monogram hypothesis
            monograms = []
            for idx, cut in enumerate(cuts):
                for nxt in cuts[idx + 1:]:
                    print nxt, cut
                    if 10 < nxt - cut < 50:
                        monograms.append(Monogram(color[:, cut:nxt], cut, nxt))

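# Hedged stand-in for the Monogram segment container used above, inferred
# purely from its call sites: a constructor taking (im, start, end) plus the
# prediction/confidence/options accessors used in process_file(). The real
# class in this repo may differ; this sketch only documents the interface.
class _MonogramSketch(object):
    def __init__(self, im, start, end):
        self.im, self.start, self.end = im, start, end
        self.prediction = None
        self.confidence = 0.0
        self.options = []

    def set_prediction(self, prediction):
        self.prediction = prediction

    def set_confidence(self, confidence):
        self.confidence = confidence

    def get_confidence(self):
        return self.confidence

    def set_options(self, options):
        self.options = options

    def get_options(self):
        return self.options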