import os
import time

import cv2 as cv
import segmentation as seg  # assumed import: the module providing Segmentation


def image_segmentation(path, origin_path, tolerance=14, mode="Manhattan", partition="quad"):
    # load the list of files to process
    files = os.listdir(origin_path)
    print(files)
    for f in files:
        print(f)
        # skip subdirectories (check the listed entry, not the bare name)
        if not os.path.isdir(os.path.join(origin_path, f)):
            start = time.time()
            # read in the next image and process it
            image = cv.imread(path + "\\" + f)
            # make the segmentation
            testViewer = seg.Segmentation(image, tolerance, mode, partition)
            # save the fractalized output
            subpath = path + "\\processed\\" + fractal_subfolder
            cv.imwrite(subpath + "\\" + f, testViewer.Quadtree.toImage())
            # save the segmentation
            subpath = path + "\\processed\\" + segmented_subfolder
            cv.imwrite(subpath + "\\" + f, testViewer.displaySegments())
            # save the network graphic
            subpath = path + "\\processed\\" + network_subfolder
            testViewer.saveNetwork(subpath + "\\" + f)
            print(time.time() - start)
            del testViewer
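# A minimal invocation sketch: the directory path and the *_subfolder globals
# the function relies on are illustrative assumptions, not from the original.
fractal_subfolder = "fractal"
segmented_subfolder = "segmented"
network_subfolder = "network"

image_segmentation("C:\\images", "C:\\images", tolerance=14, mode="Manhattan", partition="quad")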
import cv2 as cv

# sg, fe, cl, preprocess and the upper-case constants below are expected to
# come from the surrounding module (segmentation, feature-extraction and
# classifier components plus their settings)


def train(path, use_letters, used_classifier, dim):
    src = cv.imread("data\\" + path)
    # 80/20 train/test split along the image width
    train = src[:, :4 * src.shape[1] // 5]
    test = src[:, 4 * src.shape[1] // 5:]
    seg = sg.Segmentation(dim, dim, BIN_THRESHOLD)
    ext = fe.FeatureExtraction(EXTRACTION_STEP)
    training_input = preprocess(train, seg, ext, use_letters, dim)
    test_input = preprocess(test, seg, ext, use_letters, dim)
    # 26 output classes for letters, 10 for digits
    out_layer_size = 26 if use_letters else 10
    classifier = cl.Classifier(used_classifier,
                               in_hidden_layer_size=IN_HIDDEN_LAYER_SIZE,
                               out_layer_size=out_layer_size)
    classifier.train(training_input)
    result = classifier.predict(test_input)
    # report accuracy on the held-out strip
    count = 0
    for label, predicted in zip(test_input[1], result):
        if label == int(predicted[0]):
            count += 1
    print(count / len(test_input[1]))
    classifier.save(use_letters)
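# Hypothetical call (a sketch): the sheet filename, classifier name and glyph
# size are illustrative assumptions about how this trainer is driven.
train("digits_sheet.png", use_letters=False, used_classifier="mlp", dim=28)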
import os

import segmentation


def parse_seg(segfile):
    """Get segmentation info from a segfile and fill a Segmentation object.

    Arguments:
    segfile -- path to a Nuance segmentation file to be parsed

    Returns:
    seg -- Segmentation object filled with seg info
    """
    # This base name will be used to match segmentations to wav files
    base = os.path.splitext(os.path.basename(segfile))[0]
    seg = segmentation.Segmentation(base)
    try:
        with open(segfile, 'r') as f:
            txt = f.readlines()
    except IOError as e:
        print("I/O error({0}): {1}".format(e.errno, e.strerror))
        # bail out with the empty Segmentation if the file could not be read
        return seg
    table = False
    # Parse seg file line by line in a single pass
    for line in txt:
        e = line.split()
        if len(e) > 0:
            if e[0] == 'frame_rate':
                seg.set_frame_rate(float(e[2]))  # e.g. frame_rate = 100
            # adding an utterance updates the pointer to the current utterance
            elif e[0] == 'utterance_number':
                seg.add_utterance(e[2])  # e.g. utterance_number = 0
            elif e[0] == 'prompt':
                prompt = line.split('"')[1]
                seg.curr_utt.set_prompt(prompt)
            elif e[0] == 'transcription':
                transcript = line.split('"')[1]
                seg.curr_utt.set_transcript(transcript)
            elif e[0] == 'number_of_phonemes':
                seg.curr_utt.set_nphone(e[2])
            elif e[0] == 'number_of_pels':
                seg.curr_utt.set_npel(e[2])
            elif e[0] == 'score':
                seg.curr_utt.set_score(e[2])
            elif e[0] == 'TABLE':
                table = True
            elif e[0] == 'ENDTABLE':
                table = False
            elif table and e[0] != ';':
                curr_word = seg.curr_utt.curr_word
                # If this line is the start of a new word, create a new Word.
                # Initializing a new Word automatically creates a new Phone
                # and a new Pel.
                if curr_word is None or curr_word.no != int(e[0]):
                    seg.curr_utt.add_word(e[0], e[1], e[2], e[3], e[4], e[5])
                # If this line is the start of a new phoneme, create a new
                # Phone. Initializing a new Phone automatically creates a new Pel.
                elif curr_word.curr_phn.no != int(e[1]):
                    curr_word.add_phone(e[1], e[2], e[3], e[4], e[5])
                # Each remaining table line represents a new Pel
                else:
                    curr_word.curr_phn.add_pel(e[3], e[4], e[5])
    return seg
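# Minimal usage sketch; the .seg path is a placeholder, not from the source.
parsed = parse_seg("recordings/utt001.seg")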
# do_GET is a request handler method, written for a subclass of
# http.server.BaseHTTPRequestHandler
def do_GET(self):
    if self.path == "/":
        # segment the two corpora: 舔狗日记 ("simp diary", label 舔狗/"simp")
        # and 心灵鸡汤 ("chicken soup for the soul", label 鸡汤/"chicken soup")
        sg = segmentation.Segmentation()
        sg.segment(filePath='../../data/舔狗日记.txt', label='舔狗')
        sg.segment(filePath='../../data/心灵鸡汤.txt', label='鸡汤')
        # then train and evaluate the classifier on the segmented text
        cf = classification.Classification()
        cf.train()
        cf.test()
        self.send_response(200)
        self.end_headers()
        self.wfile.write(b'success')
    else:
        self.send_response(404)
        self.end_headers()
        self.wfile.write(b'not found')
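# A minimal sketch of serving the handler above; the class name and port are
# assumptions, since the source only shows the do_GET method itself.
from http.server import BaseHTTPRequestHandler, HTTPServer

class TrainingHandler(BaseHTTPRequestHandler):
    pass

TrainingHandler.do_GET = do_GET  # bind the function defined above as a method

if __name__ == "__main__":
    HTTPServer(("localhost", 8000), TrainingHandler).serve_forever()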
from glob import glob as ls
from shutil import move as mv

import segmentation

master = segmentation.Segmentation('datasets/dataset-main')
print("Loading master's cache")
master.Load('master-main')

total_sessions = len(ls('sessions/*.csv'))
for filenum, session_path in enumerate(ls('sessions/*.csv')):
    print('Learning session ' + str(filenum + 1) + '/' + str(total_sessions))
    session_file = session_path.split('/')[1]
    master.LearningSession(session_path)
    mv(session_path, './sessions/sessions.old')

print("Storing training to master's cache")
master.Store()
import segmentation
import tool
import volsunga

word_flag_freq_dic = tool.get_freq_dic("./data/word_flag_freq_dic.txt")
flag_freq_dic = tool.get_freq_dic("./data/flag_freq_dic.txt")
flag_relation_freq_dic = tool.get_freq_dic("./data/flag_relation_freq_dic.txt")
word_flag_dic = tool.get_flag_dic("./data/chineseDic.txt")

v = volsunga.Volsunga(word_flag_freq_dic, flag_freq_dic,
                      flag_relation_freq_dic, word_flag_dic)
s = segmentation.Segmentation()
import time

import cv2 as cv
import matplotlib.pyplot as plt

import quadtree as qt       # assumed import names for the project's
import segmentation as seg  # Quadtree and Segmentation modules

# tri2 (a triangulation) and image are defined earlier in this scratch script

print(tri2.simplices.size)

# time the quadtree construction
start = time.time()
quad3 = qt.Quadtree(image, 16, 'Manhattan', 'shift_center')
end = time.time() - start
print(quad3.nodecount(), end)

output = quad3.toimage(quad3.RootNode, mode="smooth")
# cv.imshow("Image", output)
cv.imwrite("output3.jpg", output)

testViewer = seg.Segmentation(image, 14, 'Manhattan', 'quad')
testPic = testViewer.displaySegments()

# disabled experiment (testPics is never defined in this script):
# print(len(testPics))
# fs = np.zeros_like(testPic)
# for pic in testPics:
#     fs += pic
#     # plt.imshow(pic)
#     # plt.show()

print(testPic.shape, testPic)
plt.imshow(testPic, cmap='nipy_spectral')
plt.show()
import os

import cv2
import numpy as np
import sklearn.decomposition

import segmentation

# args (an argparse namespace) and fileDir are defined earlier in the script

print(args)


def scale(inVal, inMin, inMax, outMin, outMax):
    # map inVal from [inMin, inMax] onto [outMin, outMax]
    return outMin + (inVal - inMin) / (inMax - inMin) * (outMax - outMin)


# load the frames as flattened grayscale float64 vectors
images = [
    cv2.imread(os.path.join(args.dir, str(index).zfill(3) + '.jpg'),
               cv2.IMREAD_GRAYSCALE).astype(np.float64).reshape(-1)
    for index in range(args.num)
]

if args.seg:
    # detect change points on the PCA-reduced frames
    changingIndices = segmentation.Segmentation().segment(
        sklearn.decomposition.PCA(args.var).fit_transform(images), args.lam)
    changingIndices.insert(0, 0)
    changingIndices.append(args.num)
    # replace every frame in a segment with that segment's mean frame
    images = [
        np.mean(images[first:last], 0)
        for first, last in zip(changingIndices[0:], changingIndices[1:])
        for _ in range(first, last)
    ]

masks = [
    np.load(os.path.join(fileDir, 'masks', str(index).zfill(3) + '.npy'))
    for index in range(111)
]
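# Toy illustration of the segment-averaging comprehension above, using
# made-up change points; not part of the original script.
import numpy as np

frames = [np.full(4, i, dtype=np.float64) for i in range(6)]
changing = [0, 2, 6]  # two segments: frames 0-1 and frames 2-5
averaged = [
    np.mean(frames[first:last], 0)
    for first, last in zip(changing[0:], changing[1:])
    for _ in range(first, last)
]
print([a[0] for a in averaged])  # [0.5, 0.5, 3.5, 3.5, 3.5, 3.5]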