Example #1
    def open(self):
        """
        Open Igor exported TXT files
        """
        self.parameters['k'] = 1.0
        speed = 0.0

        righe = self.getFile()
        newline = self.newline

        y1 = []
        y2 = []
        x1 = []
        x2 = []

        for rigo in righe:
            r = rigo.strip(newline)
            (ffb, eeb, fff, eef) = r.split()
            if ffb.strip() == 'ffb':
                continue
            if eef.strip() != 'NAN':
                x1.append(float(eef))
                y1.append(float(fff))
            if eeb.strip() != 'NAN':
                x2.append(float(eeb))
                y2.append(float(ffb))

        self.segments.append(segment.segment(x1, y1))
        self.segments.append(segment.segment(x2, y2))

        for p in self.segments:
            p.speed = speed

        return True
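Note: the Igor loaders in this listing (here and in Example #25) only rely on a very small `segment.segment` interface: a constructor taking the x and y arrays, and a writable `speed` attribute. Purely as a hedged sketch of that assumed interface, not the project's actual class:

# Hypothetical stand-in for segment.segment as used by the Igor loaders;
# the real class almost certainly carries more state and behaviour.
class segment(object):
    def __init__(self, x, y):
        self.x = list(x)   # x values parsed from the file
        self.y = list(y)   # y values parsed from the file
        self.speed = 0.0   # pulling rate, filled in by the caller afterwards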
Example #3
def filemode_json(book_json_filepath: str):
    seg = pkuseg()
    with open(book_json_filepath, encoding='utf8') as f:
        book = json.load(f)
    title = book['title']
    author = book['author']
    chapters = book['chapters']

    book_cut = segment(seg, title, DICT_ONLY, DICT_WORDS)
    book_cut.extend(segment(seg, author, DICT_ONLY, DICT_WORDS))
    chapters_output = []
    for chapter in chapters:
        chapter_title = chapter['title']
        chapter_content = chapter['content']
        cut = segment(seg, chapter_title, DICT_ONLY, DICT_WORDS)
        content_cut = segment(seg, chapter_content, DICT_ONLY, DICT_WORDS)
        cut.extend(content_cut)
        chapter_output = {'title': chapter_title, 'cut': cut}
        chapters_output.append(chapter_output)

    print(
        json.dumps({
            'title_cut': book_cut,
            'chapter_cuts': chapters_output
        },
                   indent=4,
                   ensure_ascii=False))
Example #4
	def __init__ (self,num_segs,lens,base_location=[0,0,0],base_angle=0,positions=[[0,0,0]],forward_angle=0,leg_ID=0,beta1=0.8,beta2=2,step_offset=0,z_offset_height=0):
		# lens is a list of leg segment lengths
		self.segments = [sg.segment(0,0,lens[0],0)]
		for i in range(1,num_segs):
			self.segments.append(sg.segment(self.segments[i-1].get_base()[0]+lens[i-1],0,lens[i],0))
		self.base_location = np.array(base_location)
		self.base_angle = base_angle
		# Used for non-linear least-squares
		self.beta1 = beta1
		self.beta2 = beta2

		self.ID = leg_ID

		self.positions = np.array(positions)
		self.forward_angle = 0
		self.step_count = 0
		self.set_forward_angle(forward_angle)

		self.max_step = len(positions) - 1

		self.forward = 1
		step_offset = int(step_offset*self.max_step)
		self.step(force_step=step_offset)
                #self.z_height_offset = z_height_offset

		ct.translate(self.positions,0,0,z_offset_height)
Example #5
File: _cam.py Project: lwd8cmd/Mitupead
 def analyze_frame(self):
     # try:
     _, img = self.cam.read()  # get frame
     segment.segment(img, self.fragmented, self.t_ball, self.t_gatey, self.t_gateb)  # update threshold maps
     self.analyze_balls(self.t_ball)
     self.analyze_gate((self.t_gatey if self.gate == 0 else self.t_gateb), self.gate)  # my gate
     if self.gates[self.gate] is None:  # other gate
         self.analyze_gate((self.t_gateb if self.gate == 0 else self.t_gatey), 1 - self.gate)
Example #6
File: arm.py Project: m214368/si475-lab-2
 def __init__(self):
     self.ac = ArmController()
     seg = list()
     seg.append(s.segment(0, .012, 0, 0))
     seg.append(s.segment(0, 0, .077, (np.pi / 2)))
     seg.append(s.segment((np.pi / 2) - acos(.128 / .13), .130, 0, 0))
     seg.append(s.segment(acos(.128 / .13) - (np.pi / 2), .124, 0, 0))
     seg.append(s.segment(0, .126, 0, -(np.pi / 2)))
     self.segments = seg
     super(arm1, self).__init__(seg)
Example #7
    def __init__(self, num_segs, lens, beta1_=0.8, beta2_=2):
        # lens is a list of leg segment lengths
        self.segments = [sg.segment(0, 0, lens[0], 0)]
        for i in range(1, num_segs):
            self.segments.append(
                sg.segment(self.segments[i - 1].get_base()[0] + lens[i - 1], 0,
                           lens[i], 0))

        # Used for non-linear least-squares
        self.beta1 = beta1_
        self.beta2 = beta2_
Example #9
    def addsegment(self):
        tail = self.body[-1]
        dx, dy = tail.dirnx, tail.dirny

        if dx == 1 and dy == 0:
            self.body.append(segment((tail.pos[0] - 1, tail.pos[1])))
        elif dx == -1 and dy == 0:
            self.body.append(segment((tail.pos[0] + 1, tail.pos[1])))
        elif dx == 0 and dy == 1:
            self.body.append(segment((tail.pos[0], tail.pos[1] - 1)))
        elif dx == 0 and dy == -1:
            self.body.append(segment((tail.pos[0], tail.pos[1] + 1)))

        self.body[-1].dirnx = dx
        self.body[-1].dirny = dy
Example #10
File: artist.py Project: ghidra/plot
 def waiting(self):
     if self.waitcount > self.wait:
         self.turtle_last = self.origin()
         self.ready = True
         #return [ self.lift( self.turtle_last ) ]
     self.waitcount += 1
     return [segment(vector3(), vector3())]
Example #11
File: artist.py Project: ghidra/plot
 def drop(self, position):
     self.skating = False
     seg = segment(vector3(position.x, position.y, self.skateheight),
                   vector3(position.x, position.y, 0.0),
                   draw=False)
     self.segment_count += 1
     return seg
Example #12
File: artist.py Project: ghidra/plot
 def lift(self, position):
     self.skating = True
     seg = segment(vector3(position.x, position.y, 0.0),
                   vector3(position.x, position.y, self.skateheight), True,
                   False)
     self.segment_count += 1
     return seg
Example #13
    def Qsearch(self,query):
        words = seg.segment(query.strip())
        #words = self.segmentor.segment(query.strip())
        #print ' '.join(words)
        vm_env = lucene.getVMEnv()
        vm_env.attachCurrentThread()
        result = QueryParser(Version.LUCENE_CURRENT, "contents",self.analyzer)
        result.setPhraseSlop(0)
        # "\""+' '.join(words)+"\"~0" means words should be continuous
        query = result.parse("\""+' '.join(words)+"\"~0")
        totalHits = self.searcher.search(query, 50)
        #print "%s total matching documents." % totalHits.totalHits
        #return totalHits.totalHits

        for hit in totalHits.scoreDocs:
            #print"Hit Score: ",hit.score, "Hit Doc:",hit.doc, "HitString:",hit.toString()
            doc= self.searcher.doc(hit.doc)
            #print doc.get("name").encode("utf-8")
        #print "----------------------------------------"
        t = Term('contents',' '.join(words))
        #termDocs = ireader.termDocs(t)
        #for tt in termDocs:
        #       print ireader.document(termDocs.docs).getFeildable('neme'),termDocs.freq()
        #print self.reader.totalTermFreq(t)
        return self.reader.totalTermFreq(t)
Example #14
 def reset(self, pos):
     self.head = segment(pos)
     self.body = []
     self.body.append(self.head)
     self.turns = {}
     self.dirnx = 0
     self.dirny = 1
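Note: the snake examples (#9 and #14 above) assume a body `segment` whose constructor takes a grid position and which exposes `pos`, `dirnx` and `dirny`. A minimal sketch of that assumed shape, not the original project's class (which presumably also knows how to draw itself):

# Hypothetical minimal snake-body segment matching how addsegment()/reset() use it.
class segment(object):
    def __init__(self, pos):
        self.pos = pos    # (column, row) grid position
        self.dirnx = 0    # x direction: -1, 0 or 1
        self.dirny = 1    # y direction: -1, 0 or 1

    def move(self, dirnx, dirny):
        self.dirnx, self.dirny = dirnx, dirny
        self.pos = (self.pos[0] + dirnx, self.pos[1] + dirny)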
Example #15
def find_best_epoch(min_epoch=1, max_epoch=1000):
    """
    Iterates over epochs min-max, produces graphs for all in terms of accuracy
    against the training set we have
    """
    MODEL_FILENAME = "model.hdf5"
    MODEL_LABELS_FILENAME = "model_labels.dat"
    VALIDATION_FOLDER = "segmenter_output"

    grey = cv2.imread(TEST_PNG)
    if len(grey.shape) > 2:
        grey = cv2.cvtColor(grey, cv2.COLOR_BGR2GRAY)
    image_characters = segment(grey)
    with open(MODEL_LABELS_FILENAME, 'rb') as f:
        lb = pickle.load(f)

    for i in range(min_epoch, max_epoch):
        print("TESTING EPOCH =", i)
        train_network(i, "../neural_network/images")

        # Load the trained neural network
        model = load_model(MODEL_FILENAME)

        accuracy, letter_match = get_accuracy(image_characters, model, lb)
        print("ACCURACY @EPOCH", i, ":", accuracy)
        with open('dataepoch' + str(i), 'w') as f:
            f.write(str(accuracy) + "\n")
            for key in letter_match:
                f.write(str(key) + "\t" + str(letter_match[key]) +"\n")
Example #16
def train_compeyele(image, conv_segment=False, greedy_thresh=4.6):
    """
    Segments the image using either a greedy approach or a convolutional greedy
    approach, then converts the segmented characters to text via manual input
    :arg image input image
    :arg conv_segment whether or not to use the convolutional segmentation
         approach
    :arg greedy_thresh value to use for binarization with greedy segmentation
    :return plaintext representation of the image
    """
    # greyscale if able
    grey = image.copy()
    if len(image.shape) > 2:
        grey = cv2.cvtColor(grey, cv2.COLOR_BGR2GRAY)
    if conv_segment:
        image_characters = perform_conv_segmentation(grey)
    else:
        image_characters = segment(grey, greedy_thresh)

    result = ""
    for row in range(image_characters.shape[0]):
        for col in range(image_characters.shape[1]):
            # if you're a space
            if (image_characters[row][col].astype(np.uint8) - 255).sum() == 0:
                result += " "
            else:
                import matplotlib.pyplot as plt
                plt.imshow(image_characters[row][col])
                plt.show()
                char = input("Enter Character>")
                result += char
    return result
Example #17
def generateColorSegment(test_folder):
    current_directory = os.getcwd()
    segmented_folder = "ColorSegmented"
    if (os.path.exists(os.path.join(current_directory, segmented_folder))):
        shutil.rmtree(os.path.join(current_directory, segmented_folder))
        os.makedirs(os.path.join(current_directory, segmented_folder))
    else:
        os.makedirs(os.path.join(current_directory, segmented_folder))

    test_directory = os.path.join(current_directory, test_folder)

    print("\nGenerating color segment .. ")

    if os.path.exists(test_directory):
        for t in os.listdir(test_directory):
            test_path = os.path.join(test_directory, t)
            if not t.startswith('.') and os.path.isfile(test_path):
                print("Color segment: " + str(t))
                img = cv2.imread(test_path)
                color_segmented_list = segment.segment(img, "all")
                save_path = os.path.join(os.getcwd(), "ColorSegmented")
                for cs in color_segmented_list:
                    cv2.imwrite(
                        os.path.join(
                            save_path,
                            os.path.splitext(t)[0] + "_" + cs[0] + ".png"),
                        cs[1][0])
                    cv2.imwrite(
                        os.path.join(
                            save_path,
                            os.path.splitext(t)[0] + "_" + cs[0] + "_mask" +
                            ".png"), cs[1][1])
    else:
        print("Input folder not found. ")
        exit()
Example #18
def load_demo_images(paths):
    paths = [os.path.join(paths, x) for x in os.listdir(paths)]
    img_h = cfg.CONST.IMG_H
    img_w = cfg.CONST.IMG_W

    model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
    """
    imgs = []
    for path in paths:
        img = Image.open(path)
        #img = segment(path)
        #img = transforms.ToPILImage()(img)
        img = img.resize((img_h, img_w), Image.ANTIALIAS)
        img = preprocess_img(img, train=False)
        imgs.append([np.array(img).transpose( \
                        (2, 0, 1)).astype(np.float32)])
    """

    imgs = segment(paths, model)
    result = []
    for img in imgs:
        img = transforms.ToPILImage()(img)
        img = img.resize((img_h, img_w), Image.ANTIALIAS)
        img = preprocess_img(img, train=False)
        result.append([np.array(img).transpose( \
                        (2, 0, 1)).astype(np.float32)])

    ims_np = np.array(result).astype(np.float32)

    return torch.from_numpy(ims_np)
Example #19
 def classify(self, q):
     #text = jieba.cut_for_search(q)
     text = segment(q)
     text = " ".join(text)
     classifier = self.model
     res = classifier.predict_proba([text], k=self.topK)
     return res
Example #20
def generate_restaurant_eating_csv_using_daily_segment(data_type: int):
    if data_type == variable.daily_data:
        file_name = 'personicle-1498515264813-' + date + '-export'

        # daily data
        segment_path = '/Users/hyungiko/Desktop/Personicle Data/json/daily/' + userName + '/' + file_name + '.json'
        lifelog_path = '/Users/hyungiko/Desktop/Personicle Data/json/daily/' + userName + '/lifelog/' + file_name + '.json'

        extract_eating = extractEating('', lifelog_path, segment_path, date,
                                       data_type)
        filtered_event = extract_eating.get_filtered_event(
            extract_eating.get_event('eating'))

        get_segment = segment('', '', '')
        path = '/Users/hyungiko/Desktop/Personicle Data/eating/segment/' + userName + '_segment_res_' + date + '.csv'

        get_segment.get_segment_res(filtered_event, path)

        # extract_eating.write_data(filtered_event, userName)

    elif data_type == variable.whole_data:
        # whole data
        whole_path = '/Users/hyungiko/Desktop/Personicle Data/json/whole/' + fileName + '.json'

        extract_eating = extractEating(whole_path, '', '', '', data_type)
        filtered_event = extract_eating.get_filtered_event(
            extract_eating.get_event('eating'))
        extract_eating.write_data(filtered_event, userName)
Example #21
def anneal(text, segs, iterations, cooling_rate, lex_trie=None):
    """
    simulate annealing on :text: by randomly segmenting it

    :param text: the text to be segmented
    :param segs: the initial segmentation
    :param iterations: the number of iterations per cooling cycle
    :param cooling_rate: the rate at which to cool
    :param lex_trie: optional lexicon trie used by the specificity scorer
    :return: the final segmentation
    """
    def __eval(text, segs, lexicon):
        return evaluate_lexicon(
            text, segs) if not lexicon else evaluate_specificity(
                text, segs, lexicon)

    temperature = float(len(segs))
    while temperature > 0.5:
        best_segs, best = segs, __eval(text, segs, lex_trie)
        for i in range(iterations):
            guess = flip_n(segs, int(round(temperature)))
            score = __eval(text, guess, lex_trie)
            if score < best:
                best, best_segs = score, guess
        score, segs = best, best_segs
        temperature = temperature / cooling_rate
        print(__eval(text, segs, lex_trie), segment.segment(text, segs))
    return segs
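Note: `flip_n`, `evaluate_lexicon` and `evaluate_specificity` are not included in the listing. As a hedged sketch only, `flip_n` presumably toggles n randomly chosen boundary markers in the 0/1 segmentation string, the standard move for this style of annealing search:

import random

# Hypothetical helper, not from the original project: flip n random
# boundary bits in a segmentation string such as "0010001000".
def flip_n(segs, n):
    segs = list(segs)
    for i in random.sample(range(len(segs)), min(n, len(segs))):
        segs[i] = '1' if segs[i] == '0' else '0'
    return ''.join(segs)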
Example #22
    def render(self):
        self.segment = []

        segment_insert_index = 0  #if we have multiple assets, we need to insert skates at the right index
        oldTotal = 0
        numpyTotal = 0
        segTotal = 0

        for asset in self.assets:

            if not self.useNumpy:
                newttm = matrix4()
                newttm = newttm.multiply(asset["rnm"]).multiply(self.rnm)
                newttm = newttm.multiply(self.rm)

                points = []
                for p in asset["points"]:
                    point = vector4(p.x, p.y, p.z, 1.0)
                    point = point.mult_matrix4(newttm)

                    ux = point.x / point.w
                    uy = point.y / point.w
                    uz = point.z / point.w  #z is kind of useless in this case

                    points.append(vector3(ux, uy, 0.0))

                #NOW PREPARE THE SEGMENTS
                for seg in asset["segments"]:
                    for i in range(len(seg) - 1):
                        self.segment.append(
                            segment(points[seg[i]], points[seg[i + 1]]))
                        # print( points[seg[i]].printable() )
                        self.segment_count += 1

                    #now for each curve, do the lifting and skating and dropping
                    #lift the pen from the last position of the last segment
                    self.segment.append(
                        self.lift(self.segment[len(self.segment) - 1].p2))
                    #insert the lifting, skating and dropping at the first position of the curve
                    self.skate_to(self.segment[segment_insert_index].p1,
                                  index=segment_insert_index)
                    #set data for next loop
                    self.turtle_last = vector2(
                        self.segment[len(self.segment) - 1].p2.x,
                        self.segment[len(self.segment) - 1].p2.y)
                    segment_insert_index = len(self.segment)

            else:
                #numpy version
                newttm_n = np.identity(4)
                newttm_n = newttm_n * asset["rnm_n"]
                newttm_n = newttm_n * self.rnm_n
                newttm_n = newttm_n * self.rm_n

                points_n = asset["numpy_points"].dot(newttm_n)
                points_n = points_n[:, :2] / points_n[:, [3]]  #if z is wanted, use [:, :3] in the first slice
Example #23
def filemode(filepath: str):
    seg = pkuseg()
    with open(filepath) as f:
        input_str = f.read()
    segmented = segment(seg, input_str, DICT_ONLY, DICT_WORDS)
    for word in segmented:
        print(word)
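Note: the `segment(seg, text, DICT_ONLY, DICT_WORDS)` helper used here and in Example #3 is not part of the listing. A hedged guess at its shape, assuming DICT_ONLY switches on filtering against DICT_WORDS (an assumption, not confirmed by the snippets):

# Hypothetical reconstruction of the segment() helper used above;
# the project's real implementation may differ.
def segment(seg, text, dict_only=False, dict_words=None):
    """Cut text with a pkuseg instance, optionally keeping only whitelisted words."""
    words = seg.cut(text)
    if dict_only and dict_words:
        words = [w for w in words if w in dict_words]
    return words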
Example #24
File: main.py Project: weiyudaixing/repair
def load_masks(h, w):
    masks = None
    if args.masks is not None:
        # load masks
        masks = torch.load(args.masks)
        style_mask = masks["tar"]
        content_mask = masks["in"]
        if style_mask.shape[1] != h or style_mask.shape[2] != w:
            print(
                "Style mask shape is not compatible with desired image size ({}, {})"
                .format(h, w))
            masks = None
        if content_mask.shape[1] != h or content_mask.shape[2] != w:
            print(
                "Content mask shape is not compatible with desired image size ({}, {})"
                .format(h, w))
            masks = None

    if masks is None:
        # create masks
        from segment import segment
        masks = segment(args, h, w)

    style_mask = masks["tar"]
    content_mask = masks["in"]
    style_mask = style_mask.to(device).unsqueeze(1)
    content_mask = content_mask.to(device).unsqueeze(1)
    return style_mask, content_mask
Example #25
    def open(self):
        """
        Open internal Igor Text File ITX
        """
        self.parameters['k'] = 1.0
        speed = 0.0

        righe = self.getFile()
        newline = self.newline

        y1 = []
        y2 = []
        x1 = []
        x2 = []

        speed = 0.0
        del righe[0:3]
        for rigo in righe:
            r = rigo.strip(newline)
            if r.strip() == 'END':
                break
            (ffb, eeb, fff, eef) = r.split()
            if ffb.strip() == 'ffb':
                continue
            if eef.strip() != 'NAN':
                x1.append(float(eef))
                y1.append(float(fff))
            if eeb.strip() != 'NAN':
                x2.append(float(eeb))
                y2.append(float(ffb))

        self.segments.append(segment.segment(x1, y1))
        self.segments.append(segment.segment(x2, y2))

        r = righe[-1].strip(newline)
        r = r[r.find('"') + 1:-1]
        sl = r.split(';')
        for var in sl:
            nm, val = var.split('=')
            if nm.strip() == 'SC(pN/nm)':
                self.parameters['k'] = float(val)
            if nm.strip() == 'PullingRate(nm/s)':
                speed = float(val) / 1.0e9

        for p in self.segments:
            p.speed = speed
        return True
Example #27
    def merge_segments(self, filename, current_segment, overlapping_segment):
        start = min([current_segment.start, overlapping_segment.start])
        end = max([current_segment.end, overlapping_segment.end])
        axis = ''.join(
            sorted(set(current_segment.axis + overlapping_segment.axis)))
        new_segment = sgmnt.segment(start, end, axis, filename)

        return new_segment
Example #28
def generate_non_restaurant_eating_csv_using_daily_segment():
    # date = '1020_2018'
    fileName = 'personicle-1498515264813-' + date + '-export'
    # userName2 = userName + '_' + date
    data_path = '/Users/hyungiko/Desktop/Personicle Data/json/daily/' + userName + '/' + fileName + '.json'

    get_segment = segment(data_path, userName + '_' + date, date)
    get_segment.get_segment()
Example #29
File: artist.py Project: ghidra/plot
 def advect(self, v):
     newpos = self.turtle + v
     self.segment = [
         segment(vector3(self.turtle.x, self.turtle.y, 0.0),
                 vector3(newpos.x, newpos.y, 0.0))
     ]
     self.segment_count += 1
     self.turtle = newpos
Example #30
def finish(SNDADDR):
    global Socket
    global nextseq

    Socket.sendto(segment.segment(seq_num=nextseq + 2, fin=1).seg, SNDADDR)
    senderLog.writelines("snd  %2.3f F %8d %3d %8d\n" % (time.time() % 1*10, nextseq+2, 0, nextack))
    while True:
        inf, outf, errf = select([Socket, ], [], [], 0)
        if inf:
            data,SNDADDR = Socket.recvfrom(1024)
            seg = tr_seg(data)
            if seg.FIN == 1 and seg.ACK == 1:
                senderLog.writelines("rcv  %2.3f FA%8d %3d %8d\n" % (time.time() % 1*10, seg.seq_num, 0, seg.ack_num))
                Socket.sendto(segment.segment(seq_num=nextseq + 3, ack=1).seg, SNDADDR);
                senderLog.writelines("snd  %2.3f A %8d %3d %8d\n" % (time.time() % 1*10, seg.ack_num, 0, seg.seq_num+1))
                Socket.close()
                break
Example #31
File: tests.py Project: hpsoar/searchZhihu
 def test_segment(self):
     string = '写代码是一种怎样的体验?'
     words = segment(string)
     self.assertTrue('写' in words)
     self.assertTrue('代码' in words)
     self.assertTrue('是' not in words)
     self.assertTrue('一种' in words)
     self.assertTrue('怎样' not in words)
     self.assertTrue('体验' in words)
Example #32
def encode(data):
    seg_str = data.decode("UTF-8")
    self = segment.segment(syn=int(seg_str[0]),
                           fin=int(seg_str[1]),
                           ack=int(seg_str[2]),
                           seq_num=int(seg_str[3:11]),
                           ack_num=int(seg_str[11:19]),
                           data=seg_str[19:])
    return self
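Note: read together with Example #30, `encode` fixes the wire layout used by this project's `segment.segment`: one character each for the SYN, FIN and ACK flags, two zero-padded 8-digit counters for seq_num and ack_num, then the payload. The class itself is not in the listing; purely as an assumption, a minimal counterpart whose `.seg` field round-trips through `encode` could look like this:

# Hypothetical stand-in for segment.segment; the real class may add
# checksums, timestamps or other fields not visible in these snippets.
class segment(object):
    def __init__(self, syn=0, fin=0, ack=0, seq_num=0, ack_num=0, data=""):
        self.SYN, self.FIN, self.ACK = syn, fin, ack
        self.seq_num, self.ack_num, self.data = seq_num, ack_num, data
        # serialise in exactly the layout that encode() slices apart
        self.seg = ("%d%d%d%08d%08d%s"
                    % (syn, fin, ack, seq_num, ack_num, data)).encode("UTF-8")

# e.g. encode(segment(ack=1, seq_num=42, data="hi").seg).seq_num == 42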
Example #33
def evaluate(dir_idx, song_idx, error_radius=5):
    true_bounds, types, dir_path, label_path, audio_path = parse_ttl(
        dir_idx, song_idx)
    pred_bounds = segment(audio_path, feature_method='stft', display=True)
    pred_bounds = np.floor(pred_bounds)
    print true_bounds, pred_bounds
    correct_num, total_num, correct_types, incorrect_types = bounds_check(
        pred_bounds, true_bounds, types, error_radius=error_radius)
    return correct_num, total_num, correct_types, incorrect_types
Example #34
def main():
    if len(sys.argv) < 4:
        print('Usage: python segment.py <image> <plaintext> <output folder>')
        sys.exit()

    img = cv2.imread(sys.argv[1], 0)

    if img is None:
        print('Invalid image path!')
        print('Usage: python segment.py <input>')
        sys.exit()

    segments = segment(img, 100)
    # print(segments)

    counts = {}

    with open(sys.argv[2]) as f:
        for row, line in enumerate(f):
            for col, letter in enumerate(line):
                if letter == '\n' or letter == '\r' or letter == '\t':
                    continue
                label = letter
                # print(str(row) + " " + str(col))
                if letter in inv_map:
                    label = inv_map[letter]
                elif letter.isupper():
                    label = label + "_upper"
                # print(label)    
                
                save_path = sys.argv[3]
                
                # make folder
                if not os.path.exists(save_path):	
                    os.makedirs(save_path)
                
                save_path = os.path.join(save_path, label)
                
                # make folder
                if not os.path.exists(save_path):	
                    os.makedirs(save_path)
                
                count = counts.get(label, 1)
                
                if count > 20:
                    # we have too much of the character, skip it
                    continue
                
                try:
                    save_path = os.path.join(save_path, label + (".png" * count))	
                    # print(save_path)
                    cv2.imwrite(save_path, segments[row][col])
                except:
                    pass
                
                counts[label] = count + 1
Example #35
def extract_segment_content(route_number, segment_dictionary_list):
    segment_list = [
        segment(route_number, seg['id'],
                (seg['name'] or "").encode('utf-8').replace(",", ""), index,
                seg['distance'], seg['elev_difference'], seg['start_distance'],
                seg['end_distance'], seg['ratio'],
                seg['newly_created_segment'], seg['avg_grade'])
        for index, seg in enumerate(segment_dictionary_list)
    ]
    return segment_list
Example #36
def length_histogram(image):
    """Create histogram of cell lengths."""
    segmentation, angle = segment(image)
    lengths = get_lengths(segmentation)
    plt.hist(lengths)
    plt.xlabel("Major axis cell length (pixels)", fontsize=16)
    plt.ylabel("Frequency", fontsize=16)
    plt.xticks(fontsize=16)
    plt.yticks(fontsize=16)
    plt.savefig("length_histogram.png")
Example #37
File: tokenizer.py Project: iitis/dnsclass
def do_segment(word):
	ret = []

	if P.tok.seg > 0 and len(word) > P.tok.seg:
		for token in segment(word):
			if len(token) > 1:
				ret.append(token)
	elif len(word) > 0:
		ret.append(word)

	return ret
Example #38
def get_result(fn):
    im = Image.open(fn).convert("RGB")
    im = preprocess(im)
    regs = segment.segment(im)
    clr = ml.get_clr()
    data = []
    for _, reg in enumerate(regs):
        imgData = util.getArrayData(reg)
        data.append(imgData)
    res = clr.predict(data).tostring()
    return res
Example #39
 def index_one_link(self,url,name):
     words = segment(name)
     for w in words:
         if self.link_index_dict.has_key(w):
             dct = self.link_index_dict[w]
             if dct.has_key(url):
                 self.link_index_dict[w][url] += 1
             else:
                 self.link_index_dict[w][url] = 1
         else:
             self.link_index_dict[w] = {}
             self.link_index_dict[w][url] = 1
     self.client.hset(TABLE_URL_TITLE_LINK,url,name)
Example #40
 def index_title(self,url,title):
     #print url,title
     words = segment(title)
     for w in words:
         if self.title_index_dict.has_key(w):
             dct = self.title_index_dict[w]
             if dct.has_key(url):
                 self.title_index_dict[w][url] += 1
             else:
                 self.title_index_dict[w][url] = 1
         else:
             self.title_index_dict[w] = {}
             self.title_index_dict[w][url] = 1
     self.client.hset(TABLE_URL_TITLE_TITLE,url,title)
Example #41
    def ltp_model(self,term,pron):
        ### ltp segmentor model. Judge pron
        seg_list = seg.segment(term)
        #print ' '.join(seg_list)
        # find all segment pron
        pron_find_str = ''
        for word in seg_list:
            word_res = self.db_query(word)
            for each in word_res:
                pron_find_str += each[self.DICTSOURCE]+'\t'
        print 'ltp',pron_find_str,

        if pron in pron_find_str:
            print 1,
            return 1
        print 0,
        return 0
Example #42
def scores(samples, outDir=None):
    data = []
    label = []
    for idx, (fn, lb) in enumerate(samples):
        print "start processing:", idx
        im = Image.open(fn).convert("RGB")
        im = preprocess(im)
        regs = segment.segment(im)
        f = fn.split(os.sep)[-1]
        for ix, reg in enumerate(regs):
            # reg.save("./imgs/liepin/out/"+f[:-4]+"_"+str(ix)+f[5+ix]+".bmp")
            imgData = util.getArrayData(reg)
            data.append(imgData)
            label.append(lb[ix])
            # print lb[ix],f[5+ix]
    clr = ml.get_clr()
    clr.train(data, label)
Example #43
File: truth.py Project: dimart/app
def compute_truth(path=inpath):
  for imname in os.listdir(path):
    if (imname[0] == '.'): continue

    img      = io.imread(path + '/' + imname)
    segments = segment(img)
    segnum   = len(np.unique(segments))

    print imname, segnum
    compute_pixelMap(segments, segnum)
    assign_label(img, segnum)

    # print pixelMap
    # io.imshow(mark_boundaries(img, segments))
    # io.show()

    update_truth(imname, segments, segnum)
Example #44
def highlight_plot(input_file, output_file, plot_id):
    """Highlight a particular plot in a field image"""
    image = Image.from_file(input_file)

    # Debug speed up.
#   image = image[0:500, 0:500]  # Quicker run time for debugging purposes.

    name, ext = os.path.splitext(input_file)

    plots = segment(image)

    ann = get_grayscale_ann(image)
    ann = color_in_plots(ann, image, plots)
    ann = outline_plots(ann, image, plots)
    ann = red_outline(ann, plots, plot_id)

    with open(output_file, "wb") as fh:
        fh.write(ann.png())
Example #45
    def ltp_model(self,term,pos,pron):
        ### ltp segmentor model. Judge pron
        #words = self.segmentor.segment(term.encode('utf-8'))
        words = seg.segment(term)
        # Position and start pos
        start = 0
        for i in range(0,len(words)):
            #print i,words[i]
            if start + len(words[i])/3 >= pos:
                break
            else:
                start += len(words[i])/3
        # word seg: i  querypos: wordpos
        wordpos = pos-start
        if len(words)==0:
            return 0

        return self.dict_model(words[i],wordpos,pron)
Example #46
    def process_query(self):

        """Return (at most) five most distinct keywords in the query."""
        keywords = []
        for words in self.query:
            keywords += segment(words)

        main_keywords = []
        # Digits and English words are distinctive in Zhihu search
        for keyword in keywords[:]:
            if keyword[0] in digits_and_letters:
                main_keywords.append(keyword)
                keywords.remove(keyword)
        # Sort keywords by length
        keywords.sort(key=len, reverse=True)
        main_keywords.sort(key=len, reverse=True)
        # Extract at most five keywords with descending importance
        main_keywords += keywords[:5]
        return main_keywords[:5]
Example #47
def analyse_file(fpath, output_directory, csv_fhandle):
    """Analyse a single file."""
    logging.info("Analysing file: {}".format(fpath))
    image = Image.from_file(fpath)

    # Debug speed up.
#   image = image[0:500, 0:500]  # Quicker run time for debugging purposes.

    fname = os.path.basename(fpath)
    name, ext = os.path.splitext(fname)

    plots = segment(image)  # 26s
    plots = filter_sides(plots)  # +7s
    plots = filter_touching_border(plots)  # +6s

    # print('time to stop')
    # sys.exit(0)

    # Experimenting...
    # import grid
    # from jicbioimage.core.util.color import pretty_color_from_identifier
    # ydim, xdim = plots.shape
    # columns = grid.grid(plots)
    # ann = get_grayscale_ann(image)
    # for i, c in enumerate(columns):
    #     color = pretty_color_from_identifier(i)
    #     for j, r in enumerate(c):
    #         ann.text_at("{},{}".format(i, j), r.centroid,
    #                     color=color, size=60, center=True)
    #     for i in range(3):
    #         ann.draw_line((0, c.x_mean - i), (ydim-1, c.x_mean - i), color)
    #         ann.draw_line((0, c.x_mean + i), (ydim-1, c.x_mean + i), color)

    ann = get_grayscale_ann(image)  # + 2 min 20s / now +2s
    ann = color_in_plots(ann, image, plots)  # +4s
    ann = outline_plots(ann, image, plots)  # +10s
    ann = overlay_text(ann, image, plots, name)  # +11s

    ann_fpath = os.path.join(output_directory, name + ".png")
    with open(ann_fpath, "wb") as fh:
        fh.write(ann.png())
Example #48
    def fp_growth(self, filepath, minsup):
        import csv
        import unicodecsv
        from fp_growth import find_frequent_itemsets

        formattedpath = filepath + '.format.csv'
        with open(formattedpath, 'wb') as outputfile:
            writer = unicodecsv.writer(outputfile, delimiter='\t', encoding='utf-8')
        
            segmentor = segment()
            with open(filepath) as inputfile:
                for transaction in csv.reader(inputfile, delimiter='\t'):

                    assert len(transaction) == 1, "Invalid"
                    writer.writerow(segmentor.char_segment(transaction[0]))

        finalresult = {}
        with open(formattedpath) as inputfile:
            for itemset, support in find_frequent_itemsets(csv.reader(inputfile, delimiter='\t'), minsup, True):
                finalresult[', '.join(itemset)] = support
        return finalresult
Example #49
File: task_common.py Project: ybbaigo/tea
def jieba_seg(filepath):

    cnt = Counter()
    cnt['line'] = 0

    segmentor = segment()

    with open(filepath + '.seg', 'wb') as outputfile:
        writer = unicodecsv.writer(outputfile, delimiter='\t', encoding='utf-8')

        with open(filepath) as inputfile:

            cnt['line'] += 1
            logging.info('line count')
                
            for transaction in csv.reader(inputfile, delimiter='\t'):
                assert len(transaction) == 1, "\n%s" % (str(transaction))

                cleanedstr = string_process.remove_characters(unicode(transaction[0], 'utf-8'))
                segmentres = segmentor.jieba_segment(cleanedstr)
                cleanedres = string_process.remove_invalid_string(segmentres)
                writer.writerow(cleanedres)
Example #50
def wheat_variety_analysis(microscopy_collection, output_dir):
    """Analyse all series in microscopy collection."""

    csv_fpath = os.path.join(output_dir, "cell_lengths.csv")
    with open(csv_fpath, "w") as csv_fh:
        write_csv_header(csv_fh)

        for s in microscopy_collection.series:
            print("Analysing series {}".format(s))

            # Write the CSV file.
            image = get_image(microscopy_collection, s)
            segmentation, angle = segment(image)

            for l in get_lengths(segmentation):
                write_csv_row(s, l, csv_fh)

            # Create annotated image.
            image = rotate(image, angle)
            annotation = annotate_segmentation(image, segmentation)

            im_fpath = os.path.join(output_dir, "series_{:03d}.png".format(s))
            with open(im_fpath, "wb") as im_fh:
                im_fh.write(annotation.png())
Example #51
#!/usr/bin/python

import os
import sys
import segment
import pyPdf
import cv2

def get_pngs(source):
    (path, fn) = os.path.split(source)
    (name, ext) = os.path.splitext(fn)
    if ext.lower() == '.pdf':
        reader = pyPdf.PdfFileReader(open(source, "rb"))
        pages = reader.getNumPages()


if len(sys.argv) > 1:
    source = sys.argv[1]
else:
    print("Please give a filename to convert")
    exit(-1)


systems = segment.segment(source)

for (i, system) in enumerate(systems):
    for (j, bar) in enumerate(system['bar_images']):
        fn = "bar_%03d_%03d.png" % (i, j)
        print "writing " + fn
        cv2.imwrite(fn, bar['image'])
Example #52
import segment
import os

video_root_dir = '/data3/gesture/ConGD_files/ConGD_phase_1/valid'
with open('con_list/valid.txt') as input:
    valid_list = input.readlines()
output = open('con_list/valid_segmented.list', 'w')

for line in valid_list:
    video_path = os.path.join(video_root_dir, line[:-1] + '.M.avi')
    try:
        ret = segment.segment(video_path, L=92, threshold=50, tail_length=8, play=False, plot=False)
    except:
        ret = False
    if ret:
        s = '%s' % line[:-1]
        for fragment in ret:
            s += ' %d,%d:0' % fragment
        print s
        output.write(s + '\n')
    else:
        s = '%s %d error' % (line[:-1], 0)
        print s
        raise Exception
Example #53
File: leave_one_out.py Project: masahi/mos
import numpy as np
import cPickle
from conf import *
from register import register_all
from build_pa import build_pa
from segment import segment
from jaccard import compute_jaccard
from intensity_model import learn_forest
import time

with open("atlas_list") as f:
    atlas = cPickle.load(f)

n_atlas = len(atlas)
jac = np.zeros((n_atlas, n_labels))

for i,a in enumerate(atlas):
    t = time.time()
    print "Segmenting " + a
    register_all(a, atlas)
    build_pa(a, atlas)
    learn_forest(a, atlas)
    segment(a, atlas)
    jac[i] = compute_jaccard(a)
    print jac[i]
    

np.save("jaccard_score.npy", jac)
mean_jac = np.mean(jac, axis=0)
print mean_jac
Example #54
File: task_lqs.py Project: ybbaigo/tea
    jieba.add_word('有什么')
    jieba.add_word('用什么')
    jieba.add_word('看什么')
    jieba.add_word('玩什么')
    jieba.add_word('爱什么')
    jieba.add_word('干什么')
    jieba.add_word('整什么')
    jieba.add_word('为什么')

    jieba.add_word('什么事')

#    unittest()

    results = []

    segmentor = segment()
#    for questions, answers in convert_to_qa('/Users/yy/code/ruyi-data/data/longquan/data-xianer-qa.raw.txt'):
#
#        results.append((convert_to_str(questions[0]) + '\tRAWQ\t' + questions[0]).encode('utf-8'))
#        for question in questions[1:]:
#            results.append((convert_to_str(question) + '\tVARQ\t' + question).encode('utf-8'))
##        for answer in answers:
##            results.append((convert_to_str(answer) + '\tRAWA\t' + answer).encode('utf-8'))
#
#
#    for questions in utility.read_file('../data/log-query.csv', '\t'):
#        assert len(questions) == 1
#
#        unistr = unicode(questions[0], "utf-8")
#        results.append((convert_to_str(unistr) + '\tLOGQ\t' + unistr).encode('utf-8'))
Example #55
def search(location1Id, location2Id, departureDate, departureTime,
        arrivalDate, arrivalTime):
    resultFound = True
    resultNum = 0

    trips = []

    while(resultFound):
        r = requests.get("http://www.yathra.se/finder.php?" + \
                "avgnr=" + str(resultNum) + "&" +  \
                "from=" + location1Id + "&" + \
                "to=" + location2Id + "&" + \
                "departureDate=" + departureDate + "&" + \
                "departureTime=" + departureTime + "&" + \
                "arrivalDate=" + arrivalDate + "&" + \
                "arrivalTime=" + arrivalTime)
        r.encoding = 'ISO-8859-1'

        #f = codecs.open('result.json','w','utf-8')
        #f.write(json.dumps(r.json(), sort_keys=True, indent=2))
        #f.close()

        data = json.loads(r.text)


        if (data.keys() != [u'error']):
            for i in range (len(data['timetableresult']['ttitem'])):

                trip = ttitem.ttitem()
                trip.totalPrice = float(data['timetableresult']['ttitem'][i]['price'])
                trip.sellerName = data['timetableresult']['ttitem'][i]['sellername']
                trip.totalTravelTime = data['timetableresult']['ttitem'][i]['traveltimetotal']
                trip.URL = data['timetableresult']['ttitem'][i]['url']
                trip.segments = []

                for j in range (len(data['timetableresult']['ttitem'][i]['segment'])):
                    currentSegment = segment.segment()
                    currentSegment.arrivalTime = \
                        time.strptime(data['timetableresult']['ttitem'][i]['segment'][j]['arrival']['datetime'], \
                            "%Y-%m-%d %H:%M")
                    currentSegment.arrivalLocation = location.location(
                             data['timetableresult']['ttitem'][i]['segment'][j]['arrival']['location']['id'], \
                             data['timetableresult']['ttitem'][i]['segment'][j]['arrival']['location']['name'], \
                             data['timetableresult']['ttitem'][i]['segment'][j]['arrival']['location']['x'], \
                             data['timetableresult']['ttitem'][i]['segment'][j]['arrival']['location']['y'] \
                             )
                    currentSegment.departureTime = \
                        time.strptime(data['timetableresult']['ttitem'][i]['segment'][j]['departure']['datetime'], \
                            "%Y-%m-%d %H:%M")
                    currentSegment.departureLocation = location.location(
                             data['timetableresult']['ttitem'][i]['segment'][j]['departure']['location']['id'], \
                             data['timetableresult']['ttitem'][i]['segment'][j]['departure']['location']['name'], \
                             data['timetableresult']['ttitem'][i]['segment'][j]['departure']['location']['x'], \
                             data['timetableresult']['ttitem'][i]['segment'][j]['departure']['location']['y'] \
                             )
                    if('direction' in data['timetableresult']['ttitem'][i]['segment'][j].keys()):
                        currentSegment.direction = data['timetableresult']['ttitem'][i]['segment'][j]['direction']
                    currentSegment.lowestPrice = data['timetableresult']['ttitem'][i]['segment'][j]['lowestprice']
                    currentSegment.lowestPriceCompany = data['timetableresult']['ttitem'][i]['segment'][j]['lowestpriceseller']['name']
                    currentSegment.lowestPriceURL = data['timetableresult']['ttitem'][i]['segment'][j]['lowestpriceseller']['url']
                    currentSegment.segmentNumber = j
                    trip.segments.append(currentSegment)
                trips.append(trip)
            resultNum += 1
            print str(int(trip.totalPrice)) + " sek " + \
                    datetime.fromtimestamp(mktime(currentSegment.departureTime)).strftime("%Y-%m-%d %H:%M") + "\n\t" + trip.totalTravelTime + "\n\t" + trip.URL 
        else:
            if data.keys() == [u'error']:
                print data['error']
            resultFound = False
    return trips
Example #57
def PrepareFinalString(Address,cluster,wordlist,abb_list,vocab_dict):
	from segmentmeta import segmentamt
	from segment import segment
	xyz_score=45
	print(Address)
	Address =Address.lower()
	Address = (''.join( abb_list.get( word, word ) for word in re.split( '(\W+)', Address )) )
	Address = re.sub('[^A-Za-z]+',' ', Address) #extracting on alphabets
	Address= Address.split(' ') #removing all white spaces to make a single long query for segmentation i.e ahmedblockgardentown 
	I_do_not_have_anymore_names=[]	
	for k in xrange(len(Address)):
		temp_segment = segment(Address[k])
		print(temp_segment)
		if(len(temp_segment)>1):
			for x in xrange(len(temp_segment)):
				answer = doublemetaphone(temp_segment[x])
				if (len(answer[0])>1):
					metaph=answer[0]
					if(cluster.has_key(metaph.upper())):
						I_do_not_have_anymore_names.append(str(MaxiMumInVocab(cluster,metaph.upper())[0]))
					else:
						I_do_not_have_anymore_names.append(str(temp_segment[x]))
				else:
					metaph=answer[1]
					if(cluster.has_key(metaph.upper())):
						I_do_not_have_anymore_names.append(str(MaxiMumInVocab(cluster,metaph.upper())[0]))
					else:
						I_do_not_have_anymore_names.append(str(temp_segment[x]))
				#if not metaph:
					#I_do_not_have_anymore_names.append(temp_segment[x])
		elif (len(temp_segment)==1):
			if(len(temp_segment[0])==1):
				I_do_not_have_anymore_names.append(temp_segment[0])
			else:
				Match_score = SpellCheck(temp_segment[0],wordlist)
				xyz_score = (float(Match_score[0][1])*100)
				if(xyz_score<85):
					answer = doublemetaphone(temp_segment[0])
					if (len(answer[0])>1):
						metaph=answer[0]
					else:
						metaph=answer[1]
					if not metaph:
						I_do_not_have_anymore_names.append(str(temp_segment[0]))
					segmented_meta=(segmentamt(metaph))
				#print (segmented_meta)
					for y in xrange(len(segmented_meta)):
						if(cluster.has_key(segmented_meta[y].upper())):
							I_do_not_have_anymore_names.append(str(MaxiMumInVocab(cluster,segmented_meta[y].upper())[0]))
						else:
							I_do_not_have_anymore_names.append(str(temp_segment))
				else:
					I_do_not_have_anymore_names.append(str(Match_score[0][0]))
		#print(I_do_not_have_anymore_names)
	Address =" ".join(I_do_not_have_anymore_names)
	Address = (''.join( abb_list.get( word, word ) for word in re.split( '(\W+)', Address )) )
	try:
		remove_list = RemoveList(vocab_dict)
		Address = (''.join( remove_list.get( word, word ) for word in re.split( '(\W+)', Address)))
	except:
		Address= re.sub(' +',' ',Address)
	return Address.strip()
Example #58
def annotate(image):
    """Return annotated image."""
    segmentation, angle = segment(image)

    image = rotate(image, angle)
    return annotate_segmentation(image, segmentation)
Example #59
            if (content[pos] != '<' and is_tag == False):
                if content[pos] != '' and content[pos] != '\t' and content[pos] != '\n':
                    word_content += content[pos]
                continue
            if (content[pos] == '<'):
                is_tag = True
                continue
            if (content[pos] == '>'):
                is_tag = False
                continue
        content = word_content
        word_content = ""
        is_tag = False
        for pos in range(0,len(content)):
            if (content[pos] != '{' and is_tag == False):
                if content[pos] != '' and content[pos] != '\t' and content[pos] != '\n':
                    word_content += content[pos]
                continue
            if (content[pos] == '{'):
                is_tag = True
                continue
            if (content[pos] == '}'):
                is_tag = False
                continue
        word_content = word_content.replace("&nbsp"," ")
        content = content.lower()
        segment_words = segment(content)
        line = "/".join(segment_words)
        output_file.write(str(docId)+"\n")
        output_file.write(line+"\n")
Example #60
File: test.py Project: lwd8cmd/Mitupead
    ob = yuv.astype("uint32")
    fragmented = colors_lookup[ob[:, :, 0] + ob[:, :, 1] * 0x100 + ob[:, :, 2] * 0x10000]
    t_ball = (fragmented == 1).view("uint8")
    t_gatey = (fragmented == 2).view("uint8")
    t_gateb = (fragmented == 3).view("uint8")
print(time.time() - timea)
print(fragmented.shape)

while True:
    cv2.imshow("tava", t_ball * 255)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break

mshape = (480, 640)
segmented = np.zeros(mshape, dtype=np.uint8)
is_ball = np.zeros(mshape, dtype=np.uint8)
is_gatey = np.zeros(mshape, dtype=np.uint8)
is_gateb = np.zeros(mshape, dtype=np.uint8)

time.sleep(1)

timea = time.time()
for i in xrange(60):
    segment.segment(yuv, segmented, is_ball, is_gatey, is_gateb)
print(time.time() - timea)

while True:
    cv2.imshow("tava", is_ball)
    if cv2.waitKey(1) & 0xFF == ord("q"):
        break