def show_anno(root, coco_anno_file):
    """Display skeleton annotations from a coco-like (but not COCO) JSON file.

    Each entry in the JSON list must carry ``imgInfo.img_name`` (an image file
    under *root*) and ``unit.skel_di5`` (a list of polygon segmentations).
    Every skeleton polygon is rasterised and drawn over the image in a
    per-instance colour; the result is shown in an OpenCV window. Press any
    key to advance to the next image.
    """
    CLASS_2_COLOR, _ = GenColorMap(200)
    with open(coco_anno_file, "r") as read_file:
        coco = json.load(read_file)

    for item in coco:
        image_name = item['imgInfo']['img_name']
        file_path = os.path.join(root, image_name)

        im = cv2.imread(file_path)
        skel_di5 = item['unit']['skel_di5']
        # Draw directly on the loaded image. (The original also allocated a
        # zero canvas here, but immediately overwrote it with `im`.)
        canvas = im
        print(image_name)
        for idx, ann in enumerate(skel_di5):
            color = CLASS_2_COLOR[idx + 1]
            mask = pologons_to_mask(ann, im.shape[:-1])
            canvas = draw_mask(canvas, mask, color)

        cv2.imshow("skel", canvas)
        cv2.waitKey(0)
def trans_anno(img_root, anno_root, output_root, ori_file,
               target_file):  # anno_root is the dir of input
    """Convert a COCO annotation file into the project's start-point format.

    For every image in ``<anno_root>/<ori_file>`` each annotation's
    segmentation is rasterised and fed to ``get_keypoints`` (step=20,
    crop_edge=25); the flattened start points, their offsets, control points
    and offsets are stored per image and written as JSON to
    ``<output_root>/<target_file>``.

    Does nothing if the target file already exists; prints a warning if the
    source annotation file is missing.
    """
    train_anno = os.path.join(output_root, target_file)
    ori_anno = os.path.join(anno_root, ori_file)
    file_exist = isfile(train_anno)
    no_ori = not isfile(ori_anno)

    if not file_exist and not no_ori:
        coco_fiber = COCO(ori_anno)
        coco_ids = coco_fiber.getImgIds()
        catIds = coco_fiber.getCatIds()
        train_data = []
        print('transforming annotations...')
        num_good_images = 0

        for img_id in tqdm(coco_ids):
            img = coco_fiber.loadImgs(img_id)[0]
            file_path = os.path.join(img_root, img['file_name'])

            this_image = cv2.imread(file_path)
            img_shape = this_image.shape[:-1]

            annIds = coco_fiber.getAnnIds(imgIds=img['id'], catIds=catIds)
            anns = coco_fiber.loadAnns(annIds)

            start_points = []
            control_points = []
            off_sets = []
            start_points_offsets = []

            for ann in anns:
                mask = pologons_to_mask(ann['segmentation'], img_shape)

                start_point, start_point_offset, control_point, off_set = get_keypoints(
                    mask, this_image, step=20, crop_edge=25, debug=True)
                start_points += start_point
                start_points_offsets += start_point_offset
                control_points += control_point
                off_sets += off_set

            # JSON-friendly ints (keypoint helpers may return numpy scalars)
            start_points = [int(v) for v in start_points]
            start_points_offsets = [int(v) for v in start_points_offsets]
            control_points = [int(v) for v in control_points]
            off_sets = [int(v) for v in off_sets]

            unit = {
                'start_points': start_points,
                'start_points_offsets': start_points_offsets,
                'control_points': control_points,
                'off_sets': off_sets,
            }
            imgInfo = {
                'imgID': img_id,
                'img_name': img['file_name'],
                'file_path': file_path,
            }
            train_data.append({'unit': unit, 'imgInfo': imgInfo})
            num_good_images += 1

        print('saving transformed annotation...')
        with open(train_anno, 'w') as wf:
            json.dump(train_data, wf)
        print('done')
    if no_ori:
        print('''WARNING! There is no annotation file found at {}.
            Make sure you have put annotation files into the right folder.'''.
              format(ori_anno))
def trans_anno(instance_root, binary_root, org_image_folder, output_root, target_file, reverse_seq=False):
    """Build a COCO-style JSON annotation file from per-instance mask images.

    Every file in ``instance_root`` must be named ``<imageid>_<instanceid>...``.
    For each instance mask, end/control points are sampled along the skeleton
    with ``get_keypoints`` and stored together with the segmentation polygon,
    bbox, area and category. The matching semantic binary mask (looked up in
    ``binary_root``) and the original ``_w1``/``_w2`` channel images (looked
    up in ``org_image_folder``) are recorded once per image. The result is
    written to ``<output_root>/<target_file>``.

    When ``reverse_seq`` is True, a second annotation with the control-point
    sequence reversed (``'reverse': 1``) is emitted for every instance.
    """
    train_anno = os.path.join(output_root, target_file)
    step_size = 15  # skeleton sampling step in pixels
    crop_edge = 0

    binary_images_files = os.listdir(binary_root)
    org_image_files = os.listdir(org_image_folder)

    train_data = {}
    category_info = {}
    category_info['supercategory'] = 'w'
    category_info['id'] = 1
    category_info['name'] = 'worms'
    train_data['categories'] = [category_info]

    print('transforming annotations...')

    anno_id = 0
    images = []
    annotations = []
    img_id = 0
    img_id_map = {}  # filename image-id prefix -> numeric image id

    for filename in os.listdir(instance_root):
        # BUG FIX: reset per file. Previously this flag was set once before
        # the loop and stayed True after the first unseen image, so every
        # later instance file appended a duplicate img_info to `images`.
        new_image = False

        f = os.path.join(instance_root, filename)
        if os.path.isfile(f):
            print(f)
        file_name = f.split('/')[-1]
        image_id = file_name.split('_')[0]

        if image_id in img_id_map:
            current_id = img_id_map[image_id]
        else:
            new_image = True
            img_id_map[image_id] = img_id
            img_id += 1
            current_id = img_id_map[image_id]

        # NOTE(review): if nothing in these listings matches image_id, the
        # names below stay unbound (or keep the previous iteration's value).
        # Assumes the data layout always provides a match -- confirm.
        for name in binary_images_files:
            if image_id in name:
                binary_image_name = name
        for name in org_image_files:
            if image_id in name:
                if image_id + '_w1' in name:
                    w1 = name
                elif image_id + '_w2' in name:
                    w2 = name

        # filenames without an instance-id part are skipped
        try:
            instance_id = file_name.split('_')[1]
        except:
            print(file_name)
            continue

        file_path = os.path.join(instance_root, file_name)
        semantic_binary_path = os.path.join(binary_root, binary_image_name)
        w2_path = os.path.join(org_image_folder, w2)

        # instance mask -> strict {0, 255} uint8
        this_isntance_mask = cv2.imread(file_path)
        this_isntance_mask = cv2.cvtColor(this_isntance_mask, cv2.COLOR_BGR2GRAY)
        this_isntance_mask = (255. * (this_isntance_mask > 0)).astype('uint8')

        polygon_this_instance = mask_to_pologons(this_isntance_mask)

        # semantic (whole-image) mask -> strict {0, 255} uint8
        semantic_binary = cv2.imread(semantic_binary_path)
        semantic_binary = semantic_binary.astype('uint8')
        semantic_binary = cv2.cvtColor(semantic_binary, cv2.COLOR_BGR2GRAY)
        semantic_binary = (255. * (semantic_binary > 0)).astype('uint8')

        # w2 channel normalised to [0, 1] float32 (passed to get_keypoints)
        w2_show = cv2.imread(w2_path, cv2.IMREAD_COLOR)
        w2_show = cv2.normalize(w2_show, None, alpha=0, beta=1,
                                norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)

        img_shape = semantic_binary.shape
        # NOTE(review): img_shape is (rows, cols), so 'width' actually holds
        # the row count -- kept as-is because the JSON keys depend on it.
        width, height = img_shape[0], img_shape[1]

        unit = {}
        polygon_semantic = mask_to_pologons(semantic_binary)
        # round-trip sanity check: raises if the polygon is malformed
        polygon_semantic_test = pologons_to_mask(polygon_semantic, semantic_binary.shape)

        # OpenCV 3.x three-value findContours signature
        _, contours, _ = cv2.findContours(np.uint8(this_isntance_mask),
                                          cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        try:
            px = contours[0].squeeze()[:, 0]
        except:
            print('invalid')
            continue
        py = contours[0].squeeze()[:, 1]

        # [x, y, w, h] from the outer contour
        bbox = [int(np.min(px)), int(np.min(py)),
                int(np.max(px) - np.min(px)), int(np.max(py) - np.min(py))]

        end_point, control_point, off_sets_prev, off_sets_next = get_keypoints(
            this_isntance_mask > 0, w2_show, step=step_size,
            crop_edge=crop_edge, debug=False)

        if len(control_point) < 1:
            print('no good')
            continue

        control_points_label = np.reshape(np.asarray(control_point), (-1, 2))
        end_points_label = np.reshape(np.asarray(end_point), (-1, 2))

        unit['control_points'] = control_points_label.tolist()
        unit['endpoints'] = end_points_label.tolist()
        unit['seq_x_col'] = control_points_label[:, 0].tolist()
        unit['seq_y_row'] = control_points_label[:, 1].tolist()

        unit['id'] = int(anno_id)
        unit['image_id'] = int(current_id)
        unit['end_points'] = end_point
        unit['control_point'] = control_point
        unit['segmentation'] = polygon_this_instance
        unit['reverse'] = int(0)
        unit['bbox'] = bbox
        unit['area'] = int(bbox[2] * bbox[3])
        unit['iscrowd'] = 0
        unit['category_id'] = 1

        annotations.append(unit)
        anno_id += 1

        if reverse_seq:
            # same instance, control-point sequence reversed
            unit = {}
            unit['control_points'] = control_points_label[::-1].tolist()
            unit['endpoints'] = end_points_label[::-1].tolist()
            unit['seq_x_col'] = control_points_label[::-1, 0].tolist()
            unit['seq_y_row'] = control_points_label[::-1, 1].tolist()

            unit['id'] = int(anno_id)
            unit['image_id'] = int(current_id)
            unit['end_points'] = end_point
            unit['control_point'] = control_point
            unit['segmentation'] = polygon_this_instance
            unit['reverse'] = int(1)
            unit['bbox'] = bbox
            unit['area'] = int(bbox[2] * bbox[3])
            unit['iscrowd'] = 0
            unit['category_id'] = 1

            annotations.append(unit)
            anno_id += 1

        # Convert coordinates to int IN PLACE: these lists are the very
        # objects stored under 'end_points'/'control_point' in the appended
        # annotation dict(s), so the dumped JSON ends up with plain ints.
        for i in range(len(end_point)):
            end_point[i] = int(end_point[i])
        for i in range(len(control_point)):
            control_point[i] = int(control_point[i])

        if new_image:
            img_info = {}
            img_info['id'] = int(current_id)
            img_info['w1'] = w1
            img_info['w2'] = w2
            img_info['binary'] = binary_image_name
            img_info['file_path'] = file_path
            img_info['width'] = int(width)
            img_info['height'] = int(height)
            img_info['flickr_url'] = ''
            img_info['coco_url'] = ''
            img_info['date_captured'] = 'none'
            img_info['cropped_edge'] = int(crop_edge)
            # key spelling ('segmentaion') kept: downstream readers may rely on it
            img_info['segmentaion'] = polygon_semantic
            images.append(img_info)

    print('saving transformed annotation...')
    train_data['images'] = images
    train_data['annotations'] = annotations
    with open(train_anno, 'w') as wf:
        json.dump(train_data, wf)
    print('done')
def trans_anno(img_root, anno_root, output_root, ori_file, target_file):  # anno_root is the dir of input
    """Convert COCO fiber annotations into per-orientation skeleton polygons.

    For every image in ``<anno_root>/<ori_file>``, each annotation mask is
    skeletonised with ``get_skel`` (dilation 2), the skeleton's slope ``k`` is
    turned into an orientation angle in [0, 180), and the skeleton polygon is
    binned into one of twelve 15-degree buckets (``final_12``). Results are
    written as JSON to ``<output_root>/<target_file>``.

    Does nothing if the target file already exists; prints a warning if the
    source annotation file is missing.
    """
    train_anno = os.path.join(output_root, target_file)
    ori_anno = os.path.join(anno_root, ori_file)
    file_exist = isfile(train_anno)
    no_ori = not isfile(ori_anno)

    if not file_exist and not no_ori:
        coco_fiber = COCO(ori_anno)
        coco_ids = coco_fiber.getImgIds()
        catIds = coco_fiber.getCatIds()
        train_data = []
        print('transforming annotations...')
        num_bad_images = 0
        num_good_images = 0

        for img_id in tqdm(coco_ids):
            img_ok = True
            img = coco_fiber.loadImgs(img_id)[0]
            file_path = os.path.join(img_root, img['file_name'])

            this_image = cv2.imread(file_path)
            img_shape = this_image.shape[:-1]

            annIds = coco_fiber.getAnnIds(imgIds=img['id'], catIds=catIds)
            anns = coco_fiber.loadAnns(annIds)

            seg = []
            skel_di0_polo = []

            skel_0_15 = []
            skel_15_30 = []
            skel_30_45 = []
            skel_45_60 = []
            skel_60_75 = []
            skel_75_90 = []
            skel_90_105 = []
            skel_105_120 = []
            skel_120_135 = []
            skel_135_150 = []
            skel_150_165 = []
            skel_165_180 = []

            for ann in anns:
                mask = pologons_to_mask(ann['segmentation'], img_shape)
                skel_di0, k = get_skel(mask, 2)

                # slope -> orientation angle, normalised into [0, 180)
                # (math.atan returns (-90, 90) degrees, so only the +180
                # branch can actually fire)
                angle = math.atan(k) * 180 / math.pi
                if angle > 180:
                    angle -= 180
                elif angle < 0:
                    angle += 180

                seg.append(ann['segmentation'])

                # BUG FIX: this assignment was commented out although
                # skel_di0_polo_tmp is used below -- NameError at runtime.
                skel_di0_polo_tmp = mask_to_pologons(skel_di0)
                skel_di0_polo.append(skel_di0_polo_tmp)

                if angle <= 15:
                    skel_0_15.append(skel_di0_polo_tmp)
                elif angle <= 30:
                    skel_15_30.append(skel_di0_polo_tmp)
                elif angle <= 45:
                    skel_30_45.append(skel_di0_polo_tmp)
                elif angle <= 60:
                    skel_45_60.append(skel_di0_polo_tmp)
                elif angle <= 75:
                    skel_60_75.append(skel_di0_polo_tmp)
                elif angle <= 90:
                    skel_75_90.append(skel_di0_polo_tmp)
                elif angle <= 105:
                    # BUG FIX: the original chain skipped 90-105 entirely,
                    # silently dropping those skeletons.
                    skel_90_105.append(skel_di0_polo_tmp)
                elif angle <= 120:
                    skel_105_120.append(skel_di0_polo_tmp)
                elif angle <= 135:
                    skel_120_135.append(skel_di0_polo_tmp)
                elif angle <= 150:
                    skel_135_150.append(skel_di0_polo_tmp)
                elif angle <= 165:
                    skel_150_165.append(skel_di0_polo_tmp)
                elif angle <= 180:
                    skel_165_180.append(skel_di0_polo_tmp)

                # round-trip sanity check; a failure marks the image as bad
                try:
                    testing3 = pologons_to_mask(skel_di0_polo_tmp, skel_di0.shape)
                except:
                    num_bad_images += 1
                    img_ok = False

            final_12 = [skel_0_15, skel_15_30, skel_30_45, skel_45_60,
                        skel_60_75, skel_75_90, skel_90_105, skel_105_120,
                        skel_120_135, skel_135_150, skel_150_165, skel_165_180]

            unit = {}
            unit['segmentation'] = seg
            unit['final_12'] = final_12

            imgInfo = {}
            imgInfo['imgID'] = img_id
            imgInfo['img_name'] = img['file_name']

            single_data = {'unit': unit, 'imgInfo': imgInfo}

            if img_ok:
                train_data.append(single_data)
                num_good_images += 1
            else:
                print(num_bad_images)

        print('saving transformed annotation...')
        with open(train_anno, 'w') as wf:
            json.dump(train_data, wf)
        print('done')
    if no_ori:
        print('''WARNING! There is no annotation file found at {}.
            Make sure you have put annotation files into the right folder.'''
            .format(ori_anno))
# --- Beispiel #5 (vote count: 0) ---
# Non-Python separator left over from the example scraper; commented out so
# the module remains parseable.
def show_anno(root, coco_anno_file):
    """Render and save skeleton masks for every image in a COCO annotation file.

    For each image the segmentation of every annotation is rasterised and
    drawn on a float32 canvas in a per-instance colour. The canvas is shown
    in an OpenCV window (press any key to advance) and three files are
    written to a hard-coded output folder: the coloured label canvas
    (``*_label.png``), the RGB image (``*_rgb.png``) and a binarised version
    of the canvas (``*_bw.png``).
    """
    CLASS_2_COLOR, _ = GenColorMap(200)
    coco = COCO(coco_anno_file)
    for img_id in list(coco.imgs.keys()):
        img_name = coco.loadImgs(img_id)[0]['file_name']
        file_path = os.path.join(root, img_name)
        ann_ids = coco.getAnnIds(imgIds=img_id)
        anns = coco.loadAnns(ann_ids)
        im = cv2.imread(file_path)
        im_h, im_w, _ = im.shape

        canvas = np.zeros(im.shape, dtype=np.float32)
        for idx, ann in enumerate(anns):
            color = CLASS_2_COLOR[idx + 1]
            mask = pologons_to_mask(ann['segmentation'], im.shape[:-1])
            # BUG FIX: the original passed an undefined name `skel` here;
            # the rasterised `mask` was clearly intended (see the adjacent
            # commented-out line in the original).
            canvas = draw_mask(canvas, mask, color)

        im = cv2.resize(im, (int(im_w), int(im_h)),
                        interpolation=cv2.INTER_CUBIC)
        im_nobox = cv2.resize(canvas, (int(im_w), int(im_h)),
                              interpolation=cv2.INTER_CUBIC)

        # binarise the canvas: any touched pixel -> 255
        gray = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
        ret, bw = cv2.threshold(gray, 0.000001, 255, cv2.THRESH_BINARY)
        bw = np.asarray(bw).astype(np.uint8)

        prefix = os.path.splitext(img_name)[0]
        print(prefix)
        cv2.imshow("skel", canvas)
        cv2.waitKey(0)

        cv2.imwrite(
            "/home/yiliu/work/fiberPJ/data/fiber_labeled_data/skel_mix_di3/" +
            prefix + "_label.png", im_nobox)
        cv2.imwrite(
            "/home/yiliu/work/fiberPJ/data/fiber_labeled_data/skel_mix_di3/" +
            prefix + "_rgb.png", im)
        cv2.imwrite(
            "/home/yiliu/work/fiberPJ/data/fiber_labeled_data/skel_mix_di3/" +
            prefix + "_bw.png", bw)
# --- Beispiel #6 (vote count: 0) ---
# Non-Python separator left over from the example scraper; commented out so
# the module remains parseable.
def trans_anno(img_root, anno_root, output_root, ori_file,
               target_file):  # anno_root is the dir of input
    """Convert COCO annotations to per-instance keypoint records plus an
    overlapping-area polygon per image.

    For each image every annotation mask is converted to end/control points
    and prev/next offsets via ``get_keypoints`` (step=50, crop_edge=25).
    Pixels covered by more than one instance are collected, cropped by the
    same edge margin, and stored as ``overlapping_area_pologon``. Output is
    JSON at ``<output_root>/<target_file>``.

    Does nothing if the target file already exists; prints a warning if the
    source annotation file is missing.
    """
    train_anno = os.path.join(output_root, target_file)
    ori_anno = os.path.join(anno_root, ori_file)
    file_exist = isfile(train_anno)
    no_ori = not isfile(ori_anno)

    if not file_exist and not no_ori:
        coco_fiber = COCO(ori_anno)
        coco_ids = coco_fiber.getImgIds()
        catIds = coco_fiber.getCatIds()
        train_data = []
        print('transforming annotations...')
        num_good_images = 0

        for img_id in tqdm(coco_ids):
            img = coco_fiber.loadImgs(img_id)[0]
            file_path = os.path.join(img_root, img['file_name'])

            this_image = cv2.imread(file_path)
            img_shape = this_image.shape[:-1]

            annIds = coco_fiber.getAnnIds(imgIds=img['id'], catIds=catIds)
            anns = coco_fiber.loadAnns(annIds)

            instances_anno = []
            overlapping_area = np.zeros(img_shape)
            for idx, ann in enumerate(anns):
                mask = pologons_to_mask(ann['segmentation'], img_shape)
                # accumulate coverage counts; >1 later marks overlap
                overlapping_area = mask + overlapping_area

                end_point, control_point, off_sets_prev, off_sets_next, mask_pologons = get_keypoints(
                    mask, this_image, step=50, crop_edge=25, debug=False)

                # JSON-friendly ints, converted in place
                for i in range(len(end_point)):
                    end_point[i] = int(end_point[i])
                for i in range(len(control_point)):
                    control_point[i] = int(control_point[i])
                    off_sets_prev[i] = int(off_sets_prev[i])
                    off_sets_next[i] = int(off_sets_next[i])

                if len(control_point) == 0:
                    continue

                new_ann = {
                    'end_points': end_point,
                    'control_points': control_point,
                    'off_sets_prevs': off_sets_prev,
                    'off_sets_nexts': off_sets_next,
                    'segmentation': mask_pologons,
                    'instance_id': idx,
                }
                instances_anno.append(new_ann)

            # pixels hit by more than one instance, cropped by the same
            # margin that get_keypoints used
            overlapping_area = overlapping_area > 1
            crop_edge = 25
            overlapping_area = overlapping_area[
                crop_edge:overlapping_area.shape[0] - crop_edge,
                crop_edge:overlapping_area.shape[1] - crop_edge]
            overlapping_area_pologon = mask_to_pologons(overlapping_area)

            unit = {
                'instances_anno': instances_anno,
                'overlapping_area_pologon': overlapping_area_pologon,
            }
            imgInfo = {
                'imgID': img_id,
                'img_name': img['file_name'],
                'file_path': file_path,
            }
            train_data.append({'unit': unit, 'imgInfo': imgInfo})
            num_good_images += 1

        print('saving transformed annotation...')
        with open(train_anno, 'w') as wf:
            json.dump(train_data, wf)
        print('done')
    if no_ori:
        print('''WARNING! There is no annotation file found at {}.
            Make sure you have put annotation files into the right folder.'''.
              format(ori_anno))