Example no. 1
def AveragePrecisionOnImages(gt_info,
                             predictions_info,
                             num_gt_example,
                             min_overlap=0.5,
                             validate_input=False):
    result = []
    used_info = {}
    # set 'used' tag for each ground truth bbox
    for file_id in gt_info:
        boxes = gt_info[file_id]
        size = boxes.shape[0]
        used = np.zeros(shape=[size], dtype=bool)  # np.bool was removed in NumPy 1.24
        used_info[file_id] = used

    for prediction in predictions_info:
        file_id = prediction['file_id']
        box_pred = prediction['bbox']

        boxes_gt = gt_info[file_id]
        iou = IoU(box_pred, boxes_gt)
        max_idx = iou.argmax()
        used = used_info[file_id][max_idx]
        confidence = prediction['confidence']

        if iou[max_idx] >= min_overlap and not used:
            result.append({'TP': True, 'confidence': confidence})
            used_info[file_id][max_idx] = True
        else:
            result.append({'TP': False, 'confidence': confidence})

    ap, recall, precision = VocAveragePrecision(result,
                                                num_gt_example,
                                                validate=validate_input)
    return ap, recall, precision
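Every example on this page calls an IoU helper that is defined elsewhere in its source repo. For the box-based snippets, a one-box-versus-many variant in corner format [x1, y1, x2, y2] is assumed; a minimal sketch using the same inclusive "+1" pixel convention the examples use (the name and exact broadcasting are assumptions, not the original implementation):

import numpy as np

def IoU(box, boxes):
    # IoU between one box (first four values are corners) and an (N, 4) array
    box = np.asarray(box, dtype=np.float32)
    boxes = np.asarray(boxes, dtype=np.float32)
    area = (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
    areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    # corners of the intersection rectangles
    xx1 = np.maximum(box[0], boxes[:, 0])
    yy1 = np.maximum(box[1], boxes[:, 1])
    xx2 = np.minimum(box[2], boxes[:, 2])
    yy2 = np.minimum(box[3], boxes[:, 3])
    inter = np.maximum(0, xx2 - xx1 + 1) * np.maximum(0, yy2 - yy1 + 1)
    return inter / (area + areas - inter)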
Example no. 2
    def gen_data(img_name, bboxes):
        img_path = os.path.join(wider_dir, 'images', img_name)
        img = cv2.imread(img_path)
        img_h, img_w, _ = img.shape
        nboxes = np.array(bboxes, dtype=np.float32).reshape(-1, 4)
        rects = detect_func(img, config.MIN_IMG_SIZE, test_img_size)
        img_dt_indices = defaultdict(int)
        random.shuffle(rects)
        for rect in rects:
            x1, y1, x2, y2, prob = rect
            w, h = x2 - x1 + 1, y2 - y1 + 1
            if max(w, h) < config.MIN_IMG_SIZE / 2 \
                    or min(x1, y1, x2, y2) < 0 \
                    or max(x1, x2) >= img_w \
                    or max(y1, y2) >= img_h \
                    or x1 >= x2 or y1 >= y2:
                indices['filter_out'] += 1
                continue
            rect = convert_to_square(rect)
            iou = IoU(rect, nboxes)
            dt, label = gen_img_label(img, rect, bboxes, iou)
            if dt != '' and config.MAX_EXAMPLES[dt] > img_dt_indices[dt]:
                img_dt_indices[dt] += 1
                indices[dt] += 1
                # cast to int in case convert_to_square returns floats
                cropped_img = img[int(rect[1]):int(rect[3]),
                                  int(rect[0]):int(rect[2])]
                store_rect_data(save_dir, img_size, dt, cropped_img, label,
                                files[dt], indices[dt])
            else:
                indices['ignore'] += 1
            if all(config.MAX_EXAMPLES[dt] == img_dt_indices[dt]
                   for dt in config.DATA_TYPES):
                break
        print("generate %s subimages for %s rects %d" %
              (str(indices), img_path, len(rects)))
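convert_to_square (used above and again in Examples no. 23 and 26) is another external helper. A plausible sketch following the usual MTCNN recipe of expanding each box to a square around its center; the exact rounding behavior is an assumption, and note that Examples no. 23 and 26 round the result with np.round before using it as pixel indices:

import numpy as np

def convert_to_square(bbox):
    # Expand boxes [x1, y1, x2, y2, ...] to squares around their centers;
    # extra columns (e.g. a score) are preserved.
    square = np.asarray(bbox, dtype=np.float32).copy()
    boxes = square.reshape(-1, square.shape[-1])  # view: writes update square
    w = boxes[:, 2] - boxes[:, 0] + 1
    h = boxes[:, 3] - boxes[:, 1] + 1
    side = np.maximum(w, h)
    boxes[:, 0] = boxes[:, 0] + w * 0.5 - side * 0.5
    boxes[:, 1] = boxes[:, 1] + h * 0.5 - side * 0.5
    boxes[:, 2] = boxes[:, 0] + side - 1
    boxes[:, 3] = boxes[:, 1] + side - 1
    return square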
Example no. 3
def maxIoU(crop_box, boxes):
    m = 0.0
    for box in boxes:
        # IoU of one crop against one box; float() returns a scalar
        # instead of a 1-element array
        iou = float(IoU(crop_box, box[np.newaxis, :]))
        if iou > m:
            m = iou
    return m
def main():
    path_test = '/home/dongnie/Desktop/myPyTorch/pytorch-SRResNet/res_1106'
    path_test = '/home/dongnie/myPytorchCodes/pytorch-SRResNet23D/resValidCha'
    ids = [1, 2, 3, 4, 6, 7, 8, 10, 11, 12, 13, 29]
    ids = range(45, 50)
    for ind in ids:
        #         mr_test_itk = sitk.ReadImage(os.path.join(path_test,'denseCrf3dSegmMap_pelvic_sub%d.nii.gz'%ind))
        mr_test_itk = sitk.ReadImage(
            os.path.join(
                path_test,
                'preCha_wce_wdice_viewExp_resEhance_fullAD_0110_iter14w_sub%02d.nii.gz'
                % ind))
        #mr_test_itk = sitk.ReadImage(os.path.join(path_test,'denseCrf3dSegmMap_probMaps_model0110_sub%02d.nii.gz'%ind))
        #mr_test_itk = sitk.ReadImage(os.path.join(path_test,'denseCrf3dSegmMap_probMaps_thresh_model0110_sub%02d.nii.gz'%ind))
        crfSeg = sitk.GetArrayFromImage(mr_test_itk)
        ct_test_itk = sitk.ReadImage(
            os.path.join(path_test, 'gt_sub%d.nii.gz' % ind))
        gtSeg = sitk.GetArrayFromImage(ct_test_itk)

        diceBladder = dice(crfSeg, gtSeg, 1)
        #diceProstate = dice(crfSeg,gtSeg,2)
        #diceRectumm = dice(crfSeg,gtSeg,3)

        iouBladder = IoU(crfSeg, gtSeg, 1)
        #iouProstate = IoU(crfSeg,gtSeg,2)
        #iouRectum = IoU(crfSeg,gtSeg,3)
        print('sub%d' % ind, ' iou1 = ', iouBladder)
        #print('sub%d'%ind,' iou1 = ',iouBladder,' iou2= ',iouProstate,' iou3= ',iouRectum)

        print('sub%d' % ind, 'dice1 = ', diceBladder)
        #print('sub%d'%ind, 'dice1 = ',diceBladder,' dice2= ',diceProstate,' dice3= ',diceRectumm)
        print('\n')
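Note that Examples no. 3 and 11 call IoU(seg, gt, label) and dice(seg, gt, label) on whole segmentation volumes, so the helper assumed there is a label-wise overlap measure, not the box version. A minimal sketch:

import numpy as np

def IoU(pred, gt, label):
    # intersection over union of one label in two segmentation maps
    p, g = (pred == label), (gt == label)
    union = np.logical_or(p, g).sum()
    return np.logical_and(p, g).sum() / union if union > 0 else 0.0

def dice(pred, gt, label):
    # Dice coefficient of one label: 2|P & G| / (|P| + |G|)
    p, g = (pred == label), (gt == label)
    denom = p.sum() + g.sum()
    return 2.0 * np.logical_and(p, g).sum() / denom if denom > 0 else 0.0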
Example no. 5
    def gen_data(img_name, bboxes, landm5):
        img_path = os.path.join(lfw_dir, img_name)
        img = cv2.imread(img_path)
        nboxes = np.array(bboxes, dtype=np.float32).reshape(-1, 4)
        nlandm5s = np.array(landm5, dtype=np.float32).reshape(-1, 5, 2)

        # cropped box and bounding box
        # for (cbox, size), bbox in gen_crop_boxes(img, nboxes, img_size):
        #     if cbox is None:
        #         continue
        #     iou = IoU(cbox, nboxes)
        #     store_gen_box(save_dir, img, cbox, size, bbox, iou, files, indices)
        for (cbox, size), bbox, clandm5 in gen_crop_bbox_landm5(
                img, nboxes, nlandm5s, img_size):
            if cbox is None:
                continue
            iou = IoU(cbox, nboxes)
            dt, cropped_img, label = gen_img_label(img, cbox, size, bbox, iou,
                                                   clandm5, with_landm5)  # use the per-crop landmarks
            if dt != '':
                store_gen_box_lfw(save_dir, img_size, dt, cropped_img, label,
                                  files[dt], indices[dt])
                indices[dt] += 1
            else:
                indices['ignore'] += 1

        print("generate %s subimages for %s" % (str(indices), img_path))
Example no. 6
def non_max_suppression(bboxes, iou_threshold):
    """
    Applies non maximum suppression to a list of bounding boxes.
    
    Parameters:
        bboxes (list): List of lists containing all bboxes with each bboxes
        specified as [class_pred, conf_score, x1, y1, x2, y2].
        iou_threshold (float): Threshold for the IOU with the ground truth bbox.
        
    Returns:
        bboxes_after_nms (list): bboxes after performing NMS given a specific 
        IoU threshold.
    """

    assert isinstance(bboxes, list)

    bboxes = sorted(bboxes, key=lambda x: x[1], reverse=True)
    bboxes_after_nms = []

    while bboxes:
        chosen_box = bboxes.pop(0)

        bboxes = [
            box for box in bboxes if box[0] != chosen_box[0]
            or IoU(chosen_box[2:], box[2:]) < iou_threshold
        ]

        bboxes_after_nms.append(chosen_box)

    return bboxes_after_nms
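A quick usage sketch with made-up boxes in the documented [class_pred, conf_score, x1, y1, x2, y2] format; it assumes the corner-format IoU helper returns a scalar when given two single boxes:

boxes = [
    [0, 0.9, 10, 10, 50, 50],      # kept: highest confidence for class 0
    [0, 0.8, 12, 12, 52, 52],      # suppressed: large IoU with the box above
    [0, 0.7, 200, 200, 240, 240],  # kept: no overlap with the first box
    [1, 0.6, 11, 11, 51, 51],      # kept: a different class is never suppressed
]
print(non_max_suppression(boxes, iou_threshold=0.5))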
Example no. 7
    def find_best_box(self, sample, cell_h, cell_w):
        """
        Finds the bounding box with the highest IoU with respect to the 
        ground-truth bounding box.
        
        Parameters:
            sample (int): The index of the current sample from the batch.
            cell_h (int): Index of the cell coordinate.
            cell_w (int): Index of the cell coordinate.
            
        Returns:
            best_box (int): The index of the bounding box with the highest IoU.
        """

        # Transform the box coordinates into the corner format
        t_box_coords = MidtoCorner(self.targets[sample, cell_h, cell_w, 1:5],
                                   cell_h, cell_w, self.cell_dim)

        best_box = 0
        max_iou = 0.
        for box in range(self.num_boxes):
            # Transform the box coordinates into the corner format
            p_box_coords = MidtoCorner(
                self.predictions[sample, cell_h, cell_w,
                                 1 + box * 5:5 + box * 5], cell_h, cell_w,
                self.cell_dim)

            box_score = IoU(t_box_coords, p_box_coords)
            if box_score > max_iou:
                max_iou = box_score
                best_box = box  # Store the box index with the highest IoU

        return best_box
Example no. 8
def gen_pneg_data(img,
                  boxes_gd,
                  top_point,
                  neg_save_dir,
                  ng_id,
                  f2,
                  pneg_hold=5,
                  img_size=12,
                  neg_iou=0.3):
    n_idx = ng_id
    x1, y1, h, w, height, width, _, _ = top_point
    for i in range(pneg_hold):
        size = npr.randint(12, min(width, height) // 2)
        # delta_x and delta_y are offsets of (x1, y1)
        delta_x = npr.randint(max(-size, -x1), w)
        delta_y = npr.randint(max(-size, -y1), h)
        nx1 = int(max(0, x1 + delta_x))
        ny1 = int(max(0, y1 + delta_y))
        if nx1 + size > width - 1 or ny1 + size > height - 1:
            continue
        crop_box = np.array([nx1, ny1, nx1 + size, ny1 + size])
        Iou = IoU(crop_box, boxes_gd)
        cropped_im = img[ny1:ny1 + size, nx1:nx1 + size, :]
        resized_im = cv2.resize(cropped_im, (img_size, img_size),
                                interpolation=cv2.INTER_LINEAR)
        if np.max(Iou) < neg_iou:
            # Iou with all gts must below 0.3
            save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx)
            f2.write(save_file + ' 0\n')
            cv2.imwrite(save_file, resized_im)
            n_idx += 1
            #cnt_pos_neg +=1
    return n_idx
Example no. 9
def gen_pos_part_data(img,
                      boxes_gd,
                      top_point,
                      pos_save_dir,
                      part_save_dir,
                      p_id,
                      d_id,
                      f1,
                      f3,
                      pos_hold=20,
                      img_size=12,
                      piou=0.65,
                      paiou=0.4,
                      neg_iou=0.3):
    x1, y1, h, w, height, width, x2, y2 = top_point
    p_idx = p_id
    d_idx = d_id
    for i in range(pos_hold):
        # pos and part face size [minsize*0.8,maxsize*1.25]
        size = npr.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h)))
        # delta here is the offset of the box center
        delta_x = npr.randint(int(-w * 0.2), int(w * 0.2))
        delta_y = npr.randint(int(-h * 0.2), int(h * 0.2))
        # i.e. nx1 = max(x1 + w/2 - size/2 + delta_x, 0)
        nx1 = int(max(x1 + w / 2 + delta_x - size / 2, 0))
        # i.e. ny1 = max(y1 + h/2 - size/2 + delta_y, 0)
        ny1 = int(max(y1 + h / 2 + delta_y - size / 2, 0))
        nx2 = nx1 + size
        ny2 = ny1 + size
        if nx2 > width - 1 or ny2 > height - 1:
            continue
        crop_box = np.array([nx1, ny1, nx2, ny2])
        # offsets relative to the ground-truth box
        offset_x1 = (x1 - nx1) / float(size)
        offset_y1 = (y1 - ny1) / float(size)
        offset_x2 = (x2 - nx2) / float(size)
        offset_y2 = (y2 - ny2) / float(size)
        #crop
        cropped_im = img[ny1:ny2, nx1:nx2, :]
        #resize
        resized_im = cv2.resize(cropped_im, (img_size, img_size),
                                interpolation=cv2.INTER_LINEAR)
        box_ = boxes_gd.reshape(1, -1)
        iou_idx = IoU(crop_box, box_)
        if iou_idx >= piou:
            save_file = os.path.join(pos_save_dir, "%s.jpg" % p_idx)
            f1.write(save_file + ' 1 %.2f %.2f %.2f %.2f\n' %
                     (offset_x1, offset_y1, offset_x2, offset_y2))
            cv2.imwrite(save_file, resized_im)
            p_idx += 1
        #elif IoU(crop_box, box_) >= paiou:
        elif iou_idx <= paiou and iou_idx > neg_iou:
            save_file = os.path.join(part_save_dir, "%s.jpg" % d_idx)
            f3.write(save_file + ' -1 %.2f %.2f %.2f %.2f\n' %
                     (offset_x1, offset_y1, offset_x2, offset_y2))
            cv2.imwrite(save_file, resized_im)
            d_idx += 1
    return p_idx, d_idx
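The four offsets written above are the usual MTCNN bounding-box regression targets, normalized by the crop size. Inverting the formulas recovers absolute corners from a crop plus (predicted) offsets; a sketch with illustrative names:

def decode_offsets(crop_box, offsets, size):
    # invert offset_* = (gt_* - crop_*) / size back to absolute corners
    nx1, ny1, nx2, ny2 = crop_box
    ox1, oy1, ox2, oy2 = offsets
    return (nx1 + ox1 * size, ny1 + oy1 * size,
            nx2 + ox2 * size, ny2 + oy2 * size)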
Example no. 10
    def __getitem__(self, index):
        img_file, bbox, _, category_id = self.dataset.getRandom()

        img = cv2.imread(img_file, cv2.IMREAD_COLOR)
        bbox = to_percentage_coords(bbox, img.shape)
        img_z, bbox_z, _ = self.transform(img, bbox)

        bbox_xprev = jitter_transform(bbox)
        img_x, bbox_x, bbox_xprev = self.transform(img, bbox, bbox_xprev)

        H, W, _ = img_x.shape
        abs_bbox = to_absolute_coords(bbox_x, img_x.shape)
        w, h = abs_bbox[2:] - abs_bbox[:2]

        # Import a distractor (an instance from the same category)
        img_file2, bbox2, mask2, _ = self.dataset.getFromCategory(category_id)
        img2 = cv2.imread(img_file2, cv2.IMREAD_COLOR)
        bbox2 = bbox2.astype(int)
        w2, h2 = bbox2[2:] - bbox2[:2]

        cropped_img = img2[bbox2[1]:bbox2[3], bbox2[0]:bbox2[2]]
        cropped_mask = mask2[bbox2[1]:bbox2[3], bbox2[0]:bbox2[2]]

        # Scale the distractor image so that it has the same size as the first one.
        ratio = np.sqrt(w * h) / np.sqrt(w2 * h2)

        if np.isinf(ratio) or np.isnan(ratio):
            return self[np.random.randint(len(self))]

        w2, h2 = min(int(w2 * ratio), W - 1), min(int(h2 * ratio), H - 1)
        cropped_img = cv2.resize(cropped_img, (w2, h2))
        cropped_mask = cv2.resize(cropped_mask, (w2, h2)).astype(bool)

        # max trails (10)
        for _ in range(10):
            x = np.random.randint(W - w2)
            y = np.random.randint(H - h2)
            bbox2 = to_percentage_coords(np.array([x, y, x + w2, y + h2]), img_x.shape)

            # Avoid too difficult cases where the distractor completely occludes the main instance
            if IoU(bbox_x, bbox2) < 0.30:
                break

        img_x[y:y + h2, x:x + w2][cropped_mask] = cropped_img[cropped_mask]

        img_z = cv2.cvtColor(img_z, cv2.COLOR_BGR2RGB) / 255.
        img_z = torch.from_numpy(img_z).permute(2, 0, 1).float()
        img_x = cv2.cvtColor(img_x, cv2.COLOR_BGR2RGB) / 255.
        img_x = torch.from_numpy(img_x).permute(2, 0, 1).float()

        bbox_z, bbox_x = torch.from_numpy(bbox_z).float(), torch.from_numpy(bbox_x).float()
        bbox_xprev = torch.from_numpy(bbox_xprev).float()

        return img_z, img_x, bbox_z, bbox_x, bbox_xprev
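to_percentage_coords and to_absolute_coords are assumed helpers that rescale [x1, y1, x2, y2] between pixel units and image-relative fractions; a minimal sketch consistent with how they are called above (shape is the usual (H, W, C) tuple from OpenCV):

import numpy as np

def to_percentage_coords(bbox, shape):
    h, w = shape[:2]
    return np.asarray(bbox, dtype=np.float32) / np.array([w, h, w, h], np.float32)

def to_absolute_coords(bbox, shape):
    h, w = shape[:2]
    return np.asarray(bbox, dtype=np.float32) * np.array([w, h, w, h], np.float32)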
Example no. 11
def main():
    path_test = '/home/dongnie/Desktop/myPyTorch/pytorch-SRResNet/res_1106'
    ids = [1, 2, 3, 4, 6, 7, 8, 10, 11, 12, 13, 29]
    # ids = [1,2,29]
    for ind in ids:
        # mr_test_itk = sitk.ReadImage(os.path.join(path_test,'denseCrf3dSegmMap_pelvic_sub%d.nii.gz'%ind))
        mr_test_itk = sitk.ReadImage(os.path.join(path_test, 'preSub_wce_wdice_adImpo_viewExp_1106_sub%d.nii.gz' % ind))
        crfSeg = sitk.GetArrayFromImage(mr_test_itk)
        ct_test_itk = sitk.ReadImage(os.path.join(path_test, 'gt_sub%d.nii.gz' % ind))
        gtSeg = sitk.GetArrayFromImage(ct_test_itk)

        diceBladder = dice(crfSeg, gtSeg, 1)
        diceProstate = dice(crfSeg, gtSeg, 2)
        diceRectumm = dice(crfSeg, gtSeg, 3)

        iouBladder = IoU(crfSeg, gtSeg, 1)
        iouProstate = IoU(crfSeg, gtSeg, 2)
        iouRectum = IoU(crfSeg, gtSeg, 3)
        print('sub%d' % ind, ' iou1 = ', iouBladder, ' iou2= ', iouProstate, ' iou3= ', iouRectum)

        print('sub%d' % ind, 'dice1 = ', diceBladder, ' dice2= ', diceProstate, ' dice3= ', diceRectumm)
        print('\n')
Example no. 12
    def __init__(self, anchors, labels, class_preds):
        # anchors : [batch_size, total number of anchors, 4]
        # labels shape :
        # [batch_size, num_of_objects_per_image(-1 for no object), 5(class + bbox coords)]
        # class_preds : [batch_size, total number of anchors, num_classes + 1]
        super(LabelAnchors, self).__init__()
        self.iou_threshold = IoU_threshold  # IoU_threshold: module-level constant in the source
        self.iou = IoU.IoU()
        self.anchors = anchors
        self.labels = labels
        self.class_preds = class_preds
        self.batch_size = labels.shape[0]
        self.num_anchors = anchors.shape[1]
Example no. 13
    def forward(self, x):
        """
        @Args
          x: (Tensor) detection feature map, with size [bs, num_bboxes, [x,y,w,h,p_obj]+num_classes]
        @Returns
          detections: (Tensor) detection result with size [num_bboxes, [image_batch_idx, 4 offsets, p_obj, max_conf, cls_idx]]
        """
        bs, num_bboxes, num_attrs = x.size()
        detections = torch.Tensor().cuda()

        for idx in range(bs):
            pred = x[idx]

            try:
                non_zero_pred = pred[pred[:, 4] > self.conf_thresh]
                non_zero_pred[:, :4] = transform_coord(non_zero_pred[:, :4],
                                                       src='center',
                                                       dst='corner')
                max_score, max_idx = torch.max(non_zero_pred[:, 5:], 1)
                max_idx = max_idx.float().unsqueeze(1)
                max_score = max_score.float().unsqueeze(1)
                non_zero_pred = torch.cat(
                    (non_zero_pred[:, :5], max_score, max_idx), 1)
                # from IPython import embed
                # embed()
                non_zero_pred = non_zero_pred[
                    non_zero_pred[:, 5] > self.cls_thresh]
                classes = torch.unique(non_zero_pred[:, -1])
            except Exception as e:  # no object detected
                print("map error", e)
                continue

            for cls in classes:
                cls_pred = non_zero_pred[non_zero_pred[:, -1] == cls]
                conf_sort_idx = torch.sort(cls_pred[:, 4], descending=True)[1]
                cls_pred = cls_pred[conf_sort_idx]
                max_preds = []
                while cls_pred.size(0) > 0:
                    max_preds.append(cls_pred[0].unsqueeze(0))
                    ious = IoU(max_preds[-1], cls_pred)
                    cls_pred = cls_pred[ious < self.nms_thresh]

                if len(max_preds) > 0:
                    max_preds = torch.cat(max_preds).data
                    batch_idx = max_preds.new(max_preds.size(0), 1).fill_(idx)
                    seq = (batch_idx, max_preds)
                    detections = torch.cat(
                        seq, 1) if detections.size(0) == 0 else torch.cat(
                            (detections, torch.cat(seq, 1)))

        return detections
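Inside this NMS loop, IoU(max_preds[-1], cls_pred) must return one IoU per row of cls_pred, with corner coordinates already in columns 0-3. A hedged PyTorch sketch of such a helper (the name and column layout are taken from the call site; the rest is an assumption):

import torch

def IoU(box, boxes):
    # box: (1, K) tensor, boxes: (N, K) tensor; corners in columns 0-3
    x1 = torch.max(box[:, 0], boxes[:, 0])
    y1 = torch.max(box[:, 1], boxes[:, 1])
    x2 = torch.min(box[:, 2], boxes[:, 2])
    y2 = torch.min(box[:, 3], boxes[:, 3])
    inter = (x2 - x1).clamp(min=0) * (y2 - y1).clamp(min=0)
    area1 = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])
    area2 = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    return inter / (area1 + area2 - inter)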
Example no. 14
    def create_pairwise_hypotheses(self, detections, scene_start, scene_end):
        # This simply creates pairwise hypotheses for all pairs
        # of detections that are at most dt apart, and between
        # which we can interpolate so that IoU of hypotheses
        # is > 0. This is obviously an overcomplete set.
        self.logger.info("Creating pairwise hypotheses...")

        pairwise_hypothesis = []
        if len(detections) == 0:
            return pairwise_hypothesis

        detections = sorted(detections, key=lambda detection: detection.time)
        detections_by_time = [[detections[0]]]
        for detection in detections[1:]:
            if detection.time == detections_by_time[-1][0].time:
                detections_by_time[-1].append(detection)
            else:
                detections_by_time.append([detection])

        for idx_now, detections_now in enumerate(detections_by_time):
            for detections_nxt in detections_by_time[idx_now + 1:]:
                if len(detections_now) == 0 or len(detections_nxt) == 0:
                    continue
                if detections_nxt[0].time - detections_now[0].time >\
                        self.pairwise_max_dt:
                    break
                for detection_now in detections_now:
                    for detection_nxt in detections_nxt:

                        tracklet = interpolate_tracklet(
                            detection_now, detection_nxt)

                        bboxes = np.vstack(
                            [detection.bbox for detection in tracklet])

                        ious = IoU(bboxes[:-1], bboxes[1:])
                        if np.min(ious) >= self.pairwise_min_iou:
                            tracklet_fin = [
                                None for when in range(scene_start, scene_end)
                            ]
                            for did, det in enumerate(tracklet):
                                tracklet_fin[det.time - scene_start] = det
                                if did > 0 and did < len(tracklet) - 1:
                                    det.confidence = \
                                        LabelStorage.instance.min_det_confidence
                            pairwise_hypothesis.append(
                                Hypothesis(tracklet=tracklet_fin))

        self.logger.info("Done: total %d", len(pairwise_hypothesis))
        return pairwise_hypothesis
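Note the signature here: IoU(bboxes[:-1], bboxes[1:]) pairs row i with row i+1, so this codebase's helper must support a row-aligned (elementwise) mode rather than one-versus-many. A numpy sketch of that variant:

import numpy as np

def IoU(a, b):
    # row-wise IoU of two (N, 4) corner-format arrays
    x1 = np.maximum(a[:, 0], b[:, 0])
    y1 = np.maximum(a[:, 1], b[:, 1])
    x2 = np.minimum(a[:, 2], b[:, 2])
    y2 = np.minimum(a[:, 3], b[:, 3])
    inter = np.clip(x2 - x1, 0, None) * np.clip(y2 - y1, 0, None)
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    return inter / (area_a + area_b - inter)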
Example no. 15
    def forward(self, x):
        """
        Args
          x: (Tensor) detection feature map, with size [bs, num_bboxes, 5 + nC]

        Returns
          detections: (Tensor) detection result with size [num_bboxes, [image_batch_idx, 4 offsets, p_obj, max_conf, cls_idx]]
        """
        bs, num_bboxes, num_attrs = x.size()
        detections = torch.Tensor().cuda()

        for idx in range(bs):
            pred = x[idx]

            try:
                non_zero_pred = pred[pred[:, 4] > self.conf_thresh]
                non_zero_pred[:, :4] = xywh2xyxy(non_zero_pred[:, :4])
                max_score, max_idx = torch.max(non_zero_pred[:, 5:], 1)
                max_idx = max_idx.float().unsqueeze(1)
                max_score = max_score.float().unsqueeze(1)
                non_zero_pred = torch.cat(
                    (non_zero_pred[:, :5], max_score, max_idx), 1)
                classes = torch.unique(non_zero_pred[:, -1])
            except Exception:  # no object detected
                continue

            for cls in classes:
                cls_pred = non_zero_pred[non_zero_pred[:, -1] == cls]
                conf_sort_idx = torch.sort(cls_pred[:, 5], descending=True)[1]
                cls_pred = cls_pred[conf_sort_idx]
                max_preds = []
                while cls_pred.size(0) > 0:
                    max_preds.append(cls_pred[0].unsqueeze(0))
                    ious = IoU(max_preds[-1], cls_pred)
                    cls_pred = cls_pred[ious < self.nms_thresh]

                if len(max_preds) > 0:
                    max_preds = torch.cat(max_preds).data
                    batch_idx = max_preds.new(max_preds.size(0), 1).fill_(idx)
                    seq = (batch_idx, max_preds)
                    detections = torch.cat(
                        seq, 1) if detections.size(0) == 0 else torch.cat(
                            (detections, torch.cat(seq, 1)))

        return detections
Example no. 16
    def gen_data(img_name, bboxes):
        img_path = os.path.join(wider_dir, 'images', img_name)
        img = cv2.imread(img_path)
        nboxes = np.array(bboxes, dtype=np.float32).reshape(-1, 4)

        # cropped box and bounding box
        for (cbox, size), bbox in gen_crop_boxes(img, nboxes, img_size):
            if cbox is None:
                continue
            iou = IoU(cbox, nboxes)
            dt, cropped_img, label = gen_img_label(img, cbox, size, bbox, iou,
                                                   None, False)
            if dt != '':
                store_gen_box(save_dir, img_size, dt, cropped_img, label,
                              files[dt], indices[dt])
                indices[dt] += 1
            else:
                indices['ignore'] += 1
        print("generate %s subimages for %s" % (str(indices), img_path))
Example no. 17
def _GenImgData(file_id):
    num_gt = np.random.randint(low=1, high=6)
    gt_boxes = _GenRandomBoxes(size=num_gt)
    gt_key = file_id
    gt_value = gt_boxes

    prediction = []
    for idx in range(num_gt):
        if np.random.random() < 0.2:
            continue
        fp = np.random.random() < 0.2
        pred_box = _GenPredictionBox(gt_boxes[idx], fp)
        iou = np.max(IoU(pred_box, gt_boxes))
        dc = (np.random.random() - 0.5) / 3
        confidence = iou + dc
        result = {'bbox': pred_box, 'confidence': confidence, 'file_id': file_id}
        prediction.append(result)

    return gt_key, gt_value, prediction, num_gt
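_GenImgData pairs naturally with AveragePrecisionOnImages from Example no. 1; a hypothetical harness tying the two together (the file-id format and number of images are illustrative):

gt_info, predictions_info, total_gt = {}, [], 0
for i in range(20):
    file_id = 'img_%03d' % i
    key, boxes, preds, num_gt = _GenImgData(file_id)
    gt_info[key] = boxes
    predictions_info.extend(preds)
    total_gt += num_gt

ap, recall, precision = AveragePrecisionOnImages(
    gt_info, predictions_info, total_gt, min_overlap=0.5)
print('AP = %.3f' % ap)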
Example no. 18
def gen_neg_data(img,
                 boxes_gd,
                 height,
                 width,
                 neg_save_dir,
                 ng_id,
                 f2,
                 neg_hold=50,
                 img_size=12,
                 neg_iou=0.3):
    neg_num = 0
    # negative sample counter, continues from ng_id
    n_idx = ng_id
    while neg_num < neg_hold:
        # negative crop height in [40, min(width, height)/2); minimum size 40
        size_h = npr.randint(40, min(width, height) // 2)
        size_w = size_h // 2  # np.int was removed in NumPy 1.24
        #top_left
        nx = npr.randint(0, width - size_w)
        ny = npr.randint(0, height - size_h)
        #random crop
        crop_box = np.array([nx, ny, nx + size_w, ny + size_h])
        #cal iou
        Iou = IoU(crop_box, boxes_gd)
        #Iou = IoU_self(crop_box, boxes_gd)

        cropped_im = img[ny:ny + size_h, nx:nx + size_w, :]
        resized_im = cv2.resize(cropped_im, (img_size, img_size),
                                interpolation=cv2.INTER_LINEAR)

        if np.max(Iou) < neg_iou:
            # Iou with all gts must below 0.3
            save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx)
            #f2.write("12/negative/%s.jpg"%n_idx + ' 0\n')
            f2.write(save_file + ' 0\n')
            cv2.imwrite(save_file, resized_im)
            n_idx += 1
            neg_num += 1
    return n_idx
Example no. 19
    boxes = np.array(bbox, dtype=np.float32).reshape(-1, 4)
    img = cv2.imread(os.path.join(im_dir, im_path + '.jpg'))
    idx += 1
    if idx % 100 == 0:
        print idx, "images done"

    height, width, channel = img.shape

    neg_num = 0
    while neg_num < 50:
        size = npr.randint(40, min(width, height) // 2)
        nx = npr.randint(0, width - size)
        ny = npr.randint(0, height - size)
        crop_box = np.array([nx, ny, nx + size, ny + size])

        Iou = IoU(crop_box, boxes)

        cropped_im = img[ny:ny + size, nx:nx + size, :]
        resized_im = cv2.resize(cropped_im, (48, 48),
                                interpolation=cv2.INTER_LINEAR)

        if np.max(Iou) < 0.3:
            # Iou with all gts must below 0.3
            save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx)
            f2.write("48/negative/%s" % n_idx + ' 0\n')
            cv2.imwrite(save_file, resized_im)
            n_idx += 1
            neg_num += 1
    for box in boxes:
        # box (x_left, y_top, x_right, y_bottom)
        x1, y1, x2, y2 = box
Example no. 20
def proces_pic(size, pic_path, anno_path):

    stdsize = size

    hasPts = True
    if size == 12:
        hasPts = False

    root_path = pic_path  #gen data path
    anno_file = anno_path  #raw label file
    #im_dir = "D:/facedetect-like/CelebA/Img/img_celeba.7z/train/"   #raw data path

    pos_save_dir = root_path + "/positive"
    part_save_dir = root_path + "/part"
    neg_save_dir = root_path + "/negative"

    def mkr(dr):
        if not os.path.exists(dr):
            os.mkdir(dr)

    mkr(root_path)
    mkr(pos_save_dir)
    mkr(part_save_dir)
    mkr(neg_save_dir)

    f1 = open(os.path.join(root_path, 'pos_' + str(stdsize) + '.txt'), 'w')
    f2 = open(os.path.join(root_path, 'neg_' + str(stdsize) + '.txt'), 'w')
    f3 = open(os.path.join(root_path, 'part_' + str(stdsize) + '.txt'), 'w')
    with open(anno_file, 'r') as f:
        annotations = f.readlines()
    num = len(annotations)
    print "%d pics in total" % num
    p_idx = 0  # positive
    n_idx = 0  # negative
    d_idx = 0  # dont care
    idx = 0
    box_idx = 0

    with_points = 14
    without_point = 4
    rect_point_num = 4
    key_point_num = 10

    for annotation in annotations:
        annotation = annotation.strip().split(' ')
        im_path = annotation[0]
        #bbox = map(float, annotation[1:5])
        annotation_1 = list(map(float, annotation[1:]))
        boxes = np.array(annotation_1,
                         dtype=np.float32).reshape(-1, with_points)
        #print boxes
        bbox = np.zeros((boxes.shape[0], rect_point_num))  #n*4 dim
        pts = np.zeros((boxes.shape[0], key_point_num))  #n*10 dim

        for boxes_num in range(boxes.shape[0]):
            s_bbox = list(map(
                float,
                annotation[1 + boxes_num * with_points:5 +
                           boxes_num * with_points]))  # corners of a single box
            bbox[boxes_num, :] = np.array(s_bbox, dtype=np.float32).reshape(
                -1, rect_point_num)

            s_pts = list(map(
                float, annotation[5 + boxes_num * with_points:15 +
                                  boxes_num * with_points]))
            pts[boxes_num, :] = np.array(s_pts, dtype=np.float32).reshape(
                -1, key_point_num)

        #multi faces in single pic with points
        #boxes = np.array(bbox, dtype=np.float32).reshape(-1, 14)

        img = cv2.imread(im_path)
        idx += 1
        if idx % 100 == 0:
            print(idx, "images done")

        height, width, channel = img.shape

        idx_of_pts = 0
        backupPts = pts[:]
        for box in boxes:

            neg_num = 0
            while neg_num < 30:
                baseNum = 40
                while baseNum >= min(width, height) / 2:
                    baseNum -= 5
                size = npr.randint(baseNum, min(width, height) // 2)
                #size = npr.randint(40, min(width, height) / 2)
                nx = npr.randint(0, width - size)
                ny = npr.randint(0, height - size)
                crop_box = np.array([nx, ny, nx + size, ny + size])

                Iou = IoU(crop_box, bbox)

                cropped_im = img[ny:ny + size, nx:nx + size, :]
                resized_im = cv2.resize(cropped_im, (stdsize, stdsize),
                                        interpolation=cv2.INTER_LINEAR)

                if np.max(Iou) < 0.3:
                    # Iou with all gts must below 0.3
                    save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx)
                    f2.write(neg_save_dir + "/%s" % n_idx + ' 0\n')
                    cv2.imwrite(save_file, resized_im)
                    n_idx += 1
                    neg_num += 1

            # box (x_left, y_top, x_right, y_bottom)
            x1, y1, x2, y2 = box[0:4]
            w = x2 - x1 + 1
            h = y2 - y1 + 1

            # ignore small faces
            # in case the ground truth boxes of small faces are not accurate
            if max(w, h) < 12 or x1 < 0 or y1 < 0:
                continue

            # generate positive examples and part faces
            # print "pts: ", backupPts[idx_of_pts]
            for i in range(20):
                pts0 = backupPts[idx_of_pts]
                size = npr.randint(int(min(w, h) * 0.8),
                                   np.ceil(1.25 * max(w, h)))

                # delta here is the offset of box center
                delta_x = npr.randint(-w * 0.2, w * 0.2)
                delta_y = npr.randint(-h * 0.2, h * 0.2)

                nx1 = max(x1 + w / 2 + delta_x - size / 2, 0)
                ny1 = max(y1 + h / 2 + delta_y - size / 2, 0)
                nx2 = nx1 + size
                ny2 = ny1 + size

                if nx2 > width or ny2 > height:
                    continue
                crop_box = np.array([nx1, ny1, nx2, ny2])

                offset_x1 = (x1 - nx1) / float(size)
                offset_y1 = (y1 - ny1) / float(size)
                offset_x2 = (x2 - nx1) / float(size)
                offset_y2 = (y2 - ny1) / float(size)

                if hasPts:
                    pts_res = np.zeros(len(pts0))
                    for k in range(len(pts0) // 2):
                        pts_res[k * 2] = (pts0[k * 2] - nx1) / float(size)
                        pts_res[k * 2 +
                                1] = (pts0[k * 2 + 1] - ny1) / float(size)

                cropped_im = img[int(ny1):int(ny2), int(nx1):int(nx2), :]
                resized_im = cv2.resize(cropped_im, (stdsize, stdsize),
                                        interpolation=cv2.INTER_LINEAR)

                box_ = box.reshape(1, -1)
                if IoU(crop_box, box_) >= 0.65:
                    save_file = os.path.join(pos_save_dir, "%s.jpg" % p_idx)
                    f1.write(pos_save_dir + "/%s" % p_idx + ' 1 %f %f %f %f' %
                             (offset_x1, offset_y1, offset_x2, offset_y2))

                    if hasPts:
                        for k in range(len(pts_res)):
                            f1.write(" %f" % pts_res[k])
                    f1.write("\n")
                    cv2.imwrite(save_file, resized_im)
                    p_idx += 1
                elif IoU(crop_box, box_) >= 0.4:
                    save_file = os.path.join(part_save_dir, "%s.jpg" % d_idx)
                    f3.write(part_save_dir + "/%s" % d_idx +
                             ' -1 %f %f %f %f' %
                             (offset_x1, offset_y1, offset_x2, offset_y2))

                    if hasPts:
                        for k in range(len(pts_res)):
                            f3.write(" %f" % pts_res[k])
                    f3.write("\n")
                    cv2.imwrite(save_file, resized_im)
                    d_idx += 1

            idx_of_pts += 1
            box_idx += 1
            print "%s images %s part done, pos: %s part: %s neg: %s" % (
                idx, idx_of_pts, p_idx, d_idx, n_idx)

    f1.close()
    f2.close()
    f3.close()
Example no. 21
def GenerateData(ftxt, data_path, net, argument=False):
    '''
    :param ftxt: name/path of the text file that contains image paths,
                bounding boxes, and landmarks
    :param data_path: root dir of the input images
    :param net: one of the nets in the cascaded networks
    :param argument: whether to apply augmentation
    :return: images and related landmarks
    '''
    if net == "PNet":
        size = 12
    elif net == "RNet":
        size = 24
    elif net == "ONet":
        size = 48
    else:
        print('Net type error')
        return
    image_id = 0
    #
    f = open(join(OUTPUT,"landmark_%s_aug.txt" %(size)),'w')
    #dstdir = "train_landmark_few"
    # get image path , bounding box, and landmarks from file 'ftxt'
    data = getDataFromTxt(ftxt,data_path=data_path)
    idx = 0
    #image_path bbox landmark(5*2)
    for (imgPath, bbox, landmarkGt) in data:
        #print imgPath
        F_imgs = []
        F_landmarks = []
        print(imgPath)
        img = cv2.imread(imgPath)

        assert(img is not None)
        img_h,img_w,img_c = img.shape
        gt_box = np.array([bbox.left,bbox.top,bbox.right,bbox.bottom])
        #get sub-image from bbox
        f_face = img[bbox.top:bbox.bottom+1,bbox.left:bbox.right+1]
        # resize the gt image to specified size
        f_face = cv2.resize(f_face,(size,size))
        #initialize the landmark
        landmark = np.zeros((5, 2))

        #normalize land mark by dividing the width and height of the ground truth bounding box
        # landmakrGt is a list of tuples
        for index, one in enumerate(landmarkGt):
            # (( x - bbox.left)/ width of bounding box, (y - bbox.top)/ height of bounding box
            rv = ((one[0]-gt_box[0])/(gt_box[2]-gt_box[0]), (one[1]-gt_box[1])/(gt_box[3]-gt_box[1]))
            # put the normalized value into the new list landmark
            landmark[index] = rv
        
        F_imgs.append(f_face)
        F_landmarks.append(landmark.reshape(10))
        landmark = np.zeros((5, 2))        
        if argument:
            idx = idx + 1
            if idx % 100 == 0:
                print(idx, "images done")
            x1, y1, x2, y2 = gt_box
            #gt's width
            gt_w = x2 - x1 + 1
            #gt's height
            gt_h = y2 - y1 + 1        
            if max(gt_w, gt_h) < 40 or x1 < 0 or y1 < 0:
                continue
            #random shift
            for i in range(10):
                bbox_size = npr.randint(int(min(gt_w, gt_h) * 0.8), np.ceil(1.25 * max(gt_w, gt_h)))
                delta_x = npr.randint(-gt_w * 0.2, gt_w * 0.2)
                delta_y = npr.randint(-gt_h * 0.2, gt_h * 0.2)
                nx1 = int(max(x1+gt_w/2-bbox_size/2+delta_x,0))
                ny1 = int(max(y1+gt_h/2-bbox_size/2+delta_y,0))

                nx2 = nx1 + bbox_size
                ny2 = ny1 + bbox_size
                if nx2 > img_w or ny2 > img_h:
                    continue
                crop_box = np.array([nx1,ny1,nx2,ny2])


                cropped_im = img[ny1:ny2+1,nx1:nx2+1,:]
                resized_im = cv2.resize(cropped_im, (size, size))
                #cal iou
                iou = IoU(crop_box, np.expand_dims(gt_box,0))
                if iou > 0.65:
                    F_imgs.append(resized_im)
                    #normalize
                    for index, one in enumerate(landmarkGt):
                        rv = ((one[0]-nx1)/bbox_size, (one[1]-ny1)/bbox_size)
                        landmark[index] = rv
                    F_landmarks.append(landmark.reshape(10))
                    landmark = np.zeros((5, 2))
                    landmark_ = F_landmarks[-1].reshape(-1,2)
                    bbox = BBox([nx1,ny1,nx2,ny2])                    

                    #mirror                    
                    if random.choice([0,1]) > 0:
                        face_flipped, landmark_flipped = flip(resized_im, landmark_)
                        face_flipped = cv2.resize(face_flipped, (size, size))
                        #c*h*w
                        F_imgs.append(face_flipped)
                        F_landmarks.append(landmark_flipped.reshape(10))
                    #rotate
                    if random.choice([0,1]) > 0:
                        face_rotated_by_alpha, landmark_rotated = rotate(img, bbox, \
                                                                         bbox.reprojectLandmark(landmark_), 5)  # rotate counterclockwise
                        #landmark_offset
                        landmark_rotated = bbox.projectLandmark(landmark_rotated)
                        face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (size, size))
                        F_imgs.append(face_rotated_by_alpha)
                        F_landmarks.append(landmark_rotated.reshape(10))
                
                        #flip
                        face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                        face_flipped = cv2.resize(face_flipped, (size, size))
                        F_imgs.append(face_flipped)
                        F_landmarks.append(landmark_flipped.reshape(10))                
                    
                    #clockwise rotation
                    if random.choice([0,1]) > 0:
                        face_rotated_by_alpha, landmark_rotated = rotate(img, bbox, \
                                                                         bbox.reprojectLandmark(landmark_), -5)  # rotate clockwise
                        landmark_rotated = bbox.projectLandmark(landmark_rotated)
                        face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (size, size))
                        F_imgs.append(face_rotated_by_alpha)
                        F_landmarks.append(landmark_rotated.reshape(10))
                
                        face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                        face_flipped = cv2.resize(face_flipped, (size, size))
                        F_imgs.append(face_flipped)
                        F_landmarks.append(landmark_flipped.reshape(10)) 
                    
            F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)
            #print F_imgs.shape
            #print F_landmarks.shape
            for i in range(len(F_imgs)):
                #if image_id % 100 == 0:

                    #print('image id : ', image_id)

                if np.sum(np.where(F_landmarks[i] <= 0, 1, 0)) > 0:
                    continue

                if np.sum(np.where(F_landmarks[i] >= 1, 1, 0)) > 0:
                    continue

                cv2.imwrite(join(dstdir,"%d.jpg" %(image_id)), F_imgs[i])
                landmarks = map(str,list(F_landmarks[i]))
                f.write(join(dstdir,"%d.jpg" %(image_id))+" -2 "+" ".join(landmarks)+"\n")
                image_id = image_id + 1
            
    #print F_imgs.shape
    #print F_landmarks.shape
    #F_imgs = processImage(F_imgs)
    #shuffle_in_unison_scary(F_imgs, F_landmarks)
    
    f.close()
    return F_imgs,F_landmarks
def _process_one(annotation):
    p_idx = 0  # positive
    n_idx = 0  # negative
    d_idx = 0  # dont care
    pos_cls_list = []
    pos_roi_list = []
    neg_cls_list = []
    part_roi_list = []

    annotation = annotation.strip().split(' ')
    im_path = annotation[0]
    bbox = list(map(float, annotation[1:]))
    boxes = np.array(bbox, dtype=np.float32).reshape(-1, 4)
    img = cv2.imread(os.path.join(im_dir, im_path + '.jpg'))
    #idx += 1
    #if idx % 100 == 0:
    #    print (idx, "images done")

    height, width, channel = img.shape

    for box in boxes:
        # box (x_left, y_top, x_right, y_bottom)
        x1, y1, x2, y2 = box
        w = x2 - x1 + 1
        h = y2 - y1 + 1

        # ignore small faces
        # in case the ground truth boxes of small faces are not accurate
        if max(w, h) < 40 or x1 < 0 or y1 < 0:
            continue

        # generate positive examples and part faces
        for i in range(20):
            size = npr.randint(int(min(w, h) * 0.8), np.ceil(1.25 * max(w, h)))

            # delta here is the offset of box center
            delta_x = npr.randint(int(-w * 0.2), int(w * 0.2))
            delta_y = npr.randint(int(-h * 0.2), int(h * 0.2))

            nx1 = max(int(x1 + w / 2 + delta_x - size / 2), 0)
            ny1 = max(int(y1 + h / 2 + delta_y - size / 2), 0)
            nx2 = nx1 + size
            ny2 = ny1 + size

            if nx2 > width or ny2 > height:
                continue
            crop_box = np.array([nx1, ny1, nx2, ny2])

            offset_x1 = (x1 - nx1) / float(size)
            offset_y1 = (y1 - ny1) / float(size)
            offset_x2 = (x2 - nx2) / float(size)
            offset_y2 = (y2 - ny2) / float(size)

            cropped_im = img[ny1:ny2, nx1:nx2, :]
            resized_im = cv2.resize(cropped_im, (image_size, image_size),
                                    interpolation=cv2.INTER_LINEAR)

            box_ = box.reshape(1, -1)
            if IoU(crop_box, box_) >= 0.65:  # output both position vector and class label
                save_file = os.path.join(pos_save_dir, "%s.jpg" % p_idx)
                #f1.write(r"xx/positive/%s"%p_idx + ' 1 %.2f %.2f %.2f %.2f\n'%(offset_x1, offset_y1, offset_x2, offset_y2))

                im = resized_im
                h, w, ch = resized_im.shape
                if h != image_size or w != image_size:
                    im = cv2.resize(im, (image_size, image_size))
                im = np.swapaxes(im, 0, 2)
                im = (im - 127.5) / 127.5
                label = 1
                roi = [-1, -1, -1, -1]
                pts = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
                pos_cls_list.append([im, label, roi])

                roi = [
                    float(offset_x1),
                    float(offset_y1),
                    float(offset_x2),
                    float(offset_y2)
                ]
                pos_roi_list.append([im, label, roi])
                # success = cv2.imwrite(save_file, resized_im)
                # if not success:
                #     raise Exception('Not writing file!')
                p_idx += 1
            elif IoU(crop_box, box_) >= 0.4:  # output class label only
                save_file = os.path.join(part_save_dir, "%s.jpg" % d_idx)
                #f3.write(r"xx/part/%s"%d_idx + ' -1 %.2f %.2f %.2f %.2f\n'%(offset_x1, offset_y1, offset_x2, offset_y2))
                im = resized_im
                h, w, ch = resized_im.shape
                if h != image_size or w != image_size:
                    im = cv2.resize(im, (image_size, image_size))
                im = np.swapaxes(im, 0, 2)
                # im -= 127
                im = (im - 127.5) / 127.5  #  it is wrong in original code
                label = -1
                roi = [
                    float(offset_x1),
                    float(offset_y1),
                    float(offset_x2),
                    float(offset_y2)
                ]
                pts = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
                part_roi_list.append([im, label, roi])

                # success = cv2.imwrite(save_file, resized_im)
                # if not success:
                #     raise Exception('Not writing file!')
                d_idx += 1
        #box_idx += 1
        #print ("{:d} images done, positive: {:d} part: {:d} negative: {:d}".format(idx, p_idx, d_idx, n_idx))

    neg_num = 0
    while neg_num < 50:
        size = npr.randint(40, int(min(width, height) / 2))
        nx = npr.randint(0, width - size)
        ny = npr.randint(0, height - size)
        crop_box = np.array([nx, ny, nx + size, ny + size])

        Iou = IoU(crop_box, boxes)

        cropped_im = img[ny:ny + size, nx:nx + size, :]
        resized_im = cv2.resize(cropped_im, (image_size, image_size),
                                interpolation=cv2.INTER_LINEAR)

        if np.max(Iou) < 0.15:  # output class label only
            # IoU with all gts must be below the 0.15 threshold
            # save_file = os.path.join(neg_save_dir, "%s.jpg"%n_idx)
            #f2.write(r"xx/negative/%s"%n_idx + ' 0\n')
            im = resized_im
            h, w, ch = resized_im.shape
            if h != image_size or w != image_size:
                im = cv2.resize(im, (image_size, image_size))
            im = np.swapaxes(im, 0, 2)
            im = (im - 127.5) / 127.5
            label = 0
            roi = [-1, -1, -1, -1]
            pts = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
            neg_cls_list.append([im, label, roi])
            n_idx += 1
            neg_num += 1
            # success = cv2.imwrite(save_file, resized_im)
            # if not success:
            #     raise Exception('Not writing file!')
    return pos_cls_list, pos_roi_list, neg_cls_list, part_roi_list
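The leading underscore and per-annotation signature of _process_one suggest it is meant as a parallel worker; a hypothetical driver (anno_file and the pool size are placeholders, and the globals the worker reads, such as im_dir and image_size, must already be defined at module level):

from multiprocessing import Pool

with open(anno_file, 'r') as f:
    annotations = f.readlines()

with Pool(processes=8) as pool:
    per_image = pool.map(_process_one, annotations)

# flatten the per-image lists into four sample pools
pos_cls, pos_roi, neg_cls, part_roi = [], [], [], []
for p_cls, p_roi, n_cls, pt_roi in per_image:
    pos_cls.extend(p_cls)
    pos_roi.extend(p_roi)
    neg_cls.extend(n_cls)
    part_roi.extend(pt_roi)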
Example no. 23
def gen_onet_data(data_dir, anno_file, p_model_path, r_model_path, prefix=''):
    '''
    :param data_dir: train dataset dir
    :param anno_file: annotation file
    :param p_model_path: pnet model file
    :param r_model_path: rnet model file
    :param prefix: origin image root dir
    :return:
    '''
    neg_save_dir = os.path.join(data_dir, '48_train/negative')
    pos_save_dir = os.path.join(data_dir, '48_train/positive')
    part_save_dir = os.path.join(data_dir, '48_train/part')

    neg_save_dir_val = os.path.join(data_dir, '48_val/negative')
    pos_save_dir_val = os.path.join(data_dir, '48_val/positive')
    part_save_dir_val = os.path.join(data_dir, '48_val/part')

    neg_save_dir_test = os.path.join(data_dir, '48_test/negative')
    pos_save_dir_test = os.path.join(data_dir, '48_test/positive')
    part_save_dir_test = os.path.join(data_dir, '48_test/part')

    per_train = 0.7
    per_val = 0.2
    per_test = 0.1

    image_size = 48

    for dir_path in [
            neg_save_dir, pos_save_dir, part_save_dir, neg_save_dir_val,
            pos_save_dir_val, part_save_dir_val, neg_save_dir_test,
            pos_save_dir_test, part_save_dir_test
    ]:
        if not os.path.exists(dir_path):
            os.mkdir(dir_path)

    post_save_file = os.path.join(config.ANNO_STORE_DIR,
                                  config.ONET_POSITIVE_ANNO_FILENAME)
    neg_save_file = os.path.join(config.ANNO_STORE_DIR,
                                 config.ONET_NEGATIVE_ANNO_FILENAME)
    part_save_file = os.path.join(config.ANNO_STORE_DIR,
                                  config.ONET_PART_ANNO_FILENAME)

    post_save_test_file = os.path.join(config.ANNO_STORE_DIR,
                                       config.ONET_POSITIVE_TEST_ANNO_FILENAME)
    neg_save_test_file = os.path.join(config.ANNO_STORE_DIR,
                                      config.ONET_NEGATIVE_TEST_ANNO_FILENAME)
    part_save_test_file = os.path.join(config.ANNO_STORE_DIR,
                                       config.ONET_PART_TEST_ANNO_FILENAME)

    post_save_val_file = os.path.join(config.ANNO_STORE_DIR,
                                      config.ONET_POSITIVE_VALID_ANNO_FILENAME)
    neg_save_val_file = os.path.join(config.ANNO_STORE_DIR,
                                     config.ONET_NEGATIVE_VALID_ANNO_FILENAME)
    part_save_val_file = os.path.join(config.ANNO_STORE_DIR,
                                      config.ONET_PART_VALID_ANNO_FILENAME)

    f1 = open(post_save_file, 'w')
    f2 = open(neg_save_file, 'w')
    f3 = open(part_save_file, 'w')

    f1_test = open(post_save_test_file, 'w')
    f2_test = open(neg_save_test_file, 'w')
    f3_test = open(part_save_test_file, 'w')

    f1_val = open(post_save_val_file, 'w')
    f2_val = open(neg_save_val_file, 'w')
    f3_val = open(part_save_val_file, 'w')

    with open(anno_file, 'r') as f:
        annotations = f.readlines()
        random.shuffle(annotations)

    num = len(annotations)

    pnet, rnet = creat_prnet(p_model_path, r_model_path, 'cuda:1')
    prnetDetector = PRnetDetector(pnet=pnet, rnet=rnet, min_face_size=12)

    p_idx = 0
    n_idx = 0
    d_idx = 0
    image_done = 0

    all_boxes = list()
    for annotation in annotations[:10000]:
        try:
            annotation = annotation.strip().split(' ')
            path = os.path.join(prefix, annotation[0])
            bbox = list(map(float, annotation[1:]))  # generate boxes randomly to get negative images
            boxes = np.array(bbox, dtype=np.float32).reshape(-1, 4)
            per = random.randint(0, 10000)
            img = cv2.imread(path)

            b, boxes_align = prnetDetector.detect_face(img)
            if isinstance(boxes_align, tuple):
                continue
            if boxes_align is None:
                continue
            if boxes_align.shape[0] == 0:
                continue
            all_boxes.append(boxes_align)
            if image_done % 100 == 0:
                print("%d images done" % image_done)
            image_done += 1
            dets = convert_to_square(boxes_align)
            dets[:, 0:4] = np.round(dets[:, 0:4])
            for box in dets:
                x_left, y_top, x_right, y_bottom = box[0:4].astype(int)
                width = x_right - x_left + 1
                height = y_bottom - y_top + 1

                if width < 20 or x_left < 0 or y_top < 0 or x_right > img.shape[
                        1] - 1 or y_bottom > img.shape[0] - 1:
                    continue

                Iou = IoU(box, boxes)
                cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :]
                resized_im = cv2.resize(cropped_im, (image_size, image_size),
                                        interpolation=cv2.INTER_LINEAR)

                # save negative images and write label
                if np.max(Iou) < 0.3:
                    # Iou with all gts must below 0.3
                    if per < 1000:
                        save_file = os.path.join(neg_save_dir_test,
                                                 "%s.jpg" % n_idx)
                        f2_test.write(save_file + ' 0\n')
                        cv2.imwrite(save_file, resized_im)
                    elif per < 3000:
                        save_file = os.path.join(neg_save_dir_val,
                                                 "%s.jpg" % n_idx)
                        f2_val.write(save_file + ' 0\n')
                        cv2.imwrite(save_file, resized_im)
                    else:
                        save_file = os.path.join(neg_save_dir,
                                                 "%s.jpg" % n_idx)
                        f2.write(save_file + ' 0\n')
                        cv2.imwrite(save_file, resized_im)
                    n_idx += 1
                else:
                    # find gt_box with the highest iou
                    idx = np.argmax(Iou)
                    assigned_gt = boxes[idx]
                    x1, y1, x2, y2 = assigned_gt

                    # compute bbox reg label
                    offset_x1 = (x1 - x_left) / float(width)
                    offset_y1 = (y1 - y_top) / float(height)
                    offset_x2 = (x2 - x_right) / float(width)
                    offset_y2 = (y2 - y_bottom) / float(height)

                    # save positive and part-face images and write labels
                    if np.max(Iou) >= 0.65:
                        if per < 1000:
                            save_file = os.path.join(pos_save_dir_test,
                                                     "%s.jpg" % p_idx)
                            f1_test.write(
                                save_file + ' 1 %.2f %.2f %.2f %.2f\n' %
                                (offset_x1, offset_y1, offset_x2, offset_y2))
                            cv2.imwrite(save_file, resized_im)
                        elif per < 3000:
                            save_file = os.path.join(pos_save_dir_val,
                                                     "%s.jpg" % p_idx)
                            f1_val.write(
                                save_file + ' 1 %.2f %.2f %.2f %.2f\n' %
                                (offset_x1, offset_y1, offset_x2, offset_y2))
                            cv2.imwrite(save_file, resized_im)
                        else:
                            save_file = os.path.join(pos_save_dir,
                                                     "%s.jpg" % p_idx)
                            f1.write(
                                save_file + ' 1 %.2f %.2f %.2f %.2f\n' %
                                (offset_x1, offset_y1, offset_x2, offset_y2))
                            cv2.imwrite(save_file, resized_im)
                        p_idx += 1

                    elif np.max(Iou) >= 0.4:
                        if per < 1000:
                            save_file = os.path.join(part_save_dir_test,
                                                     "%s.jpg" % d_idx)
                            f3_test.write(
                                save_file + ' -1 %.2f %.2f %.2f %.2f\n' %
                                (offset_x1, offset_y1, offset_x2, offset_y2))
                            cv2.imwrite(save_file, resized_im)
                        elif per < 3000:
                            save_file = os.path.join(part_save_dir_val,
                                                     "%s.jpg" % d_idx)
                            f3_val.write(
                                save_file + ' -1 %.2f %.2f %.2f %.2f\n' %
                                (offset_x1, offset_y1, offset_x2, offset_y2))
                            cv2.imwrite(save_file, resized_im)
                        else:
                            save_file = os.path.join(part_save_dir,
                                                     "%s.jpg" % d_idx)
                            f3.write(
                                save_file + ' -1 %.2f %.2f %.2f %.2f\n' %
                                (offset_x1, offset_y1, offset_x2, offset_y2))
                            cv2.imwrite(save_file, resized_im)
                        d_idx += 1
        except RuntimeError as e:
            if 'out of memory' in str(e):
                print('| WARNING: ran out of memory')
                if hasattr(torch.cuda, 'empty_cache'):
                    torch.cuda.empty_cache()
            else:
                raise e

    f1.close()
    f2.close()
    f3.close()
    f1_val.close()
    f2_val.close()
    f3_val.close()
    f1_test.close()
    f2_test.close()
    f3_test.close()
Example no. 24
        continue

    assert (len(bbox) == 4), str(bbox.shape)
    assert (len(animoji) == 140), str(animoji.shape)
    assert (boxes_pred.shape[0] == landmarks_pred.shape[0]), str(
        boxes_pred.shape) + str(landmarks_pred.shape)
    assert (boxes_pred.shape[0] == animojis_pred.shape[0]), str(
        boxes_pred.shape) + str(animojis_pred.shape)
    assert (boxes_pred.shape[1] == 5), str(boxes_pred.shape)
    assert (landmarks_pred.shape[1:] == (5, 2)), str(landmarks_pred.shape)
    assert (animojis_pred.shape[1:] == (70, 2)), str(animojis_pred.shape)
    assert (len(boxes_pred.shape) == 2), str(boxes_pred.shape)
    assert (len(landmarks_pred.shape) == 3), str(landmarks_pred.shape)
    assert (len(animojis_pred.shape) == 3), str(animojis_pred.shape)

    iou, _ = IoU(bbox, boxes_pred)
    idx = np.argmax(iou)

    if iou[idx] < 0.4:
        error_detect_face.append(filename)
        continue

    box_pred = boxes_pred[idx][:-1]
    landmark_pred = landmarks_pred[idx]
    animoji_pred = animojis_pred[idx]
    animoji = animoji.reshape(70, 2)
    animoji = Animoji(animoji)
    animoji_pred = Animoji(animoji_pred)

    left_eye_point = animoji.left_eye
    right_eye_point = animoji.right_eye
Exemplo n.º 25
0
    else:
        face_up_down_label = -1


    neg_num = 0
    if DEBUG:
        negative_image_path = os.path.join(negative_image_dir, a_image_name)
        negative_image = a_image.copy()

    while neg_num < 60:
        size = npr.randint(40, min(width, height) / 2)
        nx = npr.randint(0, width - size)
        ny = npr.randint(0, height - size)
        crop_box = np.array([nx, ny, nx + size, ny + size])

        Iou = IoU(crop_box, bboxes)

        if DEBUG:
            cv2.rectangle(negative_image, (nx, ny), (nx + size, ny + size), (0, 255, 0), 2)

        if np.max(Iou) < 0.3:
            # IoU with every gt box must stay below 0.3
            cropped_im = a_image[ny : ny + size, nx : nx + size, :].copy()
            resized_im = cv2.resize(cropped_im, (IMAGE_SIZE, IMAGE_SIZE), interpolation=cv2.INTER_LINEAR)
            save_file = os.path.join(neg_save_dir, "%s.jpg" % n_idx)
            f2.write("24/negative/%s.jpg" % n_idx + ' 0 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1\n')
            cv2.imwrite(save_file, resized_im)
            n_idx += 1
            neg_num += 1
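
Every example here calls an IoU helper whose definition sits outside these snippets. A minimal vectorized sketch consistent with the one-box-versus-many calls (note that Exemplo n.º 24 uses a variant that returns a tuple) could be:

import numpy as np

def IoU(box, boxes):
    # box:   1-D array [x1, y1, x2, y2] (a trailing score element is ignored)
    # boxes: 2-D array of shape (N, 4) in the same corner order
    # returns a length-N array of overlaps in [0, 1]
    box_area = (box[2] - box[0] + 1) * (box[3] - box[1] + 1)
    areas = (boxes[:, 2] - boxes[:, 0] + 1) * (boxes[:, 3] - boxes[:, 1] + 1)
    xx1 = np.maximum(box[0], boxes[:, 0])
    yy1 = np.maximum(box[1], boxes[:, 1])
    xx2 = np.minimum(box[2], boxes[:, 2])
    yy2 = np.minimum(box[3], boxes[:, 3])
    w = np.maximum(0, xx2 - xx1 + 1)
    h = np.maximum(0, yy2 - yy1 + 1)
    inter = w * h
    return inter / (box_area + areas - inter)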
Exemplo n.º 26
0
def save_hard_example(target, data, save_path):
    # load ground truth from annotation file
    # format of each line: image/path [x1,y1,x2,y2] for each gt_box in this image
    image_size = cfg.resize[target]
    im_idx_list = data['images']
    gt_boxes_list = data['bboxes']
    num_of_images = len(im_idx_list)

    neg_file = open(join(cfg.path_output_txt, '%s_neg.txt' % target), 'w')
    pos_file = open(join(cfg.path_output_txt, '%s_pos.txt' % target), 'w')
    part_file = open(join(cfg.path_output_txt, '%s_part.txt' % target), 'w')

    dirs = ['neg', 'part', 'pos']
    dirs = [join(cfg.path_output_files, '%s_%s' % (target, d)) for d in dirs]
    for d in dirs:
        if not os.path.exists(d): os.makedirs(d)

    with open(save_path, 'rb') as f:
        det_boxes = pickle.load(f)

    assert len(det_boxes) == num_of_images, \
        "incorrect detections or ground truths"

    # index of neg, pos and part face, used as their image names
    n_idx = 0
    p_idx = 0
    d_idx = 0
    # im_idx_list:   image paths (list)
    # det_boxes:     detection results (list)
    # gt_boxes_list: ground-truth boxes (list)
    for im_idx, dets, gts in tqdm(zip(im_idx_list, det_boxes, gt_boxes_list),
                                  total=num_of_images):
        gts = np.array(gts, dtype=np.float32).reshape(-1, 4)

        if dets.shape[0] == 0:
            continue
        img = cv2.imread(im_idx)
        #change to square
        dets = convert_to_square(dets)
        dets[:, 0:4] = np.round(dets[:, 0:4])
        neg_num = 0
        for box in dets:
            x_left, y_top, x_right, y_bottom, _ = box.astype(int)
            width = x_right - x_left + 1
            height = y_bottom - y_top + 1

            # ignore box that is too small or beyond image border
            if width < 20 or x_left < 0 or y_top < 0 \
                    or x_right > img.shape[1] - 1 or y_bottom > img.shape[0] - 1:
                continue

            # compute intersection over union (IoU) between the current box and all gt boxes
            Iou = IoU(box, gts)
            cropped_im = img[y_top:y_bottom + 1, x_left:x_right + 1, :]
            resized_im = cv2.resize(cropped_im, (image_size, image_size),
                                    interpolation=cv2.INTER_LINEAR)

            # save negative images and write labels;
            # IoU with every gt box must be below 0.3
            if np.max(Iou) < 0.3 and neg_num < 60:
                content = '0\n'
                SaveTxt(neg_file, target, n_idx, content, 'neg')
                SaveImg(resized_im, target, n_idx, 'neg')

                n_idx += 1
                neg_num += 1
            else:
                # find gt_box with the highest iou
                idx = np.argmax(Iou)
                assigned_gt = gts[idx]
                x1, y1, x2, y2 = assigned_gt

                # compute bbox reg label
                offset_x1 = (x1 - x_left) / float(width)
                offset_y1 = (y1 - y_top) / float(height)
                offset_x2 = (x2 - x_right) / float(width)
                offset_y2 = (y2 - y_bottom) / float(height)

                # save positive and part-face images and write labels
                if np.max(Iou) >= 0.65:
                    content = '1 %.2f %.2f %.2f %.2f\n' % (
                        offset_x1, offset_y1, offset_x2, offset_y2)
                    SaveTxt(pos_file, target, p_idx, content, 'pos')
                    SaveImg(resized_im, target, p_idx, 'pos')
                    p_idx += 1

                elif np.max(Iou) >= 0.4:
                    content = '-1 %.2f %.2f %.2f %.2f\n' % (
                        offset_x1, offset_y1, offset_x2, offset_y2)
                    SaveTxt(part_file, target, d_idx, content, 'part')
                    SaveImg(resized_im, target, d_idx, 'part')
                    d_idx += 1
    neg_file.close()
    part_file.close()
    pos_file.close()
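
save_hard_example converts detections to squares before cropping, so every network input keeps a 1:1 aspect ratio. convert_to_square is not defined in these snippets; a common sketch, assuming (N, 5) boxes with a trailing score column as used above (Exemplo n.º 2 applies a variant to a single box), is:

import numpy as np

def convert_to_square(bbox):
    # expand each box to a square centered on the original box,
    # leaving any extra columns (e.g. the detection score) untouched
    square = bbox.copy()
    w = bbox[:, 2] - bbox[:, 0] + 1
    h = bbox[:, 3] - bbox[:, 1] + 1
    side = np.maximum(w, h)
    square[:, 0] = bbox[:, 0] + w * 0.5 - side * 0.5
    square[:, 1] = bbox[:, 1] + h * 0.5 - side * 0.5
    square[:, 2] = square[:, 0] + side - 1
    square[:, 3] = square[:, 1] + side - 1
    return square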
Exemplo n.º 27
0
        output = torch.sigmoid(model(x))

        optimizer.zero_grad()

        loss = mse_loss(output, y)

        loss.backward()
        optimizer.step()

        scheduler.step()

        sum_loss += loss.item()

        predict = output.cpu().detach().numpy().clip(min=0, max=1)
        y = y.cpu().detach().numpy()
        iou += IoU(predict, y)

    print(time.strftime('%H:%M:%S'))
    print('Epoch : %d Train loss : %.5f Train metric : %.4f' %
          (i, sum_loss / (n + 1), iou / (n + 1)))

    model.eval()
    sum_loss = 0
    iou = 0

    with torch.no_grad():
        for n, (x, y) in enumerate(test_loader):

            if gpu:
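
Unlike the detection snippets, this training loop applies IoU to whole sigmoid output maps, i.e. as a batch segmentation metric rather than a box overlap. The metric's definition is not shown; a plausible sketch (the 0.5 threshold is an assumption) is:

import numpy as np

def IoU(predict, target, threshold=0.5, eps=1e-7):
    # predict, target: arrays of shape (B, ...) with values in [0, 1]
    # returns the mean IoU over the batch after thresholding
    p = (predict > threshold).reshape(predict.shape[0], -1)
    t = (target > threshold).reshape(target.shape[0], -1)
    inter = np.logical_and(p, t).sum(axis=1)
    union = np.logical_or(p, t).sum(axis=1)
    return float(np.mean(inter / (union + eps)))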
Exemplo n.º 28
0
    if idx % 100 == 0:
        print('%d depth images done' % idx)

    height, width = depth_img.shape
    # Here might be a problem, if what read in is not a cv::Mat

    # Generating negative
    neg_num = 0
    while neg_num < 50:
        size = npr.randint(int(min(width, height) / 2), min(width, height))

        nx = npr.randint(0, width - size)
        ny = npr.randint(0, height - size)
        crop_box = np.array([nx, ny, nx + size, ny + size])

        Iou = IoU(crop_box, box)

        cropped_depth_im = depth_img[ny:ny + size, nx:nx + size]
        # NOTE: its img[y: y + h, x: x + w] and *not* img[x: x + w, y: y + h]
        resized_depth_im = cv2.resize(cropped_depth_im, (out_size, out_size),
                                      interpolation=cv2.INTER_LINEAR)

        if len(Iou) == 0:
            print("Something went wrong: got no IoU for this image: " + depth_path)
            continue
        if Iou[0] < 0.3:
            # print "Iou[0]: %d" % (Iou[0])
            save_path = os.path.join(neg_img_dir, '%d.npy' % n_id)
            f2.write("negative/%s.npy 0\n" % n_id)

            # fs = cv2.FileStorage(save_path, cv2.FileStorage_WRITE)
Exemplo n.º 29
0
def gen_data(anno_file, save_dir, img_dir):

    size = 48
    image_id = 0

    landmark_imgs_save_dir = os.path.join(save_dir, 'landmark')
    if not os.path.exists(landmark_imgs_save_dir):
        os.makedirs(landmark_imgs_save_dir)

    f1 = open(os.path.join(save_dir, 'landmark_48.txt'), 'w')

    with open(anno_file, 'r') as f:
        annotations = f.readlines()

    num = len(annotations)
    print("%d total images" % num)

    l_idx = 0
    idx = 0
    # image_path bbox landmark(5*2)
    for annotation in annotations:
        # print imgPath

        annotation = annotation.strip().split(' ')

        im_path = os.path.join(img_dir, annotation[0].replace("\\", "/"))

        gt_box = list(map(float, annotation[1:5]))
        # gt_box = [gt_box[0], gt_box[2], gt_box[1], gt_box[3]]

        gt_box = np.array(gt_box, dtype=np.int32)

        landmark = list(map(float, annotation[5:]))
        landmark = np.array(landmark, dtype=np.float64)  # np.float is removed in recent NumPy

        img = cv2.imread(im_path)

        height, width, channel = img.shape
        # crop_face = img[gt_box[1]:gt_box[3]+1, gt_box[0]:gt_box[2]+1]
        # crop_face = cv2.resize(crop_face,(size,size))

        idx = idx + 1
        if idx % 100 == 0:
            print("%d images done, landmark images: %d" % (idx, l_idx))
        # print(im_path)
        # print(gt_box)
        # the annotation stores the box as (x1, x2, y1, y2);
        # reorder gt_box in place to (x1, y1, x2, y2)
        x1, x2, y1, y2 = gt_box
        gt_box[1] = y1
        gt_box[2] = x2

        # gt's width
        w = x2 - x1 + 1
        # gt's height
        h = y2 - y1 + 1
        if max(w, h) < 40 or x1 < 0 or y1 < 0:
            continue
        # random shift
        for i in range(10):
            bbox_size = np.random.randint(int(min(w, h) * 0.8),
                                          np.ceil(1.25 * max(w, h)))
            delta_x = np.random.randint(-w * 0.2, w * 0.2)
            delta_y = np.random.randint(-h * 0.2, h * 0.2)
            nx1 = max(x1 + w / 2 - bbox_size / 2 + delta_x, 0)
            ny1 = max(y1 + h / 2 - bbox_size / 2 + delta_y, 0)

            nx2 = nx1 + bbox_size
            ny2 = ny1 + bbox_size
            if nx2 > width or ny2 > height:
                continue
            crop_box = np.array([nx1, ny1, nx2, ny2])
            cropped_im = img[int(ny1):int(ny2) + 1, int(nx1):int(nx2) + 1, :]
            resized_im = cv2.resize(cropped_im, (size, size),
                                    interpolation=cv2.INTER_LINEAR)

            offset_x1 = (x1 - nx1) / float(bbox_size)
            offset_y1 = (y1 - ny1) / float(bbox_size)
            offset_x2 = (x2 - nx2) / float(bbox_size)
            offset_y2 = (y2 - ny2) / float(bbox_size)

            offset_left_eye_x = (landmark[0] - nx1) / float(bbox_size)
            offset_left_eye_y = (landmark[1] - ny1) / float(bbox_size)

            offset_right_eye_x = (landmark[2] - nx1) / float(bbox_size)
            offset_right_eye_y = (landmark[3] - ny1) / float(bbox_size)

            offset_nose_x = (landmark[4] - nx1) / float(bbox_size)
            offset_nose_y = (landmark[5] - ny1) / float(bbox_size)

            offset_left_mouth_x = (landmark[6] - nx1) / float(bbox_size)
            offset_left_mouth_y = (landmark[7] - ny1) / float(bbox_size)

            offset_right_mouth_x = (landmark[8] - nx1) / float(bbox_size)
            offset_right_mouth_y = (landmark[9] - ny1) / float(bbox_size)

            # cal iou
            iou = IoU(crop_box.astype(np.float64),
                      np.expand_dims(gt_box.astype(np.float64), 0))
            # print(iou)
            if iou > 0.65:
                save_file = os.path.join(landmark_imgs_save_dir,
                                         "%s.jpg" % l_idx)
                cv2.imwrite(save_file, resized_im)

                f1.write( "landmark/%s.jpg" % l_idx + ' -2 %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f %.2f \n' % \
                (offset_x1, offset_y1, offset_x2, offset_y2,offset_left_eye_x,offset_left_eye_y,offset_right_eye_x,offset_right_eye_y,offset_nose_x,offset_nose_y,offset_left_mouth_x,offset_left_mouth_y,offset_right_mouth_x,offset_right_mouth_y))
                # print(save_file)
                # print(save_landmark_anno)
                l_idx += 1
    f1.close()
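
The fourteen values written per sample are the four box offsets followed by five (x, y) landmark pairs, each shifted by the crop origin and divided by the crop size, so they fall in [0, 1] relative to the crop. A small sketch of that mapping and its inverse (helper names are hypothetical):

import numpy as np

def normalize_landmarks(landmark, nx1, ny1, bbox_size):
    # absolute (x, y) pairs -> crop-relative coordinates in [0, 1]
    return (np.asarray(landmark, dtype=np.float64)
            - np.array([nx1, ny1])) / float(bbox_size)

def denormalize_landmarks(norm_landmark, nx1, ny1, bbox_size):
    # inverse: crop-relative coordinates -> absolute image coordinates
    return np.asarray(norm_landmark) * float(bbox_size) + np.array([nx1, ny1])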
Exemplo n.º 30
0
def GetLandmark(target):
    assert target in ['pnet', 'rnet', 'onet']
    resize = cfg.resize[target]
    landmark_save_dir = join(cfg.path_output_files, target + '_landmark')
    if not exists(landmark_save_dir): os.mkdir(landmark_save_dir)

    
    image_id = 0
    file = open(join(cfg.path_output_txt, target + '_landmark.txt'), 'w')
    data = ReadLandmarkData(cfg.path_landmark_labels, cfg.path_landmark_imgs)

    
    if cfg.debug:
        if len(data)>200: data = data[:200]
    
    #image_path bbox landmark(5*2)
    for (imgPath, bbox, landmarkGt) in tqdm(data):
        #print imgPath
        F_imgs = []
        F_landmarks = []  
        img = cv2.imread(imgPath)
        assert(img is not None)
        img_h,img_w,img_c = img.shape
        gt_box = np.array([bbox.left,bbox.top,bbox.right,bbox.bottom])
        f_face = img[bbox.top:bbox.bottom+1,bbox.left:bbox.right+1]
        f_face = cv2.resize(f_face,(resize,resize))
        landmark = np.zeros((5, 2))
        #normalize
        for index, one in enumerate(landmarkGt):
            rv = ((one[0]-gt_box[0])/(gt_box[2]-gt_box[0]), (one[1]-gt_box[1])/(gt_box[3]-gt_box[1]))
            landmark[index] = rv
        
        F_imgs.append(f_face)
        F_landmarks.append(landmark.reshape(10))
        landmark = np.zeros((5, 2))


        x1, y1, x2, y2 = gt_box
        #gt's width
        gt_w = x2 - x1 + 1
        #gt's height
        gt_h = y2 - y1 + 1        
        if max(gt_w, gt_h) < 40 or x1 < 0 or y1 < 0: continue
        #random shift
        for i in range(10):
            bbox_size = npr.randint(int(min(gt_w, gt_h) * 0.8), np.ceil(1.25 * max(gt_w, gt_h)))
            delta_x = npr.randint(-gt_w * 0.2, gt_w * 0.2)
            delta_y = npr.randint(-gt_h * 0.2, gt_h * 0.2)
            nx1 = int(max(x1+gt_w/2-bbox_size/2+delta_x,0))
            ny1 = int(max(y1+gt_h/2-bbox_size/2+delta_y,0))
            
            nx2 = nx1 + bbox_size
            ny2 = ny1 + bbox_size
            if nx2 > img_w or ny2 > img_h: continue
            crop_box = np.array([nx1,ny1,nx2,ny2])
            cropped_im = img[ny1:ny2+1,nx1:nx2+1,:]
            resized_im = cv2.resize(cropped_im, (resize, resize))
            #cal iou
            iou = IoU(crop_box, np.expand_dims(gt_box,0))
            if iou < 0.65: continue

            F_imgs.append(resized_im)
            #normalize
            for index, one in enumerate(landmarkGt):
                rv = ((one[0]-nx1)/bbox_size, (one[1]-ny1)/bbox_size)
                landmark[index] = rv

            F_landmarks.append(landmark.reshape(10))
            landmark = np.zeros((5, 2))
            landmark_ = F_landmarks[-1].reshape(-1,2)
            bbox = BBox([nx1,ny1,nx2,ny2])                    

            #mirror                    
            if random.choice([0,1]) > 0:
                face_flipped, landmark_flipped = flip(resized_im, landmark_)
                face_flipped = cv2.resize(face_flipped, (resize, resize))
                F_imgs.append(face_flipped)
                F_landmarks.append(landmark_flipped.reshape(10))
            #rotate
            if random.choice([0,1]) > 0:
                face_rotated_by_alpha, landmark_rotated = rotate(img, bbox, \
                                                                 bbox.reprojectLandmark(landmark_), 5)  # counter-clockwise rotation
                #landmark_offset
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (resize, resize))
                F_imgs.append(face_rotated_by_alpha)
                F_landmarks.append(landmark_rotated.reshape(10))
        
                #flip
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (resize, resize))
                F_imgs.append(face_flipped)
                F_landmarks.append(landmark_flipped.reshape(10))                
            
            # clockwise rotation (negative angle)
            if random.choice([0,1]) > 0:
                face_rotated_by_alpha, landmark_rotated = rotate(img, bbox, \
                                                                 bbox.reprojectLandmark(landmark_), -5)
                landmark_rotated = bbox.projectLandmark(landmark_rotated)
                face_rotated_by_alpha = cv2.resize(face_rotated_by_alpha, (resize, resize))
                F_imgs.append(face_rotated_by_alpha)
                F_landmarks.append(landmark_rotated.reshape(10))
        
                face_flipped, landmark_flipped = flip(face_rotated_by_alpha, landmark_rotated)
                face_flipped = cv2.resize(face_flipped, (resize, resize))
                F_imgs.append(face_flipped)
                F_landmarks.append(landmark_flipped.reshape(10)) 
                
        F_imgs, F_landmarks = np.asarray(F_imgs), np.asarray(F_landmarks)

        for i in range(len(F_imgs)):

            if np.sum(F_landmarks[i] <= 0) > 0: continue

            if np.sum(F_landmarks[i] >= 1) > 0: continue

            cv2.imwrite(join(landmark_save_dir,'%d.jpg' %(image_id)), F_imgs[i])
            landmarks = map(str,list(F_landmarks[i]))
            name = join('%s_landmark'%target, '%d.jpg'%image_id)
            file.write(name +" -2 "+" ".join(landmarks)+"\n")
            image_id = image_id + 1
        
    file.close()
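
GetLandmark augments with flip and rotate helpers defined elsewhere. Because the landmarks are crop-normalized, mirroring maps x to 1 - x and must also swap the left/right points; a sketch consistent with the calls above, assuming the usual five-point order (left eye, right eye, nose, left mouth corner, right mouth corner):

import cv2
import numpy as np

def flip(face, landmark):
    # mirror the crop around its vertical axis
    face_flipped = cv2.flip(face, 1)
    # x -> 1 - x in crop-normalized coordinates
    landmark_ = np.asarray([(1 - x, y) for (x, y) in landmark])
    landmark_[[0, 1]] = landmark_[[1, 0]]  # swap left/right eye
    landmark_[[3, 4]] = landmark_[[4, 3]]  # swap left/right mouth corner
    return face_flipped, landmark_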