Example #1
    def processForHuman(self):
        n = 0
        for imgs, labels, bboxes in zip(self.image_group_list, self.label_group_list, self.bbox_group_list):
            core_index = 0
            core_image, core_label, core_bbox = imgs[core_index], labels[core_index], bboxes[core_index]
            if core_bbox[2] - core_bbox[0] > core_image.shape[1] * 0.5:
                continue
            left = core_bbox[0]
            right = core_image.shape[1] - core_bbox[2]
            right_flag = right > left

            if len(imgs) == 3:
                core_image, core_label = self._processWithThreePerson(imgs, labels, bboxes, core_image, core_label, core_bbox)
            else:
                core_image, core_label = self._processWithTwoPerson(imgs, labels, bboxes, core_image, core_label, core_bbox, right_flag)
            
            if core_image is None:
                continue
            self.generated_image_list.append(core_image)
            self.generated_label_list.append(core_label)
            cv2.imwrite('{}/{}.jpg'.format(self.config.output_image_dir, n+1), core_image)
            cv2.imwrite('{}/{}.png'.format(self.config.output_label_dir, n+1), core_label)
            n += 1
            if n % 10 == 0:
                LOG.logI('Generate {} images...'.format(n))
Example #2
    def _filterImageForClothes(self):
        self.image_list = []
        self.label_list = []
        self.human_mask_list = []

        for i, (img_path, label_path, human_mask_path) in enumerate(
                zip(self.image_path_list, self.label_path_list,
                    self.human_mask_path_list)):

            if i % 100 == 0 and i != 0:
                LOG.logI('Filter {} images...'.format(i))

            img = cv2.imread(img_path)
            label = cv2.imread(label_path)
            human_mask = cv2.imread(human_mask_path)
            clo_bbox = self._clothesMask2Box(
                cv2.cvtColor(label, cv2.COLOR_BGR2GRAY))
            person_bbox = self._personMask2Box(
                cv2.cvtColor(human_mask, cv2.COLOR_BGR2GRAY))
            if person_bbox is None:
                continue
            src_bbox = [
                min(clo_bbox[0], person_bbox[0]),
                min(clo_bbox[1], person_bbox[1]),
                max(clo_bbox[2], person_bbox[2]),
                max(clo_bbox[3], person_bbox[3])
            ]
            if src_bbox[2] - src_bbox[0] > 0.5 * img.shape[1]:
                continue
            self.human_bbox_list.append(src_bbox)
            self.human_mask_list.append(human_mask)
            self.bbox_list.append(clo_bbox)
            self.image_list.append(img)
            self.label_list.append(label)
Example #3
    def colorFillImg(self, img):
        h, w = img.shape[:2]
        if h < self.min_block_size or w < self.min_block_size:
            mode = random.choice(self.mode_list)
            if mode == "origin":
                return img
            elif mode == "pure":
                bg_color = random.choice(self.fillcolors)
                r_channel = np.ones(img.shape[:2], dtype=np.uint8) * bg_color[0]
                g_channel = np.ones(img.shape[:2], dtype=np.uint8) * bg_color[1]
                b_channel = np.ones(img.shape[:2], dtype=np.uint8) * bg_color[2]
                return cv2.merge((b_channel, g_channel, r_channel))
            elif mode == "image":
                filled_img = cv2.imread(os.path.join(self.images_dir, self.images[np.random.randint(0, self.images_num)]))
                return cv2.resize(filled_img, (img.shape[1], img.shape[0]))
            # all valid modes return above, so reaching here means an invalid mode: log and exit
            LOG.logE("colorFillImg mode error: {} not in ['origin', 'pure', 'image']".format(mode), exit=True)
        max_side = max(h, w)
        borderline = np.random.randint(int(max_side / self.split_ratio), int(2 * max_side / self.split_ratio))
        if h > w:
            img_top = self.colorFillImg(img[0:borderline, 0:w])
            img_bottom = self.colorFillImg(img[borderline:h, 0:w])
            img_res = np.vstack([img_top, img_bottom])
        else:
            img_left = self.colorFillImg(img[0:h, 0:borderline])
            img_right = self.colorFillImg(img[0:h, borderline:w])
            img_res = np.hstack([img_left, img_right])
        return img_res
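For illustration, here is a minimal, self-contained sketch of the same recursive split-and-fill idea, with the class attributes (mode_list, fillcolors, min_block_size, split_ratio) collapsed into hypothetical constants and only the "pure" fill mode kept:

    import random
    import numpy as np

    MIN_BLOCK, SPLIT_RATIO = 64, 3  # hypothetical thresholds

    def color_fill(img):
        h, w = img.shape[:2]
        if h < MIN_BLOCK or w < MIN_BLOCK:
            # leaf block: fill with a random pure BGR color
            color = [random.randint(0, 255) for _ in range(3)]
            return np.full((h, w, 3), color, dtype=np.uint8)
        # split the longer side at a random borderline and recurse on both halves
        max_side = max(h, w)
        border = np.random.randint(int(max_side / SPLIT_RATIO), int(2 * max_side / SPLIT_RATIO))
        if h > w:
            return np.vstack([color_fill(img[:border]), color_fill(img[border:])])
        return np.hstack([color_fill(img[:, :border]), color_fill(img[:, border:])])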
Example #4
    def forward(self, imgs):
        img, label, _, fn = imgs
        sample = self.composer(img).unsqueeze(0).to(self.device)
        with torch.no_grad():
            pred_fusion, _, _ = self.net(sample)
            pred_resize = cv2.resize(
                pred_fusion.squeeze().cpu().numpy().argmax(0),
                (label.shape[1], label.shape[0]),
                interpolation=cv2.INTER_NEAREST)
        cls_iou = 0
        for cls_idx in range(1, self.cls_num):
            mask_label = label == cls_idx
            if np.sum(mask_label) == 0:
                continue
            mask_pred = pred_resize == cls_idx
            tmp = np.sum(mask_label & mask_pred) / np.sum(mask_label | mask_pred)
            cls_iou += tmp
        try:
            mean_iou = cls_iou / (len(np.unique(label)) - 1)
        except ZeroDivisionError:
            LOG.logE("{} has no annotation yet. Please remove it from dataset.".format(fn))
            mean_iou = 0

        if mean_iou < self.min_iou:
            LOG.logI("Image {} has risk on rule {} with cls_iou = {}".format(
                fn, self.name(), mean_iou))
            self.write(img, label, fn)
        return imgs
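The per-class score above is plain intersection-over-union on boolean masks, averaged over the annotated foreground classes. A tiny standalone check with toy arrays:

    import numpy as np

    label = np.array([[0, 1], [1, 2]])  # hypothetical ground truth
    pred = np.array([[0, 1], [2, 2]])   # hypothetical prediction

    ious = []
    for cls_idx in np.unique(label):
        if cls_idx == 0:  # skip background, as the rule above does
            continue
        m_label, m_pred = label == cls_idx, pred == cls_idx
        ious.append(np.sum(m_label & m_pred) / np.sum(m_label | m_pred))
    print(sum(ious) / len(ious))  # 0.5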
Example #5
    def __call__(self):
        for i in range(self.total_num):
            self.buildScene(i)
            self.buildTextWithScene(i)
            self.dumpTextImg(i)
            if i % 5000 == 0:
                LOG.logI('{}/{}'.format(i, self.total_num))
Example #6
    def doTest(self):
        input_image_dir = self.config.input_image_dir
        input_hat_image_dir = self.config.input_hat_image_dir
        input_hat_mask_dir = self.config.input_hat_mask_dir
        output_image_dir = self.config.output_image_dir
        output_anno_dir = self.config.output_anno_dir

        if not os.path.exists(self.config.output_image_dir):
            os.makedirs(self.config.output_image_dir)
        if not os.path.exists(self.config.output_anno_dir):
            os.makedirs(self.config.output_anno_dir)
        
        imgs = os.listdir(input_image_dir)
        hats = os.listdir(input_hat_image_dir)
    
        for i, img in enumerate(imgs):
            LOG.logI('num: {} ---- path: {}'.format(i, os.path.join(input_image_dir, img)))
        
            img_raw = cv2.imread('{}/{}'.format(input_image_dir, img))

            name = random.choice(hats)

            rgb_hat = cv2.imread('{}/{}'.format(input_hat_image_dir, name))
            hat_mask = cv2.imread('{}/{}.png'.format(input_hat_mask_dir, os.path.splitext(name)[0]))

            new_img, mask_label = self._synthesisData(img_raw, rgb_hat, hat_mask)

            if new_img is None:
                continue

            cv2.imwrite("{}/{}".format(output_image_dir, img), new_img)
            cv2.imwrite("{}/{}.png".format(output_anno_dir, os.path.splitext(img)[0]), mask_label)
        self.config.sample = torch.rand((1, 3, 112, 112))
Example #7
    def dumpTextImg(self, i):
        box_num = 0
        txt_name = os.path.join(self.output_dir, '{}_{}.txt'.format(self.dump_prefix, str(i).zfill(6)))
        with open(txt_name, 'w') as fw:
            for idx, fg_image in enumerate(self.fg_images):
                box = self.text_boxes[idx]
                if box is None:
                    continue
                # paste with the alpha channel of the RGBA foreground as the mask
                self.pil_img.paste(fg_image, (box[0], box[1]), fg_image.split()[3])

                coord = self.text_coordinates_in_fg[idx] + [box[0], box[1]]

                s = ''.join('{},{},'.format(x, y) for x, y in coord)
                fw.write(s + '0\n')
                box_num += 1

        pasted_img = cv2.cvtColor(np.asarray(self.pil_img), cv2.COLOR_RGB2BGR)
        image_name = '{}_{}.jpg'.format(self.dump_prefix, str(i).zfill(6))
        self.dumpImgToPath(image_name, pasted_img)

        if box_num <= 0:
            LOG.logW("Image {} pasted no text.".format(i))
Example #8
    def _accumulateMeanStd(self, image_path, label_path):
        img_file = os.path.join(self.sample_path_prefix, image_path.strip())
        label_file = os.path.join(self.sample_path_prefix, label_path.strip())
        label_img = self._buildLabelFromPath(label_file)
        unique_values = np.unique(label_img)

        max_val = max(unique_values)
        min_val = min(unique_values)

        self.max_val_al = max(max_val, self.max_val_al)
        self.min_val_al = min(min_val, self.min_val_al)

        hist = np.histogram(label_img, self.classes)
        self.global_hist += hist[0]

        rgb_img = self._buildSampleFromPath(img_file)
        self.mean[0] += np.mean(rgb_img[:, :, 0])
        self.mean[1] += np.mean(rgb_img[:, :, 1])
        self.mean[2] += np.mean(rgb_img[:, :, 2])

        self.std[0] += np.std(rgb_img[:, :, 0])
        self.std[1] += np.std(rgb_img[:, :, 1])
        self.std[2] += np.std(rgb_img[:, :, 2])

        if max_val > (self.classes - 1) or min_val < 0:
            LOG.logE(
                'Some problem with labels. Please check image file: {}. Labels can only take values between 0 and {} (number of classes - 1).'
                .format(label_file, self.classes - 1),
                exit=True)
Example #9
    def postEpoch(self):
        if self.is_train:
            return
        self.pr_curve = dataset_pr_info(1000, self.pr_curve, self.face_count)
        propose = self.pr_curve[:, 0]
        recall = self.pr_curve[:, 1]
        self.accuracy = voc_ap(recall, propose)
        LOG.logI('Test accuracy: {:.4f}'.format(self.accuracy))
Example #10
    def __init__(self, deepvac_config):
        super(DeepvacPseTest, self).__init__(deepvac_config)
        if len(sys.argv) != 1:
            assert len(sys.argv) == 2, 'You can only pass an image path!'
            LOG.logI('Found image: {}'.format(sys.argv[1]))
            self.conf.test.use_fileline = False
            self.conf.test.image_path = sys.argv[1]
        self.initTestLoader()
Example #11
    def __call__(self, image):
        self._pre_process(image)

        tic = time.time()
        preds = self.config.net(self.input_tensor)
        LOG.logI('net forward time: {:.4f}'.format(time.time() - tic))

        return self._post_process(preds)
Example #12
    def doTest(self):
        for idx, (org_img, img) in enumerate(self.config.test_loader):
            LOG.logI('progress: %d / %d' %
                     (idx + 1, len(self.config.test_loader)))
            org_img = org_img.numpy().astype('uint8')[0]

            img = img.to(self.config.device)
            start_time = time.time()
            outputs = self.config.net(img)
            LOG.logI('net forward time: {:.4f}'.format(time.time() - start_time))

            score = torch.sigmoid(outputs[:, 0, :, :])
            outputs = (torch.sign(outputs - self.config.binary_th) + 1) / 2

            text = outputs[:, 0, :, :]
            kernels = outputs[:, 0:self.config.kernel_num, :, :] * text

            score = score.data.cpu().numpy()[0].astype(np.float32)
            text = text.data.cpu().numpy()[0].astype(np.uint8)
            kernels = kernels.data.cpu().numpy()[0].astype(np.uint8)

            # c++ version pse
            pred = pse(
                kernels, self.config.min_kernel_area /
                (self.config.scale * self.config.scale))

            scale = (org_img.shape[1] * 1.0 / pred.shape[1],
                     org_img.shape[0] * 1.0 / pred.shape[0])
            label = pred
            label_num = np.max(label) + 1
            bboxes = []
            for i in range(1, label_num):
                points = np.array(np.where(label == i)).transpose(
                    (1, 0))[:, ::-1]

                if points.shape[0] < self.config.min_area / (
                        self.config.scale * self.config.scale):
                    continue

                score_i = np.mean(score[label == i])
                if score_i < self.config.min_score:
                    continue

                rect = cv2.minAreaRect(points)
                crop_box = cv2.boxPoints(rect)
                crop_box *= scale
                crop_box[:, 0] = np.clip(crop_box[:, 0], 0, org_img.shape[1])
                crop_box[:, 1] = np.clip(crop_box[:, 1], 0, org_img.shape[0])
                x_max, y_max = np.max(crop_box, axis=0)
                x_min, y_min = np.min(crop_box, axis=0)
                # cv2.rectangle needs integer pixel coordinates
                org_img = cv2.rectangle(org_img, (int(x_min), int(y_min)),
                                        (int(x_max), int(y_max)), (255, 0, 0), 2)
            cv2.imwrite(
                os.path.join(self.config.output_dir,
                             str(idx).zfill(3) + '.jpg'), org_img)
        self.config.sample = img
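For reference, the cv2.minAreaRect/cv2.boxPoints pair used above turns a point set into the four corners of its rotated bounding box; a minimal standalone sketch with a toy point set:

    import cv2
    import numpy as np

    # toy (x, y) points; in the loop above they come from np.where(label == i)
    points = np.array([[10, 10], [50, 12], [48, 30], [12, 28]], dtype=np.float32)
    rect = cv2.minAreaRect(points)  # ((cx, cy), (w, h), angle)
    corners = cv2.boxPoints(rect)   # 4x2 array of rotated-box corners
    print(rect)
    print(corners)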
Example #13
    def __call__(self):
        if os.path.isfile(self.cached_data_file):
            with open(self.cached_data_file, "rb") as f:
                return pickle.load(f)

        data = self.processData()
        if data is None:
            LOG.logE('Error while pickling data. Please check.', exit=True)
        LOG.logI('Process train dataset finished.')
        LOG.logI('Your train dataset mean: {}'.format(data['mean']))
        LOG.logI('Your train dataset std: {}'.format(data['std']))
        LOG.logI('Your train dataset classWeights: {}'.format(
            data['classWeights']))
        return data
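The cache write itself is not shown in this snippet; a minimal sketch of the load-or-compute pattern it implies, with a hypothetical process_fn standing in for processData:

    import os
    import pickle

    def load_or_process(cache_file, process_fn):
        # hit the cache if it exists, otherwise compute and persist the result
        if os.path.isfile(cache_file):
            with open(cache_file, "rb") as f:
                return pickle.load(f)
        data = process_fn()
        with open(cache_file, "wb") as f:
            pickle.dump(data, f)
        return data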
Example #14
    def earlyIter(self):
        start = time.time()
        self.sample = self.sample.to(self.device)
        self.target = [anno.to(self.device) for anno in self.target]
        if not self.is_train:
            return
        self.data_cpu2gpu_time.update(time.time() - start)
        try:
            self.addGraph(self.sample)
        except Exception:
            LOG.logW("Tensorboard addGraph failed. Your network forward may take more than one parameter?")
            LOG.logW("Seems you need to reimplement the preIter function.")
Example #15
    def __call__(self):
        start_time = time.time()
        for i in range(self.total_num):
            # prepare the background image
            self.buildScene(i)
            # prepare the transparent foreground images (choose the text s, font and font_size, then render)
            self.buildTextWithScene(i)
            # pick the background coordinates to paste the transparent images at
            self.buildPasteArea(i)
            # paste the transparent images onto the background and save the result
            self.dumpTextImg(i)
            if i % 5000 == 0:
                LOG.logI('{}/{}'.format(i, self.total_num))
        LOG.logI("Total time: {}".format(time.time() - start_time))
Example #16
    def doLoss(self):
        if not self.config.train_loader.is_last_loader:
            return

        loss1 = self.config.criterion(self.config.output[0], self.config.target)
        loss2 = self.config.criterion(self.config.output[1], self.config.target)
        loss3 = self.config.criterion(self.config.teacher.output[0], self.config.target)
        loss4 = self.config.criterion(self.config.teacher.output[1], self.config.target)
        self.config.loss = loss1 + loss2
        self.config.teacher.loss = loss3 + loss4
        LOG.logI('loss1: {}, loss2: {}, loss3: {}, loss4: {}'.format(loss1, loss2, loss3, loss4))
Example #17
    def _readFile(self):
        self.global_hist = np.zeros(self.classes, dtype=np.float32)

        no_files = 0
        self.min_val_al = 0
        self.max_val_al = 0

        for image_path, label_path in self.samples:
            if no_files % 100 == 0:
                LOG.logI('accumulateMeanStd: {}'.format(no_files))
            self._accumulateMeanStd(image_path, label_path)
            no_files += 1

        self.mean /= no_files
        self.std /= no_files

        self._compute_class_weights(self.global_hist)
Example #18
    def postEpoch(self):
        if not self.config.train_loader.is_last_loader:
            return
        average_epoch_loss = sum(self.config.epoch_loss) / len(self.config.epoch_loss)

        if self.config.phase == 'TRAIN':
            overall_acc, per_class_acc, per_class_iu, mIOU = self.iou_eval_train.getMetric()
        else:
            overall_acc, per_class_acc, per_class_iu, mIOU = self.iou_eval_val.getMetric()
            self.config.acc = mIOU
        LOG.logI("Epoch: {} details".format(self.config.epoch))
        LOG.logI("\nEpoch No.: %d\t%s Loss = %.4f\t %s mIOU = %.4f\t" %
                 (self.config.epoch, self.config.phase, average_epoch_loss,
                  self.config.phase, mIOU))
Example #19
    def _filterImageForHuman(self):      
        self.image_list = []
        self.label_list = []

        for i, (img_path, label_path) in enumerate(zip(self.image_path_list, self.label_path_list)):
            if i % 100 == 0 and i != 0:
                LOG.logI('Filter {} images...'.format(i))

            img = cv2.imread(img_path)
            label = cv2.imread(label_path)

            person_bbox = self._personMask2Box(cv2.cvtColor(label, cv2.COLOR_BGR2GRAY))
            if person_bbox is None:
                continue
            if person_bbox[2] - person_bbox[0] > 0.5 * img.shape[1]:
                continue
            self.bbox_list.append(person_bbox)
            self.image_list.append(img)
            self.label_list.append(label)
Example #20
    def _getImageAndLabelPathList(self):
        files = os.listdir(self.config.input_image_dir)
        for f in files:
            label_name = f.replace('jpg', 'png')
            image_path = os.path.join(self.config.input_image_dir, f)
            label_path = os.path.join(self.config.input_label_dir, label_name)
            if os.path.isdir(image_path):
                LOG.logE('{} is a dir; {} contains a subdirectory, you must get rid of it.'.format(image_path, self.config.input_image_dir), exit=True)
            if os.path.isdir(label_path):
                LOG.logE('{} is a dir; {} contains a subdirectory, you must get rid of it.'.format(label_path, self.config.input_label_dir), exit=True)
            if self.is_clothes_task:
                mask_path = os.path.join(self.config.portrait_mask_output_dir, label_name)
                if os.path.isdir(mask_path):
                    LOG.logE('{} is a dir; {} contains a subdirectory, you must get rid of it.'.format(mask_path, self.config.portrait_mask_output_dir), exit=True)
                self.human_mask_path_list.append(mask_path)
            self.image_path_list.append(image_path)
            self.label_path_list.append(label_path)
        LOG.logI('Length of image_path_list is {} ...'.format(len(self.image_path_list)))
Example #21
    def forward(self, imgs):
        img, label, _, fn = imgs
        hat_y = np.where(label == 1)[0]
        up_y = np.where(label == 2)[0]
        down_y = np.where(label == 3)[0]
        hat_num = len(hat_y)
        up_num = len(up_y)
        down_num = len(down_y)
        if (hat_num > up_num and up_num != 0) or (hat_num > down_num and down_num != 0):
            LOG.logI(
                "Image {} has risk on rule {} with hat_num = {}, up_num = {} and down_num = {}"
                .format(fn, self.name(), hat_num, up_num, down_num))
            self.write(img, label, fn)

        if up_num != 0 and down_num != 0 and np.max(up_y) > np.max(down_y):
            LOG.logI(
                "Image {} has risk on rule {} with up_y = {} and down_y = {}".
                format(fn, self.name(), np.max(up_y), np.max(down_y)))
            self.write(img, label, fn)

        if hat_num != 0 and up_num != 0 and np.max(hat_y) > np.max(up_y):
            LOG.logI(
                "Image {} has risk on rule {} with hat_y = {} and up_y = {}".
                format(fn, self.name(), np.max(hat_y), np.max(up_y)))
            self.write(img, label, fn)
        return imgs
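Note that np.where(mask)[0] returns only the row (y) indices of matching pixels, which is what the *_y names and the np.max comparisons above rely on. A tiny check:

    import numpy as np

    label = np.array([[0, 2], [3, 3]])
    print(np.where(label == 3)[0])  # [1 1]: row (y) indices of class-3 pixels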
Example #22
    def pickFgColor(self, i, s):
        left = self.font_offset[0]
        up = self.font_offset[1]
        right = self.font_offset[0] + len(s) * self.max_font
        below = self.font_offset[1] + self.max_font
        dominant = Haishoku.getDominant(
            self.pil_img.crop((left, up, right, below)))

        k = i % len(self.fg_color)
        fg_lst = self.fg_color[k:] + self.fg_color[:k]
        max_dis = -1
        max_dis_fg = fg_lst[0]
        for fg in fg_lst:
            distance = abs(dominant[0] - fg[0]) + abs(dominant[1] - fg[1]) + abs(dominant[2] - fg[2])
            if distance > self.distance:
                return fg
            if distance > max_dis:
                max_dis_fg = fg
                max_dis = distance
        LOG.logI("No fg_color is suitable for image {} !!!".format(i))
        return max_dis_fg
Example #23
    def postIter(self):
        preds = self.config.output[self.config.output[..., 4] > self.config.conf_thres]  # filter with objectness confidence
        if not preds.size(0):
            LOG.logW("file: {0} >>> non - detect".format(self.config.filepath))
            self.config.non_det_num += 1
            return
        # Compute conf
        preds[:, 5:] *= preds[:, 4:5]  # conf = obj_conf * cls_conf
        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        preds[:, 0] = preds[:, 0] - preds[:, 2] / 2.0
        preds[:, 1] = preds[:, 1] - preds[:, 3] / 2.0
        preds[:, 2] += preds[:, 0]
        preds[:, 3] += preds[:, 1]
        # Detections matrix nx6 (xyxy, conf, cls)
        conf, idx = preds[:, 5:].max(dim=1, keepdim=True)
        preds = torch.cat((preds[:, :4], conf, idx), dim=1)[conf.view(-1) > self.config.conf_thres]  # filter with class confidence
        if not preds.size(0):
            LOG.logW("file: {0} >>> non - detect".format(self.config.filepath))
            self.config.non_det_num += 1
            return
        # nms on per class
        max_side = 4096
        class_offset = preds[:, 5:6] * max_side
        boxes, scores = preds[:, :4] + class_offset, preds[:, 4]
        idxs = ops.nms(boxes, scores, self.config.iou_thres)
        preds = preds[idxs]
        if not preds.size(0):
            LOG.logW("file: {0} >>> non - detect".format(self.config.filepath))
            self.config.non_det_num += 1
            return
        # coords scale
        classes = preds[:, -1].long().tolist()
        scores = [i.item() for i in preds[:, -2]]
        coords = preds[:, :4]
        coords -= self.config.pad
        coords /= self.config.ratio
        coords = coords.long().tolist()

        LOG.logI(
            "file: {0} >>> class: {1} >>> score: {2} >>> coord: {3}".format(
                self.config.filepath, classes, scores, coords))
        self.plotRectangle(self.config.filepath, (classes, scores, coords),
                           self.config.show_output_dir)
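The max_side trick above is the usual way to run class-aware NMS in a single call: shifting every box by a class-dependent offset guarantees boxes of different classes never overlap, so NMS only suppresses within a class. A minimal standalone sketch with toy boxes:

    import torch
    from torchvision import ops

    boxes = torch.tensor([[0., 0., 10., 10.], [1., 1., 11., 11.]])  # heavily overlapping
    scores = torch.tensor([0.9, 0.8])
    classes = torch.tensor([0., 1.])  # but from different classes

    offset = classes[:, None] * 4096  # same idea as class_offset above
    keep = ops.nms(boxes + offset, scores, iou_threshold=0.5)
    print(keep)  # tensor([0, 1]): both boxes survive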
Example #24
    def process(self):
        # here is the way to get just the 2048-d feature before the fc layer
        self.net.fc = nn.Sequential()
        # iterate all images
        for filename in self.dataset():
            # if the image has 4 channels, convert to 3
            self.sample = Image.open(filename).convert('RGB')
            # if use cv2 to read image
            #self.sample = AugBase.cv2pillow(self.sample)
            self.sample = self.conf.test.transform_op(
                self.sample).unsqueeze(0).to(self.conf.device)
            # forward
            self.output = self.net(self.sample)
            LOG.logI("feature shape: {}".format(self.output.shape))
            self.addEmb2DB(self.output)
            LOG.logI("feature db shape: {}".format(self.xb.shape))

        emb_file = "resnet50_gemfield_test.emb"
        LOG.logI("prepare to save db to {}".format(emb_file))
        self.saveDB(emb_file)
        LOG.logI("db file {} saved successfully.".format(emb_file))
Example #25
    def test(self):
        self.config.non_det_num = 0
        super(Yolov5Test, self).test()
        LOG.logW("\nnon-detect: {} ".format(self.config.non_det_num) + ' * ' * 70)
Example #26
    def postEpoch(self):
        if self.config.is_train:
            return

        self.accuracy = self.score_text['Mean Acc']
        LOG.logI('Test accuracy: {:.4f}'.format(self.accuracy))
Example #27
def paste(src_img, dst_img, src_mask, portrait_mask=None):
    original_src_bbox = src_mask_to_box(
        cv2.cvtColor(src_mask, cv2.COLOR_BGR2GRAY))
    portrait_bbox = None
    # use the portrait mask to help, if one is provided
    if portrait_mask is None:
        src_bbox = original_src_bbox
        final_mask = src_mask
    else:
        portrait_bbox = portrait_mask_to_box(
            cv2.cvtColor(portrait_mask, cv2.COLOR_BGR2GRAY))
        if portrait_bbox is None:
            return None, None
        src_bbox = [
            min(original_src_bbox[0], portrait_bbox[0]),
            min(original_src_bbox[1], portrait_bbox[1]),
            max(original_src_bbox[2], portrait_bbox[2]),
            max(original_src_bbox[3], portrait_bbox[3])
        ]
        final_mask = np.where(portrait_mask != 0, portrait_mask, src_mask)

    src_bbox_h, src_bbox_w = src_bbox[3] - src_bbox[1], src_bbox[2] - src_bbox[0]
    h, w, _ = dst_img.shape
    dst_mask = np.zeros((h, w, 3), dtype=src_mask.dtype)  # keep the mask dtype consistent with the source mask
    x = random.randint(1, w // 2)
    y = random.randint(1, h // 2)
    # randomly scale the human, clamped to at least 0.5
    scale = max(random.random() * 2, 0.5)
    scale_src_bbox_w = int(src_bbox_w * scale)
    scale_src_bbox_h = int(src_bbox_h * scale)
    # clamp the scaled bbox so the paste stays inside dst_img
    scale_src_bbox_w = min(scale_src_bbox_w, w - 1 - x)
    scale_src_bbox_h = min(scale_src_bbox_h, h - 1 - y)

    if scale_src_bbox_w * scale_src_bbox_h < h * w / 100:
        LOG.logI('Target person is too small for dst image: {} vs {}'.format(
            scale_src_bbox_w * scale_src_bbox_h, h * w))
        return None, None

    src_img_crop = src_img[src_bbox[1]:src_bbox[1] + src_bbox_h,
                           src_bbox[0]:src_bbox[0] + src_bbox_w, :]
    src_mask_crop = src_mask[src_bbox[1]:src_bbox[1] + src_bbox_h,
                             src_bbox[0]:src_bbox[0] + src_bbox_w, :]
    final_mask_crop = final_mask[src_bbox[1]:src_bbox[1] + src_bbox_h,
                                 src_bbox[0]:src_bbox[0] + src_bbox_w, :]
    src_img_crop = cv2.resize(src_img_crop,
                              (scale_src_bbox_w, scale_src_bbox_h),
                              interpolation=cv2.INTER_NEAREST)
    src_mask_crop = cv2.resize(src_mask_crop,
                               (scale_src_bbox_w, scale_src_bbox_h),
                               interpolation=cv2.INTER_NEAREST)
    final_mask_crop = cv2.resize(final_mask_crop,
                                 (scale_src_bbox_w, scale_src_bbox_h),
                                 interpolation=cv2.INTER_NEAREST)

    #paste the src img to dst img
    dst_img_crop = dst_img[y:y + scale_src_bbox_h, x:x + scale_src_bbox_w, :]
    result_img = np.where(final_mask_crop != 0, src_img_crop, dst_img_crop)
    dst_img[y:y + scale_src_bbox_h, x:x + scale_src_bbox_w, :] = result_img
    dst_mask[y:y + scale_src_bbox_h, x:x + scale_src_bbox_w, :] = src_mask_crop
    return dst_img, dst_mask
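A minimal usage sketch for paste (hypothetical file names; assumes the masks are 3-channel images, as the cv2.cvtColor calls above expect):

    import cv2

    src_img = cv2.imread('person.jpg')        # hypothetical paths
    src_mask = cv2.imread('person_mask.png')
    dst_img = cv2.imread('background.jpg')

    new_img, new_mask = paste(src_img, dst_img, src_mask)
    if new_img is not None:
        cv2.imwrite('synth.jpg', new_img)
        cv2.imwrite('synth_mask.png', new_mask)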
Example #28
                         os.path.splitext(img_name)[0] + '.png'))[:, :, 0]
        overlayed = getOverlayFromSegMask(img, mask)
        cv2.imwrite(os.path.join(synthesis_output_show_dir, img_name),
                    overlayed)


if __name__ == "__main__":
    from config import config as deepvac_config

    #original image dir
    if len(sys.argv) >= 2:
        deepvac_config.original_image_label_dir = sys.argv[1]

    if len(sys.argv) >= 3:
        deepvac_config.synthesis_output_dir = sys.argv[2]

    #step1, gen portrait mask
    if deepvac_config.use_portrait_mask:
        LOG.logI('STEP1: gen portrait mask start')
        genPortraitMask(deepvac_config)
    else:
        LOG.logI('omit STEP1: gen portrait mask start.')

    #step2, synthesis
    LOG.logI('STEP2: synthesis start.')
    synthesis(deepvac_config)

    # step3, show mask
    LOG.logI('STEP3: show result start.')
    showMask(deepvac_config)
Example #29
import glob
import os
import sys

import cv2
from tqdm import tqdm
import torch

from deepvac import LOG
from deepvac.datasets import CocoCVSegDataset

if __name__ == "__main__":
    from config import config
    json_path_glob = sys.argv[1] + "/*.json"

    json_paths = glob.glob(json_path_glob)
    sample_path_prefixs = [os.path.splitext(jp)[0] for jp in json_paths]
    LOG.logI("All json_paths: {} \n All sample_path_prefixs: {}".format(
        json_paths, sample_path_prefixs))

    for sample_path_prefix, json_path in zip(sample_path_prefixs, json_paths):
        if not os.path.exists(sample_path_prefix):
            LOG.logE("Path {} not exists !".format(sample_path_prefix),
                     exit=True)
        config.test_dataset = CocoCVSegDataset(config, sample_path_prefix,
                                               json_path, config.cat2idx)
        config.test_loader = torch.utils.data.DataLoader(config.test_dataset,
                                                         batch_size=1,
                                                         shuffle=False,
                                                         num_workers=0,
                                                         pin_memory=False)

        for idx, (img, label, _,
                  file_path) in tqdm(enumerate(config.test_loader),
                                     total=config.test_loader.__len__()):
            pass
Example #30
import sys

import torch
from tqdm import tqdm

from deepvac import LOG
from deepvac.datasets import CocoCVSegDataset, FileLineCvSegAuditDataset

if __name__ == "__main__":
    from config import config
    if len(sys.argv) >= 2:
        config.file_path = sys.argv[1]

    if len(sys.argv) >= 3:
        config.sample_path_prefix = sys.argv[2]

    config.test_dataset = FileLineCvSegAuditDataset(
        config,
        fileline_path=config.file_path,
        delimiter=config.delimiter,
        sample_path_prefix=config.sample_path_prefix)
    config.test_loader = torch.utils.data.DataLoader(config.test_dataset,
                                                     batch_size=1,
                                                     shuffle=False,
                                                     num_workers=0,
                                                     pin_memory=False)

    for idx, (img, label, _,
              file_path) in tqdm(enumerate(config.test_loader),
                                 total=config.test_loader.__len__()):
        pass

    LOG.logI("fileline file {} analyze done!".format(config.file_path))