Example #1
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes, mosaic=False):
    '''data generator for fit_generator'''
    n = len(annotation_lines)
    i = 0
    flag = True
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i==0:
                np.random.shuffle(annotation_lines)
            if mosaic:
                if flag and (i+4) < n:
                    image, box = get_random_data_with_Mosaic(annotation_lines[i:i+4], input_shape)
                    i = (i+4) % n
                else:
                    image, box = get_random_data(annotation_lines[i], input_shape)
                    i = (i+1) % n
                flag = not flag  # alternate between mosaic and single-image augmentation
            else:
                image, box = get_random_data(annotation_lines[i], input_shape)
                i = (i+1) % n
            image_data.append(image)
            box_data.append(box)
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        yield [image_data, *y_true], np.zeros(batch_size)
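A minimal usage sketch (not part of the snippet above): such a generator is typically handed to Keras fit_generator with a dummy target, because the YOLO loss is computed inside the model graph. The names model, train_lines, val_lines, anchors and num_classes below are assumed to already exist.

# Hedged usage sketch: wiring the generator into fit_generator.
batch_size = 8
model.fit_generator(
    data_generator(train_lines, batch_size, (416, 416), anchors, num_classes, mosaic=True),
    steps_per_epoch=max(1, len(train_lines) // batch_size),
    validation_data=data_generator(val_lines, batch_size, (416, 416), anchors, num_classes),
    validation_steps=max(1, len(val_lines) // batch_size),
    epochs=50)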
Example #2
def generate_arrays_from_file(lines, batch_size, input_shape, num_classes, random=True):
    n = len(lines)
    i = 0
    while 1:
        X_train = []
        Y_train = []
        for _ in range(batch_size):
            if i==0:
                np.random.shuffle(lines)
            annotation_path = lines[i].split(';')[1].split()[0]
            img = Image.open(annotation_path)
            
            if random:
                img = get_random_data(img, [input_shape[0],input_shape[1]])
            else:
                img = letterbox_image(img, [input_shape[0],input_shape[1]])

            img = np.array(img).astype(np.float32)
            img = _preprocess_input(img)

            X_train.append(img)
            Y_train.append(int(lines[i].split(';')[0]))
            i = (i+1) % n

        X_train = np.array(X_train)
        X_train = X_train.reshape([-1,input_shape[0],input_shape[1],input_shape[2]])
        Y_train = np_utils.to_categorical(np.array(Y_train),num_classes=num_classes)   
        yield (X_train, Y_train)
Example #3
def generate_arrays_from_file(lines, batch_size, train):
    # total number of samples
    n = len(lines)
    i = 0
    while 1:
        X_train = []
        Y_train = []
        # collect one batch of data
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(lines)
            name = lines[i].split(';')[0]
            # read the image from disk
            img = Image.open(r".\data\image\train" + '/' + name)
            if train:
                img = np.array(get_random_data(img, [HEIGHT, WIDTH]),
                               dtype=np.float64)
            else:
                img = np.array(letterbox_image(img, [HEIGHT, WIDTH]),
                               dtype=np.float64)
            X_train.append(img)
            Y_train.append(lines[i].split(';')[1])
            # start over after a full pass through the data
            i = (i + 1) % n
        # preprocess the images
        X_train = preprocess_input(
            np.array(X_train).reshape(-1, HEIGHT, WIDTH, 3))

        Y_train = np_utils.to_categorical(np.array(Y_train),
                                          num_classes=NUM_CLASSES)
        yield (X_train, Y_train)
Example #4
def data_generator(annotation_lines,
                   batch_size,
                   input_shape,
                   anchors,
                   num_classes,
                   random=True):
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(annotation_lines)
            #---------------------------------------------------#
            #   apply random data augmentation during training
            #   no random augmentation during validation
            #---------------------------------------------------#
            image, box = get_random_data(annotation_lines[i],
                                         input_shape,
                                         random=random)
            image_data.append(image)
            box_data.append(box)
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors,
                                       num_classes)
        yield [image_data, *y_true], np.zeros(batch_size)
Example #5
def data_generator_rand(annotation_lines, batch_size, input_shape, anchors,
                        num_classes):
    '''data generator for fit_generator'''
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i],
                                         input_shape,
                                         random=True)
            image_data.append(image)
            box_data.append(box)
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors,
                                       num_classes)
        l_true = [
            np.zeros(shape=(batch_size,
                            416 // {0: 32, 1: 16, 2: 8}[l],
                            416 // {0: 32, 1: 16, 2: 8}[l],
                            9 // 3, 20 + 5)) for l in range(3)
        ]

        yield [image_data, *y_true, *l_true], np.zeros(batch_size)
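A quick check of the shape arithmetic used for l_true above (assuming a 416x416 input, 9 anchors spread over 3 levels and 20 classes, as in this example):

for l in range(3):
    grid = 416 // {0: 32, 1: 16, 2: 8}[l]   # 13, 26, 52
    print((grid, grid, 9 // 3, 20 + 5))     # (13, 13, 3, 25), (26, 26, 3, 25), (52, 52, 3, 25)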
Example #6
def data_generator(annotation_lines, batch_size, input_shape, anchors,
                   num_classes):
    '''data generator for fit_generator'''
    n = len(annotation_lines)
    i = 0
    while True:
        # with Pool(batch_size) as p:
        #     res = np.array(p.map(get_random_data, annotation_lines[i:i+batch_size]))
        #     image_data = np.array([r for r in res[:,0]])
        #     box_data = np.array([r for r in res[:,1]])
        # p.close()
        # p.join()
        #         res = np.array([get_random_data(l) for l in lines[:8]])
        #         box_data = np.array([r for r in res[:,1]])
        #         image_data = np.array([r for r in res[:,0]])
        #print(x.shape, y.shape)
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i],
                                         input_shape,
                                         random=True)
            image_data.append(image)
            box_data.append(box)
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors,
                                       num_classes)
        yield [image_data, *y_true], np.zeros(batch_size)
Example #7
def generate_arrays_from_file(lines, batch_size, train):
    # total number of samples
    n = len(lines)
    i = 0
    while 1:
        X_train = []  #300x300x3
        Y_train = []  #label
        # collect one batch of data
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(lines)
            name = lines[i].split(';')[0]
            label_name = (lines[i].split(';')[1]).strip('\n')
            file_name = str(Name.get(int(label_name)))
            # read the image from disk
            img = Image.open(r".\data\face" + "\\" + file_name + "\\" + name)
            if train:
                img = np.array(get_random_data(img, [image_h, image_w]),
                               dtype=np.float64)
            else:
                img = np.array(letterbox_image(img, [image_h, image_w]),
                               dtype=np.float64)
            X_train.append(img)
            Y_train.append(label_name)
            # start over after a full pass through the data
            i = (i + 1) % n
        # preprocess the images
        X_train = preprocess_input(
            np.array(X_train).reshape(-1, image_h, image_w, 3))
        # one-hot encode the labels
        Y_train = np_utils.to_categorical(np.array(Y_train),
                                          num_classes=num_class)
        yield (X_train, Y_train)
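For reference, a hedged sketch of the assumed Name lookup used above: a hypothetical index-to-folder dictionary, so a line like "0001.jpg;3" resolves to .\data\face\<folder>\0001.jpg. The folder names below are purely illustrative.

Name = {0: 'person_a', 1: 'person_b', 2: 'person_c', 3: 'person_d'}  # hypothetical folder names
line = "0001.jpg;3\n"
name = line.split(';')[0]
label_name = line.split(';')[1].strip('\n')
file_name = str(Name.get(int(label_name)))
print(file_name, name)  # person_d 0001.jpg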
Example #8
def data_generator(annotation_lines, batch_size, input_shape, anchors, num_classes):
    '''data generator for fit_generator
    anchors = {ndarray: (9, 2)} preset anchor sizes
    annotation_lines = {list: 3} one line per xml file; each line holds the ground-truth box coordinates and the object class
    batch_size = {int} 1
    input_shape = {tuple: 2} (416, 416)
    num_classes = {int} 20
    Converts images and ground-truth boxes into batched arrays and builds the y_true labels from the ground-truth boxes and anchors.
    '''
    n = len(annotation_lines)  # n = 3
    i = 0                      # counter
    while True:
        # 1. collect batch_size images and their ground-truth box labels
        image_data = [] # list of images, each [416,416,3]
        box_data = []   # list of boxes, each [1,num_gt,5]
        for b in range(batch_size):
            if i==0:
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i], input_shape, random=True)  # box.shape = (20,5)
            image_data.append(image)      # image.shape = (416,416,3)
            box_data.append(box)
            i = (i+1) % n
        # 2. convert the images and ground-truth boxes to arrays
        image_data = np.array(image_data) # image_data.shape = [1,416,416,3]; stacking the list adds a batch dimension
        box_data = np.array(box_data)  # box.shape = [20,5], box_data.shape = [1,20,5]
        # 3. build the y_true labels from the ground-truth boxes and anchor sizes
        y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes) # box_data (1,20,5), input_shape (416,416), anchors (9,2), num_classes=20
        yield [image_data, *y_true], np.zeros(batch_size) # y_true is a list of 3 arrays shaped [m,h,w,k,25]
Example #9
def data_generator(annotation_lines, batch_size, input_shape, anchors,
                   num_classes):  # feeds data to the model during training via fit_generator
    '''data generator for fit_generator'''
    n = len(annotation_lines)  # number of samples
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):  # draw batch_size samples
            if i == 0:  # i == 0 means every sample has been used; start a new pass
                np.random.shuffle(annotation_lines)  # shuffle the sample order
            image, box = get_random_data(
                annotation_lines[i], input_shape,
                random=True)  # load and augment one sample, image -> (416,416,3), box -> (20,5)
            image_data.append(image)  # append the processed image
            box_data.append(box)  # append the processed boxes
            i = (i + 1) % n  # wrap-around counter: when i + 1 == n, (i + 1) % n resets to 0
        image_data = np.array(image_data)  # convert image_data to a numpy array
        box_data = np.array(box_data)  # convert box_data to a numpy array
        y_true = preprocess_true_boxes(
            box_data, input_shape, anchors,
            num_classes)  # build y_true from box_data and anchors, converting the ground-truth boxes to YOLO targets
        yield [image_data, *y_true], np.zeros(
            batch_size
        )  # [image_data, *y_true] is the real input; np.zeros(batch_size) is a dummy target that is not used
Example #10
def data_generator(annotation_lines, batch_size, input_shape, anchors,
                   num_classes, teacher):
    '''data generator for fit_generator'''
    n = len(annotation_lines)
    i = 0
    while True:
        #print("data"+str(i) )
        image_data = []
        box_data = []
        for b in range(batch_size):
            #if i==0:
            #    np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i],
                                         input_shape,
                                         random=False)
            image_data.append(image)
            box_data.append(box)
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors,
                                       num_classes)
        m_true = teacher.predict(image_data)
        #l_true =  [ np.zeros( shape=( batch_size ,416//{0:32, 1:16, 2:8}[l], 416//{0:32, 1:16, 2:8}[l], 9//3, 20+5) ) for l in range(3) ]

        if len(m_true) == 3:
            anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]
        elif len(m_true) == 2:
            anchor_mask = [[3, 4, 5], [0, 1, 2]]
        else:
            anchor_mask = [[0, 1, 2]]

        for l in range(len(m_true)):

            pred_xy, pred_wh, pred_conf, pred_class = numpy_yolo_head(
                m_true[l], anchors[anchor_mask[l]], input_shape)
            pred_detect = np.concatenate(
                [pred_xy, pred_wh, pred_conf, pred_class], axis=-1)

            #print("inside")
            box = np.where(y_true[l][..., 4] > 0.5)
            box = np.transpose(box)
            for k in range(len(box)):
                m_true[l][tuple(box[k])] = pred_detect[tuple(box[k])]

        yield image_data, y_true, m_true
Example #11
def data_generator(annotation_lines,
                   batch_size,
                   input_shape,
                   anchors,
                   num_classes):
    """data generator for fit_generator
    the assignment strategy:
        one gt ---> one anchor
        1.find which anchor(9 anchors) gt belongs to
        2.find which grid gt belongs to

    Args:
        annotation_lines: a list [anno1, anno2, ...]
        batch_size:       batch size
        input_shape:      resolution [h, w]
        anchors:          anchor boxes
        num_classes:      the number of classes
        max_boxes:        box_data: [max_boxes, 5]
                          increase max_boxes when an image contains many ground-truth boxes.

    Returns:
        batch data:       [image_data, *y_true], np.zeros(batch_size)

    """
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i == 0:
                # shuffle dataset at begin of epoch
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i], input_shape)
            image_data.append(image)
            box_data.append(box)
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        # get true_boxes
        # y_true = preprocess_true_boxes(box_data, input_shape, anchors, num_classes)
        y_true = preprocess_true_boxes_iou_thres(box_data, input_shape, anchors, num_classes,
                                                 iou_threshold=CONFIG.TRAIN.IOU_THRESHOLD)
        # use yield to get generator
        yield [image_data, *y_true], np.zeros(batch_size)
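A hedged sketch of the assignment strategy described in the docstring (not the repo's preprocess_true_boxes_iou_thres itself): for one ground-truth box, pick the anchor with the highest width/height IoU and the grid cell that contains the box center. All names below are illustrative.

import numpy as np

def assign_gt(box, anchors, input_shape, grid_shape):
    """box: (x_min, y_min, x_max, y_max) in pixels; anchors: (9, 2) in pixels."""
    wh = np.array([box[2] - box[0], box[3] - box[1]], dtype=np.float32)
    inter = np.minimum(wh, anchors).prod(axis=1)
    iou = inter / (wh.prod() + anchors.prod(axis=1) - inter)
    best_anchor = int(np.argmax(iou))                             # 1. which of the 9 anchors
    cx = (box[0] + box[2]) / 2 / input_shape[1] * grid_shape[1]
    cy = (box[1] + box[3]) / 2 / input_shape[0] * grid_shape[0]
    return best_anchor, int(cy), int(cx)                          # 2. which grid cell (row, col)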
Example #12
def data_generator(annotation_lines, batch_size, input_shape, anchors,
                   num_classes):
    '''data generator for fit_generator

    Parameters
    ----------
        annotation_lines: list of str; each line yields boxes of shape (T, 5) with
        absolute x_min, y_min, x_max, y_max, class_id relative to input_shape.
        batch_size: integer
        input_shape: tuple or array-like, (h, w), multiples of 32
        anchors: ndarray, shape=(N, 2), N rows of (w, h); N is the number of anchor boxes, default N is 9
        num_classes: integer
    Returns
    -------
        [image_data, *y_true]: list of arrays,
            image_data: array, batch of images resized to input_shape
            *y_true: list of arrays, shaped like yolo_outputs; xywh are relative values
        np.zeros(batch_size): array, dummy target
    '''
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(annotation_lines)
            image, box = get_random_data(annotation_lines[i],
                                         input_shape,
                                         random=True)
            image_data.append(image)
            box_data.append(box)
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = preprocess_true_boxes(box_data, input_shape, anchors,
                                       num_classes)
        # each iteration of the generator returns the value after yield
        yield [image_data, *y_true], np.zeros(batch_size)
Example #13
    def get_batch(self, datas):
        image_data = []
        box_data = []
        word_data = []
        seg_data = []
        for data in datas:
            image, box, word_vec, seg_map = get_random_data(
                data,
                self.input_shape,
                self.embed,
                self.config,
                train_mode=self.train_mode)
            word_data.append(word_vec)
            image_data.append(image)
            box_data.append(box)
            seg_data.append(seg_map)
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        word_data = np.array(word_data)
        seg_data = np.array(seg_data)
        det_data = preprocess_true_boxes(box_data, self.input_shape,
                                         self.anchors)
        return image_data, word_data, det_data, seg_data
Example #14
def data_generator(annotation_lines, batch_size, input_shape, num_classes,
                   maxDis):
    '''data generator for fit_generator'''
    n = len(annotation_lines)
    i = 0
    while True:
        image_data = []
        box_data = []
        for b in range(batch_size):
            # shuffle the data at the start of each pass
            if i == 0:
                np.random.shuffle(annotation_lines)
            # load one image and the boxes it contains
            image, box = get_random_data(annotation_lines[i],
                                         input_shape,
                                         max_dis=maxDis,
                                         max_boxes=5)
            image_data.append(image)
            box_data.append(box)
            i = (i + 1) % n
        image_data = np.array(image_data)
        box_data = np.array(box_data)
        y_true = prepare_ytrue(box_data, input_shape, num_classes)
        yield [image_data, y_true], np.zeros(batch_size)
Example #15
    def evaluate(self, tag='image', is_save_images=False):
        self.boxes, self.scores, self.eval_inputs = yolo_eval_v2(
            self.model.output_shape[0],
            self.anchors,
            self.input_image_shape,
            score_threshold=0.,
            iou_threshold=0.)
        # Add the class predict temp dict
        # pred_tmp = []
        groud_truth = []  # wait
        seg_prec_all = dict()
        id = 0
        seg_iou_all = 0.
        detect_prec_all = 0.
        fd_ts_count = 0.
        td_fs_count = 0.
        fd_fs_count = 0.
        # Predict!!!
        test_batch_size = self.batch_size
        for start in progressbar.progressbar(range(0, len(self.val_data),
                                                   test_batch_size),
                                             prefix='evaluation: '):
            end = start + test_batch_size
            batch_data = self.val_data[start:end]
            images = []
            images_org = []
            files_id = []
            word_vecs = []
            sentences = []
            gt_boxes = []
            gt_segs = []

            for data in batch_data:
                image_data, box, word_vec, image, sentence, seg_map = get_random_data(
                    data,
                    self.input_shape,
                    self.word_embed,
                    self.config,
                    train_mode=False)  # box is [1,5]
                sentences.extend(sentence)
                word_vecs.extend(word_vec)
                # evaluate each sentence corresponding to the same image
                for ___ in range(len(sentence)):
                    # groud_truth.append(box[0, 0:4])
                    gt_boxes.append(box[0, 0:4])
                    images.append(image_data)
                    images_org.append(image)
                    files_id.append(id)
                    gt_segs.append(seg_map)
                    id += 1

            images = np.array(images)
            word_vecs = np.array(word_vecs)
            out_bboxes_1, pred_segs, _ = self.model.predict_on_batch(
                [images, word_vecs])
            pred_segs = self.sigmoid_(pred_segs)  # logit to sigmoid
            for i, out in enumerate(out_bboxes_1):
                # Predict
                out_boxes, out_scores = self.sess.run(  # out_boxes is [1,4]  out_scores is [1,1]
                    [self.boxes, self.scores],
                    feed_dict={
                        # self.eval_inputs: out
                        self.eval_inputs[0]: np.expand_dims(out, 0),
                        self.input_image_shape: np.array(self.input_shape),
                        K.learning_phase(): 0
                    })

                ih = gt_segs[i].shape[0]
                iw = gt_segs[i].shape[1]
                w, h = self.input_shape
                scale = min(w / iw, h / ih)
                nw = int(iw * scale)
                nh = int(ih * scale)
                dx = (w - nw) // 2
                dy = (h - nh) // 2

                # up sample
                pred_seg = cv2.resize(pred_segs[i], self.input_shape)
                #nls
                if self.use_nls:
                    pred_seg = self.nls(
                        pred_seg,
                        self.box_value_fix(out_boxes[0], self.input_shape),
                        out_scores[0])
                #scale to the size of ground-truth
                pred_seg = pred_seg[dy:nh + dy, dx:nw + dx, ...]
                pred_seg = cv2.resize(
                    pred_seg, (gt_segs[i].shape[1], gt_segs[i].shape[0]))
                pred_seg = np.reshape(
                    pred_seg, [pred_seg.shape[0], pred_seg.shape[1], 1])
                # segmentation eval
                seg_iou, seg_prec = self.cal_seg_iou(gt_segs[i], pred_seg,
                                                     self.seg_min_overlap)
                seg_iou_all += seg_iou
                for item in seg_prec:
                    if seg_prec_all.get(item):
                        seg_prec_all[item] += seg_prec[item]
                    else:
                        seg_prec_all[item] = seg_prec[item]
                # detection eval
                pred_box = self.box_value_fix(out_boxes[0], self.input_shape)
                score = out_scores[0]
                detect_prec = self.cal_detect_iou(pred_box, gt_boxes[i],
                                                  self.det_acc_thresh)
                detect_prec_all += detect_prec

                # calculate the IE metric
                if detect_prec - seg_prec[0.5] != 0.:
                    if detect_prec > seg_prec[0.5]:
                        td_fs_count += 1.
                    else:
                        fd_ts_count += 1.
                elif detect_prec + seg_prec[0.5] == 0.:
                    fd_fs_count += 1.

                #visualization
                if is_save_images and (files_id[i]
                                       in self.eval_save_images_id):
                    left, top, right, bottom = pred_box
                    # Draw image
                    gt_left, gt_top, gt_right, gt_bottom = (
                        gt_boxes[i]).astype('int32')
                    image = np.array(images[i] * 255.).astype(np.uint8)
                    # segement image for saving
                    seg_image = np.array(
                        cv2.resize(
                            np.array(
                                pred_segs[i] > self.seg_min_overlap).astype(
                                    np.float32), self.input_shape)).astype(
                                        np.uint8) * 255
                    label = '%.2f' % score
                    color = self.colors[0]
                    cv2.rectangle(image, (left, top), (right, bottom), color,
                                  2)
                    cv2.rectangle(image, (gt_left, gt_top),
                                  (gt_right, gt_bottom), self.colors[1], 2)

                    font_size = 0.8

                    cv2.putText(image, label, (left, max(top - 3, 0)),
                                cv2.FONT_HERSHEY_SIMPLEX, font_size, color, 2)
                    cv2.putText(image, 'ground_truth',
                                (gt_left, max(gt_top - 3, 0)),
                                cv2.FONT_HERSHEY_SIMPLEX, font_size,
                                self.colors[1], 2)
                    cv2.putText(image, str(sentences[i]), (20, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, .9, self.colors[2],
                                2)
                    cv2.imwrite('./images/' + str(files_id[i]) + '.jpg', image)
                    log_images(self.tensorboard, tag + '/' + str(files_id[i]),
                               [image], 0)
                    log_images(self.tensorboard,
                               tag + '/' + str(files_id[i]) + '_seg',
                               [seg_image], 0)

        miou_seg = seg_iou_all / id
        miou_detect = detect_prec_all / id
        ie_score = (td_fs_count + fd_ts_count) / id
        for item in seg_prec_all:
            seg_prec_all[item] /= id
        return miou_detect, miou_seg, seg_prec_all, ie_score
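A quick numeric check of the letterbox un-mapping used in evaluate (assuming a 640x480 original image and a 416x416 network input):

iw, ih = 640, 480
w, h = 416, 416
scale = min(w / iw, h / ih)                 # 0.65
nw, nh = int(iw * scale), int(ih * scale)   # 416, 312
dx, dy = (w - nw) // 2, (h - nh) // 2       # 0, 52
# pred_seg[dy:nh + dy, dx:nw + dx] keeps the nh x nw (312 x 416) letterboxed region,
# which is then resized back to the ground-truth resolution (640, 480).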
Example #16
    def evaluate(self, tag='image', is_save_images=False):
        self.boxes, self.scores, self.eval_inputs = yolo_eval_v2(self.model.output_shape[0],self.anchors, self.input_image_shape,
                                                                               score_threshold=0., iou_threshold=0.)
        # Add the class predict temp dict
        # pred_tmp = []
        groud_truth = []  # wait
        seg_prec_all = dict()
        id = 0
        seg_iou_all = 0.
        detect_prec_all = 0.
        fd_ts_count = 0.
        td_fs_count = 0.
        fd_fs_count = 0.
        # Predict!!!
        test_batch_size = self.batch_size
        for start in progressbar.progressbar(range(0, len(self.val_data), test_batch_size), prefix='evaluation: '):
            end = start +test_batch_size
            batch_data = self.val_data[start:end]
            images = []
            images_org = []
            files_id = []
            word_vecs = []
            sentences = []
            gt_boxes = []
            gt_segs = []

            for data in batch_data:
                image_data, box, word_vec, image, sentence, seg_map = get_random_data(data, self.input_shape,
                                                                                      self.word_embed, self.config,
                                                                                      train_mode=False)  # box is [1,5]
                sentences.extend(sentence)
                word_vecs.extend(word_vec)
                # evaluate each sentence corresponding to the same image
                for ___ in range(len(sentence)):
                    # groud_truth.append(box[0, 0:4])
                    gt_boxes.append(box[0, 0:4])
                    images.append(image_data)
                    images_org.append(image)
                    files_id.append(id)
                    gt_segs.append(seg_map)
                    id += 1

            images = np.array(images)
            word_vecs = np.array(word_vecs)
            out_bboxes_1,_ = self.model.predict_on_batch([images, word_vecs])
            for i, out in enumerate(out_bboxes_1):
                # Predict
                out_boxes, out_scores = self.sess.run(  # out_boxes is [1,4]  out_scores is [1,1]
                    [self.boxes, self.scores],
                    feed_dict={
                        # self.eval_inputs: out
                        self.eval_inputs[0]: np.expand_dims(out, 0),
                        self.input_image_shape: np.array(self.input_shape),
                        K.learning_phase(): 0
                    })

                ih = gt_segs[i].shape[0]
                iw = gt_segs[i].shape[1]
                w, h = self.input_shape
                scale = min(w / iw, h / ih)
                nw = int(iw * scale)
                nh = int(ih * scale)
                dx = (w - nw) // 2
                dy = (h - nh) // 2

                # detection eval
                pred_box = self.box_value_fix(out_boxes[0],self.input_shape)
                score = out_scores[0]
                detect_prec = self.cal_detect_iou(pred_box, gt_boxes[i], self.det_acc_thresh)
                detect_prec_all += detect_prec

                #visualization
                if is_save_images and (files_id[i] in self.eval_save_images_id):
                    left, top, right, bottom = pred_box
                    # Draw image
                    gt_left, gt_top, gt_right, gt_bottom = (gt_boxes[i]).astype('int32')
                    image = np.array(images[i] * 255.).astype(np.uint8)

                    label = '%.2f' % score
                    color = self.colors[0]
                    cv2.rectangle(image, (left, top), (right, bottom), color, 2)
                    cv2.rectangle(image, (gt_left, gt_top), (gt_right, gt_bottom), self.colors[1], 2)

                    font_size = 0.8

                    cv2.putText(image,
                                label,
                                (left, max(top - 3, 0)),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                font_size, color, 2)
                    cv2.putText(image,
                                'ground_truth',
                                (gt_left, max(gt_top - 3, 0)),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                font_size, self.colors[1], 2)
                    cv2.putText(image,
                                str(sentences[i]),
                                (20, 20),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                .9, self.colors[2], 2)
                    cv2.imwrite('./images/'+str(files_id[i])+'.jpg',image)
                    log_images(self.tensorboard, tag + '/' + str(files_id[i]), [image], 0)


        miou_detect = detect_prec_all / id

        return miou_detect
Example #17
from utils.utils import get_random_data, prepare_ytrue
import numpy as np
import matplotlib.pyplot as plt

input_shape = (416, 416)
anno1 = "./VOCdevkit/GBG2021/JPEGImages/can(21).jpg 169,129,272,3.13,7 170,263,255,3.12,7"
anno2 = "./VOC/JPEGImages/grasp_garbage00003.jpg 253,344,153,3.021,1 455,466,152,0.02,1"
# get_random_data(anno, input_shape)

box_data = []
box_data.append(get_random_data(anno1, (416, 416), max_boxes=3)[1])
#box_data.append(get_random_data(anno2, (416, 416), max_boxes=3)[1])
box_data = np.array(box_data)
print("box_data.shape : ", box_data.shape)
yt = prepare_ytrue(box_data, input_shape, num_classes=12)

print("yt.shape : ", yt.shape)
for i in range(6):  # yt.shape[-1]
    mater = yt[0, :, :, i]
    plt.matshow(mater, cmap=plt.cm.Blues)
    plt.show()
'''for i in range(yt.shape[-1]):
    mater = yt[1, :, :, i]
    plt.matshow(mater, cmap=plt.cm.Reds)
    plt.show()'''
Example #18
generated_path = '/home/qkh/hdd1/data/gang_jin/'  # set the path for saving generated h5 file
input_shape = (544, 544)
aug_scale = 64

class_names = get_classes(classes_path)
num_classes = len(class_names)
anchors = get_anchors(anchors_path)
all_imgs, _, _ = get_pascal_detection_data(input_path=dataset_path)

dataAug = DataAugmentForObjectDetection()
image_data = []
box_data = []
for i in tqdm(range(len(all_imgs))):
    # load the original image
    origin_img, origin_box = get_random_data(all_imgs[i],
                                             input_shape,
                                             random=False)
    origin_img = exposure.rescale_intensity(origin_img, out_range=(0, 255))
    origin_img = np.uint8(origin_img)
    image_data.append(origin_img)
    box_data.append(origin_box)

    # data augmentation: generate aug_scale - 1 additional augmented copies
    for b in range(aug_scale - 1):
        auged_img, auged_box = dataAug.dataAugment(origin_img,
                                                   origin_box[:, :4])
        auged_img = np.uint8(auged_img)
        tmp_box = np.zeros(origin_box.shape)
        tmp_box[:, :4] = auged_box[:, :4]
        tmp_box[:, 4] = origin_box[:, 4]