def Supervision(img_data,out_13,out_26,out_52,turn):
    out_13, out_26, out_52 =out_13.cpu().data, out_26.cpu().data, out_52.cpu().data
    archor = tools.Archor()
    box_13 = Select_Data(out_13, 32, archor[13])
    box_26 = Select_Data(out_26, 16, archor[26])
    box_52 = Select_Data(out_52, 8, archor[52])
    boxes = torch.cat([box_13, box_26, box_52], dim=0)
    boxes = boxes.numpy()
    boxes = tools.NMS(boxes, 0.3)
    #print(save_boxes)

    # class index -> label (Chinese): dog, person, alpaca, car, bicycle, dolphin, squirrel, horse, cat
    dict_reverse = {0: '狗', 1: '人', 2: '羊驼', 3: '汽车', 4: '自行车', 5: '海豚', 6: '松鼠', 7: '马', 8: '猫'}
    img_data=np.array((img_data+0.5)*255,dtype=np.uint8)
    img_data=np.transpose(img_data[0],(1,2,0))

    img_back=Image.fromarray(img_data,"RGB")
    img_draw=ImageDraw.Draw(img_back)
    Font=ImageFont.truetype((r"F:\jkl\Yolo\simkai.ttf"),20)

    #if turn>300:
    plt.ion()
    plt.clf()
    for box in boxes:
        img_draw.point((float(box[1]), float(box[2])), fill="red")
        img_draw.rectangle((int(box[1] - box[3] / 2), int(box[2] - box[4] / 2), int(box[1] + box[3] / 2),
                            int(box[2] + box[4] / 2)), outline="red", width=2)
        img_draw.text((int(box[1] - box[3] / 2), int(box[2] - box[4] / 2)),
                      text=dict_reverse[int(box[5])] + "{:.3f}".format(box[0]),
                      fill=(255, 0,0), font=Font)
    plt.imshow(img_back)
    img_back.save(r"F:\Reply\YoloV3\Vedio_Ready\{}.jpg".format(turn))
    plt.pause(0.1)
    plt.ioff()
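A note on tools.NMS: it is called in every example here but never shown. The sketch below is a minimal, confidence-sorted, IoU-threshold suppression over rows laid out as (conf, cx, cy, w, h, cls, ...); it is an assumption about the helper's behaviour, not the repo's implementation, and it does not model the extra flag passed to it in Example #5.

import numpy as np

def nms_sketch(boxes, thresh=0.3):
    # boxes: (N, >=5) rows of (conf, cx, cy, w, h, ...)
    boxes = np.asarray(boxes, dtype=float)
    if len(boxes) == 0:
        return boxes
    boxes = boxes[(-boxes[:, 0]).argsort()]              # highest confidence first
    keep = []
    while len(boxes) > 0:
        best, rest = boxes[0], boxes[1:]
        keep.append(best)
        if len(rest) == 0:
            break
        # convert (cx, cy, w, h) to corner coordinates for the IoU test
        bx1, by1 = best[1] - best[3] / 2, best[2] - best[4] / 2
        bx2, by2 = best[1] + best[3] / 2, best[2] + best[4] / 2
        rx1, ry1 = rest[:, 1] - rest[:, 3] / 2, rest[:, 2] - rest[:, 4] / 2
        rx2, ry2 = rest[:, 1] + rest[:, 3] / 2, rest[:, 2] + rest[:, 4] / 2
        iw = np.clip(np.minimum(bx2, rx2) - np.maximum(bx1, rx1), 0, None)
        ih = np.clip(np.minimum(by2, ry2) - np.maximum(by1, ry1), 0, None)
        inter = iw * ih
        union = best[3] * best[4] + rest[:, 3] * rest[:, 4] - inter
        iou = inter / np.maximum(union, 1e-12)
        boxes = rest[iou < thresh]                        # drop heavy overlaps with the kept box
    return np.stack(keep)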
Example #2
def Select_Data(out_result,size,archor):
    archor=torch.Tensor(archor)
    out_result = out_result.permute(0, 2, 3, 1)
    out_result = torch.reshape(out_result, shape=(out_result.size(0), out_result.size(1), out_result.size(2), 3, -1))
    mask = out_result[..., 0] > 0.9
    idxes = mask.nonzero()
    if idxes.shape[0] == 0:
        # return an empty (0, 7) tensor so the caller's torch.cat across scales still works
        return torch.zeros(0, 7)
    out_have = out_result[mask]
    # print(out_have.shape,out_have)

    x_index=idxes[:,2]
    y_index=idxes[:,1]
    archor_idx=idxes[:,3]

    conf = out_have[:, 0]
    cx=(x_index.float()+out_have[:,1])*size
    cy=(y_index.float()+out_have[:,2])*size
    # print(archor_idx.shape,archor_idx)
    w=archor[archor_idx,0]*torch.exp(out_have[:,3])
    h=archor[archor_idx,1]*torch.exp(out_have[:,4])
    classify_num = torch.argmax(out_have[:,5:],dim=1)
    # print(classify_num)
    # print([conf.float(),cx.float(),cy.float(),w.float(),h.float(),classify_num.float()])
    box_center=torch.stack([conf.float(),cx.float(),cy.float(),w.float(),h.float(),classify_num.float(),archor_idx.float()],dim=1)
    # The callers (Examples #1 and #5) concatenate the three scales with
    # torch.cat and run tools.NMS once on the combined result, so return
    # the decoded boxes as a tensor here instead of re-running NMS.
    return box_center
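Select_Data implements the standard YOLOv3 decode: centre = (grid index + predicted offset) * stride and size = anchor * exp(predicted log-scale). A small worked example of those two formulas, with made-up offsets and anchor values purely for illustration:

import math

t_x, t_y, t_w, t_h = 0.4, 0.7, 0.2, -0.1   # hypothetical network outputs for one cell
col, row, stride = 5, 8, 32                # cell (row 8, col 5) on the 13x13 map
anchor_w, anchor_h = 116, 90               # illustrative anchor, not the repo's values

cx = (col + t_x) * stride                  # (5 + 0.4) * 32 = 172.8
cy = (row + t_y) * stride                  # (8 + 0.7) * 32 = 278.4
w = anchor_w * math.exp(t_w)               # 116 * e^0.2  ~ 141.7
h = anchor_h * math.exp(t_h)               # 90  * e^-0.1 ~ 81.4
print(cx, cy, w, h)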
Example #3
    def __getitem__(self, index):
        feature_data = {}
        line = self.datasets[index].split()  # use the sample at this index, not always the first line
        img_name = line[0]
        line_data = []
        img_open = Image.open(os.path.join(self.img_path, img_name))
        img_data = tools.Trans_img(img_open)

        for i in line[1:]:
            line_data.append(float(i))
        line_data = np.array(line_data)
        boxes = np.split(line_data, len(line_data) // 5)

        iou_dic = {}
        for feature_size, W_H in tools.Archor().items():
            feature_data[feature_size] = np.zeros(shape=(feature_size,
                                                         feature_size, 3,
                                                         5 + self.all_num))
            # iou_dic[feature_size]=np.zeros(shape=(9,4))
            # print(W_H)
            for box in boxes:
                cx, cy = float(box[1]), float(box[2])
                cx_off, cx_index = math.modf(cx * feature_size / 416)
                cy_off, cy_index = math.modf(cy * feature_size / 416)
                w, h = int(box[3]), int(box[4])
                for i, archor_area in enumerate(tools.Archor_Area().items()):
                    iou = tools.IOU_forlabel(box, W_H[i])
                    # print(iou)
                    t_w = w / W_H[i][0]
                    t_h = h / W_H[i][1]
                    one_hot = tools.One_Hot(int(self.all_num), int(box[0]))
                    iou_dic[iou] = [
                        iou, feature_size,
                        int(cy_index),
                        int(cx_index), i, box[0]
                    ]
                    #print(np.array([iou,cx_off, cy_off, np.log(t_w), np.log(t_h),*one_hot]))
                    feature_data[feature_size][int(cy_index),
                                               int(cx_index), i] = np.array([
                                                   iou, cx_off, cy_off,
                                                   np.log(t_w),
                                                   np.log(t_h), *one_hot
                                               ])
        # print(iou_dic)
        feature_data = tools.IOU_Deal(iou_dic, boxes, feature_data)

        return img_data, torch.Tensor(feature_data[13]), torch.Tensor(
            feature_data[26]), torch.Tensor(feature_data[52])
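tools.One_Hot is not shown, but the encoding above only needs a length-all_num vector with a 1 at the class index, and the np.log(t_w)/np.log(t_h) targets are the inverse of the torch.exp decode in Select_Data. A sketch of the one-hot helper under that assumption:

import numpy as np

def one_hot_sketch(num_classes, cls_index):
    # length-num_classes vector with a single 1 at the class position
    vec = np.zeros(num_classes)
    vec[cls_index] = 1.0
    return vec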
Example #4
        net = torch.load(net_path)

    turn = 0
    while True:
        turn += 1
        for i, (img_data, feature_13, feature_26,
                feature_52) in enumerate(train_data):
            img_data_cuda = img_data.to(device)
            feature_13, feature_26, feature_52 = \
                feature_13.to(device), feature_26.to(device), feature_52.to(device)
            out_13, out_26, out_52 = net(img_data_cuda)

            Supervision(img_data, out_13, out_26, out_52, turn)  # pass the epoch counter the function expects
            # print(out_13.shape,out_13)
            # out_13, out_26, out_52=out_13.cpu().data, out_26.cpu().data, out_52.cpu().data
            archor = tools.Archor()
            loss_13 = Loss_design(out_13, feature_13, 0.9)
            loss_26 = Loss_design(out_26, feature_26, 0.9)
            loss_52 = Loss_design(out_52, feature_52, 0.9)

            Loss = loss_13 + loss_26 + loss_52

            optimizer.zero_grad()
            Loss.backward()
            optimizer.step()

            print("{0}轮  --  Loss:{1}".format(turn, Loss.cpu().float()))

        if turn % 20 == 0:
            torch.save(net, net_path)
            print("Save Successfully!")
Example #5
    # As in Example #2, NMS runs once in __main__ after the three scales
    # are concatenated, so return the decoded boxes as a tensor here.
    return box_center

if __name__ == '__main__':
    # class index -> label (Chinese): dog, person, alpaca, car, bicycle, dolphin, squirrel, horse, cat
    dict_reverse = {0: '狗', 1: '人', 2: '羊驼', 3: '汽车', 4: '自行车', 5: '海豚', 6: '松鼠', 7: '马', 8: '猫'}
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    net = torch.load(r"F:\jkl\Yolo_V3\Net_Save\Yolo_net_wen.pth")
    net = net.to(device)
    font=ImageFont.truetype((r"F:\jkl\Yolo\simkai.ttf"),25)

    img_path = r"F:\Datasets_Dispose\Yolo_Datasets\Wen\1.jpg"
    # for img_name in os.listdir(img_path):
    img_open = Image.open(img_path)
    img_draw = ImageDraw.Draw(img_open)
    img_data = tools.Trans_img(img_open)
    img_data = torch.unsqueeze(img_data, dim=0)
    img_data = img_data.to(device)

    out_13, out_26, out_52 = net(img_data)
    out_13, out_26, out_52 = out_13.cpu().data, out_26.cpu().data, out_52.cpu().data
    # print(out_13.shape,out_13)
    archor = tools.Archor()
    box_13 = Select_Data(out_13, 32, archor[13])
    box_26 = Select_Data(out_26, 16, archor[26])
    box_52 = Select_Data(out_52, 8, archor[52])
    boxes = torch.cat([box_13, box_26, box_52], dim=0)
    boxes = boxes.numpy()
    save_boxes = tools.NMS(boxes, 0.3, True)
    print(save_boxes)
    plt.ion()