Example #1
def main():
    args = parser.parse_args()
    use_cuda = torch.cuda.is_available() and not args.no_cuda

    # Build the model and restore the weights saved by the training script.
    model = DarkNet(use_cuda)
    model_source = torch.load(args.weights)
    model.load_state_dict(model_source['model'])
    model.eval()
    if use_cuda:
        model = model.cuda()

    ip = IMGProcess(model_source,
                    use_cuda=use_cuda,
                    img_path=args.images,
                    img_size=args.img_size,
                    confidence=args.confidence,
                    result=args.result)

    print("-" * 57 + "Result" + "-" * 57)
    for batch in ip:
        outputs = ip.predict(model(batch), nms_conf=args.nms_thresh)
        for name, objs in outputs:
            print("Image - {}".format(name))
            print("Detect Objects - [{}]".format(", ".join(objs)))
            print("-" * 120)
Example #2
batch_size = int(params.bs)
conf = float(params.conf)
gpu = torch.cuda.is_available()

model = DarkNet(params.cfg)  # build the network from the cfg file
model.load_weights(params.weights)
print("Network loaded")
if gpu:
    model.cuda()

model.network_info['height'] = params.reso
input_dim = int(model.network_info['height'])
# DarkNet downsamples its input by a factor of up to 32, so the resolution
# must be a multiple of 32 (and larger than 32).
if input_dim % 32 != 0 or input_dim <= 32:
    raise ValueError('invalid image size: {}'.format(input_dim))

model.eval()  #eval mode
read_s = time.time()  # timestamp before loading the images

try:  #get image locations in list
    image_paths = [
        os.path.join(os.path.realpath('.'), imgs, img)
        for img in os.listdir(imgs)
    ]
except NotADirectoryError:
    image_paths = []
    image_paths.append(os.path.join(os.path.realpath('.'), imgs))
except FileNotFoundError:
    print('No file or directory with the name {}'.format(imgs))
    exit()

if not os.path.exists(params.det):  # create the output directory if it doesn't exist
    os.makedirs(params.det)
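The resolution check above exists because the DarkNet backbone downsamples its input by a factor of up to 32, so the network resolution has to be a multiple of 32. A small, hypothetical helper could snap a user-supplied value to the nearest valid size instead of rejecting it:

def snap_to_stride(reso, stride=32, minimum=64):
    """Round a requested input resolution to the nearest multiple of `stride`.

    Hypothetical helper: the snippet above simply raises on invalid sizes;
    snapping keeps arbitrary user input usable.
    """
    return max(minimum, int(round(reso / stride)) * stride)


assert snap_to_stride(416) == 416  # already a multiple of 32
assert snap_to_stride(410) == 416  # rounded to the nearest multiple of 32
assert snap_to_stride(20) == 64    # clamped to a minimum usable size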
Example #3
    num_classes = 80  #For COCO
    classes = load_classes("../data/coco.names")
    classes[0] = 'beautiful girl'  # overrides the first COCO class name ('person')

    # Build the network and load its pretrained weights
    print("Loading network...")
    model = DarkNet(args.cfgfile)
    model.load_weights(args.weightsfile)
    print('Network successfully loaded')
    model.net_info['height'] = args.reso
    inp_dim = int(model.net_info['height'])
    assert inp_dim % 32 == 0
    assert inp_dim > 32
    if CUDA:
        model = model.cuda()
    model.eval()  # inference mode: disables dropout, uses running batch-norm stats

    # Read the images
    read_dir = time.time()
    try:
        imlist = [
            osp.join(osp.realpath('.'), images, img)
            for img in os.listdir(images)
        ]
    except NotADirectoryError:
        imlist = []
        imlist.append(osp.join(osp.realpath('.'), images))
    except FileNotFoundError:
        print("No file or directory with the name {}".format(images))
        exit()
    if not os.path.exists(args.det):
        os.makedirs(args.det)
Example #4
    # Set up the network
    print('Loading the network')
    model = DarkNet(args.cfg_file)
    model.load_weights(args.weight)
    print('Network loaded successfully')

    model.net_info['height'] = args.reso
    inp_dim = int(model.net_info['height'])
    assert inp_dim % 32 == 0
    assert inp_dim > 32

    if cuda:
        model.cuda()

    #set the model into eval mode
    model.eval()
    read_dir = time.time()

    try:
        imlist = [
            osp.join(osp.realpath('.'), images, img)
            for img in os.listdir(images)
        ]

    except NotADirectoryError:
        imlist = []
        imlist.append(osp.join(osp.realpath('.'), images))

    except FileNotFoundError:
        print('No such file or directory with the name {}'.format(images))
        exit()

    if not os.path.exists(args.det):
        os.makedirs(args.det)
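Examples #2–#4 stop right after building the list of image paths; before the forward pass, each image still has to be resized to inp_dim and turned into a CHW float tensor. Below is a minimal sketch of that step, assuming OpenCV-style BGR input; the prep_image name and the grey letterboxing are assumptions, not taken from the snippets above.

import cv2
import numpy as np
import torch


def prep_image(path, inp_dim):
    """Letterbox an image to (inp_dim, inp_dim) and return a 1x3xHxW float tensor in [0, 1]."""
    img = cv2.imread(path)  # BGR, HxWx3, uint8
    h, w = img.shape[:2]
    scale = min(inp_dim / h, inp_dim / w)
    new_h, new_w = int(h * scale), int(w * scale)
    resized = cv2.resize(img, (new_w, new_h), interpolation=cv2.INTER_CUBIC)

    # Pad the shorter side with grey so the aspect ratio is preserved.
    canvas = np.full((inp_dim, inp_dim, 3), 128, dtype=np.uint8)
    top = (inp_dim - new_h) // 2
    left = (inp_dim - new_w) // 2
    canvas[top:top + new_h, left:left + new_w] = resized

    # BGR -> RGB, HWC -> CHW, scale to [0, 1], add a batch dimension.
    tensor = torch.from_numpy(canvas[:, :, ::-1].transpose(2, 0, 1).copy())
    return tensor.float().div(255.0).unsqueeze(0)

A batch for the forward pass can then be assembled with torch.cat([prep_image(p, inp_dim) for p in imlist[i:i + batch_size]]).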
Example #5
def train(folder="weights"):
    os.makedirs(folder, exist_ok=True)

    args = parser.parse_args()
    use_cuda = torch.cuda.is_available() and not args.no_cuda

    classes = load_classes()
    num_classes = len(classes)

    model = DarkNet(use_cuda, num_classes)
    if use_cuda:
        model = model.cuda()
    optimizer = torch.optim.Adam(model.parameters())

    training_data = Data_loader("data/labels/train2014/",
                                "data/train2014",
                                img_size=args.img_size,
                                max_objects=args.max_objects,
                                batch_size=args.batch_size,
                                is_cuda=use_cuda)

    validation_data = Data_loader("data/labels/val2014/",
                                  "data/val2014",
                                  img_size=args.img_size,
                                  max_objects=args.max_objects,
                                  batch_size=args.batch_size,
                                  is_cuda=use_cuda)

    for epoch in range(args.epoch):

        model.train()
        for batch_i, (imgs, labels) in enumerate(training_data):
            optimizer.zero_grad()
            loss, gather_losses = model(imgs, labels)
            loss.backward()
            optimizer.step()

            print(
                f"[Epoch {epoch + 1}/{args.epoch}, Batch {batch_i + 1}/{training_data.stop_step}] "
                f"[Losses: x {gather_losses['x']:.5f}, y {gather_losses['y']:.5f}, "
                f"w {gather_losses['w']:.5f}, h {gather_losses['h']:.5f}, "
                f"conf {gather_losses['conf']:.5f}, cls {gather_losses['cls']:.5f}, "
                f"total {loss.item():.5f}, recall: {gather_losses['recall']:.5f}, "
                f"precision: {gather_losses['precision']:.5f}]"
            )

        torch.save({
            "model": model.state_dict(),
            "classes": classes
        }, f"{folder}/{epoch}.weights.pt")

        # Collect per-class detections and ground-truth annotations for AP computation.
        all_detections = []
        all_annotations = []

        model.eval()
        for imgs, labels in validation_data:
            with torch.no_grad():
                prediction = model(imgs)
                outputs = predict(prediction, args.nms_conf, args.confidence)

            labels = labels.cpu()
            for output, annotations in zip(outputs, labels):
                all_detections.append(
                    [np.array([]) for _ in range(num_classes)])
                if output is not None:
                    pred_boxes = output[:, :5].cpu().numpy()
                    scores = output[:, 4].cpu().numpy()
                    pred_labels = output[:, -1].cpu().numpy()

                    sort_i = np.argsort(scores)
                    pred_labels = pred_labels[sort_i]
                    pred_boxes = pred_boxes[sort_i]

                    for label in range(num_classes):
                        all_detections[-1][label] = pred_boxes[pred_labels == label]

                all_annotations.append(
                    [np.array([]) for _ in range(num_classes)])

                if any(annotations[:, -1] > 0):
                    annotation_labels = annotations[annotations[:, -1] > 0, 0].numpy()
                    _annotation_boxes = annotations[annotations[:, -1] > 0, 1:]

                    # Convert (cx, cy, w, h) boxes to (x1, y1, x2, y2) corners and
                    # scale from normalized coordinates to pixels.
                    annotation_boxes = np.empty_like(_annotation_boxes)
                    annotation_boxes[:, 0] = _annotation_boxes[:, 0] - _annotation_boxes[:, 2] / 2
                    annotation_boxes[:, 1] = _annotation_boxes[:, 1] - _annotation_boxes[:, 3] / 2
                    annotation_boxes[:, 2] = _annotation_boxes[:, 0] + _annotation_boxes[:, 2] / 2
                    annotation_boxes[:, 3] = _annotation_boxes[:, 1] + _annotation_boxes[:, 3] / 2
                    annotation_boxes *= args.img_size

                    for label in range(num_classes):
                        all_annotations[-1][label] = annotation_boxes[
                            annotation_labels == label, :]

        average_precisions = evaluate(num_classes, all_detections,
                                      all_annotations)

        print(f"""{"-"*40}evaluation.{epoch}{"-"*40}""")
        for c, ap in average_precisions.items():
            print(f"Class '{c}' - AP: {ap}")

        mAP = np.mean(list(average_precisions.values()))
        print(f"mAP: {mAP}")
        print(f"""{"-"*40}end{"-"*40}""")