Example #1 — evaluate a trained YOLOv3 (Darknet) model on the validation split from the data config and report per-class average precision and overall mAP.
def main(data_config, model_def, weights_path, batch_size, img_size, iou_thres,
         conf_thres, nms_thres, nb_cpu):
    data_config = parse_data_config(update_path(data_config))
    valid_path = update_path(data_config["valid"])
    class_names = load_classes(update_path(data_config["names"]))

    # Instantiate the model
    model = Darknet(model_def).to(DEVICE)
    if weights_path.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(weights_path)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(weights_path))

    print("Compute mAP...")

    precision, recall, AP, f1, ap_class = evaluate_model(
        model,
        path_data=valid_path,
        iou_thres=iou_thres,
        conf_thres=conf_thres,
        nms_thres=nms_thres,
        img_size=img_size,
        batch_size=batch_size,
        nb_cpu=nb_cpu,
    )

    print("Average Precisions:")
    for i, c in enumerate(ap_class):
        print(f"+ Class '{c}' ({class_names[c]}) - AP: {AP[i]}")

    print(f"mAP: {AP.mean()}")
Example #2 — run batch object detection over a folder of images, apply non-max suppression, and export the annotated images and detections in parallel.
def main(image_folder, model_def, weights_path, class_path, output_folder, img_size,
         conf_thres, nms_thres, batch_size, nb_cpu):
    # use GPU if available
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # ensure at least one CPU worker for the dataloader
    nb_cpu = max(1, nb_cpu)
    # prepare the output folder
    os.makedirs(output_folder, exist_ok=True)

    # Set up model
    model = Darknet(model_def, img_size=img_size).to(device)

    if weights_path.endswith(".weights"):
        # Load darknet weights
        model.load_darknet_weights(weights_path)
    else:
        # Load checkpoint weights
        model.load_state_dict(torch.load(weights_path))

    model.eval()  # Set in evaluation mode

    img_folder = ImageFolder(image_folder, img_size=img_size)
    dataloader = DataLoader(img_folder, batch_size=batch_size, shuffle=False, num_workers=nb_cpu)

    classes = load_classes(class_path)  # Extracts class labels from file

    Tensor = torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor

    img_paths = []  # Stores image paths
    img_detections = []  # Stores detections for each image index

    pbar = tqdm.tqdm(total=len(img_folder), desc='Performing object detection')
    for path_imgs, input_imgs in dataloader:
        # Configure input
        input_imgs = Variable(input_imgs.type(Tensor))

        # Get detections
        with torch.no_grad():
            detects = model(input_imgs)
            detects = non_max_suppression(detects, conf_thres, nms_thres)

        # Save image and detections
        img_paths.extend(path_imgs)
        img_detections.extend(detects)
        pbar.update(len(path_imgs))
    pbar.close()

    # Bounding-box colors
    colors = get_colors(len(classes), "jet")
    # np.random.shuffle(colors)

    _wrap_export = partial(wrap_export_detection, img_size=img_size, colors=colors,
                           classes=classes, output_folder=output_folder)
    with ProcessPool(nb_cpu) as pool:
        # Iterate through images and save plot of detections
        list(tqdm.tqdm(pool.imap(_wrap_export, zip(img_paths, img_detections)),
                       total=len(img_paths), desc='Saving images/detections'))
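
A hypothetical call to the detection routine above; folder names, weight files, and thresholds are placeholders, not values from the original repository.

main(
    image_folder="data/samples",           # folder with input images
    model_def="config/yolov3.cfg",
    weights_path="weights/yolov3.weights",
    class_path="data/coco.names",          # one class name per line
    output_folder="output",                # annotated images/detections are written here
    img_size=416,
    conf_thres=0.8,
    nms_thres=0.4,
    batch_size=4,
    nb_cpu=2,
)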
Example #3 — train a YOLOv3 model with RAdam, optional mixed precision (AMP), gradient accumulation, periodic evaluation, and checkpointing; the script configuration is dumped to a YAML file for reproducibility.
def main(data_config, model_def, trained_weights, augment, multiscale,
         img_size, grad_accums, evaluation_interval, checkpoint_interval,
         batch_size, epochs, path_output, nb_cpu, amp):
    path_output = update_path(path_output)
    os.makedirs(path_output, exist_ok=True)

    shutil.copy(data_config, os.path.join(path_output, os.path.basename(data_config)))
    _, _, _, local_vars = inspect.getargvalues(inspect.currentframe())
    args = {arg: local_vars[arg] for arg in inspect.getfullargspec(main).args}
    with open(os.path.join(path_output, 'script-config.yaml'), 'w') as fp:
        yaml.dump(dict(args), fp)

    logger = Logger(os.path.join(path_output, "logs"))

    # Get data configuration
    data_config = parse_data_config(update_path(data_config))
    train_path = update_path(data_config["train"])
    valid_path = update_path(data_config["valid"])
    class_names = load_classes(update_path(data_config["names"]))

    # Instantiate the model
    assert os.path.isfile(model_def), 'missing: %s' % model_def
    model = Darknet(update_path(model_def)).to(DEVICE)
    model.apply(weights_init_normal)

    # If specified, resume from the given checkpoint or weights file
    if trained_weights:
        assert os.path.isfile(trained_weights), 'missing: %s' % trained_weights
        if trained_weights.endswith(".pth"):
            model.load_state_dict(torch.load(trained_weights))
        else:
            model.load_darknet_weights(trained_weights)

    augment = dict(zip(augment, [True] * len(augment))) if augment else {}
    augment["scaling"] = multiscale
    # Get dataloader
    assert os.path.isfile(train_path), 'missing: %s' % train_path
    dataset = ListDataset(train_path, augment=augment, img_size=img_size)
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=nb_cpu,
        pin_memory=True,
        collate_fn=dataset.collate_fn,
    )

    for param in model.parameters():
        param.requires_grad = True

    # optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    optimizer = torch_optimizer.RAdam(model.parameters(), lr=0.0001)
    # optimizer = torch_optimizer.Yogi(model.parameters(), lr=0.0001)
    # optimizer = torch_optimizer.AdaBound(model.parameters(), lr=0.0001)

    # Creates once at the beginning of training
    scaler = torch.cuda.amp.GradScaler() if amp else None

    for epoch in tqdm.tqdm(range(epochs), desc='Training epoch'):
        model.train()
        # start_time = time.time()
        pbar_batch = tqdm.tqdm(total=len(dataloader))
        train_metrics = []
        for batch_i, (_, imgs, targets) in enumerate(dataloader):
            model, batch_metric = training_batch(dataloader, model, optimizer, epochs,
                                                 epoch, batch_i, imgs, targets, grad_accums, img_size, scaler)
            loss = batch_metric['loss']
            pbar_batch.set_description("training batch loss=%.5f" % loss)
            train_metrics.append(batch_metric)
            pbar_batch.update()
        pbar_batch.close()
        pbar_batch.clear()

        train_metrics_all = {m: [] for m in train_metrics[0]}
        for bm in train_metrics:
            for k in bm:
                train_metrics_all[k].append(bm[k])
        logger.list_scalars_summary([(k, np.mean(train_metrics_all[k]))
                                     for k in train_metrics_all], step=epoch, phase='train')

        if epoch % evaluation_interval == 0:
            assert os.path.isfile(valid_path), 'missing: %s' % valid_path
            evaluate_epoch(model, valid_path, img_size, batch_size, epoch, class_names, logger, nb_cpu)

        if epoch % checkpoint_interval == 0:
            torch.save(model.state_dict(), os.path.join(path_output, "yolov3_ckpt_%05d.pth" % epoch))
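
The training_batch helper is not shown in this example. For orientation, below is a minimal sketch of one mixed-precision training step with gradient accumulation in plain PyTorch; it only illustrates the pattern implied by the scaler and grad_accums arguments, it is not the project's actual implementation, and it assumes the model returns (loss, detections) when targets are passed.

import torch

def amp_accum_step(model, optimizer, scaler, imgs, targets, batch_i, grad_accums):
    # Illustrative only: one forward/backward pass with optional AMP and
    # gradient accumulation over grad_accums batches.
    with torch.cuda.amp.autocast(enabled=scaler is not None):
        loss, _ = model(imgs, targets)   # assumed forward signature: (loss, detections)
        loss = loss / grad_accums        # average gradients over the accumulation window
    if scaler is not None:
        scaler.scale(loss).backward()    # scaled backward pass under AMP
    else:
        loss.backward()
    if (batch_i + 1) % grad_accums == 0:
        if scaler is not None:
            scaler.step(optimizer)       # unscales gradients, then steps the optimizer
            scaler.update()
        else:
            optimizer.step()
        optimizer.zero_grad()
    return loss.item()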
Example #4 — a variant of the training loop in Example #3 that uses plain Adam and does not support mixed precision.
def main(data_config, model_def, trained_weights, augment, multiscale,
         img_size, grad_accums, evaluation_interval, checkpoint_interval,
         batch_size, epochs, path_output, nb_cpu):
    path_output = update_path(path_output)
    os.makedirs(path_output, exist_ok=True)

    logger = Logger(os.path.join(path_output, "logs"))

    # Get data configuration
    data_config = parse_data_config(update_path(data_config))
    train_path = update_path(data_config["train"])
    valid_path = update_path(data_config["valid"])
    class_names = load_classes(update_path(data_config["names"]))

    # Instantiate the model
    assert os.path.isfile(model_def)
    model = Darknet(update_path(model_def)).to(DEVICE)
    model.apply(weights_init_normal)

    # If specified, resume from the given checkpoint or weights file
    if trained_weights:
        assert os.path.isfile(trained_weights)
        if trained_weights.endswith(".pth"):
            model.load_state_dict(torch.load(trained_weights))
        else:
            model.load_darknet_weights(trained_weights)

    augment = dict(zip(augment, [True] * len(augment))) if augment else {}
    augment["scaling"] = multiscale
    # Get dataloader
    assert os.path.isfile(train_path)
    dataset = ListDataset(train_path, augment=augment, img_size=img_size)
    dataloader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=nb_cpu,
        pin_memory=True,
        collate_fn=dataset.collate_fn,
    )

    optimizer = torch.optim.Adam(model.parameters())

    for epoch in tqdm.tqdm(range(epochs), desc='Training epoch'):
        model.train()
        # start_time = time.time()
        pbar_batch = tqdm.tqdm(total=len(dataloader))
        train_metrics = []
        for batch_i, (_, imgs, targets) in enumerate(dataloader):
            model, batch_metric = training_batch(dataloader, model, optimizer, epochs,
                                                 epoch, batch_i, imgs, targets, grad_accums, logger, img_size)
            loss = batch_metric['loss']
            pbar_batch.set_description("training batch loss=%.5f" % loss)
            train_metrics.append(batch_metric)
            pbar_batch.update()
        pbar_batch.close()
        pbar_batch.clear()

        if epoch % evaluation_interval == 0:
            assert os.path.isfile(valid_path)
            evaluate_epoch(model, valid_path, img_size, batch_size, epoch, class_names, logger, nb_cpu)

        if epoch % checkpoint_interval == 0:
            torch.save(model.state_dict(), os.path.join(path_output, "yolov3_ckpt_%05d.pth" % epoch))

        train_metrics_all = {m: [] for m in train_metrics[0]}
        for bm in train_metrics:
            for k in bm:
                train_metrics_all[k].append(bm[k])
        logger.list_scalars_summary([(k, np.mean(train_metrics_all[k]))
                                     for k in train_metrics_all], step=epoch, phase='train')
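
A hypothetical invocation of the training entry point above; every path, augmentation name, and hyper-parameter below is a placeholder chosen for illustration.

main(
    data_config="config/custom.data",
    model_def="config/yolov3.cfg",
    trained_weights="weights/darknet53.conv.74",  # optional warm start; an empty string trains from scratch
    augment=["hflip", "vflip"],                   # hypothetical augmentation names interpreted by ListDataset
    multiscale=True,
    img_size=416,
    grad_accums=2,                                # accumulate gradients over 2 batches
    evaluation_interval=1,                        # evaluate every epoch
    checkpoint_interval=1,                        # save a checkpoint every epoch
    batch_size=8,
    epochs=30,
    path_output="model_yolo",
    nb_cpu=4,
)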