Code example #1: detect objects in a directory of images
import argparse

# print_environment_info, load_classes and detect_directory are project-level
# helpers from the surrounding codebase (import paths omitted here).
def run():
    print_environment_info()
    parser = argparse.ArgumentParser(description="Detect objects on images.")
    parser.add_argument("-m", "--model", type=str, default="config/yolov3.cfg", help="Path to model definition file (.cfg)")
    parser.add_argument("-w", "--weights", type=str, default="weights/yolov3.weights", help="Path to weights or checkpoint file (.weights or .pth)")
    parser.add_argument("-i", "--images", type=str, default="data/samples", help="Path to directory with images to inference")
    parser.add_argument("-c", "--classes", type=str, default="data/coco.names", help="Path to classes label file (.names)")
    parser.add_argument("-o", "--output", type=str, default="output", help="Path to output directory")
    parser.add_argument("-b", "--batch_size", type=int, default=1, help="Size of each image batch")
    parser.add_argument("--img_size", type=int, default=416, help="Size of each image dimension for yolo")
    parser.add_argument("--n_cpu", type=int, default=8, help="Number of cpu threads to use during batch generation")
    parser.add_argument("--conf_thres", type=float, default=0.5, help="Object confidence threshold")
    parser.add_argument("--nms_thres", type=float, default=0.4, help="IOU threshold for non-maximum suppression")
    args = parser.parse_args()
    print(f"Command line arguments: {args}")

    # Extract class names from file
    classes = load_classes(args.classes)  # List of class names

    detect_directory(
        args.model,
        args.weights,
        args.images,
        classes,
        args.output,
        batch_size=args.batch_size,
        img_size=args.img_size,
        n_cpu=args.n_cpu,
        conf_thres=args.conf_thres,
        nms_thres=args.nms_thres)
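
Both this example and example #3 below rely on a load_classes helper to turn the .names file into a Python list. A minimal sketch of what such a helper typically looks like, assuming the standard Darknet convention of one class name per line (the project's own implementation may differ):

def load_classes(path):
    """Read a .names file and return the class labels as a list of strings."""
    with open(path, "r", encoding="utf-8") as f:
        # One class name per line; drop surrounding whitespace and blank lines
        return [line.strip() for line in f if line.strip()]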
Code example #2: train the YOLO model
import argparse
import os

import torch
import torch.optim as optim
import tqdm
from terminaltables import AsciiTable
from torchsummary import summary

# print_environment_info, provide_determinism, Logger, parse_data_config,
# load_classes, load_model, compute_loss, to_cpu and the
# _create_*_data_loader / _evaluate functions are project-level helpers from
# the surrounding codebase (import paths omitted here).
def run():
    print_environment_info()
    parser = argparse.ArgumentParser(description="Trains the YOLO model.")
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        default="config/yolov3.cfg",
                        help="Path to model definition file (.cfg)")
    parser.add_argument("-d",
                        "--data",
                        type=str,
                        default="config/coco.data",
                        help="Path to data config file (.data)")
    parser.add_argument("-e",
                        "--epochs",
                        type=int,
                        default=300,
                        help="Number of epochs")
    parser.add_argument("-v",
                        "--verbose",
                        action='store_true',
                        help="Makes the training more verbose")
    parser.add_argument(
        "--n_cpu",
        type=int,
        default=8,
        help="Number of cpu threads to use during batch generation")
    parser.add_argument(
        "--pretrained_weights",
        type=str,
        help="Path to checkpoint file (.weights or .pth). Starts training from checkpoint model")
    parser.add_argument("--checkpoint_interval",
                        type=int,
                        default=1,
                        help="Interval of epochs between saving model weights")
    parser.add_argument(
        "--evaluation_interval",
        type=int,
        default=1,
        help="Interval of epochs between evaluations on validation set")
    parser.add_argument("--multiscale_training",
                        action="store_false",
                        help="Allow for multi-scale training")
    parser.add_argument(
        "--iou_thres",
        type=float,
        default=0.5,
        help="Evaluation: IOU threshold required to qualify as detected")
    parser.add_argument("--conf_thres",
                        type=float,
                        default=0.1,
                        help="Evaluation: Object confidence threshold")
    parser.add_argument(
        "--nms_thres",
        type=float,
        default=0.5,
        help="Evaluation: IOU threshold for non-maximum suppression")
    parser.add_argument(
        "--logdir",
        type=str,
        default="logs",
        help="Directory for training log files (e.g. for TensorBoard)")
    parser.add_argument("--seed",
                        type=int,
                        default=-1,
                        help="Makes results reproducable. Set -1 to disable.")
    args = parser.parse_args()
    print(f"Command line arguments: {args}")

    if args.seed != -1:
        provide_determinism(args.seed)

    logger = Logger(args.logdir)  # Tensorboard logger

    # Create output directories if missing
    os.makedirs("output", exist_ok=True)
    os.makedirs("checkpoints", exist_ok=True)

    # Get data configuration
    data_config = parse_data_config(args.data)
    train_path = data_config["train"]
    valid_path = data_config["valid"]
    class_names = load_classes(data_config["names"])
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    # ############
    # Create model
    # ############

    model = load_model(args.model, args.pretrained_weights)

    # Print model
    if args.verbose:
        summary(model,
                input_size=(3, model.hyperparams['height'],
                            model.hyperparams['height']))

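    # Darknet cfgs specify a nominal 'batch' size plus a 'subdivisions' count:
    # each forward/backward pass uses batch // subdivisions images, and the
    # optimizer steps once per nominal batch (see the training loop below).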
    mini_batch_size = (model.hyperparams['batch']
                       // model.hyperparams['subdivisions'])

    # #################
    # Create Dataloader
    # #################

    # Load training dataloader
    dataloader = _create_data_loader(train_path, mini_batch_size,
                                     model.hyperparams['height'], args.n_cpu,
                                     args.multiscale_training)

    # Load validation dataloader
    validation_dataloader = _create_validation_data_loader(
        valid_path, mini_batch_size, model.hyperparams['height'], args.n_cpu)

    # ################
    # Create optimizer
    # ################

    params = [p for p in model.parameters() if p.requires_grad]

    if model.hyperparams['optimizer'] in [None, "adam"]:
        optimizer = optim.Adam(
            params,
            lr=model.hyperparams['learning_rate'],
            weight_decay=model.hyperparams['decay'],
        )
    elif model.hyperparams['optimizer'] == "sgd":
        optimizer = optim.SGD(params,
                              lr=model.hyperparams['learning_rate'],
                              weight_decay=model.hyperparams['decay'],
                              momentum=model.hyperparams['momentum'])
    else:
        # Fail fast instead of printing and falling through with 'optimizer'
        # undefined (the original would crash later at optimizer.step()).
        raise ValueError("Unknown optimizer. Please choose between (adam, sgd).")

    for epoch in range(args.epochs):

        print("\n---- Training Model ----")

        model.train()  # Set model to training mode

        for batch_i, (_, imgs, targets) in enumerate(
                tqdm.tqdm(dataloader, desc=f"Training Epoch {epoch}")):
            batches_done = len(dataloader) * epoch + batch_i

            imgs = imgs.to(device, non_blocking=True)
            targets = targets.to(device)

            outputs = model(imgs)

            loss, loss_components = compute_loss(outputs, targets, model)

            loss.backward()

            ###############
            # Run optimizer
            ###############

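            # Gradients from each backward() call accumulate; the optimizer is
            # stepped only once every 'subdivisions' mini-batches, so the
            # effective batch size matches the cfg's nominal 'batch'.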
            if batches_done % model.hyperparams['subdivisions'] == 0:
                # Adapt learning rate
                # Get learning rate defined in cfg
                lr = model.hyperparams['learning_rate']
                if batches_done < model.hyperparams['burn_in']:
                    # Burn in
                    lr *= (batches_done / model.hyperparams['burn_in'])
                else:
                    # Apply every step-decay factor whose threshold has been passed
                    for threshold, value in model.hyperparams['lr_steps']:
                        if batches_done > threshold:
                            lr *= value
                # Log the learning rate
                logger.scalar_summary("train/learning_rate", lr, batches_done)
                # Set learning rate
                for g in optimizer.param_groups:
                    g['lr'] = lr

                # Run optimizer
                optimizer.step()
                # Reset gradients
                optimizer.zero_grad()

            # ############
            # Log progress
            # ############
            if args.verbose:
                print(
                    AsciiTable([
                        ["Type", "Value"],
                        ["IoU loss", float(loss_components[0])],
                        ["Object loss",
                         float(loss_components[1])],
                        ["Class loss", float(loss_components[2])],
                        ["Loss", float(loss_components[3])],
                        ["Batch loss", to_cpu(loss).item()],
                    ]).table)

            # Tensorboard logging
            tensorboard_log = [("train/iou_loss", float(loss_components[0])),
                               ("train/obj_loss", float(loss_components[1])),
                               ("train/class_loss", float(loss_components[2])),
                               ("train/loss", to_cpu(loss).item())]
            logger.list_of_scalars_summary(tensorboard_log, batches_done)

            model.seen += imgs.size(0)

        # #############
        # Save progress
        # #############

        # Save model to checkpoint file
        if epoch % args.checkpoint_interval == 0:
            checkpoint_path = f"checkpoints/yolov3_ckpt_{epoch}.pth"
            print(f"---- Saving checkpoint to: '{checkpoint_path}' ----")
            torch.save(model.state_dict(), checkpoint_path)

        # ########
        # Evaluate
        # ########

        if epoch % args.evaluation_interval == 0:
            print("\n---- Evaluating Model ----")
            # Evaluate the model on the validation set
            metrics_output = _evaluate(model,
                                       validation_dataloader,
                                       class_names,
                                       img_size=model.hyperparams['height'],
                                       iou_thres=args.iou_thres,
                                       conf_thres=args.conf_thres,
                                       nms_thres=args.nms_thres,
                                       verbose=args.verbose)

            if metrics_output is not None:
                precision, recall, AP, f1, ap_class = metrics_output
                evaluation_metrics = [("validation/precision",
                                       precision.mean()),
                                      ("validation/recall", recall.mean()),
                                      ("validation/mAP", AP.mean()),
                                      ("validation/f1", f1.mean())]
                logger.list_of_scalars_summary(evaluation_metrics, epoch)
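
The learning-rate logic buried in the training loop above combines a linear burn-in with step decay. Restated as a standalone function for clarity (a sketch of the same logic; the base_lr, burn_in and lr_steps values here are illustrative, in practice they come from the cfg's hyperparams):

def scheduled_lr(batches_done, base_lr=1e-3, burn_in=1000,
                 lr_steps=((40000, 0.1), (45000, 0.1))):
    """Mirror the schedule above: linear burn-in, then step decay."""
    lr = base_lr
    if batches_done < burn_in:
        # Ramp linearly from 0 up to base_lr over the burn-in phase
        lr *= batches_done / burn_in
    else:
        # Multiply in every decay factor whose threshold has been passed
        for threshold, value in lr_steps:
            if batches_done > threshold:
                lr *= value
    return lr

print(scheduled_lr(500))    # 0.0005 (halfway through burn-in)
print(scheduled_lr(50000))  # about 1e-05 (both decay steps applied)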
Code example #3: evaluate the model on validation data
File: test.py  Project: nianjiuhuiyi/PyTorch-YOLOv3

import argparse

# print_environment_info, parse_data_config, load_classes and
# evaluate_model_file are helpers provided by the PyTorch-YOLOv3 project
# itself (import paths omitted here).
def run():
    print_environment_info()
    parser = argparse.ArgumentParser(description="Evaluate validation data.")
    parser.add_argument("-m",
                        "--model",
                        type=str,
                        default="config/yolov3.cfg",
                        help="Path to model definition file (.cfg)")
    parser.add_argument(
        "-w",
        "--weights",
        type=str,
        default="weights/yolov3.weights",
        help="Path to weights or checkpoint file (.weights or .pth)")
    parser.add_argument("-d",
                        "--data",
                        type=str,
                        default="config/coco.data",
                        help="Path to data config file (.data)")
    parser.add_argument("-b",
                        "--batch_size",
                        type=int,
                        default=8,
                        help="Size of each image batch")
    parser.add_argument("-v",
                        "--verbose",
                        action='store_true',
                        help="Makes the validation more verbose")
    parser.add_argument("--img_size",
                        type=int,
                        default=416,
                        help="Size of each image dimension for yolo")
    parser.add_argument(
        "--n_cpu",
        type=int,
        default=8,
        help="Number of cpu threads to use during batch generation")
    parser.add_argument("--iou_thres",
                        type=float,
                        default=0.5,
                        help="IOU threshold required to qualify as detected")
    parser.add_argument("--conf_thres",
                        type=float,
                        default=0.01,
                        help="Object confidence threshold")
    parser.add_argument("--nms_thres",
                        type=float,
                        default=0.4,
                        help="IOU threshold for non-maximum suppression")
    args = parser.parse_args()
    print(f"Command line arguments: {args}")

    # Load configuration from data file
    data_config = parse_data_config(args.data)
    # Path to file containing all images for validation
    valid_path = data_config["valid"]
    class_names = load_classes(data_config["names"])  # List of class names

    precision, recall, AP, f1, ap_class = evaluate_model_file(
        args.model,
        args.weights,
        valid_path,
        class_names,
        batch_size=args.batch_size,
        img_size=args.img_size,
        n_cpu=args.n_cpu,
        iou_thres=args.iou_thres,
        conf_thres=args.conf_thres,
        nms_thres=args.nms_thres,
        verbose=args.verbose)
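
Each example reads its dataset paths through the project's parse_data_config helper. A rough sketch of such a parser, assuming the usual Darknet .data format of key=value lines (e.g. train=..., valid=..., names=...); the real helper may handle more cases:

def parse_data_config(path):
    """Parse a Darknet-style .data file of 'key = value' lines into a dict."""
    options = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith("#"):
                continue  # skip blank lines and comments
            key, value = line.split("=", 1)
            options[key.strip()] = value.strip()
    return options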