Example #1
    def __init__(self):
        poles_net = YOLOv3()
        components_net = YOLOv3()

        # Initialize predicting networks
        self.pole_detector = PolesDetector(predicting_net=poles_net)
        self.components_detector = ComponentsDetector(
            predicting_net=components_net)
Example #2
def main():
    model = YOLOv3(num_classes=config.NUM_CLASSES).to(config.DEVICE)
    optimizer = optim.Adam(model.parameters(),
                           lr=config.LEARNING_RATE,
                           weight_decay=config.WEIGHT_DECAY)
    loss_fn = YoloLoss()
    scaler = torch.cuda.amp.GradScaler()
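    # (GradScaler is part of PyTorch automatic mixed precision: it scales the
    # loss so that float16 gradients do not underflow during training.)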

    train_loader, test_loader, train_eval_loader = get_loaders(
        train_csv_path=config.DATASET + "/train.csv",
        test_csv_path=config.DATASET + "/test.csv")

    if config.LOAD_MODEL:
        load_checkpoint(config.CHECKPOINT_FILE, model, optimizer,
                        config.LEARNING_RATE)
        print("loaded model")

    scaled_anchors = (torch.tensor(config.ANCHORS) * torch.tensor(
        config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)).to(config.DEVICE)
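    # (config.ANCHORS is assumed to hold anchors as fractions of the image
    # size; multiplying by the grid sizes in config.S expresses each anchor
    # in grid-cell units for its prediction scale.)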

    for epoch in range(config.NUM_EPOCHS):
        print(f"\nEpoch [{epoch}]")
        #plot_couple_examples(model, test_loader, 0.6, 0.5, scaled_anchors)
        train_fn(train_loader, model, optimizer, loss_fn, scaler,
                 scaled_anchors)

        #if config.SAVE_MODEL:
        #    save_checkpoint(model, optimizer, filename=f"checkpoint.pth.tar")

        #print("On Train Eval loader:")
        #print("On Train loader:")
        #check_class_accuracy(model, train_loader, threshold=config.CONF_THRESHOLD)

        if epoch > 0 and epoch % 4 == 0:
            valid_fn(train_eval_loader, model, loss_fn, scaled_anchors)
            check_class_accuracy(model,
                                 test_loader,
                                 threshold=config.CONF_THRESHOLD)
            pred_boxes, true_boxes = get_evaluation_bboxes(
                test_loader,
                model,
                iou_threshold=config.NMS_IOU_THRESH,
                anchors=config.ANCHORS,
                threshold=config.CONF_THRESHOLD,
            )
            mapval = mean_average_precision(
                pred_boxes,
                true_boxes,
                iou_threshold=config.MAP_IOU_THRESH,
                box_format="midpoint",
                num_classes=config.NUM_CLASSES,
            )
            print(f"MAP: {mapval.item()}")

            print("\nnResuming Training\n")
            model.train()
Example #3
def main():
    model = YOLOv3(num_classes=config.NUM_CLASSES).to(config.DEVICE)
    optimizer = optim.Adam(
        model.parameters(), lr=config.LEARNING_RATE, weight_decay=config.WEIGHT_DECAY
    )
    loss_fn = YoloLoss()
    scaler = torch.cuda.amp.GradScaler()

    train_loader, test_loader, train_eval_loader = get_loaders(
        train_csv_path=config.DATASET + "/train.csv", test_csv_path=config.DATASET + "/test.csv"
    )

    if config.LOAD_MODEL:
        load_checkpoint(
            config.CHECKPOINT_FILE, model, optimizer, config.LEARNING_RATE
        )

    scaled_anchors = (
        torch.tensor(config.ANCHORS)
        * torch.tensor(config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
    ).to(config.DEVICE)

    for epoch in range(config.NUM_EPOCHS):
        train_fn(train_loader, model, optimizer, loss_fn, scaler, scaled_anchors)
        if epoch > 0 and epoch % 3 == 0:
            check_class_accuracy(model, test_loader, threshold=config.CONF_THRESHOLD)
            pred_boxes, true_boxes = get_evaluation_bboxes(
                test_loader,
                model,
                iou_threshold=config.NMS_IOU_THRESH,
                anchors=config.ANCHORS,
                threshold=config.CONF_THRESHOLD,
            )
            mapval = mean_average_precision(
                pred_boxes,
                true_boxes,
                iou_threshold=config.MAP_IOU_THRESH,
                box_format="midpoint",
                num_classes=config.NUM_CLASSES,
            )
            print(f"MAP: {mapval.item()}")
            model.train()
        if epoch > 99:
            # Visualize predictions for the first 8 images of a training batch
            for x, y in train_loader:
                x = x.to(config.DEVICE)
                for idx in range(8):
                    bboxes = cellboxes_to_boxes(model(x))
                    bboxes = non_max_suppression(bboxes[idx], iou_threshold=0.5, threshold=0.4)
                    plot_image(x[idx].permute(1, 2, 0).to("cpu"), bboxes, idx)

if __name__ == "__main__":
    main()
Example #4
def main():
    model = YOLOv3(num_classes=config.NUM_CLASSES).to(config.DEVICE,
                                                      dtype=torch.float)
    optimizer = optim.Adam(model.parameters(),
                           lr=config.LEARNING_RATE,
                           weight_decay=config.WEIGHT_DECAY)
    loss_fn = YoloLoss().to(config.DEVICE)
    scaler = torch.cuda.amp.GradScaler()

    dataset = YoloDataset(table=table,
                          anchors=anchors,
                          transform=config.transform)
    train_loader = DataLoader(dataset, batch_size=2, shuffle=True)

    for epoch in range(20):
        train_fn(train_loader, model, optimizer, loss_fn, scaler, anchors)
Example #5
                        type=str,
                        help="Dataset name",
                        choices=['voc', 'coco', 'linemod'])
    parser.add_argument(
        '--weights',
        default='darknet53.conv.74.weights',
        type=str,
        help=".weights file name (stored in checkpoint/darknet)")
    return parser.parse_args()


if __name__ == '__main__':
    args = parse_arg()
    weight_path = opj(config.CKPT_ROOT, 'darknet', args.weights)
    print("[LOG] Loading weights from", weight_path)
    model = YOLOv3(config.network[args.dataset]['cfg'], 416).cuda()

    if len(args.weights.split('.')) > 2:  # with cutoff
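        # e.g. 'darknet53.conv.74.weights' splits into four tokens; the
        # second-to-last token ('74') is interpreted as the layer cutoff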
        cutoff = int(args.weights.split('.')[-2])
        model.load_weights(weight_path, cutoff=cutoff)
        save_checkpoint(opj(config.CKPT_ROOT, args.dataset), 0, 0, {
            'epoch': 0,
            'iteration': 0,
            'state_dict': model.state_dict(),
        })
    else:  # pretrained
        model.load_weights(weight_path)
        save_checkpoint(opj(config.CKPT_ROOT, args.dataset), -1, -1, {
            'epoch': -1,
            'iteration': -1,
            'state_dict': model.state_dict(),
        })
Example #6
# TODO: Implement LATER

from model import YOLOv3
import utils
import tensorflow as tf

CFG_PATH = 'cfg/yolov3_tiny_traffic_inference.cfg'
DARKNET_WEIGHTS_PATH = '/home/diendl/tiny_model/yolov3_tiny_traffic_train_4000.weights'
LABELMAP = 'cfg/coco.names'
TRAIN_FILES = ['dataset_tools/train.tf']

model = YOLOv3(CFG_PATH)
graph = tf.Graph()

with graph.as_default():
    dataset = utils.create_dataset(TRAIN_FILES, model.BATCH, model.HEIGHT,
                                   model.WIDTH, model.CHANNELS)
    images_batch, dense_indices_batch, dense_bxs_batch, dense_bys_batch, dense_bws_batch, dense_bhs_batch \
        = utils.get_next_batch(dataset)

    inputs = tf.placeholder(
        dtype=tf.float32,
        name='input_images',
        shape=[model.BATCH, model.WIDTH, model.HEIGHT, model.CHANNELS])

    weights_list, predictions = model.forward(inputs)
    load_weights_ops = utils.load_darknet_weights(DARKNET_WEIGHTS_PATH,
                                                  weights_list)

    num_detections = [(item**2) * 3 for item in model.GRID_SIZES]
    num_detections = sum(num_detections)
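    # (Each grid cell predicts 3 anchor boxes, so the total above is the sum
    # of 3 * S^2 over all output grid sizes.)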
Example #7
        if epoch > 0 and epoch % 3 == 0:
            check_class_accuracy(model,
                                 test_loader,
                                 threshold=config.CONF_THRESHOLD)
            pred_boxes, true_boxes = get_evaluation_bboxes(
                test_loader,
                model,
                iou_threshold=config.NMS_IOU_THRESH,
                anchors=config.ANCHORS,
                threshold=config.CONF_THRESHOLD,
            )
            mapval = mean_average_precision(
                pred_boxes,
                true_boxes,
                iou_threshold=config.MAP_IOU_THRESH,
                box_format="midpoint",
                num_classes=config.NUM_CLASSES,
            )
            print(f"MAP: {mapval.item()}")
            model.train()


Example #8
if __name__ == "__main__":
    print("VOVA")
    pytorch_total_params = sum(
        p.numel() for p in YOLOv3(num_classes=config.NUM_CLASSES).parameters())

    print(pytorch_total_params)

    # main()
    import_param = {
        'batch_size': 16,
        'img_size': 320,
        'conf_thres': 0.5,
        'iou_thres': 0.5,
        'nms_thres': 0.1,
        'cfg_path':
        'D:\py_pro\YOLOv3-PyTorch\yolo_cfg\\' + model_name + '.cfg',
        'weights': 'D:\py_pro\YOLOv3-PyTorch\weights\\' + map_name +
        '\\yolov3_ep43-map82.67-loss0.15187.pt',
        'train_path':
        'D:\py_pro\YOLOv3-PyTorch\data\\' + map_name + '\\train.txt',
        'val_path': 'D:\py_pro\YOLOv3-PyTorch\data\\' + map_name + '\\val.txt',
        'prune_num': 16,  # the standard YOLOv3 network has 23 residual blocks; this sets how many to prune
    }
    model = YOLOv3(import_param['cfg_path']).cuda()
    model.load_state_dict(torch.load(import_param['weights']))

    precision, recall, before_AP, f1, ap_class = evaluate(
        model,
        path=import_param['val_path'],
        iou_thres=import_param['iou_thres'],
        conf_thres=import_param['conf_thres'],
        nms_thres=import_param['nms_thres'],
        img_size=import_param['img_size'],
        batch_size=import_param['batch_size'],
    )
    # Total number of model parameters before pruning
    before_parameters = sum([param.nelement() for param in model.parameters()])
    print(f'mAP after sparsity training: {before_AP.mean():.4f}')
Example #9
parser = argparse.ArgumentParser("Training options for the YOLOv3 model.")

parser.add_argument("--anchors_file", type=str, default='./data/anchors.txt')
parser.add_argument("--num_classes", type=int, default=1000)
parser.add_argument("--train_file", type=str, default='./data/train.npz')
parser.add_argument("--epochs", type=int, default=50)
parser.add_argument("--batch_size", type=int, default=1)
parser.add_argument("--lr", type=float, default=0.001)
parser.add_argument("--lr_decay", type=float, default=0.0001)
parser.add_argument("--shuffle", type=bool, default=True)
parser.add_argument("--repeat", type=int, default=100)
parser.add_argument("--snapshots", type=str, default='./snapshots/')

args = parser.parse_args()

yolov3 = YOLOv3(
    anchors_file=args.anchors_file,
    num_classes=args.num_classes,
    train_file=args.train_file,
    epochs=args.epochs,
    batch_size=args.batch_size,
    lr=args.lr,
    lr_decay=args.lr_decay,
    shuffle=args.shuffle,
    repeat=args.repeat,
    snapshots=args.snapshots
)

if __name__ == '__main__':
    yolov3.train()
Example #10
    return loss['total'], np.mean(mAPs)


if __name__ == '__main__':
    # 1. Parsing arguments
    print(colored("\n==>", 'blue'), emojify("Parsing arguments :zap:\n"))
    assert args.reso % 32 == 0, emojify(
        "Resolution must be an integer multiple of 32 :shit:")
    for arg in vars(args):
        print(arg, ':', getattr(args, arg))
    print("log_dir :", log_dir)

    # 2. Loading network
    # TODO: resume tensorboard
    print(colored("\n==>", 'blue'), emojify("Loading network :hourglass:\n"))
    yolo = YOLOv3(cfg, args.reso).cuda()
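    # args.checkpoint is expected to have the form '<epoch>.<iteration>', so
    # splitting on '.' recovers where training left off.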
    start_epoch, start_iteration = args.checkpoint.split('.')
    start_epoch, start_iteration, state_dict = load_checkpoint(
        opj(config.CKPT_ROOT, args.dataset), int(start_epoch),
        int(start_iteration))
    yolo.load_state_dict(state_dict)
    print("Model starts training from epoch %d iteration %d" %
          (start_epoch, start_iteration))

    # 3. Preparing data
    print(colored("\n==>", 'blue'), emojify("Preparing data :coffee:\n"))
    train_img_datasets, train_dataloader = prepare_train_dataset(
        args.dataset, args.reso, args.batch)
    val_img_datasets, val_dataloader = prepare_val_dataset(
        args.dataset, args.reso, args.batch)
    print("Number of training images:", len(train_img_datasets))
Example #11
#     cv2.destroyAllWindows()
#     cap.release()


if __name__ == "__main__":
    args = parse_arguments()

    if args.save_path:
        if not os.path.exists(args.save_path):
            os.mkdir(args.save_path)

    if args.source == "0":
        source = 0
    else:
        if not os.path.isfile(args.source):
            raise IOError("Provided file is not a video")
        source = args.source

    skip_frame = args.skip
    assert 0 <= skip_frame <= 100, "Wrong number of SKIP frames provided"

    # Initialize predicting nets
    poles_net = YOLOv3()
    components_net = YOLOv3()
    pole_detector = PolesDetector(predicting_net=poles_net)
    components_detector = ComponentsDetector(predicting_net=components_net)

    results_manager = ResultsManager()

    main()
    #main_multiprocessing()
Example #12
        global_step = batch_idx + epoch * len(trainloader)

        optimizer.zero_grad()
        inputs = inputs.cuda()
        loss = yolo(inputs, targets)
        log(writer, 'training loss', loss, global_step)
        loss['total'].backward()
        optimizer.step()
        # tbar.set_postfix(loss=loss['total'])


if __name__ == '__main__':
    # Loading network
    # TODO: resume tensorboard
    print("[LOG] Loading network and data")
    yolo = YOLOv3(cfg, args.reso)
    start_epoch, start_iteration = args.ckpt.split('.')
    start_epoch, start_iteration, state_dict = load_checkpoint(
        opj(config.CKPT_ROOT, args.dataset), int(start_epoch),
        int(start_iteration))
    yolo.load_state_dict(state_dict)
    yolo = yolo.cuda()

    # Preparing data
    train_img_datasets, train_dataloader = prepare_train_dataset(args.dataset,
                                                                 args.reso,
                                                                 args.bs,
                                                                 seq=args.seq)
    val_img_datasets, val_dataloder = prepare_val_dataset(args.dataset,
                                                          args.reso,
                                                          args.bs,
                                                          seq=args.seq)
Example #13
import tensorflow as tf
import numpy as np
import cv2

from model import YOLOv3

yolov3 = YOLOv3(anchors_file='./data/yolo_anchors.txt',
                num_classes=80,
                train_file='./data/train.npz',
                epochs=50,
                batch_size=1,
                lr=0.0001,
                lr_decay=0.000001,
                shuffle=True,
                repeat=1,
                snapshots='')

model_file = 'model.h5'
model = tf.keras.models.load_model(model_file)

img_file = './data/timg.jpg'
img = cv2.imread(img_file)
img = cv2.resize(img, (416, 416)) / 255.0
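# Note: cv2.imread returns BGR channel order; depending on how the model was
# trained, a cv2.cvtColor(img, cv2.COLOR_BGR2RGB) conversion may be needed here.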
inputs = np.expand_dims(img, 0)

yolo_outputs = model.predict(inputs)

boxes, scores, classes = yolov3.inference(yolo_outputs, image_shape=[416, 416])

print(boxes)
print(scores)
Example #14
from utils.util import *
from model import YOLOv3
from torch.utils.data import DataLoader
from datasets import *
from prune.prune_tool import parse_blocks_normal,  parse_blocks_layer, parse_blocks_slim, updateBN
from config import cfg
from test import evaluate
import visdom
from terminaltables import AsciiTable

if __name__ == "__main__":
    model = YOLOv3(cfg.cfg_path).cuda()
    if cfg.pretrained:
        model.load_state_dict(torch.load(cfg.weights_path))
    else:
        # Randomly initialize the weights (Gaussian initialization)
        model.apply(weights_init_normal)
    prune_set = {
        1: parse_blocks_normal(model.blocks),  # channel pruning
        2: parse_blocks_layer(model.blocks),   # layer pruning
        3: parse_blocks_slim(model.blocks),    # slim pruning
    }
    _, _, prune_idx = prune_set[cfg.pruned_id]

    # Set the network input image size and the learning rate
    reso = int(cfg.input_h)
    lr = float(cfg.lr)

    assert reso % 32 == 0  # raise an error if the input size is not a multiple of 32
    assert reso > 32  # also raise an error if the input size is not larger than 32
Example #15
def main():
    """
    Setup the model, loss function, data loader. Run the train function
    for each epoch.

    We may want to consider tuning the thresholds for non-max suppression and
    mean average precision in order to remove false positives, i.e. incorrect
    output boxes. We can get rid of them by adjusting these parameters.
    """
    model = YOLOv3(num_classes=config.NUM_CLASSES).to(config.DEVICE)
    optimizer = optim.Adam(model.parameters(),
                           lr=config.LEARNING_RATE,
                           weight_decay=config.WEIGHT_DECAY)
    loss_fn = YoloLoss()
    scaler = torch.cuda.amp.GradScaler()

    train_loader, test_loader, train_eval_loader = get_loaders(
        train_csv_path=config.DATASET + "/train.csv",
        test_csv_path=config.DATASET + "/test.csv")

    if config.LOAD_MODEL:
        load_checkpoint(config.CHECKPOINT_FILE, model, optimizer,
                        config.LEARNING_RATE)

    scaled_anchors = (torch.tensor(config.ANCHORS) * torch.tensor(
        config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)).to(config.DEVICE)

    for epoch in range(config.NUM_EPOCHS):
        # plot_couple_examples(model, test_loader, 0.6, 0.5, scaled_anchors)
        train_fn(train_loader, model, optimizer, loss_fn, scaler,
                 scaled_anchors)

        if config.SAVE_MODEL:
            save_checkpoint(model, optimizer, filename=f"checkpoint.pth.tar")

        # print(f"Currently epoch {epoch}")
        # print("On Train Eval loader:")
        # print("On Train loader:")
        # check_class_accuracy(model, train_loader, threshold=config.CONF_THRESHOLD)

        if epoch > 0 and epoch % 3 == 0:
            check_class_accuracy(model,
                                 test_loader,
                                 threshold=config.CONF_THRESHOLD)
            pred_boxes, true_boxes = get_evaluation_bboxes(
                test_loader,
                model,
                iou_threshold=config.NMS_IOU_THRESH,
                anchors=config.ANCHORS,
                threshold=config.CONF_THRESHOLD,
            )
            mapval = mean_average_precision(
                pred_boxes,
                true_boxes,
                iou_threshold=config.MAP_IOU_THRESH,
                box_format="midpoint",
                num_classes=config.NUM_CLASSES,
            )
            print(f"MAP: {mapval.item()}")
            model.train()