Example #1
def box_gen(folder, images):
    model = YOLO()
    for path in images:
        print(path)
        path = os.path.join(folder, path)
        img = cv2.imread(path)

        boxes = model.predict(img)
        yield img, boxes
Example #2
def _main_():

    if tf.test.gpu_device_name():
        print('>>>>> USING GPU: Default GPU Device: {}'.format(
            tf.test.gpu_device_name()))
    else:
        print(">>>>> Please install GPU version of TF")

    with open(CONFIG_FILE) as config_buffer:
        config = json.loads(config_buffer.read())

    ################################
    # Load data info
    ################################
    print('>>>>> Loading the annotation data')
    train_data_infos = parse_input_data(
        image_folder=Path(config['train']['train_images_folder']),
        annotation_folder=Path(config['train']['train_annotations_folder']),
        annotation_extension=config['train']['annotations_format_extension'],
        image_extension=config['train']['image_format_extension'])

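    # hold out a fraction of the annotated training data as a validation split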
    train_dataset, validation_dataset = train_test_split(
        train_data_infos,
        test_size=config['train']['validation_dataset_ratio'])

    ################################
    # Make and train model
    ################################
    print('>>>>> Creating model')
    yolo = YOLO(input_size=tuple(config['model']['input_size']),
                grid_size=int(config['model']['grid_size']),
                bbox_count=int(config['model']['bboxes_per_grid_cell']),
                classes=config['model']['class_names'],
                lambda_coord=config['model']['lambda_coord'],
                lambda_noobj=config['model']['lambda_noobj'],
                bbox_params=config['model']['bbox_params'])

    print('>>>>> Starting the training process')
    yolo.train_gen(training_infos=train_dataset,
                   validation_infos=validation_dataset,
                   save_model_path=config['train']['model_path'],
                   batch_size=config['train']['batch_size'],
                   nb_epochs=config['train']['nb_epochs'],
                   learning_rate=config['train']['learning_rate'],
                   use_pretrained_model=bool(
                       config['train']['use_pretrained_model']),
                   model_name=config['train']['model_name'],
                   steps_per_epoch=config['train']['steps_per_epoch'])
Example #3
def _main_():
    with open(CONFIG_FILE) as config_buffer:
        config = json.loads(config_buffer.read())

    ################################
    # Load data info
    ################################
    train_data_infos = parse_input_data(
        image_folder=Path(config['train']['train_images_folder']),
        annotation_folder=Path(config['train']['train_annotations_folder']),
        annotation_extension=config['train']['annotations_format_extension'],
        image_extension=config['train']['image_format_extension'])

    validation_data_infos = parse_input_data(
        image_folder=Path(config['train']['validation_images_folder']),
        annotation_folder=Path(
            config['train']['validation_annotations_folder']),
        annotation_extension=config['train']['annotations_format_extension'],
        image_extension=config['train']['image_format_extension'])

    ################################
    # Make and train model
    ################################
    yolo = YOLO(input_size=tuple(config['model']['input_size']),
                grid_size=int(config['model']['grid_size']),
                bbox_count=int(config['model']['bboxes_per_grid_cell']),
                classes=config['model']['class_names'],
                lambda_coord=config['model']['lambda_coord'],
                lambda_noobj=config['model']['lambda_noobj'],
                bbox_params=config['model']['bbox_params'])

    yolo.train_gen(training_infos=train_data_infos,
                   validation_infos=validation_data_infos,
                   save_weights_path=config['train']['trained_weights_path'],
                   batch_size=config['train']['batch_size'],
                   nb_epochs=config['train']['nb_epochs'],
                   learning_rate=config['train']['learning_rate'])
Example #4
def _main_(args):
    config_path = args.conf
    weights_path = args.weights
    image_path = args.input

    with open(config_path) as config_buffer:
        config = json.load(config_buffer)

    yolo = YOLO(backend=config['model']['architecture'],
                input_size=config['model']['input_size'],
                labels=config['model']['labels'],
                max_box_per_image=config['model']['max_box_per_image'],
                anchors=config['model']['anchors'])

    yolo.load_weights(weights_path)

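    # video input (.mp4): detect objects frame by frame and write an annotated copy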
    if image_path[-4:] == '.mp4':
        video_out = image_path[:-4] + '_detected' + image_path[-4:]
        video_reader = cv2.VideoCapture(image_path)

        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))

        video_writer = cv2.VideoWriter(video_out,
                                       cv2.VideoWriter_fourcc(*'MPEG'), 50.0,
                                       (frame_w, frame_h))

        for i in tqdm(range(nb_frames)):
            _, image = video_reader.read()

            boxes = yolo.predict(image)
            image = draw_boxes(image, boxes, config['model']['labels'])
            video_writer.write(np.uint8(image))
        video_reader.release()
        video_writer.release()
    else:
        image = cv2.imread(image_path)
        boxes = yolo.predict(image)
        image = draw_boxes(image, boxes, config['model']['labels'])

        print(len(boxes), 'boxes are found')
        cv2.imwrite(image_path[:-4] + '_result' + image_path[-4:], image)
Example #5
def predict(img):
    print("######################")
    img_resized = cv2.resize(img, (448, 448))
    img_RGB = cv2.cvtColor(img_resized, cv2.COLOR_BGR2RGB)
    img_resized_np = np.asarray(img_RGB)
    inputs = np.zeros((1, 448, 448, 3), dtype='float32')
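    # scale pixel values from [0, 255] to [-1, 1]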
    inputs[0] = (img_resized_np / 255.0) * 2.0 - 1.0
    with tf.Graph().as_default():
        input = tf.placeholder('float32', [None, 448, 448, 3])
        model = YOLO()
        model.build_model(input=input)
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            saver.restore(sess, weights_file)
            in_dict = {input: inputs, model.keep_prob: 1.0}
            net_output = sess.run(model.fc_32, feed_dict=in_dict)
            result = model.process_output(net_output[0])
            labels, results = model.show_result(img=img_resized,
                                                results=result)

    return labels, results
Example #6
def load_model(modules):
    model = YOLO(modules)
    model.to(DEVICE)
    model.load_weights('./weights/yolov3.weights')
    return model
Example #7
import argparse
import os
from glob import glob

os.environ["CUDA_VISIBLE_DEVICES"] = '0'
result_dir = './evaluate/result_evaluate/'
pred_dir = './evaluate/predicted/'
gt_dir = './evaluate/ground_truth/'
if not os.path.exists(pred_dir):
    os.mkdir(pred_dir)

parser = argparse.ArgumentParser()
parser.add_argument('--image_dir',
                    type=str,
                    help="give dataset dir path by '--image_dir' !!!")
parser.add_argument(
    '--pred_csv',
    type=str,
    help="give prediction output csv filename by '--pred_csv' !!!")
args = parser.parse_args()

if __name__ == '__main__':
    image_paths_list = glob(args.image_dir + '*')
    print('Find {} images.'.format(len(image_paths_list)))

    yolo = YOLO()
    yolo.detect_on_set(image_paths_list=image_paths_list,
                       output_csv_name=args.pred_csv,
                       conf_threshold=0.99,
                       object='person',
                       save_animation=True)
    yolo.close_session()
Example #8
from timeit import default_timer as timer
import numpy as np
from PIL import Image
from model import yolo_eval, YOLO
import cv2
import os
os.environ["CUDA_VISIBLE_DEVICES"] = '0,1'



if __name__ == '__main__':
    video_path = 'path2your-video'
    yolo = YOLO()
    vid = cv2.VideoCapture(video_path)
    if not vid.isOpened():
        raise IOError("Couldn't open webcam or video")
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
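    # track per-frame processing time to estimate FPS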
    while True:
        return_value, frame = vid.read()
        image = Image.fromarray(frame)
        image = yolo.detect_image(image)
        result = np.asarray(image)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        # overlay the FPS readout and display the annotated frame
        cv2.putText(result, text=fps, org=(3, 15),
                    fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        cv2.imshow("result", result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
Example #9
    # optim.load_state_dict(checkpoint['optim_state_dict'])

    print("loaded checkpoint: ", path)


train_dataloader = DataLoader(train_data,
                              batch_size=bs,
                              shuffle=False,
                              num_workers=0)
valid_dataloader = DataLoader(valid_data,
                              batch_size=bs,
                              shuffle=False,
                              num_workers=0)

# show_boxes(image, boxes)
yolo = YOLO(4, 2)
loss_fn = YOLOLoss(classes, 2)

optim = torch.optim.SGD(yolo.parameters(), lr=0.001, momentum=0.9)

# checkpoint_path = "checkpoints/test2.checkpoint"
# yolo.load_state_dict(torch.load(checkpoint_path))
# print("loading checkpoint")

for epoch in range(epochs):
    print("epoch: ", epoch)

    # validation set eval
    with torch.no_grad():
        valid_loss = 0.
        for i_batch, sample_batched in enumerate(valid_dataloader):
            # assumed loop body: run the model and accumulate the validation loss
            output = yolo(sample_batched['image'].float())
            valid_loss += float(loss_fn.forward(output, sample_batched['boxes']))
Example #10
from constants import CONFIG_FILE
import json
from keras.models import Model, load_model
import cv2
import os
from model import YOLO

if __name__ == '__main__':

    with open(CONFIG_FILE) as config_buffer:
        config = json.loads(config_buffer.read())

    image_path = os.path.expanduser(config["predict"]["image_path"])
    model_path = os.path.expanduser(config["predict"]["model_path"])
    output_file_path = os.path.expanduser(config["predict"]["output"])

    print('>>>>> Creating YOLO object')
    yolo = YOLO(input_size=tuple(config['model']['input_size']),
                grid_size=int(config['model']['grid_size']),
                bbox_count=int(config['model']['bboxes_per_grid_cell']),
                classes=config['model']['class_names'],
                lambda_coord=config['model']['lambda_coord'],
                lambda_noobj=config['model']['lambda_noobj'],
                bbox_params=config['model']['bbox_params'])

    if os.path.isfile(image_path) and os.path.isfile(model_path):
        yolo.predict(image_path, model_path, output_file_path)

    else:
        print('Path to image or model does not exist...')
Example #11
            tars[..., j] = tars[..., j] / num_classes
        yield imgs, tars


device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
"""params"""
# anchors as unit of pixel
anchors = np.array([[10, 13], [16, 30], [33, 23], [30, 61], [62, 45],
                    [59, 119], [116, 90], [156, 198], [373, 326]])
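# anchor indices assigned to each detection scale (largest anchors on the coarsest grid)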
anchor_mask = np.array([[6, 7, 8], [3, 4, 5], [0, 1, 2]])
image_size = (416, 416)  # default input image size
# transform into unit of image
anchors = anchors / np.asarray(image_size).reshape(1, 2)
num_classes = 80
"""train"""
net = YOLO()
net.to(device)
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
loss_fn = yolo_loss

for epoch in range(10):
    loss = 0.
    num_of_batch = 20
    for imgs, tars in create_fake_data(image_size, num_of_batch=num_of_batch):
        imgs = imgs.to(device)
        feats = net(imgs)
        loss_ = loss_fn(feats, tars, anchors, anchor_mask, device, image_size)
        loss += loss_.item()
        optimizer.zero_grad()
        loss_.backward()
        optimizer.step()
Example #12
def train(args):
    print('Batch size is {}'.format(args.batch_size))
    vgg = models.vgg16(pretrained=True)  # ImageNet-pretrained VGG16 backbone
    model = YOLO(vgg.features)
    if args.use_cuda:
        model = torch.nn.DataParallel(model)
        model.cuda()

    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    best = 1e+30

    for epoch in range(1, args.epochs + 1):
        l = train_epoch(epoch, model, optimizer, args)

        upperleft, bottomright, classes, confs = test_epoch(
            model, jpg='../data/1.jpg')
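        # keep the checkpoint flagged as best whenever the epoch's training loss improves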
        is_best = l < best
        best = min(l, best)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
            }, is_best)
    checkpoint = torch.load('./model_best.pth.tar')
    state_dict = checkpoint['state_dict']

    new_state_dict = OrderedDict()

    for k, v in state_dict.items():
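        # drop the 'module.' prefix that DataParallel prepends to parameter names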
        name = k[7:]
        new_state_dict[name] = v

    model.load_state_dict(new_state_dict)
    model.cpu()

    torch.save(model.state_dict(), 'model_cpu.pth.tar')
Example #13
size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
        int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

out = None
if args.save_video:
    codecs = 'H264'
    fourcc = cv2.VideoWriter_fourcc(*codecs)
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    out = cv2.VideoWriter('out_' + os.path.basename(args.input), fourcc, fps,
                          size)

trace_boxes_database = TraceBoxesDatabase(size[0], size[1])

yolo = YOLO(model_path=args.model,
            anchors_path=args.anchors,
            model_image_size=(args.size, args.size),
            score=args.conf_threshold,
            iou=args.nms_threshold,
            trace_boxes_database=trace_boxes_database)

result = None

while True:

    return_value, frame = cap.read()

    if not return_value:
        trace_boxes_database.padding_boxes()
        # result = cv2.cvtColor(result, cv2.COLOR_RGB2HSV)
        for i, trace_box in enumerate(trace_boxes_database.trace_boxes):
            hue_value = (i * 70 % 180, 255, 255)
            color_value = (255, 255, 0)
Example #14
if __name__ == '__main__':
    from dataset import DarknetDataset
    from torchvision import transforms
    from transforms import PadToSquare, Rescale, SampleToYoloTensor
    from torch.utils.data import DataLoader

    from model import YOLO

    train_path = "data/train.txt"

    composed = transforms.Compose([PadToSquare(), Rescale(448), SampleToYoloTensor(7, 4)])
    image_dataset = DarknetDataset(train_path, transform=composed)

    dataloader = DataLoader(image_dataset, batch_size=2, shuffle=False, num_workers=4)

    classes = 4
    bboxes = 2
    net = YOLO(classes, bboxes)

    for i_batch, sample_batched in enumerate(dataloader):
        print(i_batch, sample_batched['image'].size(), sample_batched['boxes'].size())
        output = net(sample_batched['image'].float())
        print(output.shape)

        loss_fn = YOLOLoss(4, 2)
        loss = loss_fn.forward(output, sample_batched['boxes'])
        print(f"loss: {loss}")
        
        # check output tensor size, should be [batch_size, 7, 7, 14]
        break
Example #15
def find_ball(img):
    model = YOLO(w_file=WEIGHTS_FILE, c_file=CONFIG_FILE, n_file=NAMES_FILE)
    boxes = model.predict(img, 32)
    return boxes
Example #16
from torch.utils.data.dataloader import DataLoader
from torch.optim import SGD, Adam
from config import device, tc
from model import YOLO
from utils import *
import torch
import numpy as np

# MARK: - load data
cocoDataset = COCODataset(tc.imageDir,
                          tc.annFile,
                          fromInternet=not tc.imageDir)
dataLoader = DataLoader(cocoDataset, batch_size=tc.batchSize, shuffle=True)

# MARK: - train
model = YOLO().to(device)
if tc.preTrainedWeight:
    model.load_state_dict(torch.load(tc.preTrainedWeight, map_location=device))
    model.warmUpBatch = tc.warmUpBatches

optimizer = SGD(model.parameters(), lr=1e-3)
prevBestLoss = np.inf
batches = len(dataLoader)
logger = MetricsLogger()

model.train()
for epoch in range(tc.epochs):
    losses = []
    for batch, (x, y, z) in enumerate(dataLoader):
        x, y, z = x.to(device), y.to(device), z.to(device)
Example #17
im, image, image_size = utils.preprocess_image(image_path, input_shape)
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

anchors = np.array([
    [10, 13], [16, 30], [33, 23], [30, 61], [62, 45], 
    [59, 119], [116, 90], [156, 198], [373, 326]
], dtype=np.float32)
anchors = anchors / input_shape[0] # as unit of image

class_path = "./data/coco_classes.txt"
class_names = utils.get_classes(class_path)
num_cls = len(class_names)

"""load model"""
weightsfile = './weights/yolov3.weights'
net = YOLO()
net.to(device)
load_weights(net, weightsfile)
net.eval()

with torch.no_grad():
    image = image.to(device)
    feats = net(image)
    boxes_, scores_, classes_ = filter(
        feats,
        anchors,
        image_size,
        device,
        num_cls,
        threshold=0.4
    )