Example #1

def get_model(ie, args):
    input_transform = models.InputTransform(args.reverse_input_channels, args.mean_values, args.scale_values)
    common_args = (ie, args.model, input_transform)
    if args.architecture_type in ('ctpn', 'yolo', 'yolov4', 'retinaface',
                                  'retinaface-pytorch') and not input_transform.is_trivial:
        raise ValueError("{} model doesn't support input transforms.".format(args.architecture_type))

    if args.architecture_type == 'ssd':
        return models.SSD(*common_args, labels=args.labels, keep_aspect_ratio_resize=args.keep_aspect_ratio)
    elif args.architecture_type == 'ctpn':
        return models.CTPN(ie, args.model, input_size=args.input_size, threshold=args.prob_threshold)
    elif args.architecture_type == 'yolo':
        return models.YOLO(ie, args.model, labels=args.labels,
                           threshold=args.prob_threshold, keep_aspect_ratio=args.keep_aspect_ratio)
    elif args.architecture_type == 'yolov4':
        return models.YoloV4(ie, args.model, labels=args.labels,
                             threshold=args.prob_threshold, keep_aspect_ratio=args.keep_aspect_ratio,
                             anchors=args.anchors, masks=args.masks)
    elif args.architecture_type == 'faceboxes':
        return models.FaceBoxes(*common_args, threshold=args.prob_threshold)
    elif args.architecture_type == 'centernet':
        return models.CenterNet(*common_args, labels=args.labels, threshold=args.prob_threshold)
    elif args.architecture_type == 'retinaface':
        return models.RetinaFace(ie, args.model, threshold=args.prob_threshold)
    elif args.architecture_type == 'ultra_lightweight_face_detection':
        return models.UltraLightweightFaceDetection(*common_args, threshold=args.prob_threshold)
    elif args.architecture_type == 'retinaface-pytorch':
        return models.RetinaFacePyTorch(ie, args.model, threshold=args.prob_threshold)
    else:
        raise RuntimeError('No model type or invalid model type (-at) provided: {}'.format(args.architecture_type))
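
For context, a minimal sketch of how a factory function like this is typically driven from the command line. The flag names and defaults below are assumptions chosen to match the attributes read from `args` above, and IECore is assumed to come from openvino.inference_engine as in the Open Model Zoo demos.

# Hypothetical driver for get_model(); flag names/defaults are assumptions
# mirroring the attributes that get_model() reads from `args`.
import argparse

from openvino.inference_engine import IECore  # assumed OpenVINO import

def build_argparser():
    parser = argparse.ArgumentParser()
    parser.add_argument('-m', '--model', required=True)
    parser.add_argument('-at', '--architecture_type', default='ssd')
    parser.add_argument('--labels', default=None)
    parser.add_argument('--prob_threshold', type=float, default=0.5)
    parser.add_argument('--keep_aspect_ratio', action='store_true')
    parser.add_argument('--reverse_input_channels', action='store_true')
    parser.add_argument('--mean_values', nargs='+', type=float, default=None)
    parser.add_argument('--scale_values', nargs='+', type=float, default=None)
    return parser

if __name__ == '__main__':
    args = build_argparser().parse_args()
    model = get_model(IECore(), args)  # raises RuntimeError on an unknown -at value
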
Example #2

def get_model(ie, args):
    if args.architecture_type == 'ssd':
        return models.SSD(ie, args.model, labels=args.labels, keep_aspect_ratio_resize=args.keep_aspect_ratio)
    elif args.architecture_type == 'ctpn':
        return models.CTPN(ie, args.model, input_size=args.input_size, threshold=args.prob_threshold)
    elif args.architecture_type == 'yolo':
        return models.YOLO(ie, args.model, labels=args.labels,
                           threshold=args.prob_threshold, keep_aspect_ratio=args.keep_aspect_ratio)
    elif args.architecture_type == 'yolov4':
        return models.YoloV4(ie, args.model, labels=args.labels,
                             threshold=args.prob_threshold, keep_aspect_ratio=args.keep_aspect_ratio)
    elif args.architecture_type == 'faceboxes':
        return models.FaceBoxes(ie, args.model, threshold=args.prob_threshold)
    elif args.architecture_type == 'centernet':
        return models.CenterNet(ie, args.model, labels=args.labels, threshold=args.prob_threshold)
    elif args.architecture_type == 'retinaface':
        return models.RetinaFace(ie, args.model, threshold=args.prob_threshold)
    else:
        raise RuntimeError('No model type or invalid model type (-at) provided: {}'.format(args.architecture_type))
Example #3

# Optional TensorFlow GPU memory-growth setup, left commented out:
# config = ConfigProto()
# config.gpu_options.allow_growth = True
# session = InteractiveSession(config=config)

import loss
import config
import models
from tensorflow import keras
from generator import data_generator

class_mapping = dict(enumerate(config.classes_names))
class_mapping = {class_mapping[key]: key for key in class_mapping}

model_yolo = models.YOLO(pre_train=None)()

with open(config.label_path) as f:
    label_lines = f.readlines()

train_lines = label_lines[:-int(len(label_lines) * config.validation_split)]
valid_lines = label_lines[-int(len(label_lines) * config.validation_split):]

h, w = config.image_input_shape
y_true = [
    keras.layers.Input(shape=(h // config.scale_size[l],
                              w // config.scale_size[l], config.num_anchors,
                              config.num_classes + 5)) for l in range(3)
]

model_loss = keras.layers.Lambda(function=loss.yolo4_loss,
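
The snippet above is cut off mid-call. Below is a sketch of how such a Lambda loss layer is commonly completed and wrapped into a trainable Keras model; the exact keyword arguments expected by loss.yolo4_loss (anchors, class count, ignore threshold) are assumptions, not taken from the original file.

# Sketch of a typical completion of the truncated Lambda call above; the
# arguments passed to loss.yolo4_loss are assumptions.
model_loss = keras.layers.Lambda(
    function=loss.yolo4_loss,
    output_shape=(1,),
    name='yolo_loss',
    arguments={'anchors': config.anchors,
               'num_classes': config.num_classes,
               'ignore_thresh': 0.5})([*model_yolo.output, *y_true])

# Wrap detector + loss layer into one trainable model whose output is the loss.
model_train = keras.Model([model_yolo.input, *y_true], model_loss)
model_train.compile(optimizer=keras.optimizers.Adam(learning_rate=1e-3),
                    loss={'yolo_loss': lambda y_t, y_p: y_p})
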
Example #4

import os
import sys

import cv2 as cv
import numpy as np

import models

frame_width = 320
frame_height = 240
camera_width = 640
camera_height = 480

dir = os.path.dirname(__file__)

# Positional CLI arguments: model type, capture source, GUI flag,
# network input size, and maximum number of frames to process.
model = sys.argv[1]
cap_source = sys.argv[2]
gui = sys.argv[3]
input_size = int(sys.argv[4])
max_frames = int(sys.argv[5])

# load the model
if model == "yolo":
    net, blob_options, labels = models.YOLO()
elif model == "ssd":
    net, blob_options, labels = models.SSD()
elif model == "rcnn":
    net, blob_options, labels = models.FasterRCNN()

net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)
net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)

# Create label RGB values
np.random.seed(0)
COLORS = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")

if cap_source == "cam":
    # capture from camera
    cap = cv.VideoCapture(0)
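
The example is truncated after opening the capture source. Below is a sketch of the read/infer loop such scripts usually run; the blob preprocessing values, the GUI-flag check, and the output handling are assumptions rather than the original code.

# Sketch of a typical capture/inference loop (assumed, not from the original);
# preprocessing values and output decoding vary per network type.
show_gui = gui.lower() in ("1", "true", "yes")  # how `gui` is parsed is assumed
frames = 0
while frames < max_frames:
    ok, frame = cap.read()
    if not ok:
        break
    frames += 1

    blob = cv.dnn.blobFromImage(frame, scalefactor=1 / 255.0,
                                size=(input_size, input_size),
                                swapRB=True, crop=False)
    net.setInput(blob)
    outputs = net.forward(net.getUnconnectedOutLayersNames())

    # ... decode `outputs` into boxes per network type and draw them with the
    # per-class COLORS; decoding differs between YOLO, SSD and Faster R-CNN.

    if show_gui:
        cv.imshow("detections", frame)
        if cv.waitKey(1) & 0xFF == ord('q'):
            break

cap.release()
cv.destroyAllWindows()
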
Example #5

import config
import loss
import models
from tensorflow.keras.layers import Lambda, Input
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Model
from generator import data_generator
from tensorflow.keras.callbacks import *

anchors = config.anchors
class_names = config.classes_names
num_anchors = len(anchors)
num_classes = len(class_names)
class_mapping = dict(enumerate(class_names))
class_mapping = {class_mapping[key]: key for key in class_mapping}

model_yolo = models.YOLO()()
model_yolo.summary()

with open(config.label_path) as f:
    label_lines = f.readlines()

train_lines = label_lines[:-int(len(label_lines) * config.validation_split)]
valid_lines = label_lines[-int(len(label_lines) * config.validation_split):]

h, w = config.image_input_shape
y_true = [
    Input(shape=(h // config.scale_size[l], w // config.scale_size[l],
                 num_anchors // 3, num_classes + 5)) for l in range(3)
]

model_loss = Lambda(function=loss.yolo4_loss,
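
As in Example #3, the Lambda call is truncated here. Assuming it is completed the same way, the sketch below shows the training wiring that typically follows; the data_generator signature, batch size, and callback settings are all assumptions.

# Sketch only: assumes the truncated Lambda above produces `model_loss` named
# 'yolo_loss' as in the completion shown under Example #3. The data_generator
# signature, batch size and callbacks below are assumptions.
model = Model([model_yolo.input, *y_true], model_loss)
model.compile(optimizer=Adam(learning_rate=1e-3),
              loss={'yolo_loss': lambda y_t, y_p: y_p})

batch_size = 8  # assumed; usually taken from config
checkpoints = [
    ModelCheckpoint('model_train/ep{epoch:03d}-loss{loss:.3f}.h5',
                    monitor='val_loss', save_weights_only=True),
    ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=3),
    EarlyStopping(monitor='val_loss', patience=10),
]

model.fit(data_generator(train_lines, batch_size, config.image_input_shape,
                         anchors, num_classes),
          steps_per_epoch=max(1, len(train_lines) // batch_size),
          validation_data=data_generator(valid_lines, batch_size,
                                         config.image_input_shape,
                                         anchors, num_classes),
          validation_steps=max(1, len(valid_lines) // batch_size),
          epochs=50, callbacks=checkpoints)
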
Example #6

import argparse

import cv2 as cv
import numpy as np

import config
import eval
import models
import utils_image

# Command-line arguments: detector weights file and the image to run on.
parser = argparse.ArgumentParser()
parser.add_argument('--model', default=None)
parser.add_argument('--image', default='data/000030.jpg')

args = parser.parse_args()
model_file_path = args.model
image_file_path = args.image
class_file_path = config.classes_path

anchors = config.anchors
class_names = config.classes_names

num_anchors = len(anchors)
num_classes = len(class_names)
class_mapping = dict(enumerate(class_names))
colors = utils_image.get_random_colors(len(class_names))
class_mapping = {class_mapping[key]: key for key in class_mapping}
model = models.YOLO()()
model.load_weights('model_train/model_train_final.weights')

image = cv.imread(image_file_path)
image = cv.cvtColor(image, cv.COLOR_BGR2RGB)

new_image = utils_image.resize_image(image, config.image_input_shape)

new_image = np.array(new_image, dtype='float32')
new_image /= 255.
new_image = np.expand_dims(new_image, 0)
feats = model.predict(new_image)

boxes, scores, classes = eval.yolo_eval(feats, anchors, len(class_names),
                                        (image.shape[0], image.shape[1]))
out_boxes, out_scores, out_classes = boxes[:5], scores[:5], classes[:5]
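
Finally, a sketch of how the filtered detections could be rendered back onto the image; the (y_min, x_min, y_max, x_max) box layout returned by eval.yolo_eval and the colour format produced by utils_image.get_random_colors are assumptions.

# Sketch only: box layout (y_min, x_min, y_max, x_max) and colour format are
# assumptions about eval.yolo_eval and utils_image.get_random_colors.
image_bgr = cv.cvtColor(image, cv.COLOR_RGB2BGR)
for box, score, cls in zip(out_boxes, out_scores, out_classes):
    y_min, x_min, y_max, x_max = (int(v) for v in box)
    color = tuple(int(c) for c in colors[int(cls)])
    caption = '{} {:.2f}'.format(class_names[int(cls)], float(score))
    cv.rectangle(image_bgr, (x_min, y_min), (x_max, y_max), color, 2)
    cv.putText(image_bgr, caption, (x_min, max(0, y_min - 5)),
               cv.FONT_HERSHEY_SIMPLEX, 0.5, color, 1)

cv.imshow('detections', image_bgr)
cv.waitKey(0)
cv.destroyAllWindows()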