Example #1
def main(_argv):
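    # Let TensorFlow allocate GPU memory on demand instead of reserving it all at once.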
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    class_names = [c.strip() for c in open("data/classes.txt").readlines()]

    model = yolo_model(n_classes=len(class_names))
    latest_weights = tf.train.latest_checkpoint("checkpoints")
    model.load_weights(latest_weights).expect_partial()
    logging.info('weights loaded')

    with open("data/images/2B9 VASILIOK/15.maxresdefault.jpg", 'rb') as f:
        img_raw = tf.image.decode_image(f.read(), channels=3)

    img = tf.expand_dims(img_raw, 0)
    img = transform_img(img, 416)

    t1 = time.time()
    boxes, scores, classes, nums = model(img)
    t2 = time.time()
    logging.info('time: {}'.format(t2 - t1))

    logging.info('detections:')
    for i in range(nums[0]):
        logging.info('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                           np.array(scores[0][i]),
                                           np.array(boxes[0][i])))

    img = cv2.cvtColor(img_raw.numpy(), cv2.COLOR_RGB2BGR)
    img = draw_outputs(img, (boxes, scores, classes, nums), class_names)
    cv2.imwrite('./output.jpg', img)
    logging.info('output saved to: {}'.format('./output.jpg'))
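The helpers used above (yolo_model, transform_img, draw_outputs) come from the surrounding project and are not shown in this listing. For reference, a minimal sketch of a drawing helper in the spirit of draw_outputs, under the assumption that the detector returns boxes normalized to [0, 1] in (x1, y1, x2, y2) order (the function name here is illustrative):

import cv2
import numpy as np

def draw_outputs_sketch(img, outputs, class_names):
    # img is a BGR image of shape (H, W, 3); outputs is (boxes, scores, classes, nums).
    boxes, scores, classes, nums = outputs
    boxes, scores, classes, nums = boxes[0], scores[0], classes[0], int(nums[0])
    height, width = img.shape[0:2]
    for i in range(nums):
        x1, y1, x2, y2 = np.array(boxes[i], dtype=np.float32)
        p1 = (int(x1 * width), int(y1 * height))
        p2 = (int(x2 * width), int(y2 * height))
        img = cv2.rectangle(img, p1, p2, (255, 0, 0), 2)
        label = '{} {:.4f}'.format(class_names[int(classes[i])], float(scores[i]))
        img = cv2.putText(img, label, p1,
                          cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (0, 0, 255), 2)
    return img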
Example #2
def main(_argv):
    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    class_names = [c.strip() for c in open("data/classes.txt").readlines()]

    model = yolo_model(n_classes=len(class_names))
    weights = tf.train.latest_checkpoint("checkpoints")
    print(f"Loading weights from {weights}")
    model.load_weights(weights).expect_partial()
    logging.info('Weights loaded')

    model_save_path = 'model/latest'

    logging.info(f"Saving model to {model_save_path}")
    tf.saved_model.save(model, model_save_path)
    shutil.copy2("data/classes.txt", model_save_path + "/assets")

    logging.info(f"Loading model from {model_save_path}")
    model = tf.saved_model.load(model_save_path)
    infer = model.signatures[tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY]
    logging.info(infer.structured_outputs)
    logging.info(f"Model loaded")

    with open("data/images/2B9 VASILIOK/15.maxresdefault.jpg", 'rb') as f:
        img_raw = tf.image.decode_image(f.read(), channels=3)

    logging.info(f"Performing sanity check")
    img = tf.expand_dims(img_raw, 0)
    img = transform_img(img, 416)

    t1 = time.time()
    boxes, scores, classes, nums = model(img)
    t2 = time.time()
    logging.info('Inference time: {}'.format(t2 - t1))

    logging.info('Detections:')
    for i in range(nums[0]):
        logging.info('\t{}, {}, {}'.format(class_names[int(classes[0][i])],
                                           np.array(scores[0][i]),
                                           np.array(boxes[0][i])))

    logging.info(f"Sanity check complete")

    logging.info(f"Archiving saved model to model/latest.zip")
    shutil.make_archive("model/latest", "zip", model_save_path)

    print("Export completed.")
Example #3
                                        max_boxes=10,
                                        aug=False)
        val_generator = dataGenerator(img_dir,
                                      anno_dir,
                                      batch_size,
                                      target_size,
                                      anchors,
                                      n_classes,
                                      max_boxes=10,
                                      aug=False)

        # model
        model = yolo_model(anchors,
                           n_classes,
                           input_shape=input_shape,
                           lr=lr,
                           load_pretrained=True,
                           freeze_body=1,
                           weights_path='prep/yolo.h5')
        model.compile(Adam(1e-3),
                      loss=lambda y_true, y_pred: y_pred[0],
                      metrics=metric_lst)

        # train
        steps_per_epoch = 3000 // batch_size
        model.fit_generator(
            generator=train_generator,
            steps_per_epoch=steps_per_epoch,
            validation_data=val_generator,
            validation_steps=steps_per_epoch // 5,
            epochs=100,
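The compile call in this (truncated) snippet uses a common Keras trick for models that compute the YOLO loss inside the graph: the tensor handed to the loss function is already the precomputed loss, so the lambda simply passes it through and y_true is ignored. A toy illustration of the same pattern (the model below is illustrative, not from the project):

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, Model
from tensorflow.keras.optimizers import Adam

# The model's single output already *is* a per-sample "loss" value
# (here the mean squared activation), so the compiled loss returns it as-is.
inp = layers.Input(shape=(4,))
hidden = layers.Dense(8, activation="relu")(inp)
loss_out = layers.Lambda(
    lambda t: tf.reduce_mean(tf.square(t), axis=-1, keepdims=True))(hidden)
toy = Model(inp, loss_out)
toy.compile(Adam(1e-3), loss=lambda y_true, y_pred: y_pred)

x = np.random.rand(8, 4).astype("float32")
dummy_y = np.zeros((8, 1), dtype="float32")  # ignored by the lambda loss
toy.fit(x, dummy_y, epochs=1, verbose=0)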
Example #4
def main(_argv):
    class_names = [c.strip() for c in open("data/classes.txt").readlines()]

    physical_devices = tf.config.experimental.list_physical_devices('GPU')
    if len(physical_devices) > 0:
        tf.config.experimental.set_memory_growth(physical_devices[0], True)

    train_ds = load_dataset("data/images/military_train.tfrecord", "data/classes.txt", IMG_SIZE)
    train_ds = train_ds.batch(BATCH_SIZE)
    train_ds = train_ds.map(
        lambda x, y: (
            transform_img(x, IMG_SIZE),
            transform_targets(y, yolo_anchors, yolo_anchor_masks, IMG_SIZE),
        )
    )
    train_ds = train_ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

    val_ds = load_dataset("data/images/military_val.tfrecord", "data/classes.txt", IMG_SIZE)
    val_ds = val_ds.batch(BATCH_SIZE)
    val_ds = val_ds.map(
        lambda x, y: (
            transform_img(x, IMG_SIZE),
            transform_targets(y, yolo_anchors, yolo_anchor_masks, IMG_SIZE),
        )
    )

    model = yolo_model(
        size=IMG_SIZE,
        n_classes=len(class_names),
        training=True,
    )

    if FLAGS.finetune:
        latest_weights = tf.train.latest_checkpoint("checkpoints")

        if FLAGS.latest_weights:
            if latest_weights is None:
                raise RuntimeError("No checkpoint found in 'checkpoints'")
            else:
                model.load_weights(latest_weights)
                logging.info(f"Loaded weights from '{latest_weights}'")
        else:
            model.load_weights(FLAGS.weights)
            logging.info(f"Loaded weights from '{FLAGS.weights}'")
    else:
        pretrained = yolo_model(n_classes=80, training=True)
        pretrained.load_weights("yolo3_coco_ckp/yolov3.tf")

        model.get_layer("yolo_darknet").set_weights(
            pretrained.get_layer("yolo_darknet").get_weights()
        )

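    # Keep the transferred Darknet-53 backbone frozen so only the YOLO head is trained.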
    freeze_all(model.get_layer("yolo_darknet"))

    optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5)
    loss = [
        create_yolo_loss(yolo_anchors[mask], n_classes=len(class_names)) for mask in yolo_anchor_masks
    ]
    model.compile(optimizer=optimizer, loss=loss, run_eagerly=False)
    callbacks = [
        # tf.keras.callbacks.ReduceLROnPlateau(verbose=1, factor=0.3, patience=2),
        tf.keras.callbacks.EarlyStopping(patience=5, verbose=1),
        tf.keras.callbacks.ModelCheckpoint(
            "checkpoints/yolo_train_{epoch}.tf", verbose=1, save_weights_only=True, save_best_only=True
        ),
        TrainCycle(epochs=100, lr=(3e-5, 1e-6), batch_size=BATCH_SIZE, train_set_size=914),
    ]

    model.fit(
        train_ds, epochs=100, callbacks=callbacks, validation_data=val_ds, verbose=1
    )
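freeze_all is a project helper rather than a TensorFlow API. A minimal sketch of what such a helper typically looks like (recursively marking a layer, or a nested sub-model, as non-trainable):

import tensorflow as tf

def freeze_all(model, frozen=True):
    # Freeze the layer/model itself, then recurse into nested Keras models.
    model.trainable = not frozen
    if isinstance(model, tf.keras.Model):
        for layer in model.layers:
            freeze_all(layer, frozen)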
Example #5
def main():
    print("Inside the main function of main.py")

    config_file = open("config.json", "r")
    json_object = json.load(config_file)
    if (
        json_object["train_val_VOCdevkit_path"] == "None"
        or json_object["test_VOCdevkit_path"] == "None"
    ):
        print(
            "Please set the test and train dataset path correctly in the config.json file first."
        )
        return

    global train_vocdevkit
    global test_vocdevkit
    train_vocdevkit = str(json_object["train_val_VOCdevkit_path"])
    test_vocdevkit = str(json_object["test_VOCdevkit_path"])

    print(
        "Train VOCdevkit path set to {}\nTest VOCdevkit path set to {}".format(
            train_vocdevkit, test_vocdevkit
        )
    )

    parser = argparse.ArgumentParser(description="Build Annotations.")
    parser.add_argument("dir", default="..", help="Annotations.")

    for year, image_set in sets:
        print(train_vocdevkit)
        print(year, image_set)
        if image_set == "train":
            path_to_vocdevkit = train_vocdevkit
        elif image_set == "test":
            path_to_vocdevkit = test_vocdevkit
            print("path changed to test :{}".format(path_to_vocdevkit))
        with open(
            os.path.join(
                "{}/VOC{}/ImageSets/Main/{}.txt".format(
                    path_to_vocdevkit, year, image_set
                )
            ),
            "r",
        ) as f:
            image_ids = f.read().strip().split()
        with open(
            os.path.join("{}/{}_{}.txt".format(path_to_vocdevkit, year, image_set)), "w"
        ) as f:
            for image_id in image_ids:
                f.write(
                    "{}/VOC{}/JPEGImages/{}.jpg".format(
                        path_to_vocdevkit, year, image_id
                    )
                )
                convert_annotation(year, image_id, f, image_set)
                f.write("\n")

    # Preparing the input and output arrays
    print("Preparing inputs/outputs")
    train_datasets = []
    val_datasets = []
    test_datasets = []

    with open(os.path.join(train_vocdevkit, "2007_train.txt"), "r") as f:
        train_datasets = train_datasets + f.readlines()
    with open(os.path.join(train_vocdevkit, "2007_val.txt"), "r") as f:
        val_datasets = val_datasets + f.readlines()
    with open(os.path.join(test_vocdevkit, "2007_test.txt"), "r") as f:
        test_datasets = test_datasets + f.readlines()

    X_train = []
    Y_train = []

    X_val = []
    Y_val = []

    X_test = []
    Y_test = []

    for item in train_datasets:
        item = item.replace("\n", "").split(" ")
        X_train.append(item[0])
        arr = []
        for i in range(1, len(item)):
            arr.append(item[i])
        Y_train.append(arr)

    for item in val_datasets:
        item = item.replace("\n", "").split(" ")
        X_val.append(item[0])
        arr = []
        for i in range(1, len(item)):
            arr.append(item[i])
        Y_val.append(arr)

    for item in test_datasets:
        item = item.replace("\n", "").split(" ")
        X_test.append(item[0])
        arr = []
        for i in range(1, len(item)):
            arr.append(item[i])
        Y_test.append(arr)

    # Now let us create an instance of our custom generator for training and validation sets
    print("Genrating instance")
    batch_size = 4
    test_batch_size = 4

    # print("Y_train: {}".format(Y_train))

    my_training_batch_generator = My_Custom_Generator(X_train, Y_train, batch_size)

    my_validation_batch_generator = My_Custom_Generator(X_val, Y_val, batch_size)

    my_test_batch_generator = My_Custom_Generator(X_test, Y_test, test_batch_size)

    # return
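    # Pull one batch from each generator as a quick shape sanity check.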
    x_train, y_train = my_training_batch_generator[0]
    x_val, y_val = my_validation_batch_generator[0]
    x_test, y_test = my_test_batch_generator[0]

    print("Xtrain shape :{}".format(x_train.shape))
    print("Ytrain shape :{}".format(y_train.shape))
    print("Xval shape :{}".format(x_val.shape))
    print("Yval shape :{}".format(y_val.shape))
    print("Xtest shape :{}".format(x_test.shape))
    print("Ytest shape :{}".format(y_test.shape))

    # Getting the model ready
    print("Model making")
    model = yolo_model()
    mcp_save = ModelCheckpoint(
        "weight.hdf5", save_best_only=True, monitor="val_loss", mode="min"
    )
    model.compile(loss=yolo_loss, optimizer="adam")
    # Training the model
    model.fit(
        x=my_training_batch_generator,
        steps_per_epoch=int(len(X_train) // batch_size),
        epochs=135,
        verbose=1,
        workers=4,
        validation_data=my_validation_batch_generator,
        validation_steps=int(len(X_val) // batch_size),
        callbacks=[CustomLearningRateScheduler(lr_schedule), mcp_save],
    )
    # Training end
    y_pred = model.predict(x_test)
    np.save("results", y_pred)
    print("We are done training ! Results are stored in the file results.npy")