Example #1
def train(epochs, start_epoch, learning_rate, tensorboard_dir, checkpoint,
          num_heatmap, batch_size, train_tfrecords, val_tfrecords, version):
    strategy = tf.distribute.MirroredStrategy()
    global_batch_size = strategy.num_replicas_in_sync * batch_size
    train_dataset = create_dataset(train_tfrecords, global_batch_size, num_heatmap, is_train=True)
    val_dataset = create_dataset(val_tfrecords, global_batch_size, num_heatmap, is_train=False)

    # create the model output directory if it does not exist yet
    os.makedirs(MODEL_NAME, exist_ok=True)

    with strategy.scope():
        train_dist_dataset = strategy.experimental_distribute_dataset(train_dataset)
        val_dist_dataset = strategy.experimental_distribute_dataset(val_dataset)

        model = StackedHourglassNetwork(IMAGE_SHAPE, 2, 1, num_heatmap)
        model.summary()

        if checkpoint and os.path.exists(checkpoint):
            model.load_weights(checkpoint)

        trainer = Trainer(
            model,
            epochs,
            global_batch_size,
            strategy,
            initial_learning_rate=learning_rate,
            start_epoch=start_epoch,
            version=version,
            tensorboard_dir=tensorboard_dir)

        print('Start training...')
        return trainer.run(train_dist_dataset, val_dist_dataset)
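
create_dataset is not defined in this example. The sketch below is a hypothetical version that parses TFRecords into (image, heatmap) pairs; the feature keys image_raw and heatmap, the 512x512 input size, and the 128x128 heatmap resolution are assumptions, not the original schema.

import tensorflow as tf

def create_dataset(tfrecords, batch_size, num_heatmap, is_train):
    # assumed TFRecord schema: a JPEG-encoded image and a serialized float heatmap
    feature_spec = {
        'image_raw': tf.io.FixedLenFeature([], tf.string),
        'heatmap': tf.io.FixedLenFeature([], tf.string),
    }

    def parse(serialized):
        example = tf.io.parse_single_example(serialized, feature_spec)
        image = tf.cast(tf.io.decode_jpeg(example['image_raw'], channels=3), tf.float32)
        image = tf.image.resize(image, (512, 512)) / 127.5 - 1     # assumed input size
        heatmap = tf.io.parse_tensor(example['heatmap'], tf.float32)
        heatmap = tf.reshape(heatmap, (128, 128, num_heatmap))     # assumed heatmap size
        return image, heatmap

    dataset = tf.data.TFRecordDataset(tf.io.gfile.glob(tfrecords))
    dataset = dataset.map(parse, num_parallel_calls=tf.data.AUTOTUNE)
    if is_train:
        dataset = dataset.shuffle(1024)
    # batch with the global batch size; MirroredStrategy splits it across replicas
    return dataset.batch(batch_size).prefetch(tf.data.AUTOTUNE)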
Example #2
        plt.imshow(rgba, interpolation='nearest')
        plt.show()

    results['detected_gt'] += len(detected_ground_true)
    results['total_gt'] += gt_points.shape[0]
    results["total_det"] += detections.shape[0]
    return results


if __name__ == "__main__":

    # os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

    # load the model
    model = StackedHourglassNetwork(input_shape=IMG_INPUT_SHAPE,
                                    num_stack=NUM_STACK,
                                    num_residual=1,
                                    num_heatmap=NUM_HEATMAP)
    model.load_weights(WEIGHTS_PATH)

    image_paths = glob.glob(os.path.join(FOLDER, '*.jpg'))[:20]  # limit the number of images processed
    data = pd.read_csv(CSV_PATH)
    classes = np.unique(data["class"].values).tolist()
    results_hrany = {
        "correct_points": 0,
        "incorrect_points": 0,
        "total_det": 0,
        "total_gt": 0,
        "detected_gt": 0,
        'perc_valid': 0,
        'perc_detected': 0
    }
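
The perc_valid and perc_detected entries are initialized above but never filled in the visible part of the snippet. A minimal sketch of how they could be derived from the accumulated counters (this post-processing step is an assumption, not the original code):

def finalize_results(results):
    # hypothetical helper: turn the raw counters into percentages
    judged = results['correct_points'] + results['incorrect_points']
    if judged:
        results['perc_valid'] = 100.0 * results['correct_points'] / judged
    if results['total_gt']:
        results['perc_detected'] = 100.0 * results['detected_gt'] / results['total_gt']
    return results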

Example #3
        rect = [[x_min, y_min], [x_min, y_max], [x_max, y_max], [x_max, y_min]]

        htm_sep = np.zeros_like(heatmap)
        htm_sep[y_min:y_max, x_min:x_max, :] = heatmap[y_min:y_max,
                                                       x_min:x_max, :]

        yield htm_sep, rect


if __name__ == '__main__':
    from hourglass import StackedHourglassNetwork
    from visualizer import *

    # obtain the heatmap from the model
    model = StackedHourglassNetwork(input_shape=(512, 512, 3),
                                    num_stack=2,
                                    num_residual=1,
                                    num_heatmap=8)
    model.load_weights('./MODEL_4.2.2/model-v0.0.1-epoch-37-loss-0.4046.h5')
    image = tf.io.decode_jpeg(tf.io.read_file('../img/hrany_rohy/0535.jpg'))
    inputs = tf.cast(tf.image.resize(image,
                                     (512, 512)), tf.float32) / 127.5 - 1
    inputs = tf.expand_dims(inputs, 0)
    heatmap = model(inputs, training=True)[-1][0].numpy()
    dwnsz = image.numpy().astype(int)

    # quick test of the generator
    for htm, rect in separate_boxes(heatmap, dwnsz=dwnsz):

        plt.imshow(dwnsz)
        xmin, xmax = plt.xlim()
        ymin, ymax = plt.ylim()
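
The loop above visualizes the image and each separated box. A common follow-up step (not shown in the snippet, included here only as a generic sketch) is to read one keypoint per channel out of a heatmap crop via an argmax; the 0.3 confidence threshold is an arbitrary assumption.

import numpy as np

def heatmap_to_keypoints(htm, threshold=0.3):
    # htm: (H, W, num_heatmap); returns one (x, y, score) per channel,
    # or None for channels whose peak response stays below the threshold
    keypoints = []
    for c in range(htm.shape[-1]):
        channel = htm[..., c]
        y, x = np.unravel_index(np.argmax(channel), channel.shape)
        score = float(channel[y, x])
        keypoints.append((int(x), int(y), score) if score >= threshold else None)
    return keypoints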
Example #4
def run_training():

    callbacks = [
        tf.keras.callbacks.LearningRateScheduler(scheduler),
        tf.keras.callbacks.ModelCheckpoint(
            data_folder + 'models/weights.40k_{epoch:02d}.hdf5',
            monitor="loss",
            save_weights_only=True,
            verbose=1,
            save_best_only=True,
            mode="auto",
            save_freq="epoch",
            #), gifCallback()]
        )
    ]

    # using RMSprop optimizer
    optimizer = tf.keras.optimizers.RMSprop(2.5e-4)

    # create the stacked hourglass model with the given number
    # of stacked hourglass modules
    # code source: https://github.com/ethanyanjiali/deep-vision/tree/master/Hourglass/tensorflow
    model = StackedHourglassNetwork(num_stack=2)
    model.compile(optimizer=optimizer, loss=adaptive_wing_loss)

    # from each of the generators create a pair of interleaved datasets
    # tensorflow can automatically multiprocess interleaved datasets
    # so that batches can be loaded and processed ahead of time
    interleaved_train = tf.data.Dataset.range(2).interleave(
        get_train_dataset, num_parallel_calls=tf.data.experimental.AUTOTUNE)
    interleaved_val = tf.data.Dataset.range(2).interleave(
        get_val_dataset, num_parallel_calls=tf.data.experimental.AUTOTUNE)

    # load training and validation filenames
    train_ids, val_ids = load_filenames()
    # random.shuffle(train_ids)
    # random.shuffle(val_ids)

    # store generator parameters
    batch_size = 2
    image_shape = (256, 256, 3)
    #
    # train_params = {
    #     'ids': train_ids,
    #     'batch_size': batch_size,
    #     'image_shape': image_shape
    # }
    # val_params = {
    #     'ids': val_ids,
    #     'batch_size': batch_size,
    #     'image_shape': image_shape
    # }
    # json.dump(train_params, open(data_folder + 'train_params.json', 'w'))
    # json.dump(val_params, open(data_folder + 'val_params.json', 'w'))

    # train the model
    model.fit(interleaved_train,
              steps_per_epoch=int(len(train_ids) // batch_size),
              epochs=2,
              verbose=1,
              validation_data=interleaved_val,
              validation_steps=int(len(val_ids) // batch_size),
              callbacks=callbacks)
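
Neither scheduler nor adaptive_wing_loss is defined in this snippet. Below is a minimal sketch of both: the step-decay schedule is purely an assumption, and the loss follows the published Adaptive Wing Loss formulation (Wang et al., ICCV 2019) with its default hyper-parameters, which may differ from the author's own version.

import tensorflow as tf

def scheduler(epoch, lr):
    # hypothetical step decay: halve the learning rate every 10 epochs
    return lr * 0.5 if epoch > 0 and epoch % 10 == 0 else lr


def adaptive_wing_loss(y_true, y_pred, omega=14.0, theta=0.5, epsilon=1.0, alpha=2.1):
    # Adaptive Wing Loss for heatmap regression (Wang et al., 2019)
    delta = tf.abs(y_true - y_pred)
    power = alpha - y_true
    a = omega * (1.0 / (1.0 + tf.pow(theta / epsilon, power))) * power \
        * tf.pow(theta / epsilon, power - 1.0) / epsilon
    c = theta * a - omega * tf.math.log(1.0 + tf.pow(theta / epsilon, power))
    loss = tf.where(delta < theta,
                    omega * tf.math.log(1.0 + tf.pow(delta / epsilon, power)),
                    a * delta - c)
    return tf.reduce_mean(loss)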
Example #5
        save_weights_only=True,
        verbose=1,
        save_best_only=True,
        mode="auto",
        save_freq="epoch",
    ),
    gifCallback()
]

# using RMSprop optimizer
optimizer = tf.keras.optimizers.RMSprop(2.5e-4)

# create the stacked hourglass model with the given number
# of stacked hourglass modules
# code source: https://github.com/ethanyanjiali/deep-vision/tree/master/Hourglass/tensorflow
model = StackedHourglassNetwork(num_stack=2)
model.compile(optimizer=optimizer, loss=adaptive_wing_loss)

# from each of the generators create a pair of interleaved datasets
# tensorflow can automatically multiprocess interleaved datasets
# so that batches can be loaded and processed ahead of time
interleaved_train = tf.data.Dataset.range(2).interleave(
    get_train_dataset, num_parallel_calls=tf.data.experimental.AUTOTUNE)
interleaved_val = tf.data.Dataset.range(2).interleave(
    get_val_dataset, num_parallel_calls=tf.data.experimental.AUTOTUNE)

# load training and validation filenames
train_ids, val_ids = load_filenames()
random.shuffle(train_ids)
random.shuffle(val_ids)
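
get_train_dataset and get_val_dataset are not shown; each must map a split index (0 or 1 from Dataset.range(2)) to a tf.data.Dataset. One hypothetical way to do this, assuming a load_sample helper that returns a single (image, heatmap) pair and a 64x64x8 heatmap (all assumptions); get_val_dataset would be analogous over val_ids.

def get_train_dataset(split_idx):
    # hypothetical generator-backed dataset; the two interleaved datasets
    # cover disjoint halves of train_ids (every other id per split)
    def gen(idx):
        for sample_id in train_ids[int(idx)::2]:
            yield load_sample(sample_id)        # assumed helper -> (image, heatmap)

    return tf.data.Dataset.from_generator(
        gen,
        args=(split_idx,),
        output_signature=(
            tf.TensorSpec(shape=(256, 256, 3), dtype=tf.float32),
            tf.TensorSpec(shape=(64, 64, 8), dtype=tf.float32),
        )).batch(2)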

Example #6
if __name__ == "__main__":
    IMG_FOLDER = '../img/hrany_rohy/test/'
    ANNOT_FOLDER = '../img/hrany_rohy/test/bbox/'
    MODEL_PATH = './MODEL_4.2.2'
    OUTPUT_PATH = MODEL_PATH + '/test_data_eval.txt'
    WEIGHTS_PATH = MODEL_PATH + '/model-v0.0.1-epoch-37-loss-0.4046.h5'

    #Results
    TP = 0
    FP = 0
    FN = 0

    # evaluation
    model = StackedHourglassNetwork(input_shape=(512, 512, 3),
                                    num_stack=2,
                                    num_residual=1,
                                    num_heatmap=8)
    model.load_weights(WEIGHTS_PATH)
    image_paths = glob.glob(os.path.join(IMG_FOLDER, '*.jpg'))  # all test images

    for img_path in image_paths:
        #read gt annotation
        annot_file = os.path.basename(img_path)[:-4] + '.txt'
        f = open(ANNOT_FOLDER + annot_file, "r")
        print(annot_file)
        gt_bboxes = []
        for line in f:
            data = [float(x) * 128 for x in line.split()]
            # convert from YOLO format (class, cx, cy, w, h) to left, bottom, right, top
            gt_bboxes.append({
                "x1": data[1] - data[3] / 2,