import sys

import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.losses import Reduction
from tensorflow_addons.losses import SigmoidFocalCrossEntropy

# Project-local helpers (efficientdet, get_testData, get_trainData, get_session,
# save_preds, dataGenerator_RHD, weighted_bce, heatmaps_to_coord and the plot_*
# utilities) are assumed to come from this repository's own modules.


def main():
    dir_path = sys.argv[1]
    model_path = sys.argv[2]
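    # usage (assumed from the argv reads above): python <this script> <test_data_dir> <weights_path>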
    phi = 0
    weighted_bifpn = True
    freeze_backbone = False
    # Load test data
    images, heatmaps = get_testData(dir_path)
    tf.compat.v1.keras.backend.set_session(get_session())
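    # get_session() is assumed to return a tf.compat.v1.Session (e.g. with GPU
    # memory growth enabled); set_session registers it as Keras' backend session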

    pred_model = efficientdet(phi,
                              weighted_bifpn=weighted_bifpn,
                              freeze_bn=freeze_backbone)
    print("Load model ........ \n")
    pred_model.load_weights(model_path, by_name=True)
    pred_model.compile(optimizer=Adam(lr=1e-3), loss='mse')
    preds = pred_model.predict(images)
    save_preds(dir_path, preds)


# Example 2
def main():
    dir_path = sys.argv[1]
    phi = 0
    cont_training = False
    weighted_bifpn = True
    freeze_backbone = False
    tf.compat.v1.keras.backend.set_session(get_session())

    # create the generators
    # train_generator = trainGenerator(dir_path)
    images, heatmaps = get_trainData(dir_path, multi_dim=True)
    print("Number of images: %s and heatmaps: %s\n" %
          (len(images), len(heatmaps)))
    model = efficientdet(phi,
                         weighted_bifpn=weighted_bifpn,
                         freeze_bn=freeze_backbone)

    # model_name = 'efficientnet-b{}'.format(phi)
    # file_name = '{}_weights_tf_dim_ordering_tf_kernels_autoaugment_notop.h5'.format(model_name)
    # file_hash = WEIGHTS_HASHES[model_name][1]
    # weights_path = keras.utils.get_file(file_name,
    #                                     BASE_WEIGHTS_PATH + file_name,
    #                                     cache_subdir='models',
    #                                     file_hash=file_hash)
    # model.load_weights(weights_path, by_name=True)

    # freeze backbone layers
    if freeze_backbone:
        # index of the last backbone layer for phi = 0..6
        for i in range(1, [227, 329, 329, 374, 464, 566, 656][phi]):
            model.layers[i].trainable = False

    # compile model
    print("Compiling model ... \n")
    # # SOFTMAX ACTIVATION
    # model.compile(optimizer=Adam(lr=1e-3),
    #                 loss=[categorical_focal_loss(gamma = 2, alpha = 0.25)])

    # SIGMOID ACTIVATION
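    # focal loss down-weights the many easy background pixels, which otherwise
    # dominate the loss on sparse keypoint heatmaps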
    focalloss = SigmoidFocalCrossEntropy(
        reduction=Reduction.SUM_OVER_BATCH_SIZE)
    model.compile(optimizer=Adam(learning_rate=1e-3), loss=focalloss)

    # # LINEAR ACTIVATION
    # model.compile(optimizer=Adam(lr=1e-3),
    #                 loss='mean_absolute_error')

    # print(model.summary())

    # start training
    # return model.fit_generator(
    #     generator=train_generator,
    #     steps_per_epoch=10,
    #     initial_epoch=0,
    #     epochs=10,
    #     verbose=1
    # validation_data=validation_generator
    # )

    # 'efficientdet2' holds the weights from the first stacked-heatmaps run
    if cont_training:
        model.load_weights('efficientdet2')
        model.fit(images, heatmaps, batch_size=16, epochs=60, verbose=1)
    else:
        model.fit(images, heatmaps, batch_size=16, epochs=10, verbose=1)
    model.save_weights('efficientdet2')
    preds = model.predict(images[0:3])
    # save_preds(dir_path, preds)

    # fig = plt.figure()

    plt.subplot(1, 2, 1)
    plt.imshow(np.sum(preds[0], axis=-1))

    plt.subplot(1, 2, 2)
    plt.imshow(np.sum(heatmaps[0], axis=-1))

    plt.savefig("testres.png")  # save before show(); show() clears the figure
    plt.show()
def main():
    dir_path = sys.argv[1]
    phi = 0
    cont_training = False
    weighted_bifpn = True
    freeze_backbone = False
    input_shape = (320, 320, 3)
    tf.compat.v1.keras.backend.set_session(get_session())

    # images, heatmaps, heatmaps2,heatmaps3, coord = get_trainData(dir_path, 100, multi_dim=True)

    traingen = dataGenerator_RHD(dir_path, batch_size=8, data_set='training')
    validgen = dataGenerator_RHD(dir_path, batch_size=8, data_set='validation')
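    # dataGenerator_RHD is assumed to yield (images, (heatmaps, heatmaps2, heatmaps3))
    # batches from the RHD data split selected by data_set (see the unpacking below)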

    # check if it looks good
    # plot_heatmaps_with_coords(images, heatmaps, coord)

    # print("Number of images: %s and heatmaps: %s\n" % (len(images), len(heatmaps)))
    model = efficientdet(phi,
                         input_shape=input_shape,
                         weighted_bifpn=weighted_bifpn,
                         freeze_bn=freeze_backbone)
    # model = efficientdet_coord(phi, weighted_bifpn=weighted_bifpn,
    #                       freeze_bn = freeze_backbone)

    # freeze backbone layers
    if freeze_backbone:
        # index of the last backbone layer for phi = 0..6
        for i in range(1, [227, 329, 329, 374, 464, 566, 656][phi]):
            model.layers[i].trainable = False

    # compile model
    print("Compiling model ... \n")
    losses = {
        "normalsize": weighted_bce,
        "size2": weighted_bce,
        'size3': weighted_bce
    }
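    # the dict keys must match the names of the model's three output layers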
    # losses = {"normalsize" : weighted_bce, "size2" : weighted_bce, 'size3':weighted_bce, 'depth' : 'mean_squared_error'}
    # losses = {"normalsize" : weighted_bce, "size2" : weighted_bce, 'size3':weighted_bce, 'depthmaps' : 'mean_squared_error'}
    lossWeights = {"normalsize": 1.0, "size2": 1.0, 'size3': 1.0}
    # lossWeights = {"normalsize" : 1.0, "size2" : 1.0, 'size3' : 1.0, 'depth' : 1.0}
    # lossWeights = {"normalsize" : 1.0, "size2" : 1.0, 'size3' : 1.0, 'depthmaps' : 1.0}
    # focalloss = SigmoidFocalCrossEntropy(reduction=Reduction.SUM_OVER_BATCH_SIZE)
    model.compile(optimizer=Adam(learning_rate=1e-3),
                  loss=losses,
                  loss_weights=lossWeights,
                  metrics={'normalsize': 'mse'})
    # model.compile(optimizer='adam', metrics=['accuracy'], loss=weighted_bce)
    # loss=tf.keras.losses.SigmoidFocalCrossEntropy())
    # loss=weighted_bce)
    # loss=tf.keras.losses.SigmoidFocalCrossEntropy())
    # loss=[focal_loss(gamma = 2, alpha = 0.25)])
    # loss = 'mean_absolute_error'
    # print(model.summary())
    print("Number of parameters in the model : ", model.count_params())
    # print(get_flops(model))

    # model.fit(images, {"normalsize" : heatmaps, "size2": heatmaps2, 'size3': heatmaps3},
    # batch_size=16, epochs=100, verbose=1)
    # K.set_value(model.optimizer.learning_rate, 1e-5)
    # model.fit(images, heatmaps, batch_size = 16, epochs = 100, verbose = 1)

    # callbacks = [
    #     keras.callbacks.ModelCheckpoint(
    #         filepath='mymodel_{epoch}',
    #         # Save a checkpoint only when `val_loss` improves, overwriting
    #         # the previous best checkpoint.
    #         save_best_only=True,
    #         monitor='val_loss',
    #         verbose=1)
    # ]

    model.fit(traingen,
              validation_data=validgen,
              validation_steps=18,
              steps_per_epoch=1,
              epochs=1,
              verbose=1)
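    # steps_per_epoch=1 / epochs=1 look like smoke-test settings; increase both
    # for a real training run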

    # model.save_weights('handposenet')

    # images = get_evalImages(dir_path, 10)
    validgen2 = dataGenerator_RHD(dir_path,
                                  batch_size=10,
                                  data_set='validation')
    (images, targets) = next(validgen2)
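    # grab a single batch of 10 validation samples for qualitative checks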

    (preds, preds2, preds3) = model.predict(images)
    # (preds, preds2 ,preds3, depth) = model.predict(images)

    (heatmaps, heatmaps2, heatmaps3) = targets

    # plot_acc_loss(history)

    # get coordinates from predictions
    coord_preds = heatmaps_to_coord(preds)
    coord = heatmaps_to_coord(heatmaps)
    # coord_upsamp = heatmaps_to_coord(preds2)
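    # the * 4 below maps heatmap coordinates back to input resolution, assuming
    # the heatmaps are predicted at 1/4 of the 320x320 input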

    plot_predicted_heatmaps(preds, heatmaps)
    # plot_predicted_heatmaps(preds2, heatmaps2)
    plot_predicted_hands_uv(images, coord_preds * 4)

    # xyz_pred = add_depth_to_coords(coord_preds[0], depth[0])
    plot_predicted_coordinates(images, coord_preds * 4, coord * 4)