Example #1
import os
import subprocess

import log_parser         # project-local module
import parse_hyperparams  # project-local module


def main(directory):
    # TensorBoard runs at http://localhost:6006
    subprocess.Popen(['tensorboard', '--logdir', os.path.join(directory, 'train')])
    data = parse_hyperparams.parse(os.path.join(directory, "hyperparameters.json"))
    TRAIN_STEPS = data["epochs"]
    parser = log_parser.EvalJSONifier(directory, TRAIN_STEPS)
    parser.start()
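`parse_hyperparams` is a project-local helper used throughout these examples; a minimal sketch, assuming `parse()` simply loads the JSON file into a dict:

    # Hypothetical stand-in for the project's parse_hyperparams module,
    # assuming hyperparameters.json is plain JSON.
    import json


    def parse(path):
        with open(path) as f:
            return json.load(f)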
Example #2
    def __init__(self, model_dir):
        # read the test parameters (input video, output paths, model archive)
        data = parse_hyperparams.parse(os.path.join(model_dir, "testparameters.json"))
        output_vid_path = data["output-vid-path"]
        self.video_path = data["test-video"]
        self.test_dir = data["test-dir"]
        if not os.path.isdir(self.test_dir):
            os.mkdir(self.test_dir)

        # unpack the trained model archive and load the TFLite interpreter
        model_path = data["model-tar"]
        with tarfile.open(model_path) as tar:
            tar.extractall("/tensorflow/models/research/")

        self.interpreter = tflite.Interpreter(
            model_path="/tensorflow/models/research/unoptimized.tflite")

        # read the class labels from the label map
        parser = PBTXTParser("/tensorflow/models/research/map.pbtxt")
        parser.parse()
        self.labels = parser.get_labels()

        # open the input video and create an MP4 writer with matching
        # dimensions and frame rate
        self.input_video = cv2.VideoCapture(self.video_path)
        width = self.input_video.get(cv2.CAP_PROP_FRAME_WIDTH)
        height = self.input_video.get(cv2.CAP_PROP_FRAME_HEIGHT)
        fps = self.input_video.get(cv2.CAP_PROP_FPS)
        self.total_frames = self.input_video.get(cv2.CAP_PROP_FRAME_COUNT)
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        self.output_video = cv2.VideoWriter(output_vid_path, fourcc, fps,
                                            (int(width), int(height)))

        # MJPEG preview server streaming 300x300 frames
        self.server = MJPEGServer(300, 300)

        self.frames = 0
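This constructor is an excerpt from a larger class; the names it uses imply module-level imports roughly like the following (the `tflite` alias and the module paths for `PBTXTParser` and `MJPEGServer` are assumptions):

    import os
    import tarfile

    import cv2
    import tflite_runtime.interpreter as tflite  # assumed source of the tflite alias

    import parse_hyperparams              # project-local module
    from pbtxt_parser import PBTXTParser  # hypothetical module path
    from mjpeg_server import MJPEGServer  # hypothetical module path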
Example #3
import subprocess

import log_parser         # project-local module
import parse_hyperparams  # project-local module


def main():
    # TensorBoard runs at http://localhost:6006
    subprocess.Popen(['tensorboard', '--logdir', '/opt/ml/model/train'])
    data = parse_hyperparams.parse("/opt/ml/model/hyperparameters.json")
    TRAIN_STEPS = data["epochs"]
    parser = log_parser.EvalJSONifier(TRAIN_STEPS)
    parser.start()
Example #4
import os
import shutil

import labels                  # project-local module
import modularized_model_main  # project-local module
import parse_hyperparams       # project-local module
import sed                     # project-local text-substitution helper


def main():
    """
    Trains a MobileNetV2 model via transfer learning, as configured by
    `hyperparameters.json`. Checkpoints other than the default should be
    mounted in "/opt/ml/model/train/"; `map.pbtxt` and the `.record` file(s)
    must also be mounted.

    Returns:
        None
    """
    # hyperparameters
    TRAIN_PATH = '/opt/ml/model/train'
    data = parse_hyperparams.parse("/opt/ml/model/hyperparameters.json")
    TRAIN_STEPS = data["epochs"]
    BATCH_SIZE = data["batch-size"]
    EVAL_FREQ = data["eval-frequency"]
    CHECKPOINT = data["checkpoint"]

    # start from a clean training directory
    shutil.rmtree(TRAIN_PATH, ignore_errors=True)
    os.makedirs(TRAIN_PATH, exist_ok=True)

    # transfer learning with a checkpoint other than the default one
    if CHECKPOINT != "default":
        sed.replace_words('/tensorflow/models/research/start_ckpt/model.ckpt',
                          '/opt/ml/model/%s' % CHECKPOINT, "pipeline.config")

    # fill in the class count and batch size, then publish the final config
    nb_classes = labels.get_total()
    sed.replace_words('NUM_CLASSES', str(nb_classes), "pipeline.config")
    sed.replace_words('BATCH_SIZE_PARAM', str(BATCH_SIZE), "pipeline.config")
    shutil.copy('pipeline.config', '/opt/ml/model/pipeline.config')

    # call the API for retraining
    modularized_model_main.main(pipeline_config_path='pipeline.config',
                                model_dir=TRAIN_PATH,
                                num_train_steps=TRAIN_STEPS,
                                eval_period=EVAL_FREQ)
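`sed` here is a project-local helper rather than the Unix tool; a minimal sketch of what a `replace_words(old, new, filename)` helper like this presumably does (in-place textual substitution):

    # Hypothetical sketch of the project's sed.replace_words helper:
    # rewrites `filename` in place, replacing every occurrence of `old` with `new`.
    def replace_words(old, new, filename):
        with open(filename) as f:
            text = f.read()
        with open(filename, "w") as f:
            f.write(text.replace(old, new))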
Example #5
import subprocess
import tarfile
from os.path import join

import parse_hyperparams  # project-local module


def main():
    model_dir = "/opt/ml/model/"
    unoptimized = "/tensorflow/models/research/learn/models/output_tflite_graph.tflite"
    second_export = "output_tflite_graph_edgetpu.tflite"
    data = parse_hyperparams.parse(join(model_dir, "exportparameters.json"))

    epoch = data["epochs"]  # checkpoint number to export
    output_name = data["name"]
    export_dir = join(model_dir, data["export-dir"])

    # export the checkpoint to TFLite, then compile it for the Edge TPU
    subprocess.check_call("./convert_checkpoint_to_edgetpu_tflite.sh --checkpoint_num %s" % epoch, shell=True)
    subprocess.check_call("edgetpu_compiler %s -o %s" % (unoptimized, model_dir), shell=True)

    # bundle the compiled model, the label map, and the uncompiled model
    with tarfile.open(join(export_dir, output_name + ".tar.gz"), 'w:gz') as model:
        model.add(join(model_dir, second_export), arcname="model.tflite")
        model.add(join(model_dir, "map.pbtxt"), arcname="map.pbtxt")
        model.add(unoptimized, arcname="unoptimized.tflite")
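Both `check_call` invocations use `shell=True` with string interpolation. The paths here are fixed, so that is harmless, but the same calls can be written with argument lists, which sidesteps shell quoting entirely; a sketch:

    subprocess.check_call(
        ["./convert_checkpoint_to_edgetpu_tflite.sh", "--checkpoint_num", str(epoch)])
    subprocess.check_call(["edgetpu_compiler", unoptimized, "-o", model_dir])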
Example #6
import tarfile

import tensorflow as tf

import parse_hyperparams  # project-local module
# PBTXTParser and test_video come from project-local modules not shown here


def main():
    model_dir = "/opt/ml/model/"
    data = parse_hyperparams.parse(model_dir + "testparameters.json")
    video_path = data["test-video"]
    model_path = data["model-tar"]

    # unpack the trained model archive
    with tarfile.open(model_path) as tar:
        tar.extractall("/tensorflow/models/research/")

    # read the class labels from the label map
    parser = PBTXTParser("/opt/ml/model/map.pbtxt")
    parser.parse()
    labels = parser.get_labels()  # public accessor for the parsed labels

    # load the uncompiled TFLite model and run it over the test video
    interpreter = tf.lite.Interpreter(
        model_path="/tensorflow/models/research/unoptimized.tflite")
    interpreter.allocate_tensors()
    test_video(video_path, interpreter, labels)

    print("Done.")
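`PBTXTParser` is project-local; a minimal sketch of a label-map reader, assuming the standard TensorFlow Object Detection `map.pbtxt` format (`item { id: ... name: "..." }` entries):

    import re


    def read_pbtxt_labels(path):
        # Hypothetical reader for a TF Object Detection label map:
        # collects every name: "..." entry, in file order.
        with open(path) as f:
            return re.findall(r"name:\s*['\"]([^'\"]+)['\"]", f.read())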
Example #7
import subprocess
import tarfile
from os.path import join

import parse_hyperparams  # project-local module


def main(directory):
    model_dir = directory
    unoptimized = "/tensorflow/models/research/learn/models/output_tflite_graph.tflite"
    second_export = "output_tflite_graph_edgetpu.tflite"
    data = parse_hyperparams.parse(join(model_dir, "exportparameters.json"))

    output_name = data["name"]
    config_path = data["config"]
    checkpoint_path = data["checkpoint"]
    export_dir = join(model_dir, data["export-dir"])

    # export the checkpoint to TFLite, then compile it for the Edge TPU
    subprocess.check_call(
        "./convert_checkpoint_to_edgetpu_tflite.sh --config_path %s --ckpt_path %s"
        % (config_path, checkpoint_path),
        shell=True)
    subprocess.check_call("edgetpu_compiler %s -o %s" %
                          (unoptimized, model_dir),
                          shell=True)

    # bundle the compiled model, the label map, and the uncompiled model
    with tarfile.open(join(export_dir, output_name + ".tar.gz"),
                      'w:gz') as model:
        model.add(join(model_dir, second_export), arcname="model.tflite")
        model.add(join(model_dir, "map.pbtxt"), arcname="map.pbtxt")
        model.add(unoptimized, arcname="unoptimized.tflite")
Example #8
# sys, tarfile, join, and the project-local helper modules used below are
# imported at module level; EXTRACT_PATH, TMP_PATH and OUTPUT_PATH are
# module-level constants (not shown in this excerpt)
def main(dataset_paths, percent_eval):
    # guard reconstructed for this excerpt (assumption): abort when no
    # dataset archives were provided
    if not dataset_paths:
        print('unable to retrieve a dataset tar file.')
        sys.exit(1)

    # unpack every dataset archive into the extraction directory
    for dataset in dataset_paths:
        with tarfile.open(dataset) as tar_file:
            tar_file.extractall(join(EXTRACT_PATH, 'out'))

    # fall back to a 70/30 train/eval split when the value is out of range
    if percent_eval > 100 or percent_eval < 0:
        percent_eval = 30
    json_to_csv.main(percent_eval)

    try:
        # convert the CSV annotations into TFRecords and emit the label map
        generate_tfrecord.main(TMP_PATH + "/train.csv",
                               join(OUTPUT_PATH, 'train.record'))
        generate_tfrecord.main(TMP_PATH + "/eval.csv",
                               join(OUTPUT_PATH, 'eval.record'))

        parse_meta.main(join(OUTPUT_PATH, 'map.pbtxt'))

        print(".\nRecords generated")
    except ValueError:
        print(
            "The datasets provided do not have the same class labels. Please make sure that labels are spelt the same in both datasets, or label the same objects for both datasets."
        )


if __name__ == "__main__":
    data = parse_hyperparams.parse("/opt/ml/model/hyperparameters.json")
    DATASET_PATHS = data["dataset-path"]
    PERCENT_EVAL = data["percent-eval"]
    main(DATASET_PATHS, PERCENT_EVAL)
Example #9
# argparse, os, join, and the project-local helper modules used below are
# imported at module level (not shown in this excerpt)
def main(dataset_paths, percent_eval, directory):
    # (earlier steps of this function are omitted in this excerpt)
    json_to_csv.main(percent_eval)

    try:
        # convert the CSV annotations into TFRecords and emit the label map
        generate_tfrecord.main(TMP_PATH + "/train.csv",
                               join(OUTPUT_PATH, 'train.record'))
        generate_tfrecord.main(TMP_PATH + "/eval.csv",
                               join(OUTPUT_PATH, 'eval.record'))

        parse_meta.main(join(OUTPUT_PATH, 'map.pbtxt'))

        print(".\nRecords generated")
    except ValueError:
        print(
            "The datasets provided do not have the same class labels. Please make sure that labels are spelt the same in both datasets, or label the same objects for both datasets."
        )


if __name__ == "__main__":
    # the training directory is passed on the command line
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir',
                        type=str,
                        help='Path of the folder to train in.')
    DIRECTORY = parser.parse_args().dir

    data = parse_hyperparams.parse(
        os.path.join(DIRECTORY, "hyperparameters.json"))
    DATASET_PATHS = data["dataset-path"]
    PERCENT_EVAL = data["percent-eval"]
    main(DATASET_PATHS, PERCENT_EVAL, DIRECTORY)
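Assuming this snippet lives in a script named, say, `generate_records.py` (a hypothetical name), it would be launched as `python3 generate_records.py --dir /opt/ml/model`, so that `hyperparameters.json` is read from the mounted model directory.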