Example #1
import pickle

import dicto as do  # assumed source of `do.Dicto`
import numpy as np
import tensorflow as tf

# `input_fn` and `model_fn` are assumed to be defined elsewhere in this module.


def main(train_params, model_dir):
    params = do.Dicto(train_params)
    # params = do.Dicto(
    #     buffer_size = 34799,
    #     batch_size = 16,
    #     epochs = 400
    # )

    with open("/data/train.p", "rb") as fd:
        train = pickle.load(fd)
        
    train_input_fn = lambda : input_fn(train['features'], train['labels'].astype(np.int32), params, training=True)

    with open("/data/test.p", "rb") as fd:
        test = pickle.load(fd)

    # Let GPU memory grow on demand so CuDNN does not fail to allocate memory.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    training_config = tf.estimator.RunConfig(session_config=config)

    eval_input_fn = lambda : input_fn(test['features'], test['labels'].astype(np.int32), params, training=False)
   
    classifier = tf.estimator.Estimator(model_fn, model_dir=model_dir, config=training_config)

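    # train_and_evaluate alternates between training with train_spec and
    # evaluating with eval_spec on the same Estimator.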
    train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn)
    eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)

    tf.estimator.train_and_evaluate(classifier, train_spec, eval_spec)
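
The `input_fn` used above is not part of this listing. The following is a minimal sketch of what such a function could look like with the tf.data API, assuming the `buffer_size`, `batch_size`, and `epochs` keys from the commented-out parameters above; the real implementation may differ:

def input_fn(features, labels, params, training=True):
    # Build an in-memory tf.data pipeline over the feature/label arrays.
    dataset = tf.data.Dataset.from_tensor_slices((features, labels))
    if training:
        # Shuffle and repeat only while training.
        dataset = dataset.shuffle(params.buffer_size).repeat(params.epochs)
    return dataset.batch(params.batch_size)

Estimators accept a tf.data.Dataset returned directly from an input_fn, so no explicit iterator handling is needed here.
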
Example #2
import os

import dicto as do  # assumed source of `do.Dicto`
import tensorflow as tf

# `est` (providing `model_fn`) and `data_pipelines` (providing `input_fn` and
# `serving_input_fn`) are assumed to be project-local modules not shown here.


def main(data_dir, job_dir, params):

    params = do.Dicto(params)

    tf.logging.set_verbosity(tf.logging.DEBUG)
    print("job_dir", job_dir)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

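    # RunConfig fixes the model directory and the summary/checkpoint cadence,
    # and carries the GPU session settings configured above.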
    run_config = tf.estimator.RunConfig(
        model_dir=job_dir,
        save_summary_steps=params.summary_steps,
        save_checkpoints_steps=params.save_checkpoints_steps,
        session_config=config
    )

    estimator = tf.estimator.Estimator(
        model_fn=est.model_fn,
        params=params,
        config=run_config,
    )

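    # LatestExporter re-exports a SavedModel via serving_input_fn after each
    # evaluation, keeping only the most recent exports.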
    exporter = tf.estimator.LatestExporter(
        params.project,
        lambda: data_pipelines.serving_input_fn(params),
    )

    train_spec = tf.estimator.TrainSpec(
        lambda: data_pipelines.input_fn(data_dir, params),
        max_steps=params.max_steps,
    )

    test_spec = tf.estimator.EvalSpec(
        lambda: data_pipelines.input_fn(data_dir, params),
        steps=params.eval_steps,
        exporters=[exporter], 
    )

    print("Start training and evaluate")

    tf.estimator.train_and_evaluate(estimator, train_spec, test_spec)

    # for _ in range(1000000):

    #     tf.estimator.train_and_evaluate(estimator, train_spec, test_spec)

    print("exporting")

    estimator.export_savedmodel(
        os.path.join(job_dir, "export", params.project),
        lambda: data_pipelines.serving_input_fn(params)
    )
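
`data_pipelines.serving_input_fn` is not shown in this listing. Below is a minimal sketch of such a function, assuming the model is fed a batch of RGB images and that `params` carries hypothetical `image_height` and `image_width` entries; the real feature names and shapes may differ:

def serving_input_fn(params):
    # Placeholder that the exported SavedModel expects at serving time.
    images = tf.placeholder(
        tf.float32,
        shape=[None, params.image_height, params.image_width, 3],
        name="images",
    )
    return tf.estimator.export.ServingInputReceiver(
        features={"image": images},
        receiver_tensors={"image": images},
    )
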
Example #3
import os
import shutil

import dicto as do  # assumed source of `do.Dicto`
import pandas as pd
from dask.diagnostics import ProgressBar

# `augment_dataset` is assumed to be a project helper that returns a lazy dask
# collection over the augmented samples.


def main(raw_dir, augmented_dir, rm, params):
    """
    Args:
        raw_dir: directory holding the raw dataset and its driving_log.csv.
        augmented_dir: directory where the augmented dataset and CSVs are written.
        rm: bool, remove an existing augmented_dir before augmenting.
        params: dict of augmentation parameters, wrapped into a Dicto below.
    """
    params = do.Dicto(params)

    if rm and os.path.exists(augmented_dir):
        shutil.rmtree(augmented_dir)

    csv_filepath = os.path.join(raw_dir, "driving_log.csv")
    # df = pd.read_csv(raw_dir)
    df = pd.read_csv(csv_filepath)

    if not "filepath" in df.columns:
        for path_column in ["center", "left", "right"]:
            # There are whitespace characters in the csv so that's why we strip characters.
            df[path_column + "_filepath"] = df[path_column].apply(
                lambda x: os.path.join(raw_dir, x.strip()))

        df["folder"] = raw_dir

    sd = augment_dataset(df,
                         params,
                         save_dir=augmented_dir,
                         return_image=False)

    print("Augmenting images")

    with ProgressBar():
        # df = sd.compute(get=get)
        df = sd.compute(scheduler='processes')

    print("Saving CSVs")

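    # Write one driving_log.csv per (source folder, augmentation index) pair.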
    for _, dfg in df.groupby(["folder", "augment_idx"]):

        sample = dfg.iloc[0]
        folder = sample.folder + "_" + str(sample.augment_idx)
        csv_path = os.path.join(augmented_dir, folder, "driving_log.csv")

        del dfg["left_image"]
        del dfg["left_filepath"]
        del dfg["center_image"]
        del dfg["center_filepath"]
        del dfg["right_image"]
        del dfg["right_filepath"]

        del dfg["augment_idx"]
        del dfg["folder"]
        del dfg["augmented"]

        dfg.to_csv(csv_path, index=False)
Example #4

# Uses the same imports and helpers as Example #1 (pickle, numpy as np,
# tensorflow as tf, dicto as do, plus input_fn and model_fn).
def main(train_params, model_dir):

    params = do.Dicto(train_params)

    with open("/data/valid.p", "rb") as fd:
        valid = pickle.load(fd)

    validate_input_fn = lambda : input_fn(valid['features'].astype(np.float32), valid['labels'].astype(np.int32), params, training=False)

    classifier = tf.estimator.Estimator(model_fn, model_dir=model_dir)

    print(classifier.evaluate(input_fn=validate_input_fn))
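
A hypothetical invocation of this evaluation entry point; the parameter values come from the commented-out defaults in Example #1, and the model_dir path is illustrative only and should point at the checkpoints produced by training:

if __name__ == "__main__":
    main(
        train_params={"buffer_size": 34799, "batch_size": 16, "epochs": 400},
        model_dir="/models/traffic_signs",  # illustrative path
    )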