Example #1
import dataget as dg
import numpy as np
import tensorflow as tf


def input_fn(data_dir, params):

    # load the udacity-selfdriving-simulator dataset index from disk
    dataset = dg.data(
        "udacity-selfdriving-simulator",
        path=data_dir,
    )
    dataset = dataset.get()

    df = dataset.df

    df = process_dataframe(df, params)

    if params.only_center_camera:
        df = df[df.camera == 1]

    df = dg.shuffle(df)

    # .values instead of the removed pandas .as_matrix()
    tensors = dict(
        filepath=df.filepath.values,
        steering=df.steering.values,
        camera=df.camera.values,
        original_steering=df.original_steering.values,
    )

    if "flipped" in df:
        tensors["flipped"] = df.flipped.as_matrix().astype(np.int32)

    ds = tf.data.Dataset.from_tensor_slices(tensors)

    ds = ds.apply(
        tf.contrib.data.shuffle_and_repeat(buffer_size=params.buffer_size))

    ds = ds.apply(
        tf.contrib.data.map_and_batch(
            lambda row: process_data(row, params),
            batch_size=params.batch_size,
            num_parallel_calls=params.n_threads,
            drop_remainder=True,
        ))

    ds = ds.prefetch(tf.contrib.data.AUTOTUNE)

    return ds
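The returned tf.data.Dataset plugs into the TF 1.x Estimator API. A minimal usage sketch; my_model_fn and the model directory are hypothetical placeholders, params is the same object passed to input_fn:

import tensorflow as tf

# my_model_fn and model_dir are hypothetical; params is reused from input_fn
estimator = tf.estimator.Estimator(
    model_fn=my_model_fn,
    model_dir="models/selfdriving",
    params=params,
)
estimator.train(input_fn=lambda: input_fn("data/raw", params), steps=1000)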
Example #2
def main(device):

    n_classes = data("visual-words").n_classes

    graph = tf.Graph()
    sess = tf.Session(graph=graph)

    # inputs
    inputs_t, model_t = get_templates(n_classes)

    # model
    with tf.device(device):
        inputs = inputs_t()
        model = model_t(inputs)

    with graph.as_default():
        print("")
        print("##########################################################")
        print("Number of Weights = {:,}".format(model.count_weights()))
        print("##########################################################")
Example #3
def main(device):
    print("DEVICE:", device)

    # seed: reproducible results
    seed = 32
    np.random.seed(seed=seed)
    random.seed(seed)

    # dataget
    dataset = data("german-traffic-signs").get()

    # load the test images
    print("loading data")
    features_test, labels_test = dataset.test_set.arrays()
    # features_test, labels_test = next(dataset.test_set.random_batch_arrays_generator(500))

    # model
    with tf.device(device):
        graph = tf.Graph()
        sess = tf.Session(graph=graph)

        # inputs
        inputs = SupervisedInputs(
            name=network_name + "_inputs",
            graph=graph,
            sess=sess,
            # tensors
            features=dict(shape=(None, 32, 32, 3)),
            labels=dict(shape=(None, ), dtype=tf.uint8))

        # create model template
        template = Model(
            n_classes=43,
            name=network_name,
            model_path=model_path,
            graph=graph,
            sess=sess,
            seed=seed,
        )

        inputs = inputs()
        model = template(inputs)

        # restore
        print("restoring model")
        model.initialize(restore=True)

        # test
        print("testing")
        generator = batch_generator(len(features_test), 100)
        generator = map(
            lambda batch: dict(features=features_test[batch],
                               labels=labels_test[batch]), generator)

        predictions = model.batch_predict(
            generator,
            print_fn=lambda batch: print(
                accuracy_score(np.argmax(model.predict(**batch), axis=1),
                               batch["labels"]),
                np.mean(
                    np.argmax(model.predict(**batch), axis=1) == batch[
                        "labels"]), model.score(**batch)))
        predictions = np.argmax(predictions, axis=1)
        test_score = accuracy_score(predictions, labels_test)
        print("test score: {}".format(test_score))
Example #4
def main(device, epochs, batch_size, loss, restore):

    # seed: reproducible results
    seed = 32
    np.random.seed(seed=seed)
    random.seed(seed)

    # dataget
    dataset = data("visual-words").get(process=False)

    df = dataset.training_set.dataframe()
    # keep only the images with shape (299, 299, 3)
    dataset.training_set._dataframe = df[
        df["image"].apply(lambda a: a.shape == (299, 299, 3))]

    # utils.process_steering(dataset.training_set, params.alpha, params.straight_tol, params.steering_filter)
    # print(dataset.training_set.pure_dataframe().columns)

    # load all the images (slow)
    def data_generator_fn():
        data_generator = dataset.training_set.random_batch_arrays_generator(
            batch_size)
        data_generator = cz.map(
            Dict(image=P[0], labels=P[1], keras_training=False),
            data_generator)
        # data_generator = cz.map(utils.get_processed_image, data_generator)

        return data_generator

    graph = tf.Graph()
    sess = tf.Session(graph=graph)

    # inputs
    input_t, model_t = get_templates(dataset.n_classes, seed=seed)

    # model

    with tf.device(device):
        inputs = input_t()
        model = model_t(inputs)

    # initialize variables
    print("Initializing Model: restore = {}".format(restore))
    model.initialize(restore=restore)

    # start queue
    # inputs.start_queue(data_generator_fn)

    # fit
    print("training")
    model.fit(
        data_generator=data_generator_fn(),
        epochs=epochs,
        # log_summaries = True,
        log_interval=10,
        print_test_info=True,
        on_train=[
            dict(when=lambda step, **kwargs: step % 1000 == 0,
                 do=lambda **kwargs: model.save())
        ])

    # save
    print("saving model")
    model.save()
Example #5
def main(device, epochs, batch_size):

    # seed: reproducible results
    seed = 32
    np.random.seed(seed=seed)
    random.seed(seed)

    # dataget
    dataset = data("german-traffic-signs").get()

    # data_generator
    data_generator = dataset.training_set.random_batch_arrays_generator(
        batch_size)
    data_generator = utils.batch_random_image_rotation(data_generator, 15.0)
    data_generator = cz.map(Dict(features=P[0], labels=P[1]), data_generator)

    graph = tf.Graph()
    sess = tf.Session(graph=graph)

    # inputs
    inputs = SupervisedInputs(
        name=network_name + "_inputs",
        graph=graph,
        sess=sess,
        # tensors
        features=dict(shape=(None, 32, 32, 3)),
        labels=dict(shape=(None, ), dtype=tf.uint8))

    # create model template
    template = Model(
        n_classes=43,
        name=network_name,
        model_path=model_path,
        graph=graph,
        sess=sess,
        seed=seed,
        optimizer=tf.train.AdamOptimizer,
    )

    # model

    with tf.device(device):
        inputs = inputs()
        model = template(inputs)

    # initialize variables
    model.initialize()

    # fit
    print("training")
    model.fit(
        data_generator=data_generator,
        epochs=epochs,
        log_summaries=True,
        log_interval=10,
        print_test_info=True,
    )

    # save
    print("saving model")
    model.save()
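utils.batch_random_image_rotation used above is also a project helper that is not shown. A plausible sketch with scipy.ndimage, assuming the incoming generator yields (features, labels) array pairs as in this example:

import numpy as np
from scipy import ndimage

def batch_random_image_rotation(generator, max_angle):
    for features, labels in generator:
        # rotate each image by a random angle in [-max_angle, max_angle] degrees
        angles = np.random.uniform(-max_angle, max_angle, size=len(features))
        rotated = np.stack([
            ndimage.rotate(image, angle, reshape=False, mode="nearest")
            for image, angle in zip(features, angles)
        ])
        yield rotated, labels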
Example #6
#MNIST
import numpy as np
from time import time
from sklearn import metrics
from sklearn.neighbors import KNeighborsClassifier
from dataget import data
#Read image input as vector or matrix

#Train algorithm using data from data/training-set

#Get data
dataset = data("mnist").get()
#Format training_set
features_train, labels_train = dataset.training_set.arrays()
features_train = features_train.reshape((60000, 28 * 28))
#print("Features shape: {} \nLabels shape: {}".format(features_train.shape, labels_train.shape))

#Format test_set
features_test, labels_test = dataset.test_set.arrays()
features_test = features_test.reshape((10000, 28 * 28))
#print("Features shape: {} \nLabels shape: {}".format(features_test.shape, labels_test.shape))


def kClassify(features_train, labels_train):
    # leaf_size only affects tree-based searches, so it is ignored with 'brute'
    clf = KNeighborsClassifier(n_neighbors=2, algorithm='brute', leaf_size=7)
    t0 = time()
    clf.fit(features_train, labels_train)
    print("training time:", round(time() - t0, 3), "s")
    return clf
Example #7
import dataget as dg

ds = dg.data(
    "udacity-selfdriving-simulator",
    path="data/raw",
)
ds = ds.get(download=False)  # download=False: use the files already under data/raw
df = ds.df

print(df.head())
print(df.filepath.iloc[0])
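The dataframe only lists file paths; a small optional check that one image actually resolves, assuming Pillow is installed (it is not used in the original snippet):

from PIL import Image

image = Image.open(df.filepath.iloc[0])
print(image.size)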