Example #1
def main():

    # SETUP
    metadata = dg.read_dataset_metadata("./data/driving_log.csv")
    model = get_model()
    model.summary()

    # generators for training and validation
    train_lines, val_lines = train_test_split(metadata, test_size=0.2)
    train_gen = dg.generator(train_lines, batch_size=128)
    valid_gen = dg.generator(val_lines, batch_size=128)

    # https://faroit.github.io/keras-docs/1.2.1/models/sequential/
    model.compile(optimizer=Adam(1e-3), loss="mse")

    # TRAINING
    # Fits the model on data generated batch-by-batch by a Python generator.
    history = model.fit_generator(train_gen,
                                  samples_per_epoch=20480,
                                  nb_epoch=6,
                                  validation_data=valid_gen,
                                  nb_val_samples=4096,
                                  verbose=1)

    # save model
    model.save('model.h5')
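
Note: the dg.generator helper used above is not included in this listing. A minimal sketch of what such a batch generator for driving_log.csv data commonly looks like follows; the CSV column layout, the flip augmentation, and the function body are assumptions for illustration, not the author's actual dg module.

import cv2
import numpy as np
from sklearn.utils import shuffle


def generator(lines, batch_size=128):
    """Hypothetical batch generator: yields (images, angles) forever, as fit_generator expects."""
    while True:
        lines = shuffle(lines)
        for offset in range(0, len(lines), batch_size):
            batch = lines[offset:offset + batch_size]
            images, angles = [], []
            for line in batch:
                # Assumed layout: center-image path in column 0, steering angle in column 3.
                image = cv2.cvtColor(cv2.imread(line[0].strip()), cv2.COLOR_BGR2RGB)
                angle = float(line[3])
                images.append(image)
                angles.append(angle)
                # Simple augmentation: a horizontally flipped copy with a negated angle.
                images.append(np.fliplr(image))
                angles.append(-angle)
            yield np.array(images), np.array(angles)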
Example #2
def run():
    files = dg.get_hdf5_file_names(hdf5_path)
    training_files = files[:-1]
    evaluate_files = []
    evaluate_files.append(files[-1])
    ds = tf.data.Dataset.from_generator(
        dg.generator(training_files), (tf.int8, tf.float32),
        (tf.TensorShape([66, 200, 3]), tf.TensorShape([])))

    # dataset = dataset.map(map_fn)
    ds = ds.batch(46)
    ds = ds.repeat()

    ds = ds.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)

    model = m.create_model()
    adam = Adam(lr=1e-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
    model.compile(optimizer=adam, loss="mse")  # use the Adam instance configured above
    iterator = ds.make_one_shot_iterator()

    # print(iterator.get_next())

    tbCallback = keras.callbacks.TensorBoard(log_dir='./Graph',
                                             histogram_freq=0,
                                             write_graph=True,
                                             write_images=True)

    tbCallback.set_model(model)
    cp_callback = tf.keras.callbacks.ModelCheckpoint('checkpoints/cp.ckpt',
                                                     save_weights_only=True,
                                                     verbose=1)

    # validation
    print(evaluate_files)
    dt = tf.data.Dataset.from_generator(
        dg.generator(evaluate_files), (tf.int8, tf.float32),
        (tf.TensorShape([66, 200, 3]), tf.TensorShape([])))
    dt = dt.batch(1)
    dt = dt.repeat()

    dt = dt.prefetch(buffer_size=tf.contrib.data.AUTOTUNE)
    model.fit(iterator,
              steps_per_epoch=1000,
              validation_data=dt.make_one_shot_iterator(),
              validation_steps=1000,
              epochs=15,
              verbose=1,
              callbacks=[tbCallback, cp_callback])
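
Note: the pipeline above uses TF 1.x APIs (make_one_shot_iterator, tf.contrib.data.AUTOTUNE). If the same code were ported to TF 2.x / tf.keras, the datasets would normally be passed straight to model.fit; a rough sketch under that assumption (dg and m are assumed to be the same data_generator and model modules used above):

import tensorflow as tf
from tensorflow.keras.optimizers import Adam


def run_tf2(training_files, evaluate_files):
    # Same element spec as above: an int8 66x200x3 image and a float32 steering value.
    signature = (tf.TensorSpec(shape=(66, 200, 3), dtype=tf.int8),
                 tf.TensorSpec(shape=(), dtype=tf.float32))
    train_ds = (tf.data.Dataset.from_generator(dg.generator(training_files),
                                               output_signature=signature)
                .batch(46).repeat().prefetch(tf.data.AUTOTUNE))
    valid_ds = (tf.data.Dataset.from_generator(dg.generator(evaluate_files),
                                               output_signature=signature)
                .batch(1).repeat().prefetch(tf.data.AUTOTUNE))

    model = m.create_model()
    model.compile(optimizer=Adam(learning_rate=1e-4), loss='mse')
    # Datasets are consumed directly; no explicit iterators are needed in TF 2.x.
    model.fit(train_ds, steps_per_epoch=1000, epochs=15,
              validation_data=valid_ds, validation_steps=1000, verbose=1)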
Example #3
def test_modelselection():
    df = generator(5)
    JM = JointModel(df,
                    formula='y|id|t~1|1',
                    poly_orders=(2, 2, 2),
                    model_select=True,
                    optim_meth='default')
Example #4
def train(load_file=None, save_file=None):
    """
    Train a keras model
    :param load_file: load a previously saved checkpoint or model
    :param save_file: file to save trained model to
    """
    epochs = 50
    steps_per_epoch = 1000
    batch_size = 5000

    # Create model
    # Load checkpoint if exists
    if load_file is not None:
        model = keras.models.load_model(load_file)
    else:
        model = make_model()

    model.summary()

    # Create callbacks
    log_dir = os.path.join('logs', 'scalars', curr_date)
    tensorboard_callback = keras.callbacks.TensorBoard(log_dir=log_dir)
    checkpoint_file = os.path.join('checkpoints', curr_date)
    checkpoint_callback = keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_file,
        monitor='val_accuracy',
        verbose=1,
        save_best_only=True,
        mode='max')

    # Train model
    model.fit(
        generator(batch_size=batch_size, gen_mode=0),
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        validation_data=generator(batch_size=10000, gen_mode=1),
        validation_steps=steps_per_epoch // 3,
        callbacks=[tensorboard_callback, checkpoint_callback],
    )

    score = test(model)

    if save_file is None:
        model.save(os.path.join('models', str(score)))
    else:
        model.save(save_file)
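
A hypothetical invocation of train(), showing a fresh run and a resumed run; the paths below are placeholders, not files from this project:

if __name__ == '__main__':
    # Fresh run: builds a new model via make_model() and saves it under models/<score>.
    train()
    # Resume from a saved checkpoint and write the result to an explicit path (placeholder paths).
    train(load_file='checkpoints/2020-01-01', save_file='models/resumed_model')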
Example #5
def test_functions():
    df = generator(5)
    JM = JointModel(df,
                    formula='y|id|t~1|1',
                    poly_orders=(2, 2, 2),
                    optim_meth='default')
    JM.summary()
    JM.wald_test()
Example #6
def begin_training(model,
                   training_samples,
                   validation_samples,
                   n_epoch=10,
                   begin_at_epoch=0):

    model.compile(loss='mean_squared_error', optimizer=Adam(lr=1e-3))
    model_checkpoint = ModelCheckpoint(
        filepath='model.weights.{epoch:02d}-{val_loss:.5f}.h5',
        verbose=1,
        save_best_only=True,
        save_weights_only=True,
        period=20)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=5,
                                   verbose=1)
    reduce_LR_plateau = ReduceLROnPlateau(factor=0.1,
                                          patience=2,
                                          verbose=1,
                                          epsilon=1e-5)
    tensorboard_log = TensorBoard(log_dir='./tb_logs',
                                  histogram_freq=0,
                                  write_graph=True,
                                  write_images=False)

    batch_size = 128
    samples_per_epoch = 8000
    nb_val_samples = 2000
    fit = model.fit_generator(gen.generator(training_samples, batch_size),
                              samples_per_epoch=samples_per_epoch,
                              nb_epoch=n_epoch,
                              verbose=2,
                              callbacks=[
                                  model_checkpoint, early_stopping,
                                  reduce_LR_plateau, tensorboard_log
                              ],
                              validation_data=gen.generator(
                                  validation_samples, batch_size),
                              nb_val_samples=nb_val_samples,
                              initial_epoch=begin_at_epoch)
    print("Done Training")
    return model
Example #7
def distribution_calculator():
    [data, energies, angles, distances] = data_generator.generator(1)
    total_counts = sum(data[:, 3])
    i = 0
    dd = distances_distribution(data, distances[10])
    de = energies_distribution(dd, energies[2])
    da = angles_distribution(de, angles[3])

    print(sum(dd[:, 3]) / total_counts, sum(de[:, 3]) / total_counts,
          sum(da[:, 3]) / total_counts)
Example #8
def test_modelfit():
    df = generator(5)
    JM0 = JointModel(df,
                     formula='y|id|t~1|1',
                     poly_orders=(0, 0, 0),
                     optim_meth='default')
    JM1 = JointModel(df,
                     formula='y|id|t~1|1',
                     poly_orders=(0, 0, 0),
                     optim_meth='BFGS')
Example #9
def test_class():
    df = generator(m=10)
    rd = ReadData(df, "y|id|t~1|1", (3, 3, 3))
    assert rd.mat_X.shape[1] == 4
    assert rd.mat_Z.shape[1] == 4
    assert rd.mat_W.shape[1] == 4
    bf = BaseFunc(rd.mat_X, rd.mat_Z, rd.mat_W, rd.vec_y, rd.vec_n)
    assert bf.get_y(0) is not None
    assert bf.get_X(0) is not None
    assert bf.get_Z(0) is not None
    assert bf.get_W(0) is not None
Example #10
def main(argv):
    try:
        opts, args = getopt.getopt(
            argv, "hi:sb:e:m:j:p:a:",
            ["ifile=", "summary", "batch_size=", "epochs=", "model=",
             "model_to_json=", "initial_epoch=", "arch="])
    except getopt.GetoptError:
        usage()
        sys.exit(2)

    settings = parse_options(opts)
    check_settings(settings)
            
    train_samples, validation_samples = split_samples(settings.trainingfiles)
    
    # create training and validation generators - only training samples will be augmented
    train_generator = dg.generator(train_samples, batch_size=settings.batches)
    validation_generator = dg.generator(validation_samples, batch_size=settings.batches, isAugment=False)

    # Tensorboard logging
    callback_tb = TensorBoard(log_dir='./Graph', histogram_freq=0, write_graph=True, write_images=True)
    
    # checkpoint
    filepath = "new_model-{epoch:02d}-{val_loss:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
                                 save_best_only=True, mode='min')
    callbacks_list = [callback_tb, checkpoint]

    # create model
    settings.model = create_model() if settings.model is None else settings.model

    # Mean squared error is used as the loss function because this is a regression problem.
    settings.model.compile(optimizer="adam", loss='mse')

    print("start training")
    history = settings.model.fit_generator(train_generator,
                          steps_per_epoch=len(train_samples) // settings.batches,
                          epochs=settings.epochs,
                          validation_data=validation_generator,
                          validation_steps=len(validation_samples) // settings.batches,
                          callbacks=callbacks_list,
                          initial_epoch=settings.initial_epoch,
                          verbose=1)
    
    dg.plot_history(history)
Example #11
def train():
    tf_config()

    printing('Creating and compiling model...')
    model = unet_original()
    # model = unet_small()

    plot_model(model,
               to_file='model_' + str(img_rows) + 'x' + str(img_cols) + '.png',
               show_shapes=True,
               show_layer_names=True)
    model_checkpoint = ModelCheckpoint(path_weights,
                                       monitor='val_loss',
                                       save_best_only=True)

    get_session().run(tf.global_variables_initializer())
    images_train, images_valid, images_dict = import_images_train_valid()
    create_valid_npy_for_generator(images_valid)
    validation_imgs, validation_mask = load_data('valid')
    validation_imgs = validation_imgs[..., np.newaxis]
    validation_mask = validation_mask[..., np.newaxis]

    batch = 64
    epochs = 8
    printing('Fitting model...')
    t0 = time()
    model.fit_generator(generator(batch_size=batch,
                                  images_train=images_train,
                                  images_dict=images_dict),
                        steps_per_epoch=round(num_train / batch),
                        epochs=epochs,
                        validation_data=(validation_imgs, validation_mask),
                        verbose=1,
                        callbacks=[model_checkpoint])
    printing('Training time - %.1f min' % ((time() - t0) / 60))
    printing('Done')
Example #12
def main():

    # variables
    cluster_num = INIT_GROUP  # number of the clusters at the current iteration
    z = []  # list indicating which cluster each data point x_i belongs to
    mu = []  # mean value of each cluster
    cov = np.eye(DIM)  # covariance matrix of each cluster
    cov_t = np.linalg.inv(
        (1 / SIGMA**2) * np.eye(DIM) + np.linalg.inv(cov)
    )  # the two following covariance matrices are used in calculating the probability of a new cluster
    cov_z = np.linalg.inv(cov) * cov_t * np.linalg.inv(cov) - np.linalg.inv(
        cov)
    # records
    cluster_num_record = np.zeros(
        ITER_NUM +
        1)  # records the change in the number of clusters over iterations
    dist_record = np.zeros(
        ITER_NUM +
        1)  # records the change in the mean distance over iterations

    # generates the data points
    x = dg.generator(DATA_MEAN)  # list of data points x
    data_length = len(x)  # total number of data points x

    # random initialization
    for _ in range(data_length):
        z.append(random.randint(0, cluster_num - 1))
    for _ in range(INIT_GROUP):
        mu.append(np.random.multivariate_normal(np.zeros(DIM), np.eye(DIM)))

    n_cluster = cluster_num_count(
        z)  # list indicating how many data points there are in each cluster
    prob_data_cluster = np.array(
        np.zeros((data_length, cluster_num))
    )  # array indicating the probability that every data point x_i belongs to a certain cluster

    # records
    dist_cal = 0.
    for i in range(data_length):
        dist_cal += np.sqrt(np.sum((x[i] - mu[z[i]])**2))
    dist_record[0] = 1 / (SIGMA**2) * dist_cal
    cluster_num_record[0] = cluster_num

    for n in range(1, ITER_NUM + 1):

        # samples z_i for each data point
        for i in range(data_length):

            # calculates the probability that a data point belongs to a certain cluster
            for k in range(cluster_num):
                prob_data_cluster[
                    i, k] = n_cluster[k] / (float)(n + data_length + ALPHA) / (
                        ((2 * np.pi)**(DIM / 2)) *
                        (np.linalg.det(cov))**0.5) * (float)(np.exp(
                            -0.5 *
                            (np.mat(x[i]) - mu[k]) * np.linalg.inv(cov) *
                            (np.mat(x[i]) - mu[k]).T))
            prob_temp = list(prob_data_cluster[i, :])

            # probability that the data point belongs to a new cluster
            prob_temp.append(
                ALPHA / (float)(n + data_length + ALPHA) /
                (((2 * np.pi)**(DIM / 2)) * (SIGMA**DIM)) *
                (np.linalg.det(cov_t)**0.5) / (np.linalg.det(cov)**0.5) *
                (float)(np.exp(0.5 * np.mat(x[i]) * cov_z * np.mat(x[i]).T)))
            prob_temp = prob_temp / np.sum(prob_temp)

            # samples the new z_i for given data point x_i
            data_cluster_temp = sample_z(prob_temp)

            # checks whether a new cluster is generated
            if (data_cluster_temp > cluster_num - 1):
                cluster_num += 1
                mu.append(
                    np.random.multivariate_normal(np.zeros(DIM), np.eye(DIM)))
                n_cluster.append(0)
                prob_data_cluster = np.column_stack(
                    (prob_data_cluster, np.zeros((data_length, 1))))

            # updates the cluster that the data point x_i belongs to
            if (z[i] != data_cluster_temp):
                n_cluster[z[i]] -= 1
                n_cluster[data_cluster_temp] += 1
                z[i] = data_cluster_temp

            # checks whether an original cluster disappears
            if 0 in n_cluster:
                cluster_num -= 1
                zero_index = n_cluster.index(0)
                del mu[zero_index]
                del n_cluster[zero_index]
                prob_data_cluster = np.delete(prob_data_cluster,
                                              zero_index,
                                              axis=1)
                z = list(
                    np.array(z) -
                    np.array(z > zero_index * np.ones(data_length)))

        # samples phi_k for each cluster
        for k in range(cluster_num):
            sum_x_i_cluster_k = np.zeros(DIM)
            for i in range(data_length):
                if (z[i] == k):
                    sum_x_i_cluster_k += x[i]
            cov_k = np.linalg.inv(1 / (SIGMA**2) * np.eye(DIM) +
                                  n_cluster[k] * np.linalg.inv(cov))
            mu_k = np.array(
                (cov_k * (np.linalg.inv(cov) * np.mat(sum_x_i_cluster_k).T)).T)
            mu[k] = np.random.multivariate_normal(mu_k[0], cov_k)

        # records
        dist_cal = 0.
        for i in range(data_length):
            dist_cal += np.sqrt(np.sum((x[i] - mu[z[i]])**2))
        dist_record[n] = 1 / (SIGMA**2) * dist_cal
        cluster_num_record[n] = cluster_num

        # test
        print('After %d iterations, the number of clusters is %d' %
              (n, cluster_num))

    # output
    print(mu)  # prints the mean value of each cluster after iterations
    print(n_cluster)  # prints the number of data points in each cluster after iterations

    # plots the number of clusters over iterations
    plt.figure(1)
    plt.plot(cluster_num_record)
    plt.xlim(0, ITER_NUM)
    plt.xlabel('iterations')
    plt.ylabel('number of clusters')
    plt.show()
    # plots the mean distance over iterations
    plt.figure(2)
    plt.plot(dist_record)
    plt.xlim(0, ITER_NUM)
    plt.xlabel('iterations')
    plt.ylabel('mean distance')
    plt.show()
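
The helpers sample_z and cluster_num_count are not shown in this listing. Minimal sketches consistent with how they are called above (assuming numpy is imported as np, as in the code; these are assumptions, not the author's implementations):

def sample_z(prob):
    """Draw one cluster index from the (already normalised) probability vector."""
    return int(np.random.choice(len(prob), p=prob))


def cluster_num_count(z):
    """Return a list whose k-th entry is the number of points currently assigned to cluster k."""
    return [z.count(k) for k in range(max(z) + 1)]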
Example #13
def main():

    parser = argparse.ArgumentParser(
        description='Train NVIDIA End-to-End Learning model')
    parser.add_argument(
        '--init',
        help=
        "Path to .h5 file (optional). If not provided, a new model will be created and trained",
        type=str)
    parser.add_argument(
        '--save',
        help=
        "Path to save .h5 file. If not provided a generic timestamped name will be used and saved",
        type=str)

    args = parser.parse_args()
    init_file = args.init
    save_file = args.save
    if save_file is None:
        timestamp = strftime("%Y-%m-%d_%H:%M", gmtime())
        out_name = timestamp + '_model.h5'
        save_file = os.path.join(out_name)

    if init_file is None:
        model = model_NVIDIA.define_NVIDIA()
        print(len(model.layers))
    else:
        print("Loading Model")
        model = mutils.load_net(init_file)
        if model is None:
            print('Could not find right model. Exiting')
            exit()

    samples = []
    data_dir = ['../BehavClone_training']  # alternatives: ['../BehavClone_training', './']
    for training_dir in data_dir:
        if not os.path.isdir(training_dir):
            print("data directory doesn't exist")

        csv_file = os.path.join(training_dir, 'driving_log.csv')
        if not os.path.isfile(csv_file):
            print("Could not find CSV file")

        image_dir = os.path.join(training_dir, 'IMG')
        if not os.path.isdir(image_dir):
            print("Could not find image directory")

        print(csv_file)
        with open(csv_file) as csvfile:
            reader = csv.reader(csvfile)
            for line in reader:
                samples.append(line)

    train_samples, validation_samples = train_test_split(samples,
                                                         test_size=0.2)

    model.compile(loss='mean_squared_error', optimizer=Adam(lr=1e-3))
    model_checkpoint = ModelCheckpoint(
        filepath='model.weights.{epoch:02d}-{val_loss:.5f}.h5',
        verbose=1,
        save_best_only=True,
        save_weights_only=True,
        period=20)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=5,
                                   verbose=1)
    reduce_LR_plateau = ReduceLROnPlateau(factor=0.1,
                                          patience=5,
                                          verbose=1,
                                          epsilon=1e-5)
    tensorboard_log = TensorBoard(log_dir='./tb_logs',
                                  histogram_freq=0,
                                  write_graph=True,
                                  write_images=False)

    batch_size = 15
    augmentation_factor = (3 * 2) * 2  # (Left+right+center) * flip
    samples_per_epoch = len(train_samples) * augmentation_factor
    nb_val_samples = len(validation_samples) * augmentation_factor
    n_epoch = 30
    fit = model.fit_generator(generator(train_samples, batch_size),
                              samples_per_epoch=samples_per_epoch,
                              nb_epoch=n_epoch,
                              verbose=2,
                              callbacks=[
                                  model_checkpoint, early_stopping,
                                  reduce_LR_plateau, tensorboard_log
                              ],
                              validation_data=generator(
                                  validation_samples, batch_size),
                              nb_val_samples=nb_val_samples,
                              initial_epoch=0)
    print("Done Training")
    mutils.save_net(model, save_file)
    mutils.draw_net(model, 'model.png')
Example #14
import data_generator as dg
import pandas as pd
import cv2
input_file = './deepdrivedb/deepdrive/linux_recordings/2018-01-18__05-14-48PM'
output_file = '.'


files = dg.get_hdf5_file_names(input_file)


gen = dg.generator(files)
print(gen())

df = pd.DataFrame(columns=('name', 'steering'))

print('Starting timer')
import time
t = time.time()
index = 0
for tpl in gen():

    name = str(index) + '.png'
    cv2.imwrite('images/' + name, tpl[0])
    df.loc[index] = [name, tpl[1]]
    print(tpl[1])
    index += 1

print('Process time: ' + str(time.time() - t))
df.to_csv('a.csv', index=False)
Example #15
    def train(self, batch_size=128, epochs=100000, keep_prob=0.8):
        '''
        Defines the variables necessary for training, then begins training.

        Data is supplied by generator objects from the data_generator.py module,
        built from ./trainset.csv and ./validset.csv.

        Keyword arguments:

        batch_size -- the number of samples used in each training batch. Keep memory
                      constraints in mind

        epochs -- the number of training iterations to run (one batch per iteration)

        keep_prob -- the keep probability of the dropout layer used during training
        '''
        #self.cross_entropy = tf.reduce_sum(tf.square(self.y_hat-self.y))
        self.lr = 1e-5
        self.cross_entropy = tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(labels=self.y,
                                                       logits=self.fc))
        self.train_step = tf.train.AdamOptimizer(self.lr).minimize(
            self.cross_entropy)

        #params = tf.trainable_variables()
        #opt = tf.train.AdamOptimizer(learning_rate=0.001)
        #gradients = tf.gradients(self.cross_entropy, params)
        #clipped_gradients, norm = tf.clip_by_global_norm(gradients,5)
        #train_op = opt.apply_gradients(zip(clipped_gradients, params))

        self.correct_prediction = tf.equal(tf.argmax(self.y_hat, 1),
                                           tf.argmax(self.y, 1))
        self.accuracy = tf.reduce_mean(
            tf.cast(self.correct_prediction, tf.float32))

        # Creates a saver object to generate checkpoints during training. This one also saves

        # the gradients and the increment of the Adam Optimizer.

        ckptPath = self.ckptPath
        modelPath = self.modelPath
        self.saver = tf.train.Saver()

        l, acc, valid_acc = [], [], []
        g = generator('./trainset.csv', batch_size)
        v = generator('./validset.csv', batch_size)
        with tf.Session() as sess:
            if os.path.isdir('trainModel'):
                self.saver.restore(sess, ckptPath)

            else:
                sess.run(tf.global_variables_initializer())

            for i in range(epochs):
                bag = next(g)
                images, labels = bag[0], bag[1]

                label = (np.arange(
                    self.numclass) == labels[:, None]).astype('float32')

                #logit = self.y_hat.eval(feed_dict = {self.x:images,self.keep_prob:1.0})

                loss = self.cross_entropy.eval(feed_dict={
                    self.x: images,
                    self.y: label,
                    self.keep_prob: 1.0
                })
                train_accuracy = self.accuracy.eval(feed_dict={
                    self.x: images,
                    self.y: label,
                    self.keep_prob: 1.0
                })

                #print('step %d, loss %3f, training accuracy %.3f' % (i, loss, train_accuracy))

                if i % 50 == 0:
                    # get the weights and biases:
                    # tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
                    # tf.get_default_graph().get_tensor_by_name(variable_name)
                    print('step %d, loss %3f, training accuracy %.3f' %
                          (i, loss, train_accuracy))
                    l.append(loss)
                    acc.append(train_accuracy)

                if i % 20000 == 0 and i != 0:
                    self.lr *= 0.1

                if i % 1407 == 0:
                    valid_bag = next(v)
                    valid_imgs, valid_labels = valid_bag[0], valid_bag[1]
                    valid_label = (np.arange(
                        self.numclass) == valid_labels[:,
                                                       None]).astype('float32')

                    valid_accuracy = self.accuracy.eval(
                        feed_dict={
                            self.x: valid_imgs,
                            self.y: valid_label,
                            self.keep_prob: 1.0
                        })
                    valid_acc.append(valid_accuracy)
                    print('step %d, validing accuracy %.3f' %
                          (i, valid_accuracy))
                    self.saver.save(sess, save_path=ckptPath)

                self.train_step.run(feed_dict={
                    self.x: images,
                    self.y: label,
                    self.keep_prob: keep_prob,
                    self.lr_ph: self.lr
                })

            self.model_saver.save(sess, save_path=modelPath)
            np.save('loss.npy', l)
            np.save('train_acc.npy', acc)
            np.save('valid_acc.npy', valid_acc)
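
The generator used above (built from ./trainset.csv and ./validset.csv) is not part of this listing. A rough sketch of a CSV-backed classification batch generator that matches how next(g) is consumed; the column names and image loading are assumptions, not the author's data_generator module:

import cv2
import numpy as np
import pandas as pd


def generator(csv_path, batch_size):
    """Hypothetical data_generator.generator: yields (images, integer labels) batches forever."""
    df = pd.read_csv(csv_path)
    while True:
        batch = df.sample(batch_size)  # a fresh random batch for every next() call
        # Assumed columns: 'path' holding an image file path, 'label' holding an integer class id.
        images = np.array([cv2.imread(p) for p in batch['path']])
        labels = batch['label'].values
        yield images, labels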
Example #16
        layer.trainable = False

    # compile the model (should be done *after* setting layers to non-trainable)
    model.compile(optimizer='rmsprop',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # data for training and validation
    # Split the lines into training and validation samples (20% for validation set)
    train_samples, validation_samples = dg.train_test_split(
        dg.read_gt_data(13356), test_size=0.2)
    print('train samples len', len(train_samples))

    # Set the training and validation data generators
    batch_size = 32
    train_generator = dg.generator(train_samples, batch_size=batch_size)
    validation_generator = dg.generator(validation_samples,
                                        batch_size=batch_size)

    # train the model on the new data for a few epochs
    history_object = model.fit_generator(
        train_generator,
        steps_per_epoch=len(train_samples) // batch_size,
        validation_data=validation_generator,
        validation_steps=len(validation_samples) // batch_size,
        epochs=5,
        verbose=2)

    #model.fit_generator(X_train, Y_train, epochs=50,shuffle=True, verbose=2)

    # at this point, the top layers are well trained and we can start fine-tuning
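
The closing comment refers to the usual second phase of transfer learning. A sketch of what that fine-tuning step typically looks like in Keras; the number of unfrozen layers, the optimizer settings, and the epoch count are illustrative placeholders, not taken from this example:

    # Unfreeze the top of the network and retrain with a much smaller learning rate.
    from keras.optimizers import SGD

    for layer in model.layers[:-20]:
        layer.trainable = False
    for layer in model.layers[-20:]:
        layer.trainable = True

    # Recompile so the trainability changes take effect, then train for a few more epochs.
    model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    model.fit_generator(train_generator,
                        steps_per_epoch=len(train_samples) // batch_size,
                        validation_data=validation_generator,
                        validation_steps=len(validation_samples) // batch_size,
                        epochs=3,
                        verbose=2)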
Example #17
def test():
    m = 3
    df = generator(m)
    assert isinstance(df, pd.DataFrame)
    assert df.shape[1] == 3
    assert df.iloc[-1]['id'] == m - 1
Example #18
    image_dir = os.path.join(training_dir, 'IMG')
    if not os.path.isdir(image_dir):
        print("Could not find image directory")

    print(csv_file)
    with open(csv_file) as csvfile:
        reader = csv.reader(csvfile)
        for line in reader:
            samples.append(line)

train_samples, validation_samples = train_test_split(samples,
                                                     test_size=0.2,
                                                     random_state=1200)
train_samples = train_samples[5:6]
train_generator = generator(train_samples, batch_size=1, drop_prob=0)
plt.figure(figsize=(6, 2))
print(train_samples)
plt.xticks([], [])
plt.yticks([], [])
plt.subplot(1, 3, 1)
plt.imshow(
    cv2.cvtColor(cv2.imread(train_samples[0][1].strip()), cv2.COLOR_BGR2RGB))
s = float(train_samples[0][3]) + 0.2
plt.title('Left: {}'.format(str(s)))
plt.xticks([], [])
plt.yticks([], [])
plt.subplot(1, 3, 2)
plt.imshow(
    cv2.cvtColor(cv2.imread(train_samples[0][0].strip()), cv2.COLOR_BGR2RGB))
s = s - 0.2