Example #1
        samples = []
        for tub in tubs:
            num_records = tub.get_num_records()
            for iRec in range(0, num_records):
                json_data = tub.get_json_record(iRec)
                sample = json_data[record]
                samples.append(float(sample))

        plt.hist(samples, 50)
        plt.xlabel(record)
        plt.show()


if __name__ == '__main__':
    args = docopt(__doc__)
    cfg = dk.load_config()
    
    if args['drive']:
        drive(cfg, model_path=args['--model'], use_joystick=args['--js'], mode=args['--mode'])
    
    elif args['calibrate']:
        calibrate()
    
    elif args['train']:
        tub = args['--tub']
        model = args['--model']
        mode = args['--mode']
        train(cfg, tub, model, mode)

    elif args['check']:
        tub = args['--tub']
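
The fragment above relies on the legacy Tub API (get_num_records/get_json_record) plus matplotlib. A minimal sketch of the setup it assumes, with placeholder names (hypothetical, not part of the original script):

# Hypothetical setup for the histogram fragment above.
import matplotlib.pyplot as plt
from donkeycar.parts.datastore import Tub   # legacy Tub API (assumption)

tubs = [Tub('data/tub_1')]   # placeholder tub path
record = 'user/angle'        # any numeric record key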
Example #2
            print(
                "You can now go to <your hostname.local>:%d to drive your car."
                % cfg.WEB_CONTROL_PORT)
    elif isinstance(ctr, JoystickController):
        print("You can now move your joystick to drive your car.")
        ctr.set_tub(tub_writer.tub)
        ctr.print_controls()

    # run the vehicle
    print('Starting vehicle main loop...')
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)


if __name__ == '__main__':
    args = docopt(__doc__)
    cfg = dk.load_config(myconfig=args['--myconfig'])

    if args['drive']:
        model_type = args['--type']
        camera_type = args['--camera']
        drive(cfg,
              model_path=args['--model'],
              use_joystick=args['--js'],
              model_type=model_type,
              camera_type=camera_type,
              meta=args['--meta'])
    elif args['train']:
        print('Use python train.py instead.\n')
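
This template loads its settings with dk.load_config(myconfig=args['--myconfig']), so values defined in a myconfig.py shadow the defaults from config.py. A hedged sketch of such an override file, using option names that appear in the snippet above:

# myconfig.py -- only the values you want to override; everything else
# falls back to config.py (assumes the standard donkeycar template layout)
WEB_CONTROL_PORT = 8887
DRIVE_LOOP_HZ = 20
MAX_LOOPS = None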
Example #3
    import argparse
    from vae_model import KerasVAE
    from donkeycar.templates.train import collate_records, preprocessFileList

    parser = argparse.ArgumentParser(
        description='Generate training data for a WorldModels RNN.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('vae', help='Path to the weights of a trained VAE.')
    parser.add_argument('file',
                        nargs='+',
                        help='Text file with a list of tubs to preprocess.')

    args = parser.parse_args()

    try:
        cfg = dk.load_config()
    except FileNotFoundError:
        cfg = dk.load_config("config.py")  # retry in the current directory
    tub_names = preprocessFileList(args.file)

    input_shape = (cfg.IMAGE_W, cfg.IMAGE_H, cfg.IMAGE_DEPTH)
    z_dim, dropout, aux = model_meta(args.vae)
    if aux is None:
        aux = 0

    kl = KerasVAE(input_shape=input_shape,
                  z_dim=z_dim,
                  aux=aux,
                  dropout=dropout)
    kl.set_weights(args.vae)
    kl.compile()
Example #4
    def run(self, args, parser):
        '''
        Load the images from a tub and create a movie from them.
        Movie
        '''

        if args.tub is None:
            print("ERR>> --tub argument missing.")
            parser.print_help()
            return

        conf = os.path.expanduser(args.config)
        if not os.path.exists(conf):
            print("No config file at location: %s. Add --config to specify\
                 location or run from dir containing config.py." % conf)
            return

        self.cfg = dk.load_config(conf)

        if args.type is None and args.model is not None:
            args.type = self.cfg.DEFAULT_MODEL_TYPE
            print(
                "Model type not provided. Using default model type from config file"
            )

        if args.salient:
            if args.model is None:
                print(
                    "ERR>> salient visualization requires a model. Pass with the --model arg."
                )
                parser.print_help()
                return

            if args.type not in ['linear', 'categorical']:
                print(
                    "Model type {} is not supported. Only linear or categorical is supported for salient visualization"
                    .format(args.type))
                parser.print_help()
                return

        self.tub = Tub(args.tub)
        self.index = self.tub.get_index(shuffled=False)
        start = args.start
        self.end = args.end if args.end != -1 else len(self.index)
        if self.end >= len(self.index):
            self.end = len(self.index) - 1
        num_frames = self.end - start
        self.iRec = start
        self.scale = args.scale
        self.keras_part = None
        self.do_salient = False
        if args.model is not None:
            self.keras_part = get_model_by_type(args.type, cfg=self.cfg)
            self.keras_part.load(args.model)
            self.keras_part.compile()
            if args.salient:
                self.do_salient = self.init_salient(self.keras_part.model)

        print('making movie', args.out, 'from', num_frames, 'images')
        clip = mpy.VideoClip(self.make_frame,
                             duration=((num_frames - 1) /
                                       self.cfg.DRIVE_LOOP_HZ))
        clip.write_videofile(args.out, fps=self.cfg.DRIVE_LOOP_HZ)
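
The clip duration follows directly from the frame count and the drive loop rate: for example, num_frames = 1001 at DRIVE_LOOP_HZ = 20 gives duration = (1001 - 1) / 20 = 50 seconds, so writing the file with fps=DRIVE_LOOP_HZ plays back at the original driving speed.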
Example #5
def test_config():
    path = default_template(d2_path(gettempdir()))
    cfg = dk.load_config(os.path.join(path, 'config.py'))
    assert cfg is not None
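
Assuming a standard pytest layout, this test can be run on its own with:

    pytest -k test_config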
Example #6
"""
import os
import time
import math
from docopt import docopt
import donkeycar as dk

from donkeycar.parts.cv import CvImageView, ImgBGR2RGB, ImgRGB2BGR, ImageScale, ImgWriter
from donkeycar.parts.salient import SalientVis
from donkeycar.parts.network import ZMQValueSub, UDPValueSub, TCPClientValue
from donkeycar.parts.transform import Lambda
from donkeycar.parts.image import JpgToImgArr

V = dk.vehicle.Vehicle()
args = docopt(__doc__)
cfg = dk.load_config(args['--config'])

model_path = args['--model']
model_type = args['--type']
ip = args['--ip']

if model_type is None:
    model_type = "categorical"

model = dk.utils.get_model_by_type(model_type, cfg)
model.load(model_path)

V.add(TCPClientValue(name="camera", host=ip), outputs=["packet"])
V.add(JpgToImgArr(), inputs=["packet"], outputs=["img"])
V.add(ImgBGR2RGB(), inputs=["img"], outputs=["img"])
V.add(SalientVis(model), inputs=["img"], outputs=["img"])
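
The pipeline above stops after the SalientVis part. A hedged continuation, using only parts already imported at the top of the script, might display the overlay and start the loop:

# Hedged continuation (assumption, not from the original script):
V.add(ImgRGB2BGR(), inputs=["img"], outputs=["img"])  # OpenCV display expects BGR
V.add(CvImageView(), inputs=["img"])
V.start(rate_hz=cfg.DRIVE_LOOP_HZ)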
Example #7
def MAIN():
    cfg = dk.load_config()
    CFG = cfg

    parts = []

    # 1. power train
    steering_controller = PCA9685(cfg.STEERING_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    steering = PWMSteering(controller=steering_controller,
                           left_pulse=cfg.STEERING_LEFT_PWM,
                           right_pulse=cfg.STEERING_RIGHT_PWM)

    throttle_controller = PCA9685(cfg.THROTTLE_CHANNEL,
                                  cfg.PCA9685_I2C_ADDR,
                                  busnum=cfg.PCA9685_I2C_BUSNUM)
    throttle = PWMThrottle(controller=throttle_controller,
                           max_pulse=cfg.THROTTLE_FORWARD_PWM,
                           zero_pulse=cfg.THROTTLE_STOPPED_PWM,
                           min_pulse=cfg.THROTTLE_REVERSE_PWM)

    # 2. driver
    drivers = []
    ctr = get_js_controller(cfg)

    # 3. sensors
    cam = PiCamera(image_w=cfg.IMAGE_W,
                   image_h=cfg.IMAGE_H,
                   image_d=cfg.IMAGE_DEPTH)

    # 4. vehicle run configurations
    run_params = {'DRIVE_LOOP_HZ': 20, 'MAX_LOOPS': None}

    #### SHOULD END HERE
    """
    V = dk.vehicle.Vehicle(
      cfg
    )
    """

    parts = [
        {
            'part': throttle,
            'inputs': ['throttle'],
            'outputs': [],
            'threaded': False
        },
        {
            'part': steering,
            'inputs': ['angle'],
            'outputs': [],
            'threaded': False
        },
        {
            'part': cam,
            'inputs': [],
            'outputs': ['cam/image_array'],
            'threaded': True
        },
        {
            'part': ctr,
            'inputs': [],
            'outputs': ['angle', 'throttle', 'user/mode', 'recording'],
            'threaded': True
        },
    ]
    """
    V.add(throttle, inputs=['throttle'], outputs=[])
    V.add(steering, inputs=['angle'], outputs=[])
    V.add(
        ctr, 
        inputs=[],
        outputs=['angle', 'throttle', 'user/mode', 'recording'],
        threaded=True
    )
    """
    #V.add(cam, outputs=['cam/image_array'], threaded=True)

    return parts, CFG
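
MAIN() returns bare part descriptors rather than a wired Vehicle. A hypothetical caller (names taken from the dicts above) could assemble and run them like this:

# Hypothetical caller for MAIN(); not part of the original module.
import donkeycar as dk

parts, cfg = MAIN()
V = dk.vehicle.Vehicle()
for p in parts:
    V.add(p['part'], inputs=p['inputs'], outputs=p['outputs'],
          threaded=p['threaded'])
V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)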
Example #8
    def run(self, args):
        '''
        Load the images from a tub and create a movie from them.
        Movie
        '''
        import moviepy.editor as mpy

        args, parser = self.parse_args(args)

        if args.tub is None:
            print("ERR>> --tub argument missing.")
            parser.print_help()
            return

        if args.type is None and args.model is not None:
            print(
                "ERR>> --type argument missing. Required when providing a model."
            )
            parser.print_help()
            return

        if args.salient:
            if args.model is None:
                print(
                    "ERR>> salient visualization requires a model. Pass with the --model arg."
                )
                parser.print_help()
                return

            # Imported like this, TF is only required when --salient is used,
            # and the imported context stays available to the callbacks that
            # compute the salient mask.
            from tensorflow.python.keras import backend as K
            import tensorflow as tf
            os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

        conf = os.path.expanduser(args.config)

        if not os.path.exists(conf):
            print("No config file at location: %s. Add --config to specify\
                 location or run from dir containing config.py." % conf)
            return

        self.cfg = dk.load_config(conf)
        self.tub = Tub(args.tub)
        self.index = self.tub.get_index(shuffled=False)
        start = args.start
        self.end = args.end if args.end != -1 else len(self.index)
        if self.end >= len(self.index):
            self.end = len(self.index) - 1
        num_frames = self.end - start
        self.iRec = start
        self.scale = args.scale
        self.keras_part = None
        self.convolution_part = None
        if args.model is not None:
            self.keras_part = get_model_by_type(args.type, cfg=self.cfg)
            self.keras_part.load(args.model)
            self.keras_part.compile()
            if args.salient:
                self.init_salient(self.keras_part.model)

                # This function is nested so the conditional TF import above
                # stays in scope for the callback. Done this way, we avoid
                # importing inside the method below, which would trigger a new
                # CUDA device allocation on each call.
                def compute_visualisation_mask(img):
                    #from https://github.com/ermolenkodev/keras-salient-object-visualisation

                    activations = self.functor([np.array([img])])
                    activations = [
                        np.reshape(
                            img, (1, img.shape[0], img.shape[1], img.shape[2]))
                    ] + activations
                    upscaled_activation = np.ones((3, 6))
                    for layer in [5, 4, 3, 2, 1]:
                        averaged_activation = np.mean(
                            activations[layer],
                            axis=3).squeeze(axis=0) * upscaled_activation
                        output_shape = (activations[layer - 1].shape[1],
                                        activations[layer - 1].shape[2])
                        x = tf.constant(
                            np.reshape(averaged_activation,
                                       (1, averaged_activation.shape[0],
                                        averaged_activation.shape[1], 1)),
                            tf.float32)
                        conv = tf.nn.conv2d_transpose(
                            x,
                            self.layers_kernels[layer],
                            output_shape=(1, output_shape[0], output_shape[1],
                                          1),
                            strides=self.layers_strides[layer],
                            padding='VALID')
                        with tf.Session() as session:
                            result = session.run(conv)
                        upscaled_activation = np.reshape(result, output_shape)
                    final_visualisation_mask = upscaled_activation
                    return (final_visualisation_mask -
                            np.min(final_visualisation_mask)) / (
                                np.max(final_visualisation_mask) -
                                np.min(final_visualisation_mask))

                self.compute_visualisation_mask = compute_visualisation_mask

        print('making movie', args.out, 'from', num_frames, 'images')
        clip = mpy.VideoClip(self.make_frame,
                             duration=((num_frames - 1) /
                                       self.cfg.DRIVE_LOOP_HZ))
        clip.write_videofile(args.out, fps=self.cfg.DRIVE_LOOP_HZ)
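
This run() method backs donkeycar's makemovie command; a typical invocation (tub path and model are placeholders) looks something like:

donkey makemovie --tub data/tub_1 --out tub_movie.mp4 --model models/pilot.h5 --type linear --salient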
Example #9
    def run(self, args, parser):
        '''
        Load the images from a tub and create a movie from them.
        Movie
        '''
        global cfg

        if args.tub is None:
            print("ERR>> --tub argument missing.")
            parser.print_help()
            return

        conf = os.path.expanduser(args.config)
        if not os.path.exists(conf):
            print("No config file at location: %s. Add --config to specify\
                 location or run from dir containing config.py." % conf)
            return

        cfg = dk.load_config(conf)

        if args.type is None and args.model is not None:
            args.type = cfg.DEFAULT_MODEL_TYPE
            print(
                "Model type not provided. Using default model type from config file"
            )

        if args.salient:
            if args.model is None:
                print(
                    "ERR>> salient visualization requires a model. Pass with the --model arg."
                )
                parser.print_help()
                return

            #if args.type not in ['linear', 'categorical']:
            #    print("Model type {} is not supported. Only linear or categorical is supported for salient visualization".format(args.type))
            #    parser.print_help()
            #    return

        self.tub = Tub(args.tub)

        start = args.start
        self.end_index = args.end if args.end != -1 else len(self.tub)
        num_frames = self.end_index - start

        # Move to the correct offset
        self.current = 0
        self.iterator = self.tub.__iter__()
        while self.current < start:
            next(self.iterator)
            self.current += 1

        self.scale = args.scale
        self.keras_part = None
        self.do_salient = False
        self.user = args.draw_user_input
        self.pilot_angle = 0.0
        self.pilot_throttle = 0.0
        self.pilot_score = 1.0  # used for color intensity
        self.user_angle = 0.0
        self.user_throttle = 0.0
        self.control_score = 0.25  # used for control size
        self.flag_test_pilot_angle = 1
        self.flag_test_pilot_throttle = 1
        self.flag_test_user_angle = 1
        self.flag_test_user_throttle = 1
        self.throttle_circle_pilot_angle = 0
        self.throttle_circle_user_angle = 0
        self.is_test = False
        self.last_pilot_throttle = 0.0  # used for color transparency
        self.last_user_throttle = 0.0  # used for color transparency
        self.pilot_throttle_trans = 1.0  # used for color transparency
        self.user_throttle_trans = 1.0  # used for color transparency
        self.pilot_throttle_trans_rate = 0.25  # used for color transparency
        self.user_throttle_trans_rate = 0.25  # used for color transparency

        if args.model is not None:
            self.keras_part = get_model_by_type(args.type, cfg=cfg)
            self.keras_part.load(args.model)
            if args.salient:
                self.do_salient = self.init_salient(
                    self.keras_part.interpreter.model)

        print('making movie', args.out, 'from', num_frames, 'images')
        clip = mpy.VideoClip(self.make_frame,
                             duration=((num_frames - 1) / cfg.DRIVE_LOOP_HZ))
        clip.write_videofile(args.out, fps=cfg.DRIVE_LOOP_HZ)
Example #10
    steps_per_epoch = total_train // cfg.BATCH_SIZE
    print('steps_per_epoch', steps_per_epoch)
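    # e.g. with total_train = 10000 records and cfg.BATCH_SIZE = 128,
    # steps_per_epoch = 10000 // 128 = 78 batches per epoch (figures assumed)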

    kl.train(train_gen,
             val_gen,
             saved_model_path=model_path,
             steps=steps_per_epoch,
             train_split=cfg.TRAIN_TEST_SPLIT)





if __name__ == '__main__':
    args = docopt(__doc__)
    cfg = dk.load_config()
    try:
        if args['drive']:
            drive(cfg, model_path=args['--model'], use_joystick=args['--js'])
    
        elif args['train']:
            tub = args['--tub']
            model = args['--model']
            cache = not args['--no_cache']
            train(cfg, tub, model)
    except (KeyboardInterrupt, SystemExit):
        GPIO.cleanup()
        raise
    

Example #11
def default_categorical(input_shape=(120, 160, 3), roi_crop=(0, 0)):

    opt = keras.optimizers.Adam()
    drop = 0.2
    cfg = dk.load_config()

    # we now expect that cropping is done elsewhere; adjust our expected image size here:
    input_shape = adjust_input_shape(input_shape, roi_crop)

    img_in = Input(
        shape=input_shape, name='img_in'
    )  # First layer, input layer, Shape comes from camera.py resolution, RGB
    x = img_in
    x = Convolution2D(
        24, (5, 5), strides=(2, 2), activation='relu', name="conv2d_1"
    )(
        x
    )  # 24 features, 5 pixel x 5 pixel kernel (convolution, feature) window, 2wx2h stride, relu activation
    x = Dropout(drop)(
        x
    )  # Randomly drop out (turn off) 20% of the neurons (prevent overfitting)
    x = Convolution2D(
        32, (5, 5), strides=(2, 2), activation='relu', name="conv2d_2"
    )(x)  # 32 features, 5px5p kernel window, 2wx2h stride, relu activation
    x = Dropout(drop)(
        x
    )  # Randomly drop out (turn off) 20% of the neurons (prevent overfitting)
    if input_shape[0] > 32:
        x = Convolution2D(
            64, (5, 5), strides=(2, 2), activation='relu', name="conv2d_3")(
                x)  # 64 features, 5px5p kernel window, 2wx2h stride, relu
    else:
        x = Convolution2D(
            64, (3, 3), strides=(1, 1), activation='relu', name="conv2d_3")(
                x)  # 64 features, 3px3p kernel window, 1wx1h stride, relu
    if input_shape[0] > 64:
        x = Convolution2D(
            64, (3, 3), strides=(2, 2), activation='relu', name="conv2d_4")(
                x)  # 64 features, 3px3p kernel window, 2wx2h stride, relu
    elif input_shape[0] > 32:
        x = Convolution2D(
            64, (3, 3), strides=(1, 1), activation='relu', name="conv2d_4")(
                x)  # 64 features, 3px3p kernel window, 1wx1h stride, relu
    x = Dropout(drop)(
        x
    )  # Randomly drop out (turn off) 20% of the neurons (prevent overfitting)
    x = Convolution2D(
        64, (3, 3), strides=(1, 1), activation='relu', name="conv2d_5")(
            x)  # 64 features, 3px3p kernel window, 1wx1h stride, relu
    x = Dropout(drop)(
        x
    )  # Randomly drop out (turn off) 20% of the neurons (prevent overfitting)
    # Possibly add MaxPooling (makes it less sensitive to position in the image). Camera angle is fixed, so it may not be needed.

    # x = Flatten(name='flattened')(x)
    a, b, c, d = x.shape  # returns dimension
    a = b * c * d
    x = Permute([1, 2, 3])(x)
    x = Reshape((int(a), ))(
        x
    )  # convert dim -> int; flatten to 1D (fully connected)
    x = Dense(100, activation='relu', name="fc_1")(
        x)  # Classify the data into 100 features, make all negatives 0
    x = Dropout(drop)(
        x
    )  # Randomly drop out (turn off) 20% of the neurons (prevent overfitting)
    x = Dense(50, activation='relu', name="fc_2")(
        x)  # Classify the data into 50 features, make all negatives 0
    x = Dropout(drop)(
        x)  # Randomly drop out 20% of the neurons (prevent overfitting)
    #categorical output of the angle
    if cfg.DEFAULT_MODEL_TYPE == 'copter':

        angle_out = Dense(4, activation='softmax', name='angle_out')(x)
        model = Model(inputs=[img_in], outputs=[angle_out])

    else:

        angle_out = Dense(15, activation='softmax', name='angle_out')(
            x
        )  # Connect every input with every output; 15 steering categories. Softmax gives a probability (0.0-1.0) per category, and the best one is chosen.
        throttle_out = Dense(20, activation='softmax', name='throttle_out')(
            x)  # 20 throttle categories; softmax gives a probability per bin

        model = Model(inputs=[img_in], outputs=[angle_out, throttle_out])

    return model
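
default_categorical() only builds the Model; compiling is left to the caller. A hedged sketch of compiling the two-output case (loss names and weights are assumptions, not taken from the snippet above):

# Hedged usage sketch; loss names/weights are assumptions.
model = default_categorical(input_shape=(120, 160, 3))
model.compile(optimizer='adam',
              metrics=['accuracy'],
              loss={'angle_out': 'categorical_crossentropy',
                    'throttle_out': 'categorical_crossentropy'},
              loss_weights={'angle_out': 0.9, 'throttle_out': 0.1})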
Example #12
    sample_vae(kl, dirs, count)


if __name__ == "__main__":
    args = docopt(__doc__)

    if args['--meta']:
        z, dropout, aux = model_meta(args['<model>'])
        print("Z_dim: {}".format(z))
        print("  Aux: {}".format(aux))
        print(" Drop: {}".format(dropout))
        exit()

    try:
        # This will look in the directory containing this script, not necessarily the current dir
        cfg = dk.load_config()
    except FileNotFoundError:
        cfg = dk.load_config("config.py")

    tub = args['--tub']
    model = args['<model>']
    model_type = args['--type']
    count = int(args['--count'])
    z_dim = int(args['--z_dim'])
    z, dropout, aux = model_meta(model)
    if z is not None:
        z_dim = z
    if aux is None:
        aux = 0

    print("Model meta: {} auxiliary outputs, z_dim {}, dropout {}".format(
    th = TubHandler(path=cfg.DATA_PATH)
    tub = th.new_tub_writer(inputs=inputs, types=types, user_meta=meta)
    V.add(tub, inputs=inputs, outputs=["tub/num_records"], run_condition='recording')


###########################################################
# 8) V.start (Main Loop)
###########################################################

    #V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=cfg.MAX_LOOPS)
    V.start(rate_hz=cfg.DRIVE_LOOP_HZ, max_loop_count=maxloop)


#
#
# main program
#

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', help='.h5 NVIDIA model path')
    parser.add_argument('--myconfig', default='myconfig.py', help='configuration file')
    parser.add_argument('--speed', type=int, default=12, help='speed percentage')
    parser.add_argument('--maxloop', type=int, default=200, help='max loop count')

    args = parser.parse_args()
    print(args)

    cfg = dk.load_config(myconfig=args.myconfig)
    drive(cfg, args)
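
Given the argparse options above, a hypothetical invocation (the script name is a placeholder) might be:

python drive_nvidia.py --model_path models/pilot.h5 --myconfig myconfig.py --speed 12 --maxloop 200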
Example #14
import donkeycar as dk
from donkeycar.vehicle import Vehicle
from lanedetection import LanesDetector

cfg = dk.load_config(config_path='./config.py')

print(cfg.LD_PARAMETERS["videofile_in"])

V = Vehicle()
laneDetector = LanesDetector(cfg.CAMERA_ID)

V.add(laneDetector)

V.start()