Example no. 1
    def __recv_message(self):
        message = self.__client.recvMessage()
        image = frame2numpy(message['frame'], (self.__frame_size[0], self.__frame_size[1]))
        detect = dict()
        detect['peds'] = message['peds']
        detect['vehicles'] = message['vehicles']
        recv_labels = [message['steering'], message['throttle'],
                       message['brake'], message['speed'], message['time']]

        return message, self.__process_image(image=image), recv_labels, detect
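The __process_image method called above is not included in this snippet. A minimal sketch of what such a step might do, assuming a crop-resize-rescale pipeline (the crop region, target size, and scaling are illustrative, not taken from the original class):

import cv2
import numpy as np

def process_image(image, target_size=(160, 320)):
    """Hypothetical preprocessing: drop the sky, resize, rescale to [-1, 1]."""
    cropped = image[image.shape[0] // 3:, :, :]                       # keep the lower two thirds
    resized = cv2.resize(cropped, (target_size[1], target_size[0]))   # cv2 expects (width, height)
    return (resized.astype(np.float32) / 255.0 - 0.5) * 2.0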
Example no. 2
def load_batches_numeric(verbose=1, samples_per_batch=100):
    # Generator for loading batches of frames

    print("Loading dataset file...")
    dataset = gzip.open(dataset_path)
    print("Finished loading dataset file.")

    batch_count = 0
    abandon = 60  # number of initial frames to discard
    abandoned = False
    # discarded because of stuttering at startup, on-screen messages at the top/bottom, etc.

    while True:
        try:
            x_train = []
            y_train = []
            x_test = []
            y_test = []
            count = 0

            print('----------- On Batch: ' + str(batch_count) + ' -----------')
            while count < samples_per_batch:
                    data_dct = pickle.load(dataset)  # Note: pickle.load() reads one pickled record at a time from the file.

                    if batch_count == 0 and not abandoned and count < abandon:  # discard the first 'abandon' frames
                        count += 1
                        if count == abandon:
                            count = 0
                            abandoned = True
                        continue

                    image = frame2numpy(data_dct['frame'], (800, 600))

                    steering = data_dct['steering']

                    # Train test split
                    # TODO: Dynamic train test split | Test series at end of batch
                    if (count % 10) != 0:  # Train
                        x_train.append(image)
                        y_train.append(steering)
                    else:  # Test
                        x_test.append(image)
                        y_test.append(steering)
                    
                    count += 1
                    if (count % 50) == 0 and verbose == 1:
                        print('     ' + str(count) + ' data points loaded in batch.')
            print('Batch loaded.')
            batch_count += 1
            yield x_train, y_train, x_test, y_test
        except EOFError:  # Breaks at end of file
            break
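For context, here is one way a generator like this could be consumed; the compiled Keras model, its input shape, and the per-batch train/evaluate calls are assumptions, not part of the original script:

import numpy as np

def train_from_generator(model, epochs=1):
    # `model` is assumed to be a compiled Keras regression model whose input
    # shape matches the 800x600 frames yielded by load_batches_numeric().
    for epoch in range(epochs):
        for x_train, y_train, x_test, y_test in load_batches_numeric(verbose=1):
            loss = model.train_on_batch(np.asarray(x_train), np.asarray(y_train))
            val_loss = model.evaluate(np.asarray(x_test), np.asarray(y_test), verbose=0)
            print('epoch {} - train loss {} - val loss {}'.format(epoch, loss, val_loss))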
Example no. 3
    def work(self):
        """
        Pretend this worker method does work that takes a long time. During this time, the thread's
        event loop is blocked, except if the application's processEvents() is called: this gives every
        thread (incl. main) a chance to process events, which in this sample means processing signals
        received from GUI (such as abort).
        """
        thread_name = QThread.currentThread().objectName()
        thread_id = int(
            QThread.currentThreadId())  # cast to int() is necessary
        self.sig_msg.emit('Running worker #{} from thread "{}" (#{})'.format(
            self.__id, thread_name, thread_id))

        # Creates a new connection to DeepGTAV using the specified ip and port.
        # If desired, a dataset path and compression level can be set to store all the received data in a gzipped pickle file.
        # We don't want to save a dataset in this case
        self.client = Client(ip=self.args.host, port=self.args.port)
        # self.client = Client(ip="127.0.0.1", port=8000)

        # We set the scenario to be in manual driving, and everything else random (time, weather and location).
        # See deepgtav/messages.py to see what options are supported
        scenario = Scenario(drivingMode=-1)  #manual driving

        # Send the Start request to DeepGTAV. Dataset is set as default, we only receive frames at 10Hz (320, 160)
        self.client.sendMessage(Start(scenario=scenario))

        # Dummy agent
        model = Model()

        # Start listening for messages coming from DeepGTAV. We do it for 80 hours
        stoptime = time.time() + 80 * 3600
        while (time.time() < stoptime and (not self.__abort)):
            # We receive a message as a Python dictionary
            app.processEvents()
            message = self.client.recvMessage()

            # The frame is a numpy array that we can pass through a CNN, for example
            image = frame2numpy(message['frame'], (320, 160))
            commands = model.run(image)
            self.sig_step.emit(self.__id, 'step ' + str(time.time()))
            self.sig_image.emit(image.tolist())
            # We send the commands predicted by the agent back to DeepGTAV to control the vehicle
            self.client.sendMessage(
                Commands(commands[0], commands[1], commands[2]))

        # We tell DeepGTAV to stop
        self.client.sendMessage(Stop())
        self.client.close()

        self.sig_done.emit(self.__id)
def get_mean_std():
    means = []
    stds = []

    for i in tqdm(range(493504)):

        data_dct = pickle.load(dataset)

        frame = frame2numpy(data_dct['frame'], (350,205+20))[20:]
        frame = totensor(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)).data.numpy()

        mean = np.mean(frame, axis=(1, 2))
        std = np.std(frame, axis=(1, 2))

        means.append(mean)
        stds.append(std)

    mean = np.mean(np.stack(means), axis=0)
    std = np.mean(np.stack(stds), axis=0)

    return mean, std
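A possible use of the statistics computed above: building a torchvision normalization transform from them. This assumes the same ToTensor conversion used inside get_mean_std() and that the dataset/totensor globals it relies on are already set up:

from torchvision import transforms

mean, std = get_mean_std()

# Per-channel normalization built from the dataset statistics computed above.
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=mean.tolist(), std=std.tolist()),
])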
def load_batches(verbose=1, samples_per_batch=1000):
    ''' Generator for loading batches of frames'''
    dataset = gzip.open('dataset.pz')
    batch_count = 0
    while True:
        try:
            x_train = []
            y_train = []
            x_test = []
            y_test = []
            count = 0
            print('----------- On Batch: ' + str(batch_count) + ' -----------')
            while count < samples_per_batch:
                data_dct = pickle.load(dataset)
                frame = data_dct['frame']
                image = frame2numpy(frame, (320, 160))
                image = ((image / 255) - .5) * 2  # Simple preprocessing

                # Train test split
                # TODO: Dynamic train test split | Test series at end of batch
                if (count % 5) != 0:  # Train
                    x_train.append(image)
                    # Steering in dict is between -1 and 1, scale to between 0 and 999 for categorical input
                    y_train.append(
                        int(float(data_dct['steering']) * 500) + 500)
                else:  # Test
                    x_test.append(image)
                    # Steering in dict is between -1 and 1, scale to between 0 and 999 for categorical input
                    y_test.append(int(float(data_dct['steering']) * 500) + 500)

                count += 1
                if (count % 250) == 0 and verbose == 1:
                    print('     ' + str(count) +
                          ' data points loaded in batch.')
            print('Batch loaded.')
            batch_count += 1
            yield x_train, y_train, x_test, y_test
        except EOFError:  # Breaks at end of file
            break
def split_dataset():

    for i in tqdm(range(total_num//seq_len)):
        data_point = {key:[] for key in keys}
        data_name = data_dir + 'data_%d'%i + '.gz'

        for j in range(seq_len):
            data_dct = pickle.load(dataset)
            for key in keys:
                if key == 'frame':
                    frame = frame2numpy(data_dct[key], (350, 205 + 20))[20:]
                    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                    data_point[key].append(transform(frame))
                else:
                    data_point[key].append(data_dct[key])

        for key in keys:
            if key == 'frame':
                data_point[key] = torch.stack(data_point[key])
            else:
                data_point[key] = torch.FloatTensor(data_point[key])


        with gzip.open(data_name, 'wb', compresslevel=4) as f:
            pickle.dump(data_point, f)

        s = time.time()
        with gzip.open(data_name, 'rb') as f:
            pickle.load(f)
        e = time.time()
        print(e-s)
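A small companion sketch for reading one of the chunks written by split_dataset() back into memory; the data_dir layout and file naming follow the snippet above, everything else is an assumption:

import gzip
import pickle

def load_chunk(i, data_dir='./'):
    """Reload one sequence chunk written by split_dataset()."""
    with gzip.open(data_dir + 'data_%d' % i + '.gz', 'rb') as f:
        data_point = pickle.load(f)
    # data_point['frame'] holds a stacked tensor of seq_len transformed frames;
    # every other key holds a FloatTensor of the corresponding telemetry values.
    return data_point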
Example no. 7
def load_batches_category(verbose=1, samples_per_batch=1000):
    # Generator for loading batches of frames

    print("Loading dataset file...")
    dataset = gzip.open(dataset_path)
    print("Finished loading dataset file.")

    batch_count = 0
    abandon = 60  # number of initial frames to discard
    abandoned = False
    # discarded because of stuttering at startup, on-screen messages at the top/bottom, etc.

    while True:
        try:
            x_train = []
            y_train = []
            x_test = []
            y_test = []
            count = 0

            print('----------- On Batch: ' + str(batch_count) + ' -----------')
            while count < samples_per_batch:
                data_dct = pickle.load(dataset)  # Note: pickle.load() reads one pickled record at a time from the file.

                if batch_count == 0 and not abandoned and count < abandon:  # discard the first 'abandon' frames
                    count += 1
                    if count == abandon:
                        count = 0
                        abandoned = True
                    continue

                # The 'simple preprocessing' below just seems to down-scale the pixel values.
                # It is probably unnecessary if image normalization is applied. <important>
                image = frame2numpy(data_dct['frame'], (800, 600))
                # image = normalize(image)  # Normalization
                image = ((image / 255) - .5) * 2  # Simple preprocessing

                steering = data_dct['steering']


                # Train test split
                # TODO: Dynamic train test split | Test series at end of batch
                if (count % 10) != 0:  # Train
                # if count != 10:
                    x_train.append(image)
                    # Steering in dict is between -1 and 1, scale to between 0 and 999 for categorical input
                    # y_train.append(int(float(data_dct['steering']) * 500) + 500)  # for categorical
                    y_train.append(int(float(steering) * 500) + 500)  # for numeric
                else:  # Test
                    x_test.append(image)
                    # SantosNet categorized steering as an integer in the range 1-1000.
                    # Steering in dict is between -1 and 1, scale to between 0 and 999 for categorical input
                    # y_test.append(int(float(data_dct['steering']) * 500) + 500)
                    y_test.append(int(float(steering) * 500) + 500)  # for numeric

                count += 1
                if (count % 250) == 0 and verbose == 1:
                    print('     ' + str(count) + ' data points loaded in batch.')
            print('Batch loaded.')
            batch_count += 1
            yield x_train, y_train, x_test, y_test
        except EOFError:  # Breaks at end of file
            break
Example no. 8
def main():

    global client, scenario
    client = Client(ip='localhost', port=8000)  # Default interface
    scenario = Scenario(
        weather='EXTRASUNNY',
        vehicle='blista',
        time=[12, 0],
        drivingMode=-1,
        location=[-2583.6708984375, 3501.88232421875, 12.7711820602417])
    client.sendMessage(Start(scenario=scenario, dataset=dataset))
    print("load deepGTAV successfully! \nbegin")

    # load yolo v3
    classes = yolo_util.read_coco_names('./files/coco/coco.names')
    num_classes = len(classes)
    input_tensor, output_tensors = yolo_util.read_pb_return_tensors(
        tf.get_default_graph(), "./files/trained_models/yolov3.pb",
        ["Placeholder:0", "concat_9:0", "mul_6:0"])
    print("load yolo v3 successfully!")

    with tf.Session() as sess:
        model = load_model("files/trained_models/main_model.h5")
        print("load main_model successfully!")
        while True:
            fo = open(config_position, "r")  # configuration flag
            txt = fo.read()
            fo.close()
            if txt == '0':
                set_gamepad(-1, -1, 0)
                time.sleep(0.7)
                print('=====================end=====================')
                exit(0)
            elif txt == '1':
                message = client.recvMessage()
                frame = frame2numpy(message['frame'], (CAP_IMG_W, CAP_IMG_H))
                image_obj = Image.fromarray(frame)

                speed = message['speed']

                boxes, scores = sess.run(output_tensors,
                                         feed_dict={
                                             input_tensor:
                                             np.expand_dims(
                                                 yolo_img_process(frame),
                                                 axis=0)
                                         })
                boxes, scores, labels = yolo_util.cpu_nms(boxes,
                                                          scores,
                                                          num_classes,
                                                          score_thresh=0.4,
                                                          iou_thresh=0.1)
                image, warning = yolo_util.draw_boxes(image_obj,
                                                      boxes,
                                                      scores,
                                                      labels,
                                                      classes,
                                                      (IMAGE_H, IMAGE_W),
                                                      show=False)

                control, throttle, breakk = drive(model=model,
                                                  image=frame,
                                                  speed=speed,
                                                  warning=warning)

                print(warning)

                set_gamepad(control, throttle, breakk)
Example no. 9
    # We set the scenario to be in manual driving, and everything else random (time, weather and location).
    # See deepgtav/messages.py to see what options are supported
    scenario = Scenario(drivingMode=-1)  #manual driving

    # Send the Start request to DeepGTAV. Dataset is set as default, we only receive frames at 10Hz (320, 160)
    client.sendMessage(Start(scenario=scenario))

    # Dummy agent
    model = Model()

    # Start listening for messages coming from DeepGTAV. We do it for 80 hours
    stoptime = time.time() + 80 * 3600
    while time.time() < stoptime:
        try:
            # We receive a message as a Python dictionary
            message = client.recvMessage()
            print(message)

            # The frame is a numpy array that we can pass through a CNN, for example
            image = frame2numpy(message['frame'], (320, 160))
            commands = model.run(image)
            # We send the commands predicted by the agent back to DeepGTAV to control the vehicle
            client.sendMessage(Commands(commands[0], commands[1], commands[2]))
        except KeyboardInterrupt:
            break

    # We tell DeepGTAV to stop
    client.sendMessage(Stop())
    client.close()
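The Model() dummy agent used in this example is not defined in the snippet. A minimal stand-in that keeps the loop running could look like this (the constant command values are arbitrary):

class Model:
    """Hypothetical dummy agent: ignores the frame and returns fixed commands."""
    def run(self, image):
        steering = 0.0   # keep the wheel centered
        throttle = 0.3   # gentle constant throttle
        brake = 0.0
        return [steering, throttle, brake]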
Example no. 10
    print("Processing %d images ... " % count)

    try:
        data_dct = pickle.load(dataset)
        steering = data_dct['steering']
        values.append(steering)

        # data augmentation
        # augment whenever the absolute steering value is greater than 0.0625
        # inflate the data 6x using [original image - steering], +-0.00001 steering offsets, and left/right flips
        if abs(steering) > 0.0625:
            augmented_count += 5

            # augment data
            image = data_dct['frame']
            image = frame2numpy(data_dct['frame'], (800, 600))
            pickle.dump(data_dct,
                        pickleFile)  # dump original image and steering value

            original_steering = data_dct['steering']

            data_dct['steering'] = original_steering - 0.00001
            pickle.dump(
                data_dct, pickleFile
            )  # dump original image with distorted steering value 1
            values.append(data_dct['steering'])

            data_dct['steering'] = original_steering + 0.00001
            pickle.dump(
                data_dct, pickleFile
            )  # dump original image with distorted steering value 2
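The left/right flip mentioned in the augmentation comment is not visible in this fragment. A sketch of that step on the decoded numpy frame (re-encoding the flipped frame into the dump format is left out, and the function name is hypothetical):

import numpy as np

def flip_augment(image, steering):
    """Mirror the frame horizontally and negate the steering angle."""
    flipped_image = np.fliplr(image).copy()
    flipped_steering = -steering
    return flipped_image, flipped_steering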
Example no. 11
filename = 'data.txt'

dir = 'training'
#dir = 'validation'
#dir = 'test'

data = open(path.join(DATA_PATH, dir, filename), 'w')
images_counter = 0

while True:
    try:
        data_dict = pickle.load(file)  # Iterates through pickle generator
        if (len(data_dict['vehicles']) > 0):
            #Extract frame
            frame = data_dict['frame']
            image = frame2numpy(frame, (1920, 1080))
            frames_path = path.join(DATA_PATH, dir, 'frames')
            filename = format(
                images_counter,
                '06') + '.jpg'  #saves images as six digit integer .jpg
            cv2.imwrite(path.join(frames_path, filename), image)

            #Extract vehicles info
            data.write(filename + ', ')
            vehicles_info = str(data_dict['vehicles'])
            result = re.findall(r'[\d\.]{2,}',
                                vehicles_info)  #removes non numeric values
            #print(str(result) + '\n')
            count = 1  #counter of vehicle info
            for val in result[:-1]:
                if (
Example no. 12
def main():
    # load yolo v3
    classes = yolo_util.read_coco_names('./files/coco/coco.names')
    num_classes = len(classes)
    input_tensor, output_tensors = yolo_util.read_pb_return_tensors(
        tf.get_default_graph(), "./files/trained_models/yolov3.pb",
        ["Placeholder:0", "concat_9:0", "mul_6:0"])
    print("load yolo v3 successfully!")

    with tf.Session() as sess:
        model = load_model("files/trained_models/main_model.h5")
        print("load main_model successfully!")

        while True:

            try:
                data_dict = pickle.load(data_path)  # read each frame from the dataset
                speed = data_dict['speed']
                frame = data_dict['frame']
                frame = frame2numpy(frame, (CAP_IMG_W, CAP_IMG_H))
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(frame)
            except EOFError:
                print("===========end=============")
                exit(0)

            boxes, scores = sess.run(output_tensors,
                                     feed_dict={
                                         input_tensor:
                                         np.expand_dims(
                                             yolo_img_process(frame), axis=0)
                                     })
            boxes, scores, labels = yolo_util.cpu_nms(boxes,
                                                      scores,
                                                      num_classes,
                                                      score_thresh=0.4,
                                                      iou_thresh=0.1)
            image, warning = yolo_util.draw_boxes(image,
                                                  boxes,
                                                  scores,
                                                  labels,
                                                  classes, (IMAGE_H, IMAGE_W),
                                                  show=False)

            info = drive(model=model,
                         image=frame,
                         speed=speed,
                         warning=warning)

            result = np.asarray(image)
            cv2.putText(result,
                        text=info,
                        org=(50, 70),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=1,
                        color=(255, 0, 0),
                        thickness=2)
            result = cv2.cvtColor(result, cv2.COLOR_RGB2BGR)

            while True:
                cv2.imshow("result", result)
                key = cv2.waitKey(0) & 0xFF
                if key == 32:  # press space to advance to the next frame
                    break
                elif key == ord('q'):  # press q to exit the program
                    print("====================done===================")
                    exit(0)
Example no. 13
      1171.351563, -1925.791748, 36.220097
      ])

    client.sendMessage(Start(scenario=scenario,
                             dataset=dataset))  # Start request
    count = 0
    tripNum = 0
    path = 'D:\\Git_repositories\\VPilot-master\\KITTI\\'
    old_location = [0, 0, 0]
    while True:  # Main loop
        try:
            count += 1
            # name = os.path.join(str(count), '.png')
            # Message received as a Python dictionary
            message = client.recvMessage()
            image = frame2numpy(message['frame'], (1920, 1080))
            image = image[int((1080 - ORI_HEIGHT) /
                              2):int(1080 - ((1080 - ORI_HEIGHT) / 2)),
                          int((1920 - ORI_WIDTH) /
                              2):int(1920 - ((1920 - ORI_WIDTH) / 2))]
            # print(image.shape)

            dest = os.path.join(path, str(count) + '.png')
            status = cv2.imwrite(dest, image)
            if status:
                print('File saved')
            else:
                print('File was not saved.')
            # cv2.imshow('img', image)
            # cv2.waitKey(-1)
            if (count % 500) == 0:
Example no. 14
    # Crops to roughly the bottom half of the image (keeps everything below ~55% of the height)
    return image[int(image.shape[0] / 1.8):image.shape[0]]


file = gzip.open('dataset_test.pz')
count = 0

while True:
    try:
        data_dict = pickle.load(file)  # Iterates through pickle generator
        count += 1
        # Every 100 frames, print steering and display the frame
        if (count % 100) == 0:
            print(str(data_dict['steering']) + '	On Count: ' + str(count))
            frame = data_dict['frame']
            # Show full image
            image = frame2numpy(frame, (480, 270))
            cv2.imshow('img', image)
            #time.sleep(0.1)
            cv2.waitKey(-1)  # press any key to continue to the next frame
            # Show cropped image
            image = crop_bottom_half(image)
            #cv2.imshow('img', image)
            #cv2.waitKey(-1)  # Must press q on keyboard to continue to next frame

    except EOFError:
        print('End of File')
        break
print(count)
Example no. 15
def load_batches(epoch, txtfile, verbose=1, samples_per_batch=4000):
    ''' Generator for loading batches of frames'''

    if os.path.exists(txtfile):
        f = open(txtfile, 'r')
        data_dump_no = int(f.read())
        f.close()
    else:
        f = open(txtfile, 'w+')
        data_dump_no = 1
        f.write(str(data_dump_no))
        f.close()

    data_dump = 'dataset_minimap_800x600-'
    data = data_dump + str(data_dump_no) + '.pz'
    print(data)
    print('Opening dump.no {}'.format(data_dump_no))
    dataset = gzip.open(data)
    batch_count = 0

    # y = []  # outputs
    x = []  # input
    s = []  # steering
    # t = []  # throttle
    # b = []  # brake
    count = 0

    while True:
        try:
            print('----------- On Epoch: ' + str(epoch) + ' ----------')
            print('----------- On Batch: ' + str(batch_count) + ' ----------')
            while count < samples_per_batch:
                data_dict = pickle.load(dataset)
                steering = int(float(data_dict['steering']) * 750) + 500
                if steering >= 1000:
                    steering = 999
                if steering < 0:
                    steering = 0
                if 470 <= steering <= 530:
                    continue

                image = frame2numpy(data_dict['frame'], (800, 600))
                image = minimap_processing(image)
                # cv2.imshow('original', image)
                # if cv2.waitKey(1) & 0xFF == ord('q'):
                #     break

                image = (image / 255 - .5) * 2

                x.append(image)

                # Steering in dict is between -1 and 1; it was scaled to 0-999 above,
                # and truncate() maps it to 0-34 for categorical input
                steering = truncate(steering)
                s.append(steering)

                # # Throttle in dict is between 0 and 1, scale to between 0 and 49 for categorical input
                # throttle = int(data_dict['throttle'] * 50)
                # if throttle >= 50:
                #     throttle = 49
                # if throttle < 0:
                #     throttle = 0
                # t.append(throttle)
                #
                # # brake in dict is between 0 and 1, scale to between 0 and 49 for categorical input
                # brake = int(data_dict['brake'] * 50)
                # if brake >= 50:
                #     brake = 49
                # if brake < 0:
                #     brake = 0
                # b.append(brake)

                count += 1
                if (count % 250) == 0 and verbose == 1:
                    print('  ' + str(count) + ' data points loaded in batch.')
            count = 0
            print('Batch loaded.')
            s = np_utils.to_categorical(s, num_classes=35)
            # t = np_utils.to_categorical(t, num_classes=50)
            # b = np_utils.to_categorical(b, num_classes=50)

            # y = np.hstack([s, b])

            # Train test split
            x_train, x_test, y_train, y_test = train_test_split(x,
                                                                s,
                                                                test_size=0.2,
                                                                random_state=1,
                                                                shuffle=True)

            # y = []
            x = []
            s = []  # steering
            # t = []  # throttle
            # b = []  # brake

            batch_count += 1
            yield x_train, y_train, x_test, y_test, samples_per_batch, batch_count
        except EOFError:

            data_dump_no += 1
            f = open(txtfile, 'w+')
            f.write(str(data_dump_no))
            f.close()
            print('Opening dump.no {}'.format(data_dump_no))
            data = data_dump + str(data_dump_no) + '.pz'
            if os.path.exists(data):
                dataset = gzip.open(data)
                pass
            else:
                print('File Completed')
                break
        except Exception as e:
            print(e)
            data_dump_no += 1
            f = open(txtfile, 'w+')
            f.write(str(data_dump_no))
            f.close()
            print('Data of this file is corrupted after this point. '
                  'Don\'t worry, Opening next file')
            print('Opening dump.no {}'.format(data_dump_no))
            data = data_dump + str(data_dump_no) + '.pz'
            if os.path.exists(data):
                dataset = gzip.open(data)
                pass
            else:
                print(e)
                print('Conversion Complete.' ' Yay!!!!!!')

                break
            pass
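truncate() is referenced above but not defined in the snippet. Since the labels are later one-hot encoded with num_classes=35, one plausible implementation simply buckets the 0-999 steering index into 35 classes (this is a guess at the author's helper, not the original code):

def truncate(steering):
    """Map a steering index in [0, 999] to one of 35 categories (0-34)."""
    category = steering * 35 // 1000
    return min(max(category, 0), 34)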
Example no. 16
########################## Creates folders named 0 to 999 ################################
for i in range(0,1000):
    file_name = 'dataset/{}'.format(i)
    os.mkdir(file_name)
print('Directories generated.'
      'Now Storing images to respective directories')
'''

while True:
    try:

        data_dct = pickle.load(dataset)
        steering = int(float(data_dct['steering']) * 500) + 500

        frame = data_dct['frame']
        image = frame2numpy(frame, (800, 600))
        # image = ((image / 255) - .5) * 2  # Simple preprocessing

        ################# For saving data in a single folder and steering in text file #############
        file_name = 'driving_dataset/'
        if not os.path.isdir(file_name):
            os.mkdir(file_name)
        file_name = 'driving_dataset/{}.jpg'.format(starting_value)
        line = '{}.jpg {}\r'.format(starting_value,
                                    round(float(data_dct['steering']) * 180, 5))
        cv2.imwrite(file_name, image, [cv2.IMWRITE_JPEG_QUALITY, 90])
        with open("driving_dataset/data.txt", "a+") as text_file:
            text_file.write(line)

        # make video from images
        # out.write(image)
Example no. 17
    # print(delete_data)
    deleted_list.close()
    for i in tqdm(range(last_ele-1)):  # if the deleted entry in the list is frame 100, iterating 99 times brings that frame up next
        pickle.load(file)

while True:
    try:
        data_dict = pickle.load(file)
        speed = data_dict['speed']
        throttle = data_dict['throttle']
        brake = data_dict['brake']
        location = data_dict["location"]
        print(str(speed)+", "+str(throttle)+","+str(brake))
        frame = data_dict['frame']
        # Show full image
        image = frame2numpy(frame, (320, 240))
        pointer = pointer+1
        while True:
            cv2.imshow('img', image)
            key = cv2.waitKey(0) & 0xFF
            if key == 32:  # space: next frame
                break
            elif key == 8:  # backspace: mark this frame for deletion
                delete_data.append(pointer)
                print("##### delete "+str(pointer)+" successfully ####")
                break
            elif key == ord('q'):  # q: save the deletion list and quit
                delete_data.append(pointer)
                print("====================done===================")
                pickle.dump(delete_data, open(deleted_list_pos, "wb"))
                file.close()
                exit(0)
Example no. 18
t_start0 = time.time()
location_same_timer = 0
location_last = [0, 0, 0]

while True:
    try:
        t_loop_start = time.time()

        if image_source == USE_GTAV:
            # Collect and preprocess image
            while True:
                t_start_recv = time.time()
                message = client.recvMessage()
                if time.time() - t_start_recv > 0.05:  #is new frame
                    break
            image = frame2numpy(message['frame'], (imgwidth0, imgheight0))
            image2 = image
        elif image_source == USE_DATASET:
            message = pickle.load(file)  # Iterates through pickle generator
            image = frame2numpy(message['frame'], (imgwidth0, imgheight0))
            image2 = cv2.resize(image, (show_imgwidth, show_imgheight),
                                interpolation=cv2.INTER_LINEAR)

        elif image_source == USE_CAPTURE_CAM:
            if not cap.isOpened():
                raise "not cap.isOpened()"
                exit()
            message = {}
            ret, frame = cap.read()
            #frame = cv2.cvtColor(frame,cv2.COLOR_BGR2RGB)
            image = frame2numpy(frame, (frame.shape[1], frame.shape[0]))
Example no. 19
            )
    client.sendMessage(Start(scenario=scenario,dataset=dataset)) # Start request
    print('Loading stuff.. Finish.')
    count = 0

    old_location = [0, 0, 0]
    old_speed = 0.0
    i = 96876

    while True: # Main loop
        try:
            # Message received as a Python dictionary
            message = client.recvMessage()
            # frame = np.resize(np.fromstring(message['frame'],
            # dtype=np.float64), (320, 160, 3))
            frame = frame2numpy(message['frame'], (640,320))
            frame = frame[10:300,:,:]
            frame = (resize(frame, (160,320)) * 255.0).astype('uint8')
            frame_name = "frame_%d.jpg" % count
            # cv2.imwrite(os.path.join(DATASET_PATH, frame_name), frame)
            if args.save_data:
                mimg.imsave(os.path.join(DATASET_PATH, frame_name), frame)
            cv2.imshow('frame', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
            throttle = message['throttle']
            speed = message['speed']
            # direction = message['direction']
            yawRate = message['yawRate']
            brake = message['brake']
            steering = message['steering']
Example no. 20
            count += 1
            time_now = time.time()  # time at receiving message
            # We receive a message as a Python dictionary
            client.recvMessage()
            client.recvMessage()
            message = client.recvMessage()
            print('receive data takes: ', time.time() - time_now)
            # Note: lazy workaround for a deep(er)GTAV bug: recv call n+1 returns the data from call n,
            # so we call recvMessage() twice and only use the data from the last call. One recv call takes about 0.2-0.3 s.
            # The lidar data does not seem to be affected by this problem.
            lidar_img = message['lidar']
            lidar_img.shape = (lidar_Y_res, lidar_X_res)
            lidar_img = np.flip(lidar_img, axis=0)

            image_raw = frame2numpy(message['frame'], (1280, 720))
            # 3 place to change: here, IMG:coord move, MC:check_flow, ...
            image_2 = image_raw[(360 - 150):(360 + 300),
                                (640 - 550):(640 + 550), :]
            image_2 = cv2.cvtColor(image_2, cv2.COLOR_BGR2RGB)

            image_1 = cv2.cvtColor(image_raw, cv2.COLOR_BGR2GRAY)
            image_1 = image_1[(360 - 150):(360 + 150), (640 - 550):(640 + 550)]
            #image_1 = cv2.resize(image_1, (825, 225))
            #image_1 = cv2.resize(image_1, (550, 150))

            # read more message
            speed_now = message['speed']
            Yaw_now = message['yaw']
            yawRate_ingame = message['yawRate']
            pitch = -message['pitch']
Example no. 21
def load_batches(verbose=1, samples_per_batch=128):
    ''' Generator for loading batches of frames'''
    global frame_index, frames, frames_inited
    dataset = gzip.open('dataset.pz')
    batch_count = 0
    while True:
        try:
            x_train = []
            x_train_0_5S = []
            x_train_2S = []
            x_train_5S = []

            y_train = []
            x_test = []
            x_test_0_5S = []
            x_test_2S = []
            x_test_5S = []

            y_test = []
            count = 0
            print('----------- On Batch: ' + str(batch_count) + ' -----------')
            while count < samples_per_batch or frames_inited == 0:
                data_dct = pickle.load(dataset)
                frame = data_dct['frame']
                image = frame2numpy(frame, (320, 160))
                image = crop_bottom_half(image)
                image = ((image / 255) - .5) * 2  # Simple preprocessing
                insert_image_fifo(image)

                if frames_inited == 0:
                    if len(frames) < 50:
                        continue
                count += 1
                frames_inited = 1

                image_0_5S = get_image_fifo(5)
                image_2S = get_image_fifo(20)
                #image_5S = get_image_fifo(50)

                # Train test split
                # TODO: Dynamic train test split | Test series at end of batch
                if (count < samples_per_batch * 0.9):  # Train

                    x_train.append([image, image_0_5S, image_2S])
                    #x_train_0_5S.append(image_0_5S)
                    #x_train_2S.append(image_2S)
                    #x_train_5S.append(image_5S)

                    # Steering in dict is between -1 and 1, scale to between 0 and 999 for categorical input
                    #2*(0.2*x)^0.4

                    steering1 = get_steering(float(data_dct['steering']))
                    y_train.append(steering1)

                else:  # Test
                    x_test.append([image, image_0_5S, image_2S])
                    #x_test_0_5S.append(image_0_5S)
                    #x_test_2S.append(image_2S)
                    #x_test_5S.append(image_5S)
                    # Steering in dict is between -1 and 1, scale to between 0 and 999 for categorical input
                    steering1 = get_steering(float(data_dct['steering']))
                    y_test.append(steering1)

                if (count % 250) == 0 and verbose == 1:
                    print('     ' + str(count) +
                          ' data points loaded in batch.')

            print('Batch loaded.')
            #print("x_train.shape",len(x_train))
            #print("y_train.shape",len(y_train))
            batch_count += 1
            yield x_train, y_train, x_test, y_test
        except EOFError:  # Breaks at end of file
            break
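The FIFO helpers insert_image_fifo() and get_image_fifo() and the frames global are not shown. A minimal deque-based version that is consistent with how they are called above (the buffer length and indexing scheme are assumptions):

from collections import deque

frames = deque(maxlen=50)   # rolling buffer of the most recent preprocessed frames
frames_inited = 0
frame_index = 0

def insert_image_fifo(image):
    """Push the newest frame into the rolling buffer."""
    frames.append(image)

def get_image_fifo(steps_back):
    """Return the frame captured `steps_back` frames ago (the newest frame is at -1)."""
    return frames[-1 - steps_back]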
Example no. 22
import gzip
import pickle

import cv2
from deepgtav.messages import frame2numpy

def crop_bottom_half(image):
    ''' Crops to bottom half of image '''
    return image[int(image.shape[0] / 2):image.shape[0]]

file = gzip.open('dataset_final_2.pz')
count = 0

while True:
    try:
        data_dict = pickle.load(file)  # Iterates through pickle generator
        count += 1
        # Every 10000 frames, print steering and display the frame
        if (count % 10000) == 0:
            print(str(data_dict['steering']) + '\tOn Count: ' + str(count))
            frame = data_dict['frame']
            # Show full image
            image = frame2numpy(frame, (320, 160))
            cv2.imshow('img', image)
            cv2.waitKey(-1)  # press any key to continue to the next frame
            # Show cropped image
            image = crop_bottom_half(image)
            cv2.imshow('img', image)
            cv2.waitKey(-1)

    except EOFError:
        break
print(count)
    host = 'localhost'
    port = 8000
    max_stop_time = 10  # in second
    max_wall_time = 10  # in hour
    frame = [350, 205 + 20]

    print('Loading model...')
    model = Model()

    client = Client(ip=host, port=port)

    scenario = Scenario(weather='EXTRASUNNY',
                        vehicle='voltic',
                        time=[12, 0],
                        drivingMode=-1,
                        location=[-2500, 3250])

    client.sendMessage(Start(scenario=scenario))

    stoptime = time.time() + max_wall_time * 3600
    while time.time() < stoptime:
        message = client.recvMessage()
        image = frame2numpy(message['frame'], frame)[20:]

        commands = model.run(image)

        client.sendMessage(Commands(commands[0], commands[1], commands[2]))

    client.sendMessage(Stop())
    client.close()
Example no. 24
    def save(self, dct, frame):
        im = frame2numpy(frame, tuple(self.frame_capture_size))
        dct['minimap'] = im[480:590, 7:177, :]
        dct['frame'] = cv2.resize(im, tuple(self.frame_save_size))
        if self.pickleFile is not None:
            pickle.dump(dct, self.pickleFile)
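A companion sketch for iterating over the records written by save(); it assumes self.pickleFile was opened through gzip, as in the other dataset scripts in this collection:

import gzip
import pickle

def iter_saved_records(path):
    """Yield the dicts written by save(), one per captured frame."""
    with gzip.open(path, 'rb') as f:
        while True:
            try:
                yield pickle.load(f)   # each record holds 'frame', 'minimap' and the other telemetry keys
            except EOFError:
                break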
Example no. 25
def load_data(verbose=1, samples_per_batch=1000):
    batch_count = 0
    dataset = gzip.open(dataset_path)

    while True:
        try:

            x_image_train = []
            x_speed_train = []
            x_image_test = []
            x_speed_test = []

            y_train = []
            y_test = []

            count = 0
            print('----------- On Batch: ' + str(batch_count) + ' -----------')
            while count < samples_per_batch:
                data_dct = pickle.load(dataset)
                frame = data_dct['frame']
                image = frame2numpy(frame, (320, 240))
                image = cv2.GaussianBlur(image, (5, 5), 0)
                image = image[130:240, 0:320]
                image = cv2.GaussianBlur(image, (5, 5), 0)
                speed = data_dct['speed']

                steering = data_dct['steering']
                steering = steering * 5 / math.pi
                brake = data_dct['brake']
                throttle = data_dct['throttle']

                # Train test split
                if (count % 8) != 0:  # Train: take roughly 80% as the training set and the rest as the test set
                    x_image_train.append(image)
                    x_speed_train.append(np.array(float(speed)))

                    y_train.append(
                        np.array(
                            [float(steering),
                             float(throttle),
                             float(brake)]))

                else:  # Test
                    x_image_test.append(image)
                    x_speed_test.append(np.array(float(speed)))

                    y_test.append(
                        np.array(
                            [float(steering),
                             float(throttle),
                             float(brake)]))

                count += 1
                if (count % 250) == 0 and verbose == 1:
                    print('     ' + str(count) +
                          ' data points loaded in batch.')

            x_train = [np.array(x_image_train), np.array(x_speed_train)]
            x_test = [np.array(x_image_test), np.array(x_speed_test)]
            y_train = np.array(y_train)
            y_test = np.array(y_test)

            print('Batch loaded.')
            batch_count += 1
            yield x_train, y_train, x_test, y_test  # y is packed as numpy arrays, x as lists of arrays, and passed to the training loop

        except EOFError:
            print("end")
            break
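The network that consumes these batches is not shown. A small two-input Keras sketch that matches the [image, speed] inputs and the 3-value [steering, throttle, brake] target produced by load_data(); the layer sizes and optimizer are arbitrary:

from keras.layers import Input, Conv2D, Flatten, Dense, Concatenate
from keras.models import Model

def build_model(img_shape=(110, 320, 3)):
    # Image branch: the cropped 110x320 frames produced by load_data()
    img_in = Input(shape=img_shape)
    x = Conv2D(16, (5, 5), strides=2, activation='relu')(img_in)
    x = Conv2D(32, (5, 5), strides=2, activation='relu')(x)
    x = Flatten()(x)

    # Speed branch: one scalar per sample
    speed_in = Input(shape=(1,))

    merged = Concatenate()([x, speed_in])
    h = Dense(64, activation='relu')(merged)
    out = Dense(3)(h)   # [steering, throttle, brake]

    model = Model(inputs=[img_in, speed_in], outputs=out)
    model.compile(optimizer='adam', loss='mse')
    return model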