def process_batch(self, batch_sample):
        # batch_sample: [center_path, left_path, right_path, steering_angle, ...]
        steering_angle = np.float32(batch_sample[3])
        images, steering_angles = [], []

        # Index 0 is the center camera image; indices 1 and 2 are the left and right images
        for image_path_index in range(3):
            image_name = batch_sample[image_path_index].split('/')[-1]

            image = cv2.imread(self.image_path + image_name)
            rgb_image = utils.bgr2rgb(image)
            resized = utils.crop_and_resize(rgb_image)

            images.append(resized)

            if image_path_index == 1:
                # Left camera image: add the correction factor to the steering label
                steering_angles.append(steering_angle + self.correction_factor)
            elif image_path_index == 2:
                # Right camera image: subtract the correction factor from the steering label
                steering_angles.append(steering_angle - self.correction_factor)
            else:
                # Center camera image: keep the recorded steering angle
                steering_angles.append(steering_angle)

            # Augment with a horizontally flipped center image and a negated angle
            if image_path_index == 0:
                flipped_center_image = utils.flipimg(resized)
                images.append(flipped_center_image)
                steering_angles.append(-steering_angle)

        return images, steering_angles
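
The method above relies on a small `utils` module that is not shown on this page. A minimal sketch of what those helpers could look like is given below; the crop rows and the 200x66 target size are illustrative assumptions (not values taken from the original repository), chosen only to make the sketch concrete.

import cv2

def bgr2rgb(image):
    # cv2.imread returns BGR; the model works on RGB frames
    return cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

def crop_and_resize(image, target_size=(200, 66)):
    # Drop the sky and the car hood, then resize to the network input size.
    # The crop rows and target size here are assumptions for illustration only.
    cropped = image[60:135, :, :]
    return cv2.resize(cropped, target_size)

def flipimg(image):
    # Horizontal flip; the caller negates the steering angle to match
    return cv2.flip(image, 1)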
Example #2
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = data["speed"]
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)
        image_array = crop_and_resize(image_array)

        steering_angle = float(
            model.predict(image_array[None, :, :, :], batch_size=1))
        throttle = 0.2

        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
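
The telemetry handler above assumes a python-socketio server (`sio`), a loaded Keras `model`, a parsed `args` namespace, and a `send_control` helper. A minimal sketch of that surrounding wiring, modeled on the usual simulator driving script, is shown below; treat the details (event names, port) as assumptions where the original code is not shown.

import socketio
import eventlet
import eventlet.wsgi
from flask import Flask

sio = socketio.Server()
app = Flask(__name__)

def send_control(steering_angle, throttle):
    # Send the predicted values back to the simulator as strings on the 'steer' event
    sio.emit(
        "steer",
        data={"steering_angle": str(steering_angle), "throttle": str(throttle)},
        skip_sid=True)

# The telemetry() handler above would be registered with @sio.on('telemetry');
# it also needs base64, BytesIO, PIL.Image, numpy, datetime and os imported.

if __name__ == '__main__':
    # Wrap the Flask app with the Socket.IO WSGI app and serve it with eventlet
    app = socketio.WSGIApp(sio, app)
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)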
Example #3
    def __data_generation(self, list_IDs):

        # Template (exemplar) patches are 127x127; search (instance) patches are 255x255
        exemplars = np.empty((self.batch_size, 127, 127, self.n_channels))
        instances = np.empty((self.batch_size, 255, 255, self.n_channels))
        bboxes = []
        cats = []

        for i, ID in enumerate(list_IDs):
            instance_img_data = self.imgs_to_anns[ID]

            # Frame number of the instance image, taken from its file name
            base_name = os.path.basename(
                os.path.normpath(instance_img_data["file_path"]))
            base_name = int(base_name.split(".")[0])

            # Sample the exemplar from a window of +/- 100 frames around the
            # instance frame, clamped to the video's frame range
            min_ = max(base_name - 100, 0)
            max_ = min(base_name + 100, instance_img_data["total"])

            # Keep drawing frame indices until one with an annotation is found
            ex_file_name = ""
            while ex_file_name not in self.imgs_to_anns:
                ex_idx = random.randint(min_, max_)
                ex_idx = str(ex_idx).zfill(8) + ".jpg"
                # Swap the 12-character basename ("NNNNNNNN.jpg") of the instance
                # path for the sampled frame's file name
                ex_file_name = instance_img_data["file_path"][:-12] + ex_idx

            exemplar_img_data = self.imgs_to_anns[ex_file_name]

            # The annotation bbox is [x, y, w, h]; reorder to [y, x, h, w] for crop_and_resize
            bbox = exemplar_img_data["bbox"]
            x, y, w, h = bbox
            box = [y, x, h, w]

            cats.append(instance_img_data["category_name"])

            instance_img = np.array(cv2.imread(instance_img_data["file_path"]))
            exemplar_img = np.array(cv2.imread(exemplar_img_data["file_path"]))

            # Crop sizes derived from the bounding box (only x_sz is used below)
            x_sz, z_sz = calculate_x_z_sz(box)

            # The search image is simply resized to 255x255; the exemplar is
            # cropped around its bounding box (with context) and resized to 127x127
            instance_img = cv2.resize(instance_img, (255, 255))

            exemplar_img = crop_and_resize(exemplar_img,
                                           box,
                                           x_sz,
                                           out_size=127,
                                           border_value=[0, 0, 0])

            # Apply the same augmentation pipeline to both patches
            instance_img = self._image_augment(instance_img)
            exemplar_img = self._image_augment(exemplar_img)

            instances[i] = instance_img
            exemplars[i] = exemplar_img

        # Ground-truth score maps are precomputed once (17x17 response, stride 6,
        # positive-label radius of 16 px) and shared by every sample in the batch
        labels = np.array(list(self.c_labels))

        return [instances, exemplars], [labels], cats
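
This private method looks like the batch-building half of a Keras data generator. A minimal sketch of a keras.utils.Sequence wrapper that such a method could plug into is shown below; the class name, constructor arguments and shuffling logic are assumptions for illustration, and __data_generation refers to the method above pasted into this class.

import numpy as np
from tensorflow.keras.utils import Sequence

class SiameseDataGenerator(Sequence):
    # Hypothetical wrapper; only __data_generation appears in the source above
    def __init__(self, list_IDs, imgs_to_anns, c_labels,
                 batch_size=8, n_channels=3, shuffle=True):
        self.list_IDs = list_IDs
        self.imgs_to_anns = imgs_to_anns
        self.c_labels = c_labels
        self.batch_size = batch_size
        self.n_channels = n_channels
        self.shuffle = shuffle
        self.on_epoch_end()

    def __len__(self):
        # Number of batches per epoch
        return len(self.list_IDs) // self.batch_size

    def __getitem__(self, index):
        ids = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
        inputs, labels, _cats = self.__data_generation(
            [self.list_IDs[k] for k in ids])
        return inputs, labels

    def on_epoch_end(self):
        # Reshuffle sample order after every epoch
        self.indexes = np.arange(len(self.list_IDs))
        if self.shuffle:
            np.random.shuffle(self.indexes)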
Example #4
    # Note: this fragment assumes `image`, `exemplar`, `y, x, h, w`, `x_sz`,
    # `scale_factors` and the model `m` are defined earlier in the original script.
    # Convert [y, x, h, w] into a centre-based box [cx, cy, w, h]
    box2 = [y, x, h, w]
    box2 = np.array(
        [box2[1] + (box2[3]) / 2, box2[0] + (box2[2]) / 2, box2[3], box2[2]],
        dtype=np.float32)
    pos_x, pos_y = box2[:2]

    print(image)
    image = np.array(cv2.imread(image))
    copy_img = np.array(image)
    copy_img2 = np.array(image)
    copy_img3 = np.array(image)
    # Resize the search image to the network's 255x255 input size
    copy_img = cv2.resize(copy_img, (255, 255))

    # Build a small pyramid of exemplar crops, one per scale factor
    exemplar = [
        crop_and_resize(exemplar, [y, x, h, w],
                        x_sz * f,
                        out_size=127,
                        border_value=[0, 0, 0]) for f in scale_factors
    ]

    # Stack the per-scale exemplar crops into a batch and score each one against
    # a copy of the resized search image (presumably one copy per scale factor)
    exemplar = np.stack(exemplar, axis=0)

    print(exemplar.shape)

    scores = m.predict([np.array([copy_img] * 3), exemplar])

    print(scores.shape)
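
The fragment stops at the raw multi-scale score maps. A common next step for this kind of tracker is to pick the scale whose response map peaks highest and take the arg-max position inside it; the sketch below only illustrates that idea, and the response-map shape handling is an assumption rather than code from the source.

import numpy as np

def pick_scale_and_peak(scores, scale_factors):
    # scores: per-scale response maps, e.g. (num_scales, H, W) or (num_scales, H, W, 1)
    scores = np.squeeze(np.asarray(scores))
    # Best scale = the map with the strongest peak response
    per_scale_peak = scores.reshape(len(scale_factors), -1).max(axis=1)
    best = int(np.argmax(per_scale_peak))
    # Peak location inside the chosen response map
    peak_y, peak_x = np.unravel_index(np.argmax(scores[best]), scores[best].shape)
    return scale_factors[best], (peak_y, peak_x)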
Example #5
import cv2
import utils

# Visualize the preprocessing used in training: load one left/center/right
# camera triplet, crop and resize each frame, and save flipped copies as well.

img_left_original = cv2.imread(
    "./MyData06/IMG/left_2019_08_11_15_35_48_328.jpg")
img_center_original = cv2.imread(
    "./MyData06/IMG/center_2019_08_11_15_35_48_328.jpg")
img_right_original = cv2.imread(
    "./MyData06/IMG/right_2019_08_11_15_35_48_328.jpg")

# cv2.imread returns BGR frames; convert to RGB as in the training pipeline
img_l_o_rgb = utils.bgr2rgb(img_left_original)
img_c_o_rgb = utils.bgr2rgb(img_center_original)
img_r_o_rgb = utils.bgr2rgb(img_right_original)

img_left_cropped = utils.crop_and_resize(img_l_o_rgb)
img_center_cropped = utils.crop_and_resize(img_c_o_rgb)
img_right_cropped = utils.crop_and_resize(img_r_o_rgb)
# Convert back to BGR so that cv2.imwrite stores the colors correctly
img_left_cropped = cv2.cvtColor(img_left_cropped, cv2.COLOR_RGB2BGR)
img_center_cropped = cv2.cvtColor(img_center_cropped, cv2.COLOR_RGB2BGR)
img_right_cropped = cv2.cvtColor(img_right_cropped, cv2.COLOR_RGB2BGR)
cv2.imwrite("./images/img_left_cropped.jpg", img_left_cropped)
cv2.imwrite("./images/img_center_cropped.jpg", img_center_cropped)
cv2.imwrite("./images/img_right_cropped.jpg", img_right_cropped)

# Horizontally flipped versions (paired with a negated steering angle during training)
img_left_flipped = utils.flipimg(img_left_cropped)
img_center_flipped = utils.flipimg(img_center_cropped)
img_right_flipped = utils.flipimg(img_right_cropped)
cv2.imwrite("./images/img_left_flipped.jpg", img_left_flipped)
cv2.imwrite("./images/img_center_flipped.jpg", img_center_flipped)
cv2.imwrite("./images/img_right_flipped.jpg", img_right_flipped)