Code Example #1
    def pre_change(self):

        ##### Comment/Uncomment below to set directory by user and not date #####

        todays_dir = make_daily_dir(IMAGE_DIR)

        if pre_check_dewar():
            while not math.isclose(
                    int(epics.PV('XF:17IDB-ES:AMX{Dew:1-Ax:R}Mtr.RBV').get()),
                    int(epics.PV('XF:17IDB-ES:AMX{Dew:1-Ax:R}Mtr.VAL').get()),
                    abs_tol=2):
                print('Dewar is at: {} \nRotating to: {}'.format(
                    int(epics.PV('XF:17IDB-ES:AMX{Dew:1-Ax:R}Mtr.RBV').get()),
                    int(epics.PV('XF:17IDB-ES:AMX{Dew:1-Ax:R}Mtr.VAL').get())))
                time.sleep(2)
                # current_position = int(epics.PV('XF:17IDB-ES:AMX{Dew:1-Ax:R}Mtr.RBV').get())
                # position_goal = int(epics.PV('XF:17IDB-ES:AMX{Dew:1-Ax:R}Mtr.VAL').get())
                # if not math.isclose(current_position,position_goal, abs_tol = 2):
                #     print('Restarted for New Rotation')
                #     break

            inner_dir = make_inner_dir(todays_dir)
            img = take_image(todays_dir, inner_dir)

            try:
                # Pass the target and its arguments separately so crop_image
                # runs in the child process instead of being called here.
                p1 = multiprocessing.Process(
                    target=crop_image, args=(img, inner_dir, ROOT_DIR))
                p1.start()
            except Exception as e:
                print('Prediction Failed: {}'.format(e))
            # A brief wait to allow conditions to change
            time.sleep(0.5)
        self.post_change()
Code Example #2
    def detect(self, lidar):
        birdseye = ld.lidar_to_birdseye(lidar, ld.slice_config())

        mc = self.squeezedet.mc
        im = ci.crop_image(birdseye,
                           (mc.IMAGE_WIDTH + 1, mc.IMAGE_HEIGHT + 1, 3),
                           (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT, 3))

        found_car = None
        found_ped = None
        boxes, probs, classes = self.squeezedet.detect(im)
        # Assume decreasing order of probability
        for box, prob, class_idx in zip(boxes, probs, classes):
            global_box = ld.birdseye_to_global(box, ld.slice_config())

            car_box = rd.get_birdseye_box(lidar,
                                          (global_box[0], global_box[1]))
            predicted_yaw = self.rotation_detector.detect_rotation(car_box)

            sd.correct_global(global_box, class_idx)

            pose = np.array(
                [global_box[0], global_box[1], -0.9, predicted_yaw])

            if found_car is None and class_idx == sd.CAR_CLASS:
                found_car = pose

            if found_ped is None and class_idx == sd.PED_CLASS:
                found_ped = pose

            if found_car is not None and found_ped is not None:
                break
        return (found_car, found_ped)
Code Example #3
def get_birdseye_box(lidar, obs_position):
    slice_config = ld.slice_config()
    slice_config.SIDE_RANGE = (-2.5, 2.5)
    slice_config.FWD_RANGE = (-2.5, 2.5)

    birdseye = ld.lidar_to_birdseye(lidar,
                                    slice_config,
                                    return_points=False,
                                    center=(obs_position[0], obs_position[1]))
    return ci.crop_image(birdseye, (51, 51, 3), INPUT_SHAPE)
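
These examples all pass their bird's-eye image through ci.crop_image(image, expected_shape, new_shape). The helper's actual implementation is not reproduced on this page; purely as a rough mental model, a minimal center-crop sketch with the same signature might look like the following (an assumption, not the project's code):

def crop_image(image, expected_shape, new_shape):
    # Hypothetical sketch, not the implementation used in the examples above:
    # `expected_shape` documents the shape the caller assumes `image` has;
    # the array is center-cropped down to `new_shape`.
    top = max(image.shape[0] - new_shape[0], 0) // 2
    left = max(image.shape[1] - new_shape[1], 0) // 2
    return image[top:top + new_shape[0],
                 left:left + new_shape[1],
                 :new_shape[2]]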
Code Example #4
File: run_squeezedet.py  Project: eljefec/eva-didi
def generate_lidar_birdseye(bag_file, mc):
  im = None
  generator = ns.generate_numpystream(bag_file, tracklet_file = None)
  for numpydata in generator:
    lidar = numpydata.lidar
    if lidar is not None:
      birdseye = ld.lidar_to_birdseye(lidar, ld.slice_config())

      im = ci.crop_image(birdseye,
                         (mc.IMAGE_WIDTH + 1, mc.IMAGE_HEIGHT + 1, 3),
                         (mc.IMAGE_WIDTH, mc.IMAGE_HEIGHT, 3))
    # Must yield item for each frame in generator.
    yield im, lidar
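
A generator like this is consumed frame by frame and yields None for frames that carry no lidar. A minimal, illustrative consumer (process_frame is a hypothetical callback, not part of the project) could be:

for im, lidar in generate_lidar_birdseye(bag_file, mc):
    if im is not None:
        process_frame(im, lidar)  # hypothetical per-frame handler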
Code Example #5
#!/home/pi/miniconda3/envs/ml/bin/python
import os, sys
import json
import crop_images
if __name__ == "__main__":
    out_dict = {}
    out_dict['filename'] = sys.argv[1]
    out_dict['event'] = int(sys.argv[2])
    out_dict['frame_nr'] = int(sys.argv[3])
    out_dict['width'] = int(sys.argv[4])
    out_dict['height'] = int(sys.argv[5])
    out_dict['center_x'] = int(sys.argv[6])
    out_dict['center_y'] = int(sys.argv[7])
    filename = out_dict['filename']
    filename_txt = os.path.splitext(filename)[0] + '.json'
    with open(filename_txt, 'w') as outfile:
        json.dump(out_dict, outfile)
    os.system('sync')
    crop_images.crop_image(filename_txt)
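
The script reads seven positional arguments in the order filename, event, frame_nr, width, height, center_x, center_y, dumps them to a JSON file alongside the image, and passes that JSON path to crop_images.crop_image. An invocation would look roughly like this (the script name and values are placeholders):

python crop_and_save_json.py frame_0001.jpg 1 42 640 480 320 240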
Code Example #6
def generate_kitti(bag_tracklets, imagedir, labeldir, output_bbox,
                   slice_config):
    if not os.path.exists(imagedir):
        os.makedirs(imagedir)
    if not os.path.exists(labeldir):
        os.makedirs(labeldir)

    print('bag_tracklets', bag_tracklets)
    print('imagedir', imagedir)
    print('labeldir', labeldir)
    print('output_bbox', output_bbox)
    print('slice_config', slice_config)

    id = 0
    expected_shape = get_expected_shape(slice_config)
    new_shape = (expected_shape[0] - 1, expected_shape[1] - 1,
                 expected_shape[2])

    assert_shape(new_shape)

    print('expected_shape', expected_shape)
    print('new_shape', new_shape)

    stopwatch = util.stopwatch.Stopwatch()

    stream = multibag.MultiBagStream(bag_tracklets,
                                     numpystream.generate_numpystream)

    stopwatch.stop()
    print('Elapsed time: {}'.format(stopwatch.format_duration()))
    stopwatch.start()

    for numpydata in stream.generate(infinite=False):
        lidar = numpydata.lidar
        obs = numpydata.obs
        if lidar is not None:
            frame_idx, obs = obs
            bbox = bbox_points(obs)

            birdseye = ld.lidar_to_birdseye(lidar, slice_config)
            birdseye_bbox = ld.lidar_to_birdseye(bbox,
                                                 slice_config,
                                                 return_points=True)

            if birdseye_bbox.shape[0] == 2 and birdseye_bbox.shape[1] == 2:
                if output_bbox:
                    bbox_tuple = ((birdseye_bbox[0][0], birdseye_bbox[0][1]),
                                  (birdseye_bbox[1][0], birdseye_bbox[1][1]))
                else:
                    bbox_tuple = None

                crop = ci.crop_image(birdseye, expected_shape, new_shape)
                image_file = os.path.join(imagedir, '{:06d}.png'.format(id))
                imlib.save_np_image(crop, image_file, bbox_tuple)

                label_path = os.path.join(labeldir, '{:06d}.txt'.format(id))
                write_kitti_annotation(obs, birdseye_bbox, label_path)

                id += 1
                if id % 1000 == 0:
                    print('Working... Processed {} samples.'.format(id))
                    stopwatch.stop()
                    print('Elapsed time: {}'.format(
                        stopwatch.format_duration()))
                    stopwatch.start()
    print('DONE. Processed {} samples.'.format(id))
    stopwatch.stop()
    print('Elapsed time: {}'.format(stopwatch.format_duration()))
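
For reference, a call to this function would look roughly like the following; the directory paths are placeholders, and bag_tracklets stands for whatever list of bag/tracklet pairs multibag.MultiBagStream expects:

generate_kitti(bag_tracklets,
               imagedir='kitti/image_2',
               labeldir='kitti/label_2',
               output_bbox=True,
               slice_config=ld.slice_config())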
Code Example #7
import getpass
import os
import random
import shutil
import sys

# root_dir is assumed to be defined earlier in the original script.
sys.path.insert(0, root_dir)
from crop_images import crop_image
from predict_position import predict_image

user = getpass.getuser()

tmp_dir = os.getcwd() + '/' + user + '_puck_visualization_' + str(
    random.randint(11111, 99999))
os.mkdir(tmp_dir)

shutil.copy(sys.argv[1], tmp_dir)

img = sys.argv[1]
tmp_fname = os.listdir(tmp_dir)[0]
fname = tmp_fname.split('.')[0]
fbase = fname + '_position'

os.remove(tmp_dir + '/' + tmp_fname)

print('Processing Image: {}'.format(fname))
print('Files in: {}'.format(tmp_dir))

crop_image(img, fbase, tmp_dir)

files = sorted(os.listdir(tmp_dir))
position_number = 1
for my_file in files:
    filepath = tmp_dir + '/' + my_file
    print('Predicting Image: {}'.format(my_file))
    predict_image(filepath)