Example no. 1
    def read_sensor_cache_of(self, vehicle: Vehicle, _: Scenario) -> Polygon:
        from shapely.geometry import Polygon
        # Build the vehicle's 2D footprint: keep only the x/y coordinates of the
        # four bottom corners of its bounding box.
        bbox_points = vehicle.get_bbox()
        shell = tuple(map(lambda pos: bbox_points[pos][0:2],
                          ["near_bottom_left", "far_bottom_left", "far_bottom_right", "near_bottom_right"]))
        return Polygon(shell=shell)


def main():
    setup_logging()

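    # Connect to a local BeamNG.research installation; 64256 is the default
    # BeamNGpy port and 'home' points at the simulator install directory.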
    beamng = BeamNGpy('localhost',
                      64256,
                      home='C:/Users/merie/Documents/BeamNG.research.v1.7.0.1')
    scenario = Scenario(
        'west_coast_usa',
        'lidar_tour',
        description='Tour through the west coast gathering Lidar data')
    vehicle = Vehicle('ego_vehicle', model='etk800', licence='LIDAR')
    vehicle = setup_sensors(vehicle)
    lidar = setup_lidar('hood')
    vehicle.attach_sensor('lidar', lidar)
    spawn_pt = spawn_point()
    ego_pos = spawn_pt['pos']
    ego_rot_quat = spawn_pt['rot_quat']
    scenario.add_vehicle(vehicle, pos=ego_pos, rot=None, rot_quat=ego_rot_quat)
    tvs = traffic_vehicles()
    scenario.make(beamng)
    bng = beamng.open(launch=True)

    #try:
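    # Load the scenario and make stepping deterministic: the physics run at a
    # fixed 60 steps per simulated second.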
    bng.load_scenario(scenario)
    bng.set_steps_per_second(60)
    bng.set_deterministic()
    bng.hide_hud()
    bng.start_scenario()
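    # Hand the ego vehicle to the built-in AI: drive as a traffic participant,
    # stay in lane, with moderate (0.5) aggression.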
    vehicle.ai_set_mode('traffic')
    vehicle.ai_set_aggression(0.5)
    vehicle.ai_drive_in_lane(True)
    #vehicle.ai_set_speed(16, mode='limit')
    #vehicle.ai_set_target('traffic')
    bng.switch_vehicle(vehicle)
    damage_prev = None
    start = time.time()
    end = time.time()
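    # Pause the simulation so it only advances through the explicit bng.step()
    # calls in the logging loop below.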
    bng.pause()
    print("Bounding box: {}".format(vehicle.get_bbox()))
    with open('lidar_data.csv', 'w') as f:
        f.write(
            "TIMESTAMP,VEHICLE_POSITION,VEHICLE_ORIENTATION,VELOCITY,LIDAR,CRASH\n"
        )
        for i in range(1024):

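            # poll_sensors returns one entry per sensor attached to the vehicle;
            # here the Lidar point cloud and the damage reading are used.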
            sensors = bng.poll_sensors(vehicle)
            points = sensors['lidar']['points']
            damage = sensors['damage']
            v_state = vehicle.state
            print("vehicle_state = {}".format(v_state))
            print("vehicle_state[pos] = {}".format(v_state['pos']))
            print("vehicle_state[dir] = {}".format(v_state['dir']))
            print("Vehicle bounding box:{}".format(vehicle.get_bbox()))
            #ai_state = vehicle.ai_get_state()
            #print("ai_state = {}".format(ai_state))
            new_damage = diff_damage(damage, damage_prev)
            damage_prev = damage
            #print("new damage = {}".format(new_damage))
            print()

            # Each row corresponds to 30 steps at 60 steps/s, i.e. 0.5 s of
            # simulated time.
            f.write("{},{},{},{},{},{}\n".format(i * 0.5, v_state['pos'],
                                                 v_state['dir'],
                                                 v_state['vel'],
                                                 points.tolist(),
                                                 str(new_damage)))
            #bng.step(30, wait=False)
            bng.step(30)
            #print("Time passed since last step: {}".format(time.time() - end))
            end = time.time()
            #print("Time passed since scenario begun: {}\n".format(end - start))

            #screenrecord()

            if end - start >= 30:
                # The shutdown call below is commented out, so this branch is a
                # no-op and the loop runs for all 1024 iterations.
                #bng.close()
                continue
Example no. 3
    return center


def rotate(vec, h, v):
    # Rotate vec by h radians around the z axis (horizontal sweep) ...
    rot_x = np.cos(h) * vec[0] - np.sin(h) * vec[1]
    rot_y = np.sin(h) * vec[0] + np.cos(h) * vec[1]
    # ... then combine the rotated y component with the original z component to
    # apply the vertical sweep of v radians.
    rot_z = np.sin(v) * rot_y + np.cos(v) * vec[2]
    return np.array([rot_x, rot_y, rot_z])


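# Sweep the camera over a grid of horizontal/vertical angles, pointing it back
# at the bounding-box center of the vehicle for every shot.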
with beamng as bng:
    bng.load_scenario(scenario)
    bng.start_scenario()
    bng.pause()

    center = get_bbox_center(vehicle.get_bbox())
    center[1] -= 0.75  # move center back a bit to get more of the vehicle
    cam_off = center + np.array(
        [3.5, 0, 0])  # Offset the camera 3.5 meters in front of the center

    images = []
    for h in variations:
        image_row = []
        for v in variations:
            current_pos = rotate(cam_off, h, v)
            current_dir = center - current_pos

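            # Move the attached camera to the rotated position and aim it at the
            # vehicle before polling an image.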
            camera.pos = current_pos
            camera.direction = current_dir

            image = bng.poll_sensors(vehicle)['camera']['colour']