Example #1
def __init__(self):
    self.camera = Camera.instance(width=224, height=224)
    self.robot = Robot()
    # Collision avoidance model.
    self.ca_model = torchvision.models.alexnet(pretrained=False)
    self.ca_model.classifier[6] = torch.nn.Linear(
        self.ca_model.classifier[6].in_features, 2)
    self.ca_model.load_state_dict(torch.load('best_model.pth'))
    self.device = torch.device('cuda')
    self.ca_model = self.ca_model.to(self.device)
    self.mean = torch.Tensor([0.485, 0.456, 0.406]).cuda().half()
    self.std = torch.Tensor([0.229, 0.224, 0.225]).cuda().half()
    self.normalize = torchvision.transforms.Normalize(self.mean, self.std)
    # Road-following support variables.
    self.angle = 0.0
    self.angle_last = 0.0
    # Instantiating the road-following network.
    self.rf_model = torchvision.models.resnet18(pretrained=False)
    self.rf_model.fc = torch.nn.Linear(512, 2)
    self.rf_model.load_state_dict(torch.load('best_steering_model_xy.pth'))
    self.rf_model = self.rf_model.to(self.device)
    self.rf_model = self.rf_model.eval().half()
    self.speed_gain = 0.12
    self.steering_gain = 0
    self.steering_dgain = 0.1
    self.steering_bias = 0.0
    self.t_unit_dist = 0.04
    self.starttime = 0
    self.cumulative_angle = -1
    self.pitstop = []
    self.startposition = []
    self.pathpoints = [[]]
    self.proportionality_const = -1  # TODO: Add the proper value
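The constructor above stores half-precision normalization tensors but never shows them in use. A minimal preprocessing sketch, assuming a 224x224 BGR uint8 frame from camera.value (the method name and helper logic are illustrative, not from the original):

def preprocess(self, image):
    # image: HxWx3 uint8 BGR frame from camera.value (assumed format)
    x = image[..., ::-1].copy()                  # BGR -> RGB
    x = torch.from_numpy(x).permute(2, 0, 1)     # HWC -> CHW
    x = x.cuda().half() / 255.0                  # match the half-precision statistics
    x = (x - self.mean[:, None, None]) / self.std[:, None, None]
    return x[None, ...]                          # add a batch dimension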
Example #2
def collision_avoidance():

    global normalize, device, model, mean, camera, robot
    i_frame = -1

    model = torchvision.models.alexnet(pretrained=False)
    model.classifier[6] = torch.nn.Linear(model.classifier[6].in_features, 2)
    model.load_state_dict(torch.load('best_model.pth'))
    device = torch.device('cuda')
    model = model.to(device)
    mean = 255.0 * np.array([0.485, 0.456, 0.406])
    stdev = 255.0 * np.array([0.229, 0.224, 0.225])
    camera = Camera.instance(width=224, height=224)

    normalize = torchvision.transforms.Normalize(mean, stdev)

    robot = Robot()
    robot.stop()
    now = time.time()
    stop_time = now + 120
    while time.time() < stop_time:
        i_frame += 1
        if i_frame % 2 == 0:

            update({'new': camera.value})  # run the avoidance callback on this frame
        #cv2.imshow(camera.value)

    #camera.observe(update, names='value')
    robot.stop()
    camera.stop()
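The loop above drives an update callback that this fragment never defines. The standard jetbot collision-avoidance callback looks roughly like the sketch below (assumed from the stock notebook; preprocess is a hypothetical helper that normalizes a frame and moves it to the GPU):

import torch.nn.functional as F

def update(change):
    x = preprocess(change['new'])        # hypothetical normalization helper
    y = F.softmax(model(x), dim=1)
    prob_blocked = float(y.flatten()[0])
    if prob_blocked < 0.5:
        robot.forward(0.4)               # path looks clear: drive forward
    else:
        robot.left(0.4)                  # blocked: turn in place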
Example #3
def main(vocab_path, settings_path):
    slam = orbslam2.System(vocab_path, settings_path,
                           orbslam2.Sensor.MONOCULAR)
    slam.set_use_viewer(True)
    slam.initialize()

    camera = Camera.instance(width=960,
                             height=540,
                             capture_width=1280,
                             capture_height=720)
    print('-----')
    print('Start processing sequence ...')
    ts = 0
    #cv2.namedWindow("test")
    while True:
        img = camera.value.copy()

        t1 = time.time()
        slam.process_image_mono(img, float(ts))
        t2 = time.time()

        ttrack = t2 - t1
        print("frame id:" + str(ts), ttrack)
        time.sleep(0.01)
        ts += 1

    save_trajectory(slam.get_trajectory_points(), 'trajectory.txt')
    slam.shutdown()
    return 0
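save_trajectory is called but never defined in this fragment. A plausible sketch, assuming get_trajectory_points() returns flat rows of numbers (timestamp plus pose entries):

def save_trajectory(trajectory, filename):
    # One whitespace-separated row per trajectory point (assumed layout).
    with open(filename, 'w') as f:
        for row in trajectory:
            f.write(' '.join(str(v) for v in row) + '\n')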
Example #4
    def start(self):
        self.device = torch.device('cuda')

        print('Loading model...')
        # create model
        self.model = torchvision.models.alexnet(pretrained=False)
        self.model.classifier[6] = torch.nn.Linear(
            self.model.classifier[6].in_features, 2)
        self.model.load_state_dict(torch.load(self.collision_model))
        self.model = self.model.to(self.device)

        # create robot
        self.robot = Robot()

        print('Initializing camera...')
        # create camera
        self.camera = Camera.instance(width=224, height=224)

        print('Running...')
        self.camera.observe(self._update, names='value')

        def kill(sig, frame):
            print('Shutting down...')
            self.camera.stop()

        signal.signal(signal.SIGINT, kill)

        self.camera.thread.join()
Example #5
def __init__(self):
    print('Setting up camera')
    self.camera = Camera.instance(width=224, height=224)
    print('Set up camera')
    self.robot = Robot()
    self.completed = False
    # Collision avoidance.
    print('Loading CA model')
    self.ca_model = torchvision.models.alexnet(pretrained=False)
    self.ca_model.classifier[6] = torch.nn.Linear(
        self.ca_model.classifier[6].in_features, 2)
    #self.ca_model.load_state_dict(torch.load('best_model_nvidia.pth'))
    self.device = torch.device('cuda')
    self.ca_model = self.ca_model.to(self.device)
    print('Loaded CA model')
    self.mean = 255.0 * torch.Tensor(np.array([0.485, 0.456, 0.406])).cuda().half()
    self.std = 255.0 * torch.Tensor(np.array([0.229, 0.224, 0.225])).cuda().half()
    self.normalize = torchvision.transforms.Normalize(self.mean, self.std)
    # Road-following support variables.
    self.angle = 0.0
    self.angle_last = 0.0
    # Instantiating the road-following network.
    print('Loading RF model')
    self.rf_model = torchvision.models.resnet18(pretrained=False)
    self.rf_model.fc = torch.nn.Linear(512, 2)
    self.rf_model.load_state_dict(torch.load('best_steering_model_xy.pth'))
    self.rf_model = self.rf_model.to(self.device)
    self.rf_model = self.rf_model.eval().half()
    print('Loaded RF model')
    # Initializing additional variables.
    self.speed_gain = 0.12
    self.steering_gain = 0
    self.steering_dgain = 0.1
    self.steering_bias = 0.0
    self.starttime = 0
    self.cumulative_angle = -1
    self.pitstop = []
    self.startposition = []
    self.start_num = -1
    self.baton_callback = None
    self.pathpoints_callback = None
    self.pathpoints = [[]]
    # Add the proper value below.
    self.proportionality_const = -1
    self.image_widget = ipywidgets.Image()
    self.previous_position = -1
    traitlets.dlink((self.camera, 'value'), (self.image_widget, 'value'),
                    transform=bgr8_to_jpeg)
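Neither of the __init__ snippets shows how the stored gains are consumed. The usual jetbot road-following control step, sketched here under the assumption that x and y are the network's predicted target coordinates, applies a PD term to the steering angle:

# x, y: road-following network outputs for the current frame (assumed)
angle = np.arctan2(x, y)
pid = angle * self.steering_gain + (angle - self.angle_last) * self.steering_dgain
self.angle_last = angle
steering = pid + self.steering_bias
self.robot.left_motor.value = max(min(self.speed_gain + steering, 1.0), 0.0)
self.robot.right_motor.value = max(min(self.speed_gain - steering, 1.0), 0.0)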
Example #6
    def __init__(self, width=640, height=480, saveFramesToFile=True):
        super().__init__()
        # Set camera frame size
        self.width = width
        self.height = height

        # Initialization
        # Camera instance
        self.camera = Camera.instance(width=self.width, height=self.height)
        # Region Of Interest
        self.sceneWhole = None
        self.sceneROI = None
        # Captured frame
        self.savedFrameWhole = None
        self.savedFrameROI = None
        # File numbering counter
        self.fileNumberCount = 1000000000
        # Flag whether to save to file or not
        self.saveFramesToFile = saveFramesToFile
Example #7
def cam_capture(fn):
    global cam
    global capidx

    capidx += 1

    path = "/home/jetbot/camerasnaps"
    try:
        os.makedirs(path)
    except FileExistsError:
        pass
    except Exception as ex:
        print("Exception creating directory '%s', error: %s" % (path, str(ex)))
        return

    if cam is None:
        try:
            print("Initializing camera...")
            cam = Camera.instance(width=960, height=720)
            print("\r\nCamera initialized!")
        except Exception as ex:
            sys.stderr.write("Exception initializing camera: " + str(ex))
            cam = None
            return

    try:
        fp = os.path.join(path, fn + '.jpg')
        with open(fp, 'wb') as f:
            f.write(bytes(imencode('.jpg', cam.value)[1]))
        teensyadc.set_camera_led()
        try:
            uid = pwd.getpwnam("jetbot").pw_uid
            gid = grp.getgrnam("jetbot").gr_gid
            os.chown(fp, uid, gid)
        except Exception as ex:
            sys.stderr.write(
                "Exception changing ownership of file '%s', error: %s" %
                (fp, str(ex)))
    except Exception as ex:
        sys.stderr.write("Exception writing to file '%s', error: %s" %
                         (fp, str(ex)))
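A hypothetical call site, capturing one timestamped snapshot per minute:

import time

while True:
    cam_capture(time.strftime('%Y%m%d-%H%M%S'))
    time.sleep(60)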
Example #8
def face_main():
    global face
    global smile
    global eye

    camera_0 = Camera.instance(width=224, height=224)
    face_seen = 1
    face_haar_model = './data/haarcascade_frontalface_default.xml'
    smile_haar_model = './data/haarcascade_smile.xml'
    eye_haar_model = './data/haarcascade_eye.xml'
    face = cv2.CascadeClassifier(face_haar_model)
    smile = cv2.CascadeClassifier(smile_haar_model)
    eye = cv2.CascadeClassifier(eye_haar_model)
    while True:
        frame_0 = camera_0.value
        gray_0 = cv2.cvtColor(frame_0, cv2.COLOR_BGR2GRAY)
        canvas_0 = detect(gray_0, frame_0, face_seen)
        #cv2.imshow('Video', canvas_0)
        if cv2.waitKey(1) & 0xff == ord('q'):
            break

    camera_0.stop()

    cv2.destroyAllWindows()
Example #9
import numpy as np
import cv2
import glob
from jetbot import Camera

cam = Camera.instance()
# termination criteria
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((6 * 7, 3), np.float32)
objp[:, :2] = np.mgrid[0:7, 0:6].T.reshape(-1, 2)

# Arrays to store object points and image points from all the images.
objpoints = []  # 3d point in real world space
imgpoints = []  # 2d points in image plane.

#images = glob.glob('./chessboard_img/*.jpg')

#for fname in images:

while True:

    img = cam.value
    #img = cv2.imread(fname)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    # Find the chess board corners
    ret, corners = cv2.findChessboardCorners(gray, (7, 6), None)
    print(ret)
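The loop stops after corner detection. The usual continuation from the OpenCV calibration tutorial, which this snippet appears to follow, refines and accumulates the corners and finally calibrates (a sketch, not part of the original):

    if ret:
        objpoints.append(objp)
        # Refine the corner locations to sub-pixel accuracy.
        corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners2)

# Once enough views have been collected:
# ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
#     objpoints, imgpoints, gray.shape[::-1], None, None)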
Example #10
#3 seconds

# Great! We've now defined our pre-processing function which can convert images from the camera format to the neural network input format.
#
# Now, let's start and display our camera.  You should be pretty familiar with this by now.  We'll also create a slider that will display the
# probability that the robot is blocked.

# In[5]:

print('16 second traitlets')
import traitlets
from IPython.display import display
import ipywidgets.widgets as widgets
from jetbot import Camera, bgr8_to_jpeg

camera = Camera.instance(width=224, height=224)
image = widgets.Image(format='jpeg', width=224, height=224)
blocked_slider = widgets.FloatSlider(description='blocked',
                                     min=0.0,
                                     max=1.0,
                                     orientation='vertical')
#DMF Added Next Line
gpu_slider = widgets.FloatSlider(description="GPU%",
                                 min=0.0,
                                 max=1.0,
                                 orientation='vertical')

camera_link = traitlets.dlink((camera, 'value'), (image, 'value'),
                              transform=bgr8_to_jpeg)

display(widgets.HBox([image, blocked_slider, gpu_slider]))
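The gpu_slider above is never wired to a data source in this fragment. On a Jetson, one plausible source (an assumption, not part of the example) is the sysfs GPU load counter:

def read_gpu_load(path='/sys/devices/gpu.0/load'):
    # The file reports load in tenths of a percent (0-1000); scale it to 0..1.
    with open(path) as f:
        return int(f.read().strip()) / 1000.0

gpu_slider.value = read_gpu_load()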
Example #11
    model_roadfollow.fc = torch.nn.Linear(512, 2)
    model_roadfollow.load_state_dict(torch.load(steering_model_path))

    model_dinodet = torchvision.models.resnet18(pretrained=False)
    model_dinodet.fc = torch.nn.Linear(512, 6)
    model_dinodet.load_state_dict(torch.load(dino_detect_model_path))

    device = torch.device('cuda')

    model = model_roadfollow.to(device)
    model = model.eval().half()

    model_dinodet = model_dinodet.to(device)
    model_dinodet = model_dinodet.eval()

    camera = Camera.instance(width=int(rospy.get_param("image_width")),
                             height=int(rospy.get_param("image_height")))

    from jetbot import Robot
    robot = Robot()
    speed_gain_slider = float(rospy.get_param("speed"))
    steering_gain_slider = float(rospy.get_param("steering_gain"))
    steering_dgain_slider = float(rospy.get_param("steering_d_gain"))
    steering_bias_slider = float(rospy.get_param("steering_bias"))
    angle = 0.0
    angle_last = 0.0
    prev_class = -1
except Exception as e:
    rospy.loginfo("Error occurred: %s", str(e))

dino_names = rospy.get_param("dinosaurs")
Example #12
def trigger_speech(trigger_type):
    '''

    Respond to the user command, or trigger
        - "greeting": The user says hi to J-Bot and she responds about general weather information
        - "weather": The user asks about the weather, J-Bot responds with the weather information
        - "air pollution": The user asks about the air quality, J-Bot responds with the air quality information
        - "camera": The user asks for the opinion about the outfit, J-Bot responds according to the condition outside
                    whether the outfit is suitable

    Args:
        trigger_type: (str) Command by the user

    Returns:
        (str) What J-Bot tells the user according to what the user asks

    '''

    # Get the information about the weather and air quality
    d = get_outside_condition()

    temp = int(d['temperature'])
    highest_temp_forecast = 0
    highest_temp_time = 0
    forecasted_weather = ""
    forecasted_time = ""
    current_w = d['weather']['detail']
    is_all_day = True
    forecasted_data = d['forecast']
    air_quality = d['air_condition']['level']

    count_equal = 0
    for f in forecasted_data:
        # Get the highest temperature for the day
        if highest_temp_forecast < f['temperature']:
            highest_temp_forecast = f['temperature']
            highest_temp_time = f['time']
        if current_w == f['weather']['detail']:
            count_equal += 1
        elif is_all_day and (current_w != f['weather']['detail']):
            forecasted_weather = f['weather']['detail']
            forecasted_time = f['time']
            is_all_day = False

    # If the highest forecasted temperature is lower than the current temperature, use the current one as the forecasted high
    if highest_temp_forecast <= temp:
        highest_temp_forecast = temp

    # Call greeting context, weather context (temperature included), air pollution context
    if trigger_type == "greeting":
        return greeting_context() + " " + weather_context(
            temp, highest_temp_forecast, highest_temp_time, forecasted_weather,
            forecasted_time, forecasted_data, current_w, is_all_day,
            count_equal) + " " + air_context(d['air_condition']['level'])

    # Call weather context
    elif trigger_type == "weather":
        return weather_context(temp, highest_temp_forecast, highest_temp_time,
                               forecasted_weather, forecasted_time,
                               forecasted_data, current_w, is_all_day,
                               count_equal)

    # Call air pollution context
    elif trigger_type == 'air pollution':
        return air_context(air_quality)

    # Check the user's outfit and respond accordingly
    elif trigger_type == 'camera':

        # Initiate the camera
        camera = Camera.instance(width=224, height=224)
        camera.start()
        image = widgets.Image(format='jpeg', width=224, height=224)
        camera_link = traitlets.dlink((camera, 'value'), (image, 'value'),
                                      transform=bgr8_to_jpeg)
        image_path = 'temp.jpg'

        # Saving the camera image locally
        with open(image_path, 'wb') as f:
            f.write(image.value)
        camera.stop()

        # Classify the upper and lower outfits of the user
        top, bot = detectClothes(image_path, clothe_model)
        print(top, bot)

        return recommend_clothes(highest_temp_forecast, forecasted_weather,
                                 air_quality, top, bot)
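A hypothetical invocation for each supported trigger:

print(trigger_speech('greeting'))
print(trigger_speech('weather'))
print(trigger_speech('air pollution'))
print(trigger_speech('camera'))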
Example #13
logging.config.dictConfig(logging_config)
logger = logging.getLogger(__name__)

# init camera
logger.info('initialize camera')

camera_width = 600
camera_height = 600
model_width = 300
model_height = 300
xscale = model_width * (camera_width / model_width)
yscale = model_height * (camera_height / model_height)
#camera = Camera.instance(width=camera_width, height=camera_height, capture_width=3280, capture_height=2464)  # W = 3280 H = 2464   1920 x 1080   1280 x 720
camera = Camera.instance(
    width=camera_width,
    height=camera_height,
    capture_width=1920,
    capture_height=1080)  # W = 3280 H = 2464   1920 x 1080   1280 x 720
cat_count = 0
seconds_between_pics = 1.0
v2_coco_labels_to_capture = [16, 17, 18, 91, 92]

# create save directory

image_dir = 'dataset/cats'

# we have this "try/except" statement because these next functions can throw an error if the directories exist already
try:
    os.makedirs(image_dir)
except FileExistsError:
    logger.info('image directories not created because they already exist')
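The fragment ends before the capture loop. A minimal sketch, assuming a detect() callable that returns (class_id, score) pairs and that os, cv2, and time are imported earlier in the script:

import uuid

while cat_count < 100:
    frame = camera.value
    detections = detect(frame)  # hypothetical detector call
    if any(cls in v2_coco_labels_to_capture for cls, _ in detections):
        fname = os.path.join(image_dir, uuid.uuid1().hex + '.jpg')
        cv2.imwrite(fname, frame)
        cat_count += 1
        logger.info('saved %s', fname)
        time.sleep(seconds_between_pics)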
Example #14
from jetbot import Camera

width = 640
height = 480
camera = Camera.instance(width=width, height=height)

while True:
    # camera.value is an HxWx3 array, so index rows (height) before columns (width)
    print(camera.value[height // 2][width // 2][0])
Example #15
    # otherwise steer towards the target
    else:
        # move robot forward and steer proportionally to the target's x-distance from center
        center = detection_center(det)
        robot.set_motors(
            float(speed_widget.value + turn_gain_widget.value * center[0]),
            float(speed_widget.value - turn_gain_widget.value * center[0]))

    # update image widget
    image_widget.value = bgr8_to_jpeg(image)


model = ObjectDetector(
    '/home/dewald/projects/frameworks/nvidia/deepstream_sdk_v4.0.2_jetson/samples/models/SSD-Mobilenet-v2/ssd_mobilenet_v2_coco.engine'
)
camera = Camera.instance(width=300, height=300)
detections = model(camera.value)
print(detections)

collision_model = torchvision.models.alexnet(pretrained=False)
collision_model.classifier[6] = torch.nn.Linear(
    collision_model.classifier[6].in_features, 2)
collision_model.load_state_dict(
    torch.load('/home/dewald/projects/jetbot/jetbot/best_model.pth'))
device = torch.device('cuda')
collision_model = collision_model.to(device)
mean = 255.0 * np.array([0.485, 0.456, 0.406])
stdev = 255.0 * np.array([0.229, 0.224, 0.225])
normalize = torchvision.transforms.Normalize(mean, stdev)

# Robot Movement
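detection_center is used above but not shown. In the jetbot object-following notebook it returns the bounding-box center relative to the image midpoint, which appears to be what this example relies on:

def detection_center(detection):
    # Normalized bbox [x0, y0, x1, y1]; offset of its center from the image midpoint.
    bbox = detection['bbox']
    center_x = (bbox[0] + bbox[2]) / 2.0 - 0.5
    center_y = (bbox[1] + bbox[3]) / 2.0 - 0.5
    return (center_x, center_y)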
Example #16
import os
import cv2
from jetbot import Camera

dataset_path = "jetbot_dataset_960test2/"
image_folder = "rgb/"
if not os.path.exists(dataset_path + image_folder):
    os.makedirs(dataset_path + image_folder)
file = open(dataset_path + "rgb.txt", "w")

camera = Camera.instance(width=960,
                         height=540,
                         capture_width=1280,
                         capture_height=720)
iid = 0
while True:
    img = camera.value
    cv2.imshow("test", img)

    fname = str(iid).zfill(4) + ".jpg"
    cv2.imwrite(dataset_path + image_folder + fname, img)
    # Write file
    file.write(str(iid) + " " + image_folder + fname + "\n")

    iid += 1
    k = cv2.waitKey(100)
    if k == ord('q'):
        camera.stop()
        file.close()
        break
Example #17
from jetson import inference

# ## General Setups

# If GPU is available, we will use it for efficiency
if torch.cuda.is_available():
    device = torch.device("cuda")
else:
    device = torch.device("cpu")

if __name__ == '__main__':
    print(device)

# Instantiate camera with the defined size.
camwidth, camheight = 320, 240  #224,224
cam = Camera.instance(width=camwidth, height=camheight)

try:
    print("Camspecs are to specifications: ",
          ((cam.width == camwidth) and (cam.height == camheight)))
    print(cam.width, cam.height)
except NameError:
    pass

# Instantiate robot
robot = Robot()
# Sometimes the robot starts driving as soon as it is instantiated, so stop it just in case.
robot.stop()


def geometric_average(l: list):
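    # The body is cut off in the original listing; a plausible completion,
    # assuming the name means the geometric mean of the list:
    prod = 1.0
    for v in l:
        prod *= v
    return prod ** (1.0 / len(l))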
Example #18
    x = torch.from_numpy(x).float()
    x = normalize(x)
    x = x.to(device)
    x = x[None, ...]
    return x


# Start the camera and display it (widgets are unavailable in a plain .py script, so an alternative is being considered)
update_bar(1, 'start and display our camera')

import traitlets
#from IPython.display import display
#import ipywidgets.widgets as widgets
from jetbot import Camera, bgr8_to_jpeg

camera = Camera.instance(width=300, height=224, fps=15)
#image = widgets.Image(format='jpeg', width=224, height=224)
#blocked_slider = widgets.FloatSlider(description='blocked', min=0.0, max=1.0, orientation='vertical')
#camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)
#display(widgets.HBox([image, blocked_slider]))

# Create the robot instance
update_bar(1, 'create our robot instance')

from jetbot import Robot

robot = Robot()
"""
# Create tkinter
import tkinter
"""
Example #19
import imutils
from PIL import Image
from jetbot import Camera, Robot
import time


face_cascade = cv2.CascadeClassifier('./data/haarcascade_frontalface_default.xml')
KNOWN_FACES_DIR = 'known_faces'
#UNKNOWN_FACES_DIR = 'unknown_faces'  # only needed for still images
TOLERANCE = 1  # can be changed
FRAME_THICKNESS = 3
FONT_THICKNESS = 2
MODEL = 'cnn'  # default: 'hog', other one can be 'cnn' - CUDA accelerated (if available) deep-learning pretrained model

#video = cv2.VideoCapture(0)
video = Camera.instance()




# Returns (R, G, B) from name
def name_to_color(name):
    # Take the first 3 letters and lowercase them;
    # lowercased ord() values range from 97 to 122, so subtract 97 and multiply by 8
    color = [(ord(c.lower())-97)*8 for c in name[:3]]
    return color


print('Loading known faces...')
known_faces = []
known_names = []
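The listing is cut off while loading known faces. The usual continuation in the face_recognition tutorials this snippet follows (an assumption; the os and face_recognition imports are added here for completeness):

import os
import face_recognition

for name in os.listdir(KNOWN_FACES_DIR):
    for filename in os.listdir(os.path.join(KNOWN_FACES_DIR, name)):
        image = face_recognition.load_image_file(
            os.path.join(KNOWN_FACES_DIR, name, filename))
        encodings = face_recognition.face_encodings(image)
        if encodings:
            known_faces.append(encodings[0])
            known_names.append(name)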