Beispiel #1
0
    def __init__(self):
        """Name the recogniser, then load the pretrained YOLO weights."""
        # Human-readable name, used in log output.
        self.name = "YOLO-Recogniser"

        self.log("Loading weights...")
        # Load the pretrained YOLO detection model via lightnet.
        self.model = lightnet.load("yolo")

        self.log("Created")
Beispiel #2
0
    def load_resources():
        """Load the YoloV2 detection model via lightnet.

        NOTE(review): this takes no ``self``/``cls`` parameter — presumably
        decorated with ``@staticmethod`` outside this view; confirm.

        Returns:
            model: the loaded YoloV2 model
        """
        app.logger.debug('Loading yolo model')

        model = lightnet.load(Yolo.MODEL)
        return model
 def __init__(self):
     """Open the default camera, load the YOLO detector and set up state.

     Prepares an empty list of detected objects and an English -> Spanish
     lookup for the labels this recogniser cares about.
     """
     self.cap = cv2.VideoCapture(camera_port)
     self.model = lightnet.load('yolo')
     self.objects = []
     # English YOLO label -> Spanish phrase for that object.
     self.es_dict = dict(
         bottle='una botella',
         keyboard='un teclado',
         diningtable='una mesa',
         cup='una taza',
         laptop='un portatil',
     )
Beispiel #4
0
 def __init__(self):
     """Configure the camera (1280x720 at 10 FPS) and load the YOLO model.

     Also initialises the detected-objects list and the English -> Spanish
     label dictionary.
     """
     self.cap = cv2.VideoCapture(camera_port)
     self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
     self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
     self.cap.set(cv2.CAP_PROP_FPS, 10)
     self.model = lightnet.load('yolo')
     self.objects = []
     # English YOLO label -> Spanish phrase for that object.
     self.es_dict = {
         'bottle': 'una botella',
         'keyboard': 'un teclado',
         'diningtable': 'una mesa',
         'cup': 'una taza',
         'laptop': 'un portatil',
         'wine glass': 'una copa',
     }
 def __init__(self, pPedidos, capture):
     """Store the capture device and order list, then load the YOLO model.

     Args:
         pPedidos: the pedidos (orders) collection supplied by the caller.
         capture: an already-opened video capture object.
     """
     # Constant values
     self.videoCapture = capture
     self.cameraPort = 0
     self.img_file_path = "data/img.jpg"
     self.boxes = 0
     self.beverage_list = ['bottle', 'vase', 'cup', 'wine glass']
     # YOLO label -> Spanish words associated with that item.
     self.beverage_dict = {
         'person': ["persona"],
         'bottle': ["gaseosa", "cerveza"],
         'cell phone': ["celular"],
         'vase': ["agua"],
         'cup': ["cafe", "café", "tinto", "te", "té"],
         'wine glass': ["copa de vino", "vino"],
     }
     # Variables
     self.pedidos = pPedidos
     self.available_drinks = []
     self.model = lightnet.load('yolo')
 def __init__(self):
     """Open the camera at 720p/30 FPS, load YOLO and connect to ROS.

     Blocks until the "sIA_take_picture" ROS service becomes available
     before creating its service proxy.
     """
     self.txt_line = ""
     self.cap = cv2.VideoCapture(camera_port)
     self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
     self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
     self.cap.set(cv2.CAP_PROP_FPS, 30)
     self.bridge = CvBridge()
     self.model = lightnet.load('yolo')
     self.objects = []
     # English YOLO label -> Spanish noun.
     self.es_dict = {
         'bottle': 'botella',
         'keyboard': 'teclado',
         'diningtable': 'mesa',
         'cup': 'taza',
         'vase': 'vaso',
         'laptop': 'portatil',
         'wine glass': 'copa',
     }
     self.frame = None
     # Wait for the picture service before wiring up the proxy.
     rospy.wait_for_service("sIA_take_picture")
     self.takePicture = rospy.ServiceProxy("sIA_take_picture", TakePicture)
     self.boxes = []
     self.dists = []
Beispiel #7
0
#!/usr/bin/python

# OBJECT RECOGNITION
#   by tHE iNCREDIBLE mACHINE
#
# A nice script to test in

import lightnet

print("Loading weights...")
model = lightnet.load("yolo")
print("Loading image...")
# Read the test image as raw bytes. The original opened the file without
# ever closing it (leaked handle); the context manager guarantees cleanup.
with open('/VirtualShare/dog.jpg', 'rb') as img_file:
    image = lightnet.Image.from_bytes(img_file.read())
print("Classifying...")
# Running the model on an image returns the detected bounding boxes.
boxes = model(image)
print(boxes)
print("Done.")
def plot_trajectory(_c, im):
    """Overlay each tracked object's (x, y) path on top of image *im*.

    Args:
        _c: mapping of track id -> sequence of (x, y) coordinate pairs.
        im: background image drawn underneath the trajectories.
    """
    for track_id in _c:
        points = _c[track_id]
        xs = [p[0] for p in points]
        ys = [p[1] for p in points]
        plt.plot(xs, ys, zorder=1)

    plt.imshow(im, zorder=0)
    # Flip the y-axis so the origin sits at the top-left, image style.
    plt.axis([0, 238, 158, 0])
    plt.show()


model = lightnet.load('yolo')

start = time.time()

collect = {}

# for dir in os.scandir(base_path):
prev_arr = []
# Process frames 001.tif .. 200.tif through the YOLO model.
for i in range(200):
    fname = str(i + 1).zfill(3) + '.tif'
    image, size, im_p = get_jpg(fname)
    tmp = model(image)
    result = []

    # BUG FIX: the inner loop previously reused the name `i`, shadowing and
    # clobbering the frame index above; give the box index its own name.
    for box_idx, e in enumerate(tmp):
        # e[-1] holds the box coordinates; keep (index, x, y).
        result.append([box_idx, e[-1][0], e[-1][1]])
Beispiel #9
0
 def __init__(self):
     """Load the pretrained YOLO detection model via lightnet."""
     self.model = lightnet.load('yolo')
Beispiel #10
0
def image_test(dataset, lightnet_model, source=None, api=None, exclude=None):
    """
    Test Prodigy's image annotation interface with a YOLOv2 model loaded
    via LightNet. Requires the LightNet library to be installed. The recipe
    will find objects in the images, and create a task for each object.
    """
    log("RECIPE: Starting recipe image.test", locals())
    try:
        import lightnet
    except ImportError:
        prints("Can't find LightNet", "In order to use this recipe, you "
               "need to have LightNet installed (currently compatible with "
               "Mac and Linux): pip install lightnet. For more details, see: "
               "https://github.com/explosion/lightnet",
               error=True,
               exits=1)

    def get_image_stream(model, stream, thresh=0.5):
        # Yield one task per detected object: each copy of the example
        # un-hides exactly one span so the annotator judges one box at a time.
        for eg in stream:
            if not eg['image'].startswith('data'):
                msg = "Expected base64-encoded data URI, but got: '{}'."
                raise ValueError(msg.format(eg['image'][:100]))
            image = lightnet.Image.from_bytes(b64_uri_to_bytes(eg['image']))
            # The model call already takes thresh; the b[2] >= thresh test is
            # an extra filter on the confidence score.
            boxes = [b for b in model(image, thresh=thresh) if b[2] >= thresh]
            eg['width'] = image.width
            eg['height'] = image.height
            eg['spans'] = [get_span(box) for box in boxes]
            for i in range(len(eg['spans'])):
                task = copy.deepcopy(eg)
                task['spans'][i]['hidden'] = False
                task = set_hashes(task, overwrite=True)
                score = task['spans'][i]['score']
                task['score'] = score
                yield task

    def get_span(box, hidden=True):
        # Convert a LightNet box (class_id, name, prob, centre-based
        # (x, y, w, h)) into a Prodigy span with four corner points.
        class_id, name, prob, abs_points = box
        name = str(name, 'utf8') if not isinstance(name, str) else name
        x, y, w, h = abs_points
        rel_points = [[x - w / 2, y - h / 2], [x - w / 2, y + h / 2],
                      [x + w / 2, y + h / 2], [x + w / 2, y - h / 2]]
        return {
            'score': prob,
            'label': name,
            'label_id': class_id,
            'points': rel_points,
            'center': [abs_points[0], abs_points[1]],
            'hidden': hidden
        }

    model = lightnet.load(lightnet_model)
    log("RECIPE: Loaded LightNet model {}".format(lightnet_model))
    stream = get_stream(source, api=api, loader='images', input_key='image')
    stream = fetch_images(stream)

    def free_lightnet(ctrl):
        # Release the model when the controller exits. (Fixes the
        # misspelled local name `free_lighnet`.)
        nonlocal model
        del model

    return {
        'view_id': 'image',
        'dataset': dataset,
        'stream': get_image_stream(model, stream),
        'exclude': exclude,
        'on_exit': free_lightnet
    }