Example #1
def __init__(self, data, max_partitions=None, k=None):
    """
    Split a given data set into approximately equal-sized partitions
    (exact when max_partitions is a power of 2**k) using binary trees.

    :type data: pyspark.RDD
    :param data: pyspark RDD of (key, k-dim vector like) pairs
    :type max_partitions: int
    :param max_partitions: maximum number of partitions to split into
    :type k: int
    :param k: dimensionality of the data
    """
    self.k = int(k) if k is not None else len(data.first()[1])
    self.max_partitions = (int(max_partitions)
                           if max_partitions is not None else 4**self.k)
    data.cache()
    # Compute the bounding box of the whole data set in a single pass.
    box = data.aggregate(
        BoundingBox(k=self.k),
        lambda total, kv: total.union(BoundingBox(kv[1])),
        lambda total, other: total.union(other))
    # Tag every record with partition id 0 before recursive splitting.
    first_partition = data.map(lambda kv: ((kv[0], 0), kv[1]))
    self._create_partitions(first_partition, box)
    # Re-assemble the leaf partitions into a single result RDD.
    self.result = data.context.emptyRDD()
    for partition in self.partitions.values():
        self.result = self.result.union(partition)
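A hypothetical usage sketch of the partitioner above; the class name KDPartitioner is an assumption (only __init__ is shown), while the (key, k-dim vector) input shape comes from the docstring.

# Hypothetical usage sketch; `KDPartitioner` is an assumed class name.
from pyspark import SparkContext

sc = SparkContext(appName='kd-partition-demo')
points = sc.parallelize(
    [(i, (float(i % 7), float(i % 11))) for i in range(100)])
partitioner = KDPartitioner(points, max_partitions=16, k=2)
print(partitioner.result.count())  # union of all leaf partitions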
Example #2
def parse_object(osm_object):
    object_type = 0  # 0 = unknown, 1 = node, 2 = way, 3 = relation
    if isinstance(osm_object, osm.osm.Node):
        inserted = False
        for bbox in bboxes:
            if osm_object in bbox:
                inserted = True
            elif bbox.distance_to_osm_object(osm_object) < bbox.get_merge_distance():
                bbox.insert_object(osm_object)
                inserted = True

        if not inserted:
            # Some tools, like mapproxy-seed, don't work with polygons
            # that have all points at the same spot, so give the new box
            # a minimal extent.
            bbox = BoundingBox([float(osm_object.attribs['lon']) + polygonMinSize,
                                float(osm_object.attribs['lat']) - polygonMinSize],
                               [float(osm_object.attribs['lon']),
                                float(osm_object.attribs['lat'])],
                               mergeDistance, mergePercentage)
            bboxes.append(bbox)
        object_type = 1
    elif isinstance(osm_object, osm.osm.Way):
        object_type = 2
    elif isinstance(osm_object, osm.osm.Relation):
        object_type = 3
    return object_type
Example #3
def mouseReleaseEvent(self, event):
    if not self.show_reticle and event.button() == Qt.LeftButton:
        return
    if event.button() == Qt.LeftButton:
        if self.bbox_label is not None:
            self.end_pt = event.pos()
            rect = self.pt2rect(self.start_pt, self.end_pt)
            # Ignore accidental clicks that drag out a tiny rectangle.
            if rect.width() > 5 and rect.height() > 5:
                if self.is_new_tube:
                    # Archive the previous tube before starting a new one.
                    if self.bboxes['current_tube'] is not None:
                        self.bboxes['other_tubes'].append(
                            self.bboxes['current_tube'])
                    self.is_new_tube = False
                bbox = BoundingBox.from_qrect(rect, self.bbox_label, 1)
                self.bboxes['current_tube'] = bbox
                self.bbox_added.emit(self.proj_to_real_img(bbox))
        self.update()
    elif event.button() == Qt.RightButton:
        # A plain right click (no drag) inside the current box deletes it.
        if not self.mouse_down:
            bbox = self.bboxes['current_tube']
            if (bbox is not None and
                    bbox.contain(event.pos().x(), event.pos().y())):
                self.bboxes['current_tube'] = None
                self.bbox_deleted.emit()
            self.update()
    self.mouse_down = False
Example #4
def from_dict(tube_dict):
    tube = Tube(tube_dict['id'], tube_dict['label'],
                tube_dict['start'], tube_dict['end'])
    tube.bboxes = []
    if 'bboxes' in tube_dict:
        for bbox in tube_dict['bboxes']:
            tube.bboxes.append(
                BoundingBox(tube.label, bbox['src'], *bbox['bbox']))
    return tube
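For reference, a dictionary of the shape consumed by from_dict might look like this; the keys mirror those read above, but the concrete values are invented for illustration.

# Hypothetical input for from_dict; only the keys come from the code.
tube_dict = {
    'id': 1,
    'label': 'dog',
    'start': 10,
    'end': 42,
    'bboxes': [
        {'src': 0, 'bbox': (5, 8, 20, 30)},  # unpacked as *bbox['bbox']
    ],
}
tube = Tube.from_dict(tube_dict)  # assumes from_dict is a static method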
Example #5
def update(self, frame):
    self.track_num += 1
    score = super(Tracker, self).update(frame.raw_img)
    rect = super(Tracker, self).get_position()
    left = int(rect.left())
    right = int(rect.right())
    top = int(rect.top())
    bottom = int(rect.bottom())
    # Convert the tracker's rect into (x, y, width, height) form.
    self.bbox = BoundingBox(self.label, 0, left, top,
                            right - left, bottom - top)
    return (self.bbox, score)
Example #6
def run_images(self, images, cutoff=0.05):
    """
    Run the neural net on a batch of input images.

    Inputs:
        images: batch of pictures as a [b, X, Y, C] numpy array,
            values between 0 and 255 (uint8 range).
        cutoff: confidence value below which boxes are discarded.

    Returns:
        List of BoundingBox objects representing the found boxes.
    """
    k = p.ANCHOR_COUNT
    gs = p.GRID_SIZE

    batch_size = 1  # NOTE: only the first image of the batch is processed
    start_time = time.time()
    activations, deltas, gammas, classes, chosen_anchor = \
        self.sess.run(self.all_out, feed_dict={self.inp_batch: images})
    print('Took %f seconds!' % (time.time() - start_time))

    gammas = np.reshape(gammas, [-1, gs**2 * k])
    chosen_anchor = np.reshape(chosen_anchor, [-1, gs**2])
    deltas = np.reshape(deltas, [-1, gs**2 * k, 4])
    classes = np.reshape(classes, [-1, gs**2 * k, p.OUT_CLASSES])
    class_numbers = np.argmax(classes, axis=2)

    box_list = []
    anchors = u.create_anchors(gs)

    for ib in range(batch_size):
        # Decode the regressed deltas into absolute boxes, then keep at
        # most 5 non-overlapping boxes ranked by objectness (gamma).
        boxes = u.delta_to_box(deltas[ib], anchors)
        nms_indices = tf.image.non_max_suppression(u.trans_boxes(boxes),
                                                   gammas[ib],
                                                   5,
                                                   iou_threshold=0.0) \
                        .eval(session=self.sess)

        selected_boxes = boxes[nms_indices]
        selected_gamma = gammas[ib, nms_indices]
        selected_class = class_numbers[ib, nms_indices]
        selected_class_scores = classes[ib, nms_indices]

        for i, box in enumerate(selected_boxes):
            # Final confidence: objectness times class probability.
            sm_scores = u.softmax(selected_class_scores[i])
            conf = selected_gamma[i] * sm_scores[selected_class[i]]
            if conf > cutoff:
                box_list.append(
                    BoundingBox(u.trans_boxes(box), conf,
                                selected_class[i]))

    return box_list
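The per-box confidence above is the objectness score (gamma) multiplied by the softmax probability of the winning class; here is a self-contained restatement of that step, with a stand-in for u.softmax (an assumption, since its body is not shown).

# Minimal sketch of the confidence computation used above.
import numpy as np

def softmax(x):
    # Numerically stable softmax, standing in for u.softmax.
    e = np.exp(x - np.max(x))
    return e / e.sum()

gamma = 0.9                                # objectness for one box
class_scores = np.array([2.0, 0.5, -1.0])  # raw class logits
cls = int(np.argmax(class_scores))
conf = gamma * softmax(class_scores)[cls]  # kept if conf > cutoff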
Example #7
def display(self, pixmap):
    self.scale_ratio = max(pixmap.width() / self.width(),
                           pixmap.height() / self.height())
    scaled_pixmap = pixmap.scaled(self.width() - 2, self.height() - 2,
                                  Qt.KeepAspectRatio)
    # Center the scaled pixmap inside the widget.
    x = int((self.width() - scaled_pixmap.width()) / 2)
    y = int((self.height() - scaled_pixmap.height()) / 2)
    self.img_region = BoundingBox.from_qrect(
        QRect(QPoint(x, y), scaled_pixmap.size()))
    self.setPixmap(scaled_pixmap)
    self.update()
Example #8
def __build_windows(self, outputs: np.ndarray):
    windows = []
    for size, values in zip(self.window_size, outputs):
        values = np.squeeze(values)
        rows, cols = values.shape
        for i in range(rows):
            for j in range(cols):
                # Keep only grid cells whose score clears the threshold.
                if values[i, j] > self.threshold:
                    windows.append((
                        BoundingBox(j * size * self.window_step,
                                    i * size * self.window_step,
                                    size * INPUT_WIDTH,
                                    size * INPUT_HEIGHT),
                        size,
                        values[i, j]
                    ))
    return windows
Example #9
def find_dogs(self, image_path: pathlib.Path) -> None:
    """
    Search for dogs in the specified image using a convolutional
    implementation of sliding windows.

    :param image_path: Path to the image to evaluate.
    """
    # Load the image.
    self.image = Image.open(image_path)
    # Pre-compute (approximately) the total number of windows.
    total_windows = sum([
        math.ceil((self.image.width - int(INPUT_WIDTH * size) + 1) /
                  self.window_step) * math.ceil(
                      (self.image.height - int(INPUT_HEIGHT * size) + 1) /
                      self.window_step) for size in self.window_size
    ])
    # Go through every window, detecting whether it contains a dog.
    self.__setup_progress(total_windows)
    windows = []
    for size in self.window_size:
        # Resize the image so a fixed-size network input covers a region
        # proportional to `size` in the original image.
        resized_image = np.array(
            self.image.resize((int(self.image.width / size),
                               int(self.image.height / size)),
                              resample=1))
        height, width, _ = resized_image.shape
        for i in range(0, height - INPUT_HEIGHT + 1,
                       max(1, int(self.window_step / size))):
            for j in range(0, width - INPUT_WIDTH + 1,
                           max(1, int(self.window_step / size))):
                cropped_image = resized_image[i:i + INPUT_HEIGHT,
                                              j:j + INPUT_WIDTH, :]
                probability = self.model.predict(
                    self.preprocess_fnc(cropped_image))[0][0]
                # A window counts only when the probability clears
                # the threshold.
                if probability >= self.threshold:
                    windows.append((BoundingBox(j * size, i * size,
                                                size * INPUT_WIDTH,
                                                size * INPUT_HEIGHT),
                                    size, probability))
                self.__update_progress()
    # Reduce the number of windows detected.
    self.dogs = self.__filter_windows(windows)
    # Plot the final windows.
    self.__plot_windows(self.dogs)
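find_dogs maps a window at pixel (i, j) of the downscaled image back to original-image coordinates as below; a minimal, self-contained restatement of that mapping (the INPUT_WIDTH/INPUT_HEIGHT values are assumptions, not taken from the source).

INPUT_WIDTH, INPUT_HEIGHT = 64, 64  # assumed network input size

def window_to_box(i, j, size):
    # A window at (row i, col j) of an image downscaled by `size`
    # covers size*INPUT_WIDTH x size*INPUT_HEIGHT original pixels,
    # anchored at (j * size, i * size).
    return (j * size, i * size, size * INPUT_WIDTH, size * INPUT_HEIGHT)

print(window_to_box(2, 3, 1.5))  # (4.5, 3.0, 96.0, 96.0)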
Example #10
def run(self):
    self.centers = []
    bboxes = []

    # Send b'd' to NNServer to request detection data.
    self.connection.write(struct.pack('<c', b'd'))
    self.connection.flush()

    box_count = struct.unpack('<L',
                              self.connection.read(
                                  struct.calcsize('<L')))[0]
    self.data_stream.seek(0)

    for i in range(box_count):
        data = struct.unpack(
            '<ffffff', self.connection.read(struct.calcsize('<ffffff')))
        coords = (data[0], data[1], data[2], data[3])
        classification = data[4]
        confidence = data[5]
        bboxes.append(BoundingBox(coords, confidence, classification))
    self.data_stream.seek(0)

    self.centers = self.box_to_center(bboxes)
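A hypothetical sketch of the peer side of this little protocol, inferred from the unpacking above: a little-endian uint32 count followed by one six-float record (four coords, classification, confidence) per box. This is a sketch, not the actual NNServer code.

# Hypothetical writer matching the reader above.
import struct

def send_boxes(connection, boxes):
    # boxes: iterable of (coords_tuple_of_4, classification, confidence)
    connection.write(struct.pack('<L', len(boxes)))
    for coords, classification, confidence in boxes:
        connection.write(struct.pack('<ffffff', *coords,
                                     classification, confidence))
    connection.flush()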
Example #11
def test_start_inside_box():
    bbox = BB(P(0, 0, 0), P(1, 1, 1))
    assert bbox.is_hit(R(P(0.5, 0.5, 0.5), V(1, 1, 1)))
Example #12
def test_ray_rides_edge_of_bbox():
    bbox = BB(P(0, 0, 0), P(1, 1, 1))
    assert bbox.is_hit(R(P(0, 0, 0), V(1, 0, 0)))
Example #13
def test_reject_ray_starting_on_edge_and_moving_away():
    # t0 < epsilon
    bbox = BB(P(0, 0, 0), P(1, 1, 1))
    assert not bbox.is_hit(R(P(0, 0, 0), V(-1, -1, -1)))
Example #14
def track(self, frame):
    # Clamp the tracker's output to the visible frame.
    frame_rect = BoundingBox(None, 0, 0, 0, self.video.width,
                             self.video.height)
    bbox, score = self.tracker.update(frame)
    bbox = bbox.intersected(frame_rect)
    return bbox
Example #15
def test_ray_parallel_to_bbox():
    bbox = BB(P(1, 1, 1), P(2, 2, 2))
    assert not bbox.is_hit(R(P(0, 0, 0), V(1, 0, 0)))
    assert not bbox.is_hit(R(P(0, 0, 0), V(-1, 0, 0)))
Example #16
def test_bbox_basic_hit():
    bbox = BB(P(1, 1, 1), P(2, 2, 2))
    assert bbox.is_hit(R(P(0, 0, 0), V(1, 1, 1)))
Example #17
def test_bbox_basic_miss():
    bbox = BB(P(1, 1, 1), P(2, 2, 2))
    assert not bbox.is_hit(R(P(0, 0, 0), V(-1, 1, 1)))
    assert not bbox.is_hit(R(P(0, 0, 0), V(1, -1, 1)))
    assert not bbox.is_hit(R(P(0, 0, 0), V(1, 1, -1)))
Example #18
def test_hit_box_behind_ray():
    bbox = BB(P(1, 1, 1), P(2, 2, 2))
    assert not bbox.is_hit(R(P(0, 0, 0), V(-1, -1, -1)))
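Taken together, these tests pin down BB.is_hit: a ray starting inside the box hits, a ray riding an edge hits, a parallel ray outside the box misses, and a box behind the ray origin misses. A minimal slab-method sketch consistent with all of them follows; the class layouts of P, V, R and BB are assumptions, only the names and the expected behaviour come from the tests.

# Minimal slab-test sketch; class layouts are assumed for illustration.
EPS = 1e-9

class P:
    def __init__(self, x, y, z):
        self.c = (x, y, z)

V = P  # vectors share the point layout in this sketch

class R:
    def __init__(self, origin, direction):
        self.origin, self.direction = origin, direction

class BB:
    def __init__(self, lo, hi):
        self.lo, self.hi = lo, hi

    def is_hit(self, ray):
        tmin, tmax = float('-inf'), float('inf')
        for axis in range(3):
            o = ray.origin.c[axis]
            d = ray.direction.c[axis]
            lo, hi = self.lo.c[axis], self.hi.c[axis]
            if d == 0.0:
                if o < lo or o > hi:
                    return False  # parallel to this slab and outside it
                continue
            near, far = (lo - o) / d, (hi - o) / d
            if near > far:
                near, far = far, near
            tmin, tmax = max(tmin, near), min(tmax, far)
        # The slab intervals must overlap, and the exit point must lie in
        # front of the ray origin (the "t0 < epsilon" rejection above).
        return tmin <= tmax and tmax > EPS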