Example #1
    def rect_min_distance(self, box1, box2):
        """
        Calculates the minimum distance between two axis-aligned bounding boxes
        Args:
            box1: (x_min, y_min, x_max, y_max) of the first box
            box2: (x_min, y_min, x_max, y_max) of the second box
        Returns:
            Returns minimum distance between box1 and box2 (0. if they overlap)
        """

        x1, y1, x1b, y1b = box1
        x2, y2, x2b, y2b = box2
        left = x2b < x1
        right = x1b < x2
        bottom = y2b < y1
        top = y1b < y2
        if top and left:
            return dist((x1, y1b), (x2b, y2))
        elif left and bottom:
            return dist((x1, y1), (x2b, y2b))
        elif bottom and right:
            return dist((x1b, y1), (x2, y2b))
        elif right and top:
            return dist((x1b, y1b), (x2, y2))
        elif left:
            return x1 - x2b
        elif right:
            return x2 - x1b
        elif bottom:
            return y1 - y2b
        elif top:
            return y2 - y1b
        else:  # rectangles intersect
            return 0.
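A minimal usage sketch, assuming dist is plain Euclidean distance (e.g. math.dist) and the boxes are (x_min, y_min, x_max, y_max) tuples:

from math import dist  # assumption: the snippet's dist is plain Euclidean distance

box_a = (0, 0, 1, 1)   # (x_min, y_min, x_max, y_max)
box_b = (3, 4, 5, 6)   # lies above and to the right of box_a
# the nearest corners are (1, 1) and (3, 4), so on any instance obj:
# obj.rect_min_distance(box_a, box_b) == dist((1, 1), (3, 4)) ~= 3.606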
Example #2
def kNN(gallery_set, probe_set, meanface, eigenfaces, gallery_labels,
        probe_labels, numPCA):
    '''
    gallery_set, probe_set : np.array
        flattened gallery / probe face images
    meanface : np.array
        average face from training data
    eigenfaces : np.array
        eigenfaces from training data
    gallery_labels, probe_labels : array-like
        subject labels for the gallery and probe images
    numPCA : int
        number of principal components to project onto
    '''

    Omega_in = data_projection(probe_set, numPCA, eigenfaces, meanface)
    Omega_tr = data_projection(gallery_set, numPCA, eigenfaces, meanface)

    # try to match a gallery image
    correct = 0
    start = timer()
    for i in range(0, len(probe_labels)):
        err_k = float('inf')
        match = None
        for j in range(0, len(gallery_labels)):
            d = dist(Omega_in[i], Omega_tr[j])
            if d < err_k:
                err_k = d
                # print(d)
                # print(gallery_labels[j], probe_labels[i])
                match = j
        if gallery_labels[match] == probe_labels[i]:
            correct += 1
    end = timer()

    print("Principal Components = {}; ttl time = {:.5f}s; Percent correct = {:.2f}%".format(
        numPCA, end - start,
        float(correct) / len(probe_labels) * 100))
    return float(correct) / len(probe_labels) * 100, end - start
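The helper data_projection is not shown in these examples; a plausible sketch of it, assuming the images are flattened row vectors and eigenfaces stores one eigenface per row:

import numpy as np

def data_projection(images, numPCA, eigenfaces, meanface):
    # hypothetical reconstruction of the missing helper: subtract the mean
    # face, then project each image onto the first numPCA eigenfaces
    centered = np.asarray(images) - meanface      # shape (n_images, n_pixels)
    return centered @ eigenfaces[:numPCA].T       # shape (n_images, numPCA)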
Example #3
def calculate_enthusiasm(l_eye,r_eye,l_eyebrow,r_eyebrow):
    if len(l_eye)==6 and len(r_eye)==6 and len(l_eyebrow)==5 and len(r_eyebrow)==5:
        r_enthu_dist = dist(r_eye[2],r_eyebrow[3])
        l_enthu_dist = dist(l_eye[2],l_eyebrow[3])
        enthusiasm_dist = (r_enthu_dist+l_enthu_dist)/2.0
        return enthusiasm_dist
    else:
        print("Error in eye or eyebrow shape")
        return -1
Example #4
    def closest(self, item):
        best_dist = dist(self.X_train[0], item)
        best_index = 0

        for i in range(1, len(self.X_train)):
            distance = dist(self.X_train[i], item)
            if distance < best_dist:
                best_dist = distance
                best_index = i
        return self.Y_train[best_index]
Example #5
def calculate_EAR(eye):
    if len(eye)==6:
        width = dist(eye[0],eye[3])
        A = dist(eye[1],eye[5])
        B = dist(eye[2],eye[4])
        EAR = (A+B)/width
        return EAR
    else:
        print("Error in eye shape")
        return -1
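A minimal usage sketch, assuming dist is Euclidean distance and eye holds the six (x, y) landmarks of one eye in the standard dlib ordering:

from math import dist  # assumed Euclidean distance helper

open_eye = [(0, 2), (2, 4), (4, 4), (6, 2), (4, 0), (2, 0)]  # hypothetical landmarks
# width = 6, A = 4, B = 4, so calculate_EAR(open_eye) == 8 / 6 ~= 1.33;
# the ratio drops towards 0 as the eye closes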
Example #6
    def _shuffle(self, items, ax):
        items_count = len(items)
        ra = items[0].a
        rb = items[0].b
        n_max = 20

        motions_dist = 0
        for _ in range(n_max):
            for ip in range(items_count):
                item = items[ip]
                if not item.is_moved_enought():
                    way_to_update = random.randint(1, 3)
                    if way_to_update == 1:
                        new_p = item.try_to_move(ra, 0, 0, 0, ax, 0, ax)
                    if way_to_update == 2:
                        new_p = item.try_to_move(0, rb, 0, 0, ax, 0, ax)
                    if way_to_update == 3:
                        new_p = item.try_to_move(ra, rb, 0, 0, ax, 0, ax)

                    if self._is_valid_position(items, new_p):
                        d = dist([new_p.x, new_p.y], [item.x, item.y])
                        new_p.add_walked_dist(d)
                        motions_dist += d
                        items[ip] = new_p

            if motions_dist > items_count * ra:
                break

        return items
Example #7
 def circle(self, location):
     sqxs = range(max(0, location[0] - self.irad),
                  min(256, location[0] + self.irad + 1))
     sqys = range(max(0, location[1] - self.irad),
                  min(256, location[1] + self.irad + 1))
     square = [(x, y) for x in sqxs for y in sqys]
     return [p for p in square if dist(p, location) <= self.rad]
Example #8
def calculate_frown(l_eyebrow,r_eyebrow):
    if len(l_eyebrow)==5 and len(r_eyebrow)==5:
        frown_width = dist(l_eyebrow[0],r_eyebrow[4])
        return frown_width
    else:
        print("Error in eyebrow shape")
        return -1
Example #9
    def train_star_space(self):
        train_positive_input_batches, train_positive_batch_targets, train_negative_batch_targets, train_dummy_outputs,\
             test_positive_input_batches, test_positive_batch_targets, test_negative_batch_targets, test_dummy_outputs = self.prepare_features_targets()

        self.test_positive_input_batches = test_positive_input_batches
        self.test_positive_batch_targets = test_positive_batch_targets
        self.test_negative_batch_targets = test_negative_batch_targets

        self.build_model_architecture()
        print(self.model.summary())

        self.model.fit([train_positive_input_batches, train_positive_batch_targets, train_negative_batch_targets], train_dummy_outputs, epochs = 10,\
         validation_data = ([test_positive_input_batches, test_positive_batch_targets, test_negative_batch_targets], test_dummy_outputs))

        self.target_encodings = {
            target_id:
            self.target_encoder_model.predict(np.array([target_id]))[0, 0, :]
            for target_id in range(10)
        }

        d = {}
        for i in range(10):
            for j in range(10):
                if i != j and f'{j}_{i}' not in d:
                    d[f'{i}_{j}'] = 1 - dist(self.target_encodings[i],
                                             self.target_encodings[j])

        print({k: v for k, v in sorted(d.items(), key=lambda item: item[1])})
Example #10
def calculate_mouth(mouth):
    if len(mouth)==20:
        mouth_dist = dist(mouth[14],mouth[18])
        return mouth_dist
    else:
        print("Error in mouth shape")
        return -1
Example #11
    def distance_matrix(self, gdx=None, metric='jaccard'):
        from itertools import combinations
        binary = ['jaccard', 'hamming']
        if metric in binary:
            if gdx:
                M = self.E[:, gdx]
            else:
                M = self.E[:, :]
        else:
            if gdx:
                M = self.M[:, gdx]
            else:
                M = self.M[:, :]

        k = self.M.shape[0]
        D = np.ones((k, k))
        np.fill_diagonal(D, 1)
        comb = combinations(range(k), 2)
        if metric == 'jaccard':
            dist = jaccard
        elif metric == 'hamming':
            from scipy.spatial.distance import hamming as dist
        elif metric == 'pearsonr':
            from scipy.stats import pearsonr
            dist = correlation_dist
        for (i, j) in comb:
            r = dist(M[i, :], M[j, :])
            D[i, j] = r
            D[j, i] = r

        return D
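correlation_dist is referenced above but not defined in the snippet; a plausible sketch, assuming it turns the Pearson correlation into a dissimilarity:

from scipy.stats import pearsonr

def correlation_dist(u, v):
    # hypothetical helper matching the name used above: 0 for perfectly
    # correlated vectors, 2 for perfectly anti-correlated ones
    r, _ = pearsonr(u, v)
    return 1.0 - r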
Example #12
def minimum_distance(v, w, p, tolerance=1):
    """Return the minimum distance between line segment (v, w) and point p."""
    l2 = dist(v, w)  # segment length |w - v|
    if l2 == 0.0:
        return dist(p, v)

    # Consider the line extending the segment, parameterized as v + t (w - v).
    # Project p onto that line: t = [(p - v) . (w - v)] / |w - v|^2,
    # computed here by dividing both factors by l2.
    t = np.dot((p - v) / l2, (w - v) / l2)
    if t < 0.0:
        return dist(p, v) + tolerance  # beyond the 'v' end of the segment
    elif t > 1.0:
        return dist(p, w) + tolerance  # beyond the 'w' end of the segment

    projection = v + t * (w - v)  # projection falls on the segment
    return dist(p, projection)
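A quick usage sketch, assuming dist is Euclidean distance and the inputs are NumPy arrays so the vector arithmetic works:

import numpy as np
from math import dist  # assumed Euclidean distance helper

v, w = np.array([0.0, 0.0]), np.array([4.0, 0.0])  # segment endpoints
p = np.array([2.0, 3.0])                            # query point
# t = 0.5, so the projection (2, 0) lies on the segment and the result is 3.0
print(minimum_distance(v, w, p))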
Example #13
    def onclick(event):
        print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
              (event.button, event.x, event.y, event.xdata, event.ydata))

        node, d = min([(p, dist(p[:2], (event.xdata, event.ydata)))
                       for p in ts.g.nodes_iter()],
                      key=lambda x: x[1])
        print(node, d)
        assert ts.g.has_node(node)

        with open(policy_filename, 'a') as fout:
            print('        {},'.format(node), file=fout)
Example #14
def face_detect(img, U, Omega_training, T_0, T_1):
    '''
    Returns the predicted subject if recognised
    else returns 0 for a non-face, -1 for unknown face
    '''
    I = img.flatten()
    I = I - m
    Omega_I = np.dot(U.T, I)
    I_R = np.dot(U, Omega_I)
    d_0 = dist(I_R, I)
    print('d_0 is %d' % d_0)
    dist_array = [dist(Omega_I, Omega) for Omega in Omega_training]
    res = min(dist_array)
    print('d_1 is %d' % res)
    index = Img_dict[dist_array.index(res)]
    if d_0 > T_0:
        return 0
    else:
        if res > T_1:
            return -1
        return index
Example #15
 def onclick(event):
     print('button=%d, x=%d, y=%d, xdata=%f, ydata=%f' %
           (event.button, event.x, event.y, event.xdata, event.ydata))

     node, d = min([(p, dist(p[:2], (event.xdata, event.ydata)))
                    for p in ts.g.nodes_iter()],
                   key=lambda x: x[1])
     print(node, d)
     assert ts.g.has_node(node)

     with open(policy_filename, 'a') as fout:
         print('        {},'.format(node), file=fout)
Example #16
def astar(start, goal, grid):
    openset = set()
    closedset = set()
    current = grid[start]
    openset.add(current)

    #While the open set is not empty
    while openset:
        current = min(openset, key=lambda o: o.G + o.H
                      )  # current is item with lowest F score in openset

        #If it is the item we want, retrace the path and return it
        if current.point == goal:
            path = []
            while current.parent:
                path.append(current)
                current = current.parent
            path.append(current)
            return path[::-1]

        openset.remove(current)
        closedset.add(current)

        for node in children(current.point, grid):
            if node in closedset:
                continue

            if node in openset:
                new_g = current.G + dist(current.point, node.point)
                if node.G > new_g:
                    node.G = new_g
                    node.parent = current
            else:
                node.G = current.G + dist(current.point, node.point)
                node.H = dist(node.point, goal)
                node.parent = current
                openset.add(node)

    #Throw an exception if there is no path
    raise ValueError('No Path Found')
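The node objects themselves are not shown; a minimal sketch of what the grid entries are assumed to look like, purely illustrative, with the attribute names astar() relies on:

class Node:
    # hypothetical grid cell carrying the fields used by astar()
    def __init__(self, point):
        self.point = point    # (x, y) coordinate of the cell
        self.G = 0            # cost of the best known path from the start
        self.H = 0            # heuristic estimate of the cost to the goal
        self.parent = None    # predecessor on the best known path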
Example #17
    def shuffle_extended_circles(self):
        if self.verbose: print("circles shuffling start.")
        ranges = self.ranges
        size = self.circle_radius
        extended_circles_count = len(self.extended_circles)

        shuffled_circles = deepcopy(self.extended_circles)

        shuffle_step_count = self.shuffles_count
        print_step = int(shuffle_step_count // 10)

        for shuffle_step in range(shuffle_step_count):
            if shuffle_step % print_step == 0:
                if self.verbose:
                    print(
                        f"Percent done: {int(shuffle_step // print_step) * 10}%."
                    )
                if self.verbose: print(f"Shuffles done: {shuffle_step}")

            for i in range(extended_circles_count):
                current_circle = deepcopy(shuffled_circles[i])
                index = current_circle.pop('index', None)

                for k, v in current_circle.items():
                    di = (random.random() - 1 / 2) * size
                    new_k = v + di
                    if new_k - size < 0:
                        current_circle[k] = size
                    elif new_k + size > ranges[k]:
                        current_circle[k] = ranges[k] - size
                    else:
                        current_circle[k] = new_k

                current_circle['index'] = index
                is_intersect = False
                for j in [*range(i), *range(i + 1, extended_circles_count)]:
                    is_intersect = dist(
                        itemgetter('x', 'y')(current_circle),
                        itemgetter('x', 'y')(shuffled_circles[j])) <= 2 * size
                    if is_intersect:
                        break

                if not is_intersect:
                    shuffled_circles[i] = current_circle

        self.shuffled_extended_circles = shuffled_circles
        if self.verbose: print("circles shuffling end.\n")
        return shuffled_circles
Example #18
def update_routes_decmcts(netlogo,
                          cars,
                          GRID_SIZE,
                          network,
                          comm_radius,
                          initial=True):
    #First Loop Updates Paths for the individual car
    #only update this the first time to save on overhead
    if initial:
        for car in cars:
            if car.stopped:  # and car.direction == 'east':
                route = UCTPlayGame(car, GRID_SIZE)
                car.push_route_netlogo(netlogo, route, mode='both')
    else:
        #Second Loop Communicates route to neighbors and updates its own route
        comm_rad = comm_radius  #communications radius for cars ~1.5 road lengths
        dist_array = np.zeros((len(cars), len(cars)))
        for i in range(len(cars)):
            for j in range(len(cars)):
                if i != j:
                    if dist(cars[i].location, cars[j].location) <= comm_rad:
                        dist_array[i][j] = 1
        neighbor_cars = []
        for i, car in enumerate(cars):
            if car.stopped:  # or congested:# and
                #if car is near the end, no need to communicate
                if len(car.remaining_route) <= 1:
                    route = UCTPlayGame(car, GRID_SIZE)
                    car.push_route_netlogo(netlogo, route, mode='both')
                    continue

                # cars only communicate if they sense congestion
                congested = look_ahead(
                    car, network, GRID_SIZE
                )  # checks to see if the upcoming routes are congested
                if congested:
                    dist_cars = dist_array[i]
                    # used to find neighbors to communicate with, could also include degradation
                    neighbor_cars = [
                        cars[j] for j, distance in enumerate(dist_cars)
                        if distance != 0
                    ]
                    if len(neighbor_cars) > 0:
                        route = UCTPlayGame(car, GRID_SIZE, neighbor_cars)
                        car.push_route_netlogo(netlogo, route, mode='both')
                    else:
                        continue
Example #19
 def cam_to_raft(self, camcoord):
     '''
     For a given set of coordinates camcoord = (cam_x, cam_y) in the camera coordinate system, returns:
         - the ID of the raft whose center is the closest to camcoord
         - the corresponding coordinates in the raft frame, where raftcoord = (raft_x, raft_y) = (0,0) at the raft centre
     '''
     self.camcoord = camcoord
     self.dist2raft = {}
     for raft in self.raft_list:
         #            print(self.camcoord, self.raftcentercoord[raft])
         self.dist2raft[raft] = dist(camcoord, self.raftcentercoord[raft])
     self.raft_id = min(self.dist2raft, key=self.dist2raft.get)
     self.raftcoord = np.empty(2)
     self.raftcoord[0] = self.camcoord[0] - self.raftcentercoord[
         self.raft_id][0]
     self.raftcoord[1] = self.camcoord[1] - self.raftcentercoord[
         self.raft_id][1]
Example #20
def get_MDS(data, n_components=2, metric="euclidean"):
    """
        Computes Multi-Dimensional Scaling (MDS) of the data using the given
        distance metric
    """
    # Compute the distance between every pair of points
    dist_mat = dist(data, data, metric=metric)  # N x N

    # Calculate the matrix H
    N = data.shape[0]
    H = np.eye(N) - np.ones((N, N)) / N

    # Apply double centering
    S = -0.5 * np.dot(np.dot(H, np.power(dist_mat, 2)), H)

    # Get PCA
    return get_pca(S, n_components=n_components, center_data=False)
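Here dist must return a full pairwise distance matrix; its call signature matches scipy.spatial.distance.cdist, which is an assumption. The double-centering step then looks like:

import numpy as np
from scipy.spatial.distance import cdist as dist  # assumed pairwise-distance helper

rng = np.random.default_rng(0)
data = rng.normal(size=(50, 5))
D = dist(data, data, metric="euclidean")     # N x N pairwise distances
H = np.eye(50) - np.ones((50, 50)) / 50      # centering matrix
S = -0.5 * H @ (D ** 2) @ H                  # Gram matrix handed to get_pca above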
Example #21
def solve(eqA, eqB):
    points = []
    for eqa in eqA:
        for eqb in eqB:
            a = [[1, -eqa[0]], [1, -eqb[0]]]
            b = [eqa[1], eqb[1]]
            x, y = np.linalg.solve(a, b)
            points += [(x, y)]
    filtered_points = []
    for a in points:
        nearest = []
        for b in points:
            if dist(a,b) < 20:
                nearest += [b]
        x, y = [ int(_) for _ in np.mean(nearest, axis=0) ]
        filtered_points += [(x, y)]
    return filtered_points
Example #22
    def evt_motion(self, evt):
        if self._mode == 'dragging':

            if evt.inaxes != self.ax:
                self.reset()
                return

            x,y = evt.xdata,evt.ydata

            if self.drag_patch is None:
                self.drag_patch = pl.Circle((x,y), radius=self.r_init, edgecolor=(1,.6,.2,.8), facecolor=(1,1,1,.2), lw=1.5)
                self.drag_pos0 = (x,y)
                self.ax.add_patch(self.drag_patch)

            else:
                dx = x-self.drag_pos0[0]
                dy = y-self.drag_pos0[1]
                dp = np.sqrt(dx**2 + dy**2)
                #dp = [dx,dy][np.argmax(np.abs([dx,dy]))]
                new_r = max(0, self.r_init+dp*self.r_per_pix)
                self.drag_patch.set_radius(new_r)

        elif self._mode == 'remove':

            if evt.inaxes != self.ax:
                return
            x,y = evt.xdata,evt.ydata

            if self.rois is None or len(self.rois) == 0:
                return
            centers = [p.center for p in self.patches]
            best = np.argmin([dist((x,y), c) for c in centers])

            self.update_patches()
            self.patches[best].set_color('red')
            self.patches[best].set_alpha(1.)
            self.fig.canvas.draw()

        else:
            return

        self.fig.canvas.draw()
Example #23
    def predict_class(self, input_image=None):

        actual_target = None
        if input_image is None and self.test_positive_input_batches is not None:
            random_idx = np.random.randint(
                0, len(self.test_positive_input_batches))
            input_image = self.test_positive_input_batches[
                random_idx, :].reshape(1, 1, 784)
            actual_target = self.test_positive_batch_targets[random_idx]

        if self.input_encoder_model:
            input_encodings = self.input_encoder_model.predict(input_image)[
                0, 0, :]

        distance_dict = {
            i: 1.0 - dist(self.target_encodings[i], input_encodings)
            for i in range(10)
        }

        return distance_dict, actual_target
Example #24
def output_func(img, U, Omega_training, T_0, T_1):
    sh = img.shape
    I = img.flatten()
    I = I - m
    I_res = I.reshape(sh)
    fig, (ax1, ax2) = plt.subplots(1, 2)
    ax1.set_title('I - m face')
    im1 = ax1.imshow(I_res, plt.cm.gray)
    Omega_I = np.dot(U.T, I)
    print('PCA coefficients are \n')
    print(Omega_I)
    I_R = np.dot(U, Omega_I)
    Ir_res = I_R.reshape(sh)
    ax2.set_title('Reconstructed face I_r')
    im2 = ax2.imshow(Ir_res, plt.cm.gray)
    dist_array = [dist(Omega_I, Omega) for Omega in Omega_training]
    print('The distances are \n')
    print(dist_array)
    plt.show()
    print('\n\n\n\n\n\n')
Example #25
    def raft_to_sensor(self, raftcoord):
        '''
        For a given set of coordinates raftcoord = (raft_x, raft_y) in the raft coordinate system, returns:
            - the ID of the ccd whose center is the closest to raftcoord
            - the corresponding coordinates in the ccd frame, where (slotcoord_x, slotcoord_y) = (0,0) at the CCD centre
        '''
        self.raftcoord = raftcoord
        self.dist2sensor = {}
        for sensor in self.sensor_list:
            #            print(self.camcoord, self.raftcentercoord[raft])
            self.dist2sensor[sensor] = dist(raftcoord,
                                            self.sensorcentercoord[sensor])
        self.sensor_id = min(self.dist2sensor, key=self.dist2sensor.get)
        self.sensorcoord = np.empty(2)
        self.sensorcoord[0] = self.raftcoord[0] - self.sensorcentercoord[
            self.sensor_id][0]
        self.sensorcoord[1] = self.raftcoord[1] - self.sensorcentercoord[
            self.sensor_id][1]

        return self.sensor_id, self.sensorcoord
Example #26
    def compare_faces(self, encoding):
        # If the encoding matches a stored face, return its id; otherwise return 'unknown'
        edists = []
        if len(self.encodings) == 0:
            return None
        else:

            for old_id, old_encoding in self.encodings.items():
                edist = dist(encoding, old_encoding)
                print(edist, old_id)
                edists.append(edist)

            mindist = min(edists)
            if mindist > float(threshold):
                return 'unknown'
            else:
                names = list(self.encodings.keys())
                minID = names[edists.index(mindist)]

            return minID
Example #27
 def converge_txt(s):
     rms = []
     val = []
     headings = ["Iter"]
     headings.extend(["Orb-" + str(i + 1) for i in range(10)])
     headings.extend(["Total"])
     for cnt in [[i, i + 1] for i in range(0, s.shape[0] - 1)]:
         tmp = [
             str(np.array(cnt) + 1).replace("[", "").replace("]",
                                                             "").replace(
                                                                 " ", "->")
         ]
         tmp.extend([
             dist([s[cnt[0]][norb]], [s[cnt[1]][norb]])[0]
             for norb in range(10)
         ])
         tmp.extend([np.sum(tmp[1:])])
         rms.append(tmp)
         val.append(tmp[1:])
     print(tabulate(rms, headers=headings))
     fprint(tabulate(rms, headers=headings))
     return (np.array(val))
Example #28
def shape_context_shape(pc,
                        xr,
                        yr,
                        groups=None,
                        sampls=None,
                        r_inner=0.125,
                        r_outer=2,
                        nbins_r=5,
                        nbins_theta=12):
    nbins = nbins_r * nbins_theta

    def get_angle(p1, p2):
        """Return angle in radians"""
        return math.atan2((p2[1] - p1[1]), (p2[0] - p1[0]))

    def compute_one(pt, points, mean_dist):

        distances = np.array([euclidean(pt, p) for p in points])
        r_array_n = distances / mean_dist

        r_bin_edges = np.logspace(np.log10(r_inner), np.log10(r_outer),
                                  nbins_r)

        r_array_q = np.zeros(len(points))
        for m in xrange(nbins_r):
            r_array_q += (r_array_n < r_bin_edges[m])

        fz = r_array_q > 0

        def _get_angles(x):
            # pairwise angles between all points (helper is not used below)
            result = np.zeros((len(x), len(x)))
            for i in xrange(len(x)):
                for j in xrange(len(x)):
                    result[i, j] = get_angle(x[i], x[j])
            return result

        theta_array = np.array([get_angle(pt, p) for p in points])
        # 2Pi shifted
        theta_array_2 = theta_array + 2 * math.pi * (theta_array < 0)
        theta_array_q = 1 + np.floor(theta_array_2 /
                                     (2 * math.pi / nbins_theta))

        sn = np.zeros((nbins_r, nbins_theta))
        for j in xrange(len(points)):
            if (fz[j]):
                sn[int(r_array_q[j]) - 1, int(theta_array_q[j]) - 1] += 1

        return sn.reshape(nbins)

    rsi = binary_shape(pc, xr, yr, groups)
    pixels = rsi.pixels.squeeze()
    pts = np.argwhere(pixels > 0)
    if sampls:
        pts = pts[np.random.randint(0, pts.shape[0], sampls)]
    mean_dist = dist([0, 0], [xr, yr]) / 2

    sc = np.zeros((xr, yr, nbins))

    for x in xrange(xr):
        for y in xrange(yr):
            sc[x, y, :] = compute_one(np.array([x, y]), pts, mean_dist)

    return Image.init_from_rolled_channels(sc)
Example #29
    def find_target(self, frame):
        '''returns yaw, centre zero, positive right. None if no target found'''
        # Convert BGR to HSV
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        # define ranges of orange and red colours in HSV
        # HUE IS 0 to 180
        lower_orange = np.array([5, 150, 100])
        upper_orange = np.array([20, 255, 255])
        low_lower_red = np.array([2, 100, 70])
        low_upper_red = np.array([5, 150, 150])
        high_lower_red = np.array([160, 100, 30])
        high_upper_red = np.array([200, 240, 240])

        # Threshold the HSV image to keep only the red colours
        lmask = cv2.inRange(hsv, low_lower_red, low_upper_red)
        hmask = cv2.inRange(hsv, high_lower_red, high_upper_red)
        mask = cv2.bitwise_or(lmask, hmask)
        # Bitwise-AND mask and original image
        res = cv2.bitwise_and(frame, frame, mask=mask)

        er_mask = cv2.erode(mask, None, iterations=self.erode_iterations)
        dl_mask = cv2.dilate(er_mask, None, iterations=self.dilate_iterations)

        # find contours in the thresholded mask
        cnts = cv2.findContours(dl_mask.copy(), cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
        cnts = cnts[1]
        image = frame.copy()
        # loop over the contours
        target_list = []

        for c in cnts:
            # compute the center of the contour
            M = cv2.moments(c)
            cX = int(M["m10"] / M["m00"])
            cY = int(M["m01"] / M["m00"])

            # draw the contour and center of the shape on the image
            cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
            cv2.circle(image, (cX, cY), 7, (255, 255, 255), -1)
            if c.size < self.target_size:
                cv2.putText(image, "small", (cX - 20, cY - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
            else:
                cv2.putText(image, "large", (cX - 20, cY - 20),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 255), 2)
                target_list.append([c.size, cX, cY])

        targets = np.array(target_list)
        if len(targets) == 0:
            return None
        target_idx = np.argmax(targets, axis=0)
        if len(target_idx) > 1:
            target_idx = target_idx[0]
        largest_target = [
            int(targets[target_idx][1]),
            int(targets[target_idx][2])
        ]
        #largest_target = targets.max(axis=0)
        target_x = largest_target[1]
        image_height, image_width, _ = [
            frame.shape[0], frame.shape[1], frame.shape[2]
        ]
        bottom_point = np.array([round(image_width / 2), round(image_height)])

        cv2.line(image, (int(largest_target[0]), int(largest_target[1])),
                 (int(bottom_point[0]), int(bottom_point[1])), (255, 255, 255),
                 17, -1)
        if self.display_windows:
            cv2.imshow('im_with_keypoints',
                       cv2.resize(image, dsize=(0, 0), fx=0.5, fy=0.5))
            cv2.imshow('mask', cv2.resize(mask, dsize=(0, 0), fx=0.5, fy=0.5))
        pixel_x_location = target_x - image_width / 2
        yaw_offset = pixel_x_location * self.heading_scale_factor
        z = round(dist(largest_target, bottom_point))
        x = image_height - largest_target[1]

        self.angle = np.arcsin(x / z)
        return
Example #30
def openpose_face_detector(posePtr, threshold):
    point_top_left = np.zeros(2)
    face_size = 0.0
    score = 0.0

    points_used = set()

    neckScoreAbove = posePtr[1, 2] > threshold
    headNoseScoreAbove = posePtr[0, 2] > threshold
    lEarScoreAbove = posePtr[16, 2] > threshold
    rEarScoreAbove = posePtr[17, 2] > threshold
    lEyeScoreAbove = posePtr[14, 2] > threshold
    rEyeScoreAbove = posePtr[15, 2] > threshold

    counter = 0.0

    if neckScoreAbove and headNoseScoreAbove:
        if (lEyeScoreAbove == lEarScoreAbove
                and rEyeScoreAbove == rEarScoreAbove
                and lEyeScoreAbove != rEyeScoreAbove):
            if lEyeScoreAbove:
                point_top_left += (posePtr[14, 0:2] + posePtr[16, 0:2] +
                                   posePtr[0, 0:2]) / 3.0
                face_size += 0.85 * (dist(posePtr[14, 0:2], posePtr[16, 0:2]) +
                                     dist(posePtr[0, 0:2], posePtr[16, 0:2]) +
                                     dist(posePtr[14, 0:2], posePtr[0, 0:2]))
                points_used = points_used.union([0, 14, 16])
            else:
                point_top_left += (posePtr[15, 0:2] + posePtr[17, 0:2] +
                                   posePtr[0, 0:2]) / 3.0
                face_size += 0.85 * (dist(posePtr[15, 0:2], posePtr[17, 0:2]) +
                                     dist(posePtr[0, 0:2], posePtr[17, 0:2]) +
                                     dist(posePtr[15, 0:2], posePtr[0, 0:2]))
                points_used = points_used.union([0, 15, 17])

        else:
            point_top_left += (posePtr[1, 0:2] + posePtr[0, 0:2]) / 2.0
            face_size += 2.0 * dist(posePtr[1, 0:2], posePtr[0, 0:2])
            points_used = points_used.union([0, 1])

        counter += 1.0

    if lEyeScoreAbove and rEyeScoreAbove:
        point_top_left += (posePtr[14, 0:2] + posePtr[15, 0:2]) / 2.0
        face_size += 3.0 * dist(posePtr[14, 0:2], posePtr[15, 0:2])
        counter += 1.0
        points_used = points_used.union([14, 15])

    if lEarScoreAbove and rEarScoreAbove:
        point_top_left += (posePtr[16, 0:2] + posePtr[17, 0:2]) / 2.0
        face_size += 2.0 * dist(posePtr[16, 0:2], posePtr[17, 0:2])
        counter += 1.0
        points_used = points_used.union([16, 17])

    if counter > 0:
        point_top_left /= counter
        face_size /= counter
        score = np.mean(posePtr[list(points_used), 2])

    return region.RectangleRegion(
        point_top_left[0] - face_size / 2,
        point_top_left[1] - face_size / 2,
        face_size,
        face_size,
        score,
    )
Example #31
 def is_intersect(self, other):
     self_xy = [self.x, self.y]
     other_xy = [other.x, other.y]
     return dist(self_xy, other_xy) <= self.r + other.r
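A tiny usage sketch, assuming the circle objects expose x, y and r attributes and dist is Euclidean distance:

from math import dist  # assumed Euclidean distance helper
from types import SimpleNamespace

a = SimpleNamespace(x=0.0, y=0.0, r=2.0)   # hypothetical circle-like objects
b = SimpleNamespace(x=3.0, y=0.0, r=1.5)
# centres are 3.0 apart and the radii sum to 3.5, so the check below is True
print(dist([a.x, a.y], [b.x, b.y]) <= a.r + b.r)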