Example 1
def calculate_transformation_kabsch(src_points, dst_points):
	"""
	Calculates the optimal rigid transformation from src_points to
	dst_points (in the least-squares sense)
	Parameters:
	-----------
	src_points: array
		(3,N) matrix
	dst_points: array
		(3,N) matrix
	
	Returns:
	-----------
	rotation_matrix: array
		(3,3) matrix
	
	translation_vector: array
		(3,1) matrix
	rmsd_value: float
	"""
	assert src_points.shape == dst_points.shape
	if src_points.shape[0] != 3:
		raise Exception("The input data matrix had to be transposed in order to compute transformation.")
		
	src_points = src_points.transpose()
	dst_points = dst_points.transpose()
	
	src_points_centered = src_points - rmsd.centroid(src_points)
	dst_points_centered = dst_points - rmsd.centroid(dst_points)

	rotation_matrix = rmsd.kabsch(src_points_centered, dst_points_centered)
	rmsd_value = rmsd.kabsch_rmsd(src_points_centered, dst_points_centered)

	translation_vector = rmsd.centroid(dst_points) - np.matmul(rmsd.centroid(src_points), rotation_matrix)

	return rotation_matrix.transpose(), translation_vector.transpose(), rmsd_value
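
A minimal usage sketch for the function above, assuming NumPy and the rmsd package are imported as in the snippet; the src/dst arrays and names below are illustrative only, not part of the original source:

# Usage sketch (illustrative data, not part of the original source)
src = np.random.rand(3, 10)                 # 3xN source points
true_t = np.array([[1.0], [2.0], [3.0]])    # 3x1 translation for the sketch
dst = src + true_t                          # corresponding destination points

R, t, err = calculate_transformation_kabsch(src, dst)
aligned = np.matmul(R, src) + t.reshape(3, 1)   # should closely match dst
print(err)                                       # RMSD after alignment, ~0 here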
Example 2
    def __init__(self, color):
        super(Prism, self).__init__()
        v = 8  # number of vertices

        self.rotate_flag = True
        self.beat = 0  # Helps with on_beat()

        self.vertex_rad = 20
        self.boundary = self.vertex_rad  # Radial boundary for registering a touch on a vertex

        # Generate RGB palette
        self.colors = generate_sub_palette(color, num_colors=v+3)
        self.edge_color = self.colors[-1]
        self.vertex_colors = self.colors[:-1]

        # Keep track of mouse position and time
        self.mouse_pos = Window.mouse_pos
        self.time = 0

        # Graphical elements (vertices and edges)
        self.vertices = {}
        for i in range(v):
            # Random position
            x_pos = randint(int(Window.width * 0.25), int(Window.width * 0.7))
            y_pos = randint(int(Window.height * 0.3), int(Window.height * 0.7))
            pos = x_pos, y_pos

            # Object to draw on screen
            vertex = Vertex(pos=pos, rgb=self.vertex_colors[i], rad=self.vertex_rad)

            # Store in dictionary for fast vertex lookup based on position
            self.vertices[pos] = vertex

        self.centroid = centroid(list(self.vertices.keys()))
        self.angle = 0
        self.rotate = Rotate(origin=self.centroid, angle=self.angle)

        self.original_vertices = self.vertices.copy()

        # Create a list where each element is a 2-tuple of the vertex points
        self.edges = []
        self._gen_edges(self.vertices.keys())

        self.add(PushMatrix())
        self.add(self.rotate)
        self._add_objects()
        self.add(PopMatrix())
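
The `centroid` helper called above is not shown in this snippet. A plausible implementation for a list of 2-D positions (an assumption; the project's own helper may differ) simply averages the coordinates:

# Hypothetical centroid helper for 2-D points; the real project helper may differ.
def centroid(points):
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    return sum(xs) / len(points), sum(ys) / len(points)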
Example 3
    def __init__(self, contour: Contour):
        super(ContourMeasurements, self).__init__()
        self._contour = contour
        self.area = cv2.contourArea(contour.points())
        self.centroid = utils.centroid(contour.points())
        self.contour_len = contour.len()
        self.arc_len = cv2.arcLength(contour.points(), closed=True)
        self.fitted_ellipse = FittedEllipse.fromContour(contour)

        if contour.len() >= 5:
            self.approx_points, self.approx_points_angles, self.tails, self.extreme_points = \
                ContourMeasurements.__approximate(contour.points())
        else:
            self.approx_points = ContourMeasurements.empty_points()
            self.approx_points_angles = np.empty((0, ), dtype=np.float32)
            self.tails = ContourMeasurements.empty_points()  # tails: points where the approximated contour turns by ~180 degrees
            self.extreme_points = ContourMeasurements.empty_points()
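
utils.centroid is not defined in this snippet. For OpenCV contours, one common way to compute a centroid is via image moments; the sketch below is illustrative only and may differ from the project's helper:

# Illustrative only: contour centroid from image moments.
import cv2

def contour_centroid(points):
    m = cv2.moments(points)
    if m["m00"] == 0:          # degenerate contour (zero area)
        return None
    return (m["m10"] / m["m00"], m["m01"] / m["m00"])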
Example 4
    # read config
    try:
        stream = open('config.yml', 'r')
        config = load(stream, Loader=Loader)
        stream.close()
    except Exception as e:
        print(str(e))
        raise Exception('ERROR: Failed to load yaml file config.yml')

    bounds = locations[config['location']]['bounds']
    if (args.location):
        latlong = args.location.split(',')
        location = {'lat': latlong[0], 'lng': latlong[1]}
    else:
        location = utils.centroid(bounds)
    
    if (args.find):
        if (not config['find'][criterionName].get('include')):
            config['find'][criterionName]['include'] = {}
        if (not config['find'][criterionName].get('exclude')):
            config['find'][criterionName]['exclude'] = {}

        data = {}
        allData = []
        if (args.cached):
            try:
                stream = open(criterionName + '.all.yml', 'r')
                origAllData = load(stream, Loader=Loader)
                stream.close()
            except Exception as e:
Example 5
        stream.close()
    except Exception as e:
        print(str(e))
        raise Exception('ERROR: Failed to load yaml file config.yml')

    location = locations[config['location']]['location']
    bounds = locations[config['location']]['bounds']

    if (args.geocode):
        #print('geocode location ' + args.geocode)
        geocode = gmaps.geocode(args.geocode)
        data = {}
        data[args.geocode] = {}
        data[args.geocode]['location'] = geocode[0]['geometry']['location']
        data[args.geocode]['bounds'] = geocode[0]['geometry']['bounds']
        data[args.geocode]['centroid'] = utils.centroid(data[args.geocode]['bounds'])
        print('data:')
        print(dump(data, default_flow_style=False, Dumper=Dumper))

    elif (args.eval):
        # initialize
        funcRecord = config['evaluation']['otherFunctions']['final']
        if (args.func):
            funcRecord = config['evaluation']['value'].get(args.func)
            if (not funcRecord):
                funcRecord = config['evaluation']['otherFunctions'].get(args.func)
            else:
                funcRecord['module'] = args.func
            if (not funcRecord):
                print('ERROR: -func does not refer to a valid function in the config file')
                sys.exit(1)
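
Both this example and the previous one pass a Google-Maps-style bounds record to utils.centroid. A hedged sketch of what such a helper might look like (the helper itself is not part of these snippets, and the real implementation may differ) is:

# Illustrative sketch: midpoint of a Google Maps bounds dict
# ({'northeast': {'lat': ..., 'lng': ...}, 'southwest': {...}}).
def bounds_centroid(bounds):
    ne, sw = bounds['northeast'], bounds['southwest']
    return {'lat': (ne['lat'] + sw['lat']) / 2.0,
            'lng': (ne['lng'] + sw['lng']) / 2.0}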
Example 6
c = 0
mrr = 0.0  # mean reciprocal rank accumulator (assumed; the full script initialises it before this loop)
f = open(dataset)
for l in f:
  if c < 1:
    c+=1
    continue
  else:
    fields=l.rstrip('\n').split('\t')
    nonce = fields[0]
    sentence = fields[1].replace("___","").split()
    print("--")
    print(nonce)
    print("SENTENCE:",sentence)

  if nonce in dm_dict:
    nonce_v = utils.centroid(dm_dict, sentence)
    nns = utils.sim_to_matrix(dm_dict, nonce_v, len(dm_dict)) 
    print("NEAREST NEIGHBOURS:",nns[:10])

    rr = 0
    n = 1
    for nn in nns:
      if nn == nonce:
        rr = n
      else:
        n+=1

    if rr != 0:
      mrr+=float(1)/float(rr)	
    print(rr,mrr)
    c+=1
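
Here utils.centroid receives a word-vector dictionary and a tokenized sentence, so it presumably averages (or sums) the vectors of the in-vocabulary words. A hedged sketch under that assumption:

# Illustrative sketch: "centroid" of the vectors of a sentence's in-vocabulary
# words. The real utils.centroid may normalise or weight terms differently.
import numpy as np

def sentence_centroid(dm_dict, sentence):
    vectors = [np.array(dm_dict[w]) for w in sentence if w in dm_dict]
    if not vectors:
        return None
    return np.mean(vectors, axis=0)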
Example 7
def task31():

    pkl_path = "boxesScores.pkl"
    video_path = '/home/mar/Desktop/M6/Lab1/AICity_data/train/S03/c010/vdo.avi'
    gt_path = 'ai_challenge_s03_c010-full_annotation.xml'
    threshold = 0.6  # minimum iou to consider the tracking between consecutive frames
    kill_time = 90  # number of frames without a match before a track is closed
    video = False
    showVid = True
    compute_score = True

    # Get the bboxes
    frame_bboxes = []
    with open(pkl_path, "rb") as openfile:
        while True:
            try:
                frame_bboxes.append(pickle.load(openfile))
            except EOFError:
                break
    frame_bboxes = frame_bboxes[0]
    # correct the data to the desired format
    aux_frame_boxes = []
    for frame_b in frame_bboxes:
        auxiliar,_ = zip(*frame_b)
        aux_frame_boxes.append(list(auxiliar))
    frame_bboxes = aux_frame_boxes

    # Once we have done the detection we can start with the tracking
    cap = cv2.VideoCapture(video_path)
    previous_frame = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)
    bbox_per_frame = []
    id_per_frame = []
    frame = frame_bboxes[0]  # load the bbox for the first frame
    # Since we evaluate the current frame and the consecutive, we loop for range - 1
    for Nframe in trange(len(frame_bboxes) - 1,desc="Tracking"):
        next_frame = frame_bboxes[Nframe + 1]
        current_frame = cv2.cvtColor(cap.read()[1], cv2.COLOR_BGR2GRAY)

        # apply optical flow to improve the bounding box and get better iou with the following frame
        # predict flow with block matching
        blockSize = 16
        searchArea = 96
        quantStep = 16
        method = 'cv2.TM_CCORR_NORMED'
        predicted_flow = opticalFlow.compute_block_matching(previous_frame, current_frame, 'backward', searchArea, blockSize,
                                                method, quantStep)

        # assign a new ID to each unassigned bbox
        for i in range(len(frame)):
            new_bbox = frame[i]

            # append the bbox to the list
            bbox_per_id = []
            bbox_per_id.append(list(new_bbox))
            bbox_per_frame.append(bbox_per_id)
            # append the id to the list
            index_per_id = []
            index_per_id.append(Nframe)
            id_per_frame.append(index_per_id)

        # we loop for each track and we compute the iou with each detection of the next frame
        for id in range(len(bbox_per_frame)):
            length = len(bbox_per_frame[id])
            bbox_per_id = bbox_per_frame[id]  # bboxes of a track
            bbox1 = bbox_per_id[length - 1]  # last bbox stored of the track
            index_per_id = id_per_frame[id]  # list of frames where the track appears

            vectorU = predicted_flow[int(bbox1[1]):int(bbox1[3]),int(bbox1[0]):int(bbox1[2]),0]
            vectorV = predicted_flow[int(bbox1[1]):int(bbox1[3]),int(bbox1[0]):int(bbox1[2]),1]
            dx = vectorU.mean()
            dy = vectorV.mean()
            # apply movemement to the bbox
            new_bbox1 = list(np.zeros(4))
            new_bbox1[0] = bbox1[0] + dx
            new_bbox1[2] = bbox1[2] + dx
            new_bbox1[1] = bbox1[1] + dy
            new_bbox1[3] = bbox1[3] + dy

            # don't do anything if the track is closed
            if index_per_id[-1] == -1:
                continue

            # get the list of ious, one with each detection of the next frame
            iou_list = []
            for detections in range(len(next_frame)):
                bbox2 = next_frame[detections]  # detection of the next frame
                iou_list.append(utils.iou(np.array(new_bbox1), bbox2))

            # break the loop if there are no more bboxes in the frame to track
            if len(next_frame) == 0:
                # kill_time control
                not_in_scene = Nframe - index_per_id[-1]  # number of frames since this object was last tracked
                if not_in_scene > kill_time:  # if it surpasses the kill_time, close the track by adding a -1
                    index_per_id.append(-1)
                break

            # assign the bbox to the closest track
            best_iou = max(iou_list)
            # if the max IoU is below the threshold, we assume this track has no correspondence in the next frame
            if best_iou > threshold:
                best_detection = [j for j, k in enumerate(iou_list) if k == best_iou]
                best_detection = best_detection[0]

                # append to the list the bbox of the next frame
                bbox_per_id.append(list(next_frame[best_detection]))
                index_per_id.append(Nframe + 1)

                # we delete the detection from the list in order to speed up the following comparisons
                del next_frame[best_detection]
            else:
                # kill_time control
                not_in_scene = Nframe - index_per_id[-1]  # number of frames since this object was last tracked
                if not_in_scene > kill_time:  # if it surpasses the kill_time, close the track by adding a -1
                    index_per_id.append(-1)

        frame = next_frame  # the next frame will be the current
        previous_frame = current_frame  # update the frame for next iteration

    if video:
        # Generate colors for each track
        id_colors = []
        for i in range(len(id_per_frame)):
            color = list(np.random.choice(range(256), size=3))
            id_colors.append(color)

        # Define the codec and create VideoWriter object
        vidCapture = cv2.VideoCapture(video_path)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('task2_1.avi', fourcc, 10.0, (1920,  1080))
        # for each frame draw rectangles to the detected bboxes
        for i in trange(len(frame_bboxes),desc="Video"):
            vidCapture.set(cv2.CAP_PROP_POS_FRAMES, i)
            im = vidCapture.read()[1]
            for id in range(len(id_per_frame)):
                ids = id_per_frame[id]
                if i in ids:
                    id_index = ids.index(i)
                    bbox = bbox_per_frame[id][id_index]
                    color = id_colors[id]
                    cv2.rectangle(im, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),
                                  (int(color[0]), int(color[1]), int(color[2])), 2)
                    cv2.putText(im, 'ID: ' + str(id), (int(bbox[0]), int(bbox[1]) - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (int(color[0]), int(color[1]), int(color[2])), 2)
            if showVid:
                cv2.imshow('Video', im)
            out.write(im)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        vidCapture.release()
        out.release()
        cv2.destroyAllWindows()

    if compute_score:
        # Load gt for plot
        reader = ReadData(gt_path)
        gt, num_iter = reader.getGTfromXML()

        # init accumulator
        acc = mm.MOTAccumulator(auto_id=True)

        # Loop for all frames
        for Nframe in trange(len(frame_bboxes),desc="Score"):

            # get the ids of the tracks from the ground truth at this frame
            gt_list = [item[1] for item in gt if item[0] == Nframe]
            gt_list = np.unique(gt_list)

            # get the ids of the detected tracks at this frame
            pred_list = []
            for ID in range(len(id_per_frame)):
                aux = np.where(np.array(id_per_frame[ID]) == Nframe)[0]
                if len(aux) > 0:
                    pred_list.append(int(ID))

            # compute the distance for each pair
            distances = []
            for i in range(len(gt_list)):
                dist = []
                # compute the ground truth bbox
                bboxGT = gt_list[i]
                bboxGT = [item[3:7] for item in gt if (item[0] == Nframe and item[1] == bboxGT)]
                bboxGT = list(bboxGT[0])
                # compute centroid GT
                centerGT = utils.centroid(bboxGT)
                for j in range(len(pred_list)):
                    # compute the predicted bbox
                    bboxPR = pred_list[j]
                    aux_id = id_per_frame[bboxPR].index(Nframe)
                    bboxPR = bbox_per_frame[bboxPR][aux_id]
                    # compute centroid PR
                    centerPR = utils.centroid(bboxPR)
                    d = utils.euclid_dist(centerGT, centerPR)  # euclidean distance
                    dist.append(d)
                distances.append(dist)

            # update the accumulator
            acc.update(gt_list, pred_list, distances)

        # Compute and show the final metric results
        mh = mm.metrics.create()
        summary = mh.compute(acc, metrics=['idf1'], name='IDF1:')
        strsummary = mm.io.render_summary(summary, formatters={'idf1': '{:.2%}'.format}, namemap={'idf1': 'idf1'})
        print(strsummary)
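
The distance matrix fed to the MOT accumulator is built from bounding-box centroids and Euclidean distances. Neither utils.centroid nor utils.euclid_dist is shown in this snippet; a plausible sketch, assuming [x1, y1, x2, y2] boxes, is:

# Hedged sketch of the helpers used above; the project's utils module may differ.
import math

def bbox_centroid(bbox):
    # center of an [x1, y1, x2, y2] box
    return ((bbox[0] + bbox[2]) / 2.0, (bbox[1] + bbox[3]) / 2.0)

def euclid_dist(p, q):
    return math.hypot(p[0] - q[0], p[1] - q[1])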
Example 8
    def __sort_points(self):
        centroid = utils.centroid(self.points)
        self.points.sort(key=lambda p: utils.polar_angle(centroid, p))
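
utils.polar_angle is not shown either; sorting points by polar angle around their centroid is typically done with atan2. A hedged sketch of a compatible helper (not the project's actual implementation):

# Illustrative polar-angle helper compatible with the sort above.
import math

def polar_angle(origin, point):
    return math.atan2(point[1] - origin[1], point[0] - origin[0])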