Example #1
    def Cast_Votes(self, results, distances, patch_real_centers):
        self.votes = {
            'Weight': [],
            'RealPosition': [],
            'ProjectedPoint': [],
            'Orientation': [],
            'Obj_id': []
        }

        for patch, neighbor in zip(*np.where(
                distances <= self.Neighboors_distance_threshold)):
            reading = self.CodeBookData[results[patch, neighbor]]
            self.votes['RealPosition'].extend(reading['Translation'] +
                                              patch_real_centers[patch])
            self.votes['Orientation'].extend(reading['euler_angles'])
            self.votes['Obj_id'].append(reading['obj_ids'])
            #weight = np.exp(-distances[patch, neighbor])  # from the original paper
            # Modification: rescale so a vote exactly at the threshold gets
            # weight 0.0001, giving more weight to the distance between features
            ratio = self.Neighboors_distance_threshold / np.abs(np.log(0.0001))
            weight = np.exp(-distances[patch, neighbor] / ratio)
            self.votes['Weight'].append(weight)

        #Project points
        if len(self.votes['RealPosition']) > 0:
            self.votes['ProjectedPoint'] = pat.from_3D_to_image(
                points=self.votes['RealPosition'], Intrins=self.Intrins)
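
The modified weighting is calibrated so that a perfect match (distance 0) gets weight 1 and a vote exactly at the distance threshold gets weight 0.0001. A minimal standalone check of that falloff (the threshold value here is illustrative):

import numpy as np

threshold = 0.5  # illustrative stand-in for Neighboors_distance_threshold
ratio = threshold / np.abs(np.log(0.0001))
for d in (0.0, threshold / 2, threshold):
    print(d, np.exp(-d / ratio))  # prints weights 1.0, 0.01, 0.0001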
Example #2
    def Cast_Votes(self, Positions, Orientations):
        # store the votes and their 2D projections in one step
        self.votes_regist = {
            'Position': Positions,
            'Orientation': Orientations,
            'Projection': pat.from_3D_to_image(
                points=Positions, Intrins=self.Intrins)[:, :2]
        }
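
pat.from_3D_to_image itself is not shown in these examples. As a point of reference, a plausible pinhole-projection sketch of what such a helper computes, assuming Intrins is a 3x3 intrinsic matrix and points is an (N, 3) array in camera coordinates (the names and shapes are assumptions, not the library's documented API):

import numpy as np

def project_points(points, K):
    # homogeneous pinhole projection: p = K @ X, then divide by depth
    p = np.asarray(points, dtype=float) @ np.asarray(K).T  # (N, 3)
    return p[:, :2] / p[:, 2:3]                            # (N, 2) pixels (u, v)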
Example #3
    def Extract_Features(self, RGBDimage):
        # Extract patches from the image
        Patches, patch_centers, patch_real = pat.patch_sampling(
            RGBD=RGBDimage,
            metric=self.patch_metric_size,
            spacing=self.patch_spacing,
            Intrins=self.Intrins,
            Extrins=self.Extrins,
            Workspace=self.Workspace)
        if not np.any(Patches):  #In case there are no patches extracted
            return [], [], []

        # convert patches to features with the CAE network
        Patches = self.CAE_Net.wrap_input(Patches)
        features = self.CAE_Net.encode(Patches)
        features = self.CAE_Net.unwrap_features(features)

        return features, patch_centers, patch_real
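
Extract_Features pairs with the codebook voting from Example #1: the encoded features are matched against stored codebook features with a k-nearest-neighbour search, and the resulting match indices and distances feed Cast_Votes. A hedged sketch of that glue, assuming a scikit-learn index built over the codebook features (detector, codebook_features, and rgbd_image are placeholder names, not from the source):

from sklearn.neighbors import NearestNeighbors

nn_index = NearestNeighbors(n_neighbors=5).fit(codebook_features)
features, patch_centers, patch_real = detector.Extract_Features(rgbd_image)
if len(features) > 0:
    distances, results = nn_index.kneighbors(features)  # both (N_patches, 5)
    detector.Cast_Votes(results, distances, patch_real)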
Example #4
for n, img in enumerate(Names):
    # colour image: OpenCV loads BGR, so reorder the channels to RGB
    bgr = cv2.imread(directory + '/' + img + '_crop.png', -1)
    b, g, r = cv2.split(bgr)
    rgb = cv2.merge([r, g, b])
    # depth image, expanded to (H, W, 1) so it stacks as a fourth channel
    depth = cv2.imread(directory + '/' + img + '_depthcrop.png', -1)
    depth = np.expand_dims(depth, axis=2)
    # compose the 4-channel RGBD image
    RGBD = np.concatenate((rgb, depth), axis=2)

    #Extract Patches
    Patches, patch_centers, patch_real = pat.patch_sampling(
        RGBD=RGBD,
        metric=50,
        spacing=10,
        Intrins=Intrinsic,
        Extrins=Extrinsic,
        Workspace=WorkSpace)

    #===============================================================
    #---------------------- Visualize ------------------------------
    #===============================================================
    pat.Visualize_Patches(axes=axes,
                          rgb_img=rgb,
                          Patches=Patches,
                          patch_centers=patch_centers)

    # only run the first run_times images, then stop
    if n % run_times == run_times - 1:
        print('RUNTIMEs break')
        break
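
The RGBD composition above stacks three colour channels and one depth channel into a single array. A quick sanity check of the resulting layout (the image size is illustrative):

import numpy as np

rgb = np.zeros((480, 640, 3), dtype=np.uint8)
depth = np.zeros((480, 640, 1), dtype=np.uint16)
RGBD = np.concatenate((rgb, depth), axis=2)
print(RGBD.shape)  # (480, 640, 4): channels 0-2 are RGB, channel 3 is depth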
Example #5
			bgr = cv2.imread(sample + '_crop.png', -1)
			# convert BGR (OpenCV default) to RGB
			b, g, r = cv2.split(bgr)
			rgb = cv2.merge([r, g, b])

			# get a depth sample from the dataset
			depth = cv2.imread(sample + '_depthcrop.png', -1)
			depth = np.expand_dims(depth, axis=2)

			# compose the RGBD image
			RGBD = np.concatenate((rgb, depth), axis=2)

			# generate a batch of input patches
			Patches, patch_centers, patch_real = pat.patch_sampling(RGBD=RGBD,
																	metric=50,
																	spacing=10,
																	Intrins=[],
																	Extrins=[],
																	Workspace=[])
			if not np.any(Patches):
				# in case there are no patches extracted
				print('No Patches:', i, sample)
				continue

			# wrap input
			inputs = CAE.wrap_input(Patches)

			# feed the CAE network
			outputs = CAE(inputs)
			# calculate the reconstruction loss
			loss = loss_fn(outputs, inputs)
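
The training excerpt stops at the loss computation. Assuming CAE is a torch.nn.Module and an optimizer is in scope (neither is shown in the source), the usual PyTorch update step would follow:

			optimizer.zero_grad()  # clear gradients from the previous batch
			loss.backward()        # backpropagate the reconstruction loss
			optimizer.step()       # update the autoencoder weights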
Example #6
    tmp_depth = copy.deepcopy(Kinect.imgDepth)  # copy the current depth frame
    tmp_rgbd = Kinect.get_RGBD()
    # tmp_rgb is used for display below but is not defined in this excerpt;
    # a plausible (assumed) definition takes the RGB channels of the RGBD frame:
    tmp_rgb = tmp_rgbd[:, :, :3].astype(np.uint8)

    #Estimate Objects in image------------------------------------
    start = time.time()
    position_predictions, rotation_predictions = Detector.Estimate_Objects_6DOF(
        RGBDimage=tmp_rgbd)
    if (not np.any(position_predictions)) or (
            not np.any(rotation_predictions)):
        # in case there are no predictions
        print('Bad sample!')
        continue
    end = time.time()
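    # report the elapsed estimation time (this line is illustrative, not in the original excerpt)
    print('6DOF estimation took %.3f s' % (end - start))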

    #project positions to image:
    projected = pat.from_3D_to_image(position_predictions)
    # Transform to the Baxter reference frame:
    Baxter_pred = pat.from_CAM_to_WS(points_on_CAM=position_predictions,
                                     CAM_on_WS=Detector.Extrins)

    #===============================================================
    #----------------------- Visualize -----------------------------
    #===============================================================
    #AXES 1-----------------------------
    axes1.set_title('RGB image')
    axes1.axis('off')
    axes1.imshow(tmp_rgb)
    for proj in projected:
        sct1 = axes1.scatter(proj[0], proj[1])

    #AXES 2-----------------------------
Example #7
            continue
    #------------------------------
    location = dataset + '/' + directory + '/'
    ax3d.view_init(0, 0)
    since = time.time()
    for i, img in enumerate(sorted(os.listdir(location + 'rgb'))):

        #get sample
        rgbd, Translation, Rotation = get_sample(location, img)
        # comparing a numpy array to [] is unreliable; check for empty returns by length
        if len(rgbd) == 0 or len(Translation) == 0 or len(Rotation) == 0:
            print('error for:', location + '/' + img)
            continue

        #Extract Patches
        Patches, patch_centers, patch_real = pat.patch_sampling(RGBD=rgbd,
                                                                metric=50,
                                                                spacing=10)
        if not np.any(Patches):
            # in case there are no patches extracted
            print('No Patches:', location + '/' + img)
            continue

        #wrap input
        inputs = CAE.wrap_input(Patches)
        # Encode patches into feature vectors
        Features = CAE.encode(inputs)
        #unwrap features
        Features = CAE.unwrap_features(Features)

        #===============================================================
        #-----------------------Predictions-----------------------------