Example #1
    def train_dc(self, max_iter=200):
        """ Solve the LatentSVDD optimization problem with a
            sequential convex programming / DC-programming
            approach: iteratively find the most likely
            configuration of the latent variables, then
            optimize the model parameters for these fixed
            latent states.
        """
        N = self.sobj.get_num_samples()
        DIMS = self.sobj.get_num_dims()

        # intermediate solutions:
        # latent variable assignment for each example
        latent = [0] * N

        # start from the zero vector (a random init would also work)
        sol = matrix(0.0, (DIMS, 1))

        psi = matrix(0.0, (DIMS, N))  # (dim x exm)
        old_psi = matrix(0.0, (DIMS, N))  # (dim x exm)
        threshold = 0

        n_iter = 0

        # terminate when the joint feature maps do not change much anymore
        while n_iter < max_iter and (
                n_iter < 3 or np.sum(np.abs(np.array(psi - old_psi))) >= 0.001):
            print('Starting iteration {0}.'.format(n_iter))
            print(np.sum(np.abs(np.array(psi - old_psi))))
            n_iter += 1
            old_psi = matrix(psi)

            # 1. linearize: for the current solution, compute the
            # most likely latent variable configuration of each example
            for i in range(N):
                # min_z ||sol - Psi(x,z)||^2
                #   = ||sol||^2 + min_z (-2<sol,Psi(x,z)> + ||Psi(x,z)||^2)
                #   = ||sol||^2 - max_z (2<sol,Psi(x,z)> - ||Psi(x,z)||^2)
                (_, latent[i], psi[:, i]) = self.sobj.argmax(sol, i)

            # 2. solve the intermediate convex optimization problem
            kernel = Kernel.get_kernel(psi, psi)
            svdd = SVDD(kernel, self.C)
            svdd.train_dual()
            threshold = svdd.get_threshold()
            inds = svdd.get_support_dual()
            alphas = svdd.get_support_dual_values()
            # the center is a convex combination of the support feature maps
            sol = psi[:, inds] * alphas

        self.sol = sol
        self.latent = latent
        return (sol, latent, threshold)
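
The linearization comment above rests on the identity min_z ||sol - Psi(x,z)||^2 = ||sol||^2 - max_z (2<sol,Psi(x,z)> - ||Psi(x,z)||^2), which is what allows an argmax oracle to perform the minimization. A minimal, self-contained NumPy check of that identity (random illustrative data; no dependency on the SVDD or Kernel classes above):

	import numpy as np

	rng = np.random.default_rng(0)
	sol = rng.normal(size=5)        # current model vector
	Psi = rng.normal(size=(5, 8))   # one joint feature map Psi(x, z) per latent state z

	# direct minimization of the squared distance over the latent states z
	direct = np.min(np.sum((sol[:, None] - Psi) ** 2, axis=0))

	# DC form: ||sol||^2 - max_z (2<sol, Psi(x,z)> - ||Psi(x,z)||^2)
	dc = sol @ sol - np.max(2 * (sol @ Psi) - np.sum(Psi ** 2, axis=0))

	assert np.isclose(direct, dc)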
Example #2
	svdd = SVDD(kernel, 0.9)
	svdd.train_dual()

	# generate a regular grid of test points
	delta = 0.1
	x = np.arange(-4.0, 4.0, delta)
	y = np.arange(-4.0, 4.0, delta)
	X, Y = np.meshgrid(x, y)
	(sx, sy) = X.shape
	Xf = np.reshape(X, (1, sx * sy))
	Yf = np.reshape(Y, (1, sx * sy))
	Dtest = np.append(Xf, Yf, axis=0)  # one test point per column, shape (2, sx*sy)
	print(Dtest.shape)

	# build the test kernel against the support vectors only
	kernel = Kernel.get_kernel(co.matrix(Dtest), Dtrain[:, svdd.get_support_dual()], ktype, kparam)
	# SVDD additionally needs the norms of the test points
	norms = Kernel.get_diag_kernel(co.matrix(Dtest), ktype, kparam)

	(res, state) = svdd.apply_dual(kernel, norms)
	print(res.size)

	# visualize: filled contours of the decision values, the learned
	# boundary at the threshold, and the support vectors in black
	Z = np.reshape(res, (sx, sy))
	plt.contourf(X, Y, Z)
	plt.contour(X, Y, Z, [np.array(svdd.get_threshold())[0, 0]])
	plt.scatter(Dtrain[0, svdd.get_support_dual()], Dtrain[1, svdd.get_support_dual()], 40, c='k')
	plt.scatter(Dtrain[0, :], Dtrain[1, :], 10)
	plt.show()

	print('finished')
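
The grid construction in this example reshapes and appends the meshgrid coordinates by hand; the same (2, sx*sy) test matrix can be built with the more common ravel/vstack idiom. A small self-contained NumPy sketch, independent of the SVDD code above:

	import numpy as np

	delta = 0.1
	x = np.arange(-4.0, 4.0, delta)
	y = np.arange(-4.0, 4.0, delta)
	X, Y = np.meshgrid(x, y)

	# one test point per column, same layout as in the example above
	Dtest = np.vstack((X.ravel(), Y.ravel()))
	assert Dtest.shape == (2, X.size)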