# Example 1
def runMarginalFW(G,alg_params,model=None,V = [],alpha = [],stepSizeComputation = False,rho_vec =-1):
	quiet = alg_params['quiet'] 
	if stepSizeComputation:
		quiet = True
		assert type(rho_vec)!=int,'Requires rho_vec to be a vector'
		G.rhos_edge = rho_vec
		G.computeNodeRhos()
	######### Setup parameters $$$$$$$$$$
	stdout_initial = -1
	dev_null_f = open(os.devnull,'w')
	if quiet:
		stdout_initial = sys.stdout
		sys.stdout = dev_null_f
	logf,matf,fxn_params,f_obj = setupParams(G,alg_params)
	#print "\t----- Starting Marginal Inference with FW ------ \t"

	#Initial settings
	mus = G.init_vec
	MAX_STEPS = alg_params['max_steps']
	smallest_marginal = np.Inf
	if len(alpha)==0:
		V.append(csr_matrix(mus))
		alpha.append(1)

	#Track statistics
	statistics= setupStatistics(MAX_STEPS,mus.shape,G.nVertices)
	statistics['alpha'] = alpha

	#Extract gurobi model 
	if not alg_params['useMAP'] and model is None:
		model = ILPsolver.defineModel(G,alg_params)

	if alg_params['useMAP'] and alg_params['MAPsolver']!='toulbar2':
		len_gap = 5
	else:
		len_gap = 1
	objWarning =0
	gap_l = [10]*len_gap

	if alg_params['M_truncated_dynamic'] or alg_params['M_truncated']:
		#Additional stats to track 
		#Updated at every iteration 
		statistics['gap_FW'] = []
		statistics['gap_full'] = []
		#Updated when epsilon modified 
		statistics['eps_val'] = []
		statistics['primal_push'] = []
		statistics['iterate_push'] = []
		statistics['marker'] = []
		statistics['eps_val'] =[alg_params['M_eps']]
		#print "------------- M_eps with eps = ",alg_params['M_eps'],' -----------------'

	if alg_params['PreCorrection']:
		alg_params['correction_tol'] = 0.5
		G.init_vec = mus
		mus,val,bound,correctionGap = optutil.corrective(G,alg_params,V,alpha,np.inf,logf)

	#For all the steps
	for it in xrange(MAX_STEPS):
		assert len(alpha)==len(V),'mismatch in alpha and V'
		start_time = time.time()
		val,grad = f_obj(mus,fxn_params,True)
		#################   Running MAP Inference ################
		if not alg_params['useMAP']:
			model = ILPsolver.updateObjective(grad,alg_params,model)
		if alg_params['useMAP']:
			map_potentials = -1*grad
			if alg_params['MAPsolver']=='toulbar2':
				vertex,toulbar2_output,solution_MAP = MAPsolver.runMAP(map_potentials,G.nVertices,G.Edges,
					G.Cardinality,G.graphType,alg_params['toulbar2_uai_file'],alg_params['maxSecondsPerMAP'])
			elif alg_params['MAPsolver']=='MPLP':
				vertex,MPLP_output,solution_MAP = MPLPsolver.runMAP(map_potentials,G.nVertices,G.Edges,
					G.Cardinality,G.graphType,alg_params['toulbar2_uai_file'],alg_params['maxSecondsPerMAP'])
			else:#Use MAP solvers from openGM
				if it<2:
					vInit = None
				else:
					vInit = V[-1].toarray()[0]
				vertex,MAP_output,solution_MAP = GenericSolver.runMAP(map_potentials,G.nVertices,G.Edges,
						G.Cardinality,G.graphType,alg_params['toulbar2_uai_file'],'all',vInit,logf)
		else:
			vertex = ILPsolver.optimize(model)
		################   Code to track usage and save statistics ##################
		if alg_params['use_marginal_polytope']:
			assert np.sum(vertex-vertex.astype(int))<np.exp(-10),"Vertex not integer. Investigate : "+str(vertex)
		###############   Update marginals ###############
		#ipdb.set_trace()

		#Check if epsilon needs to be updated 
		if alg_params['M_truncated_dynamic']:
			g_u0 = np.dot(-1*grad,alg_params['uniform']-mus)
			g_k = np.dot(-1*grad,vertex-mus)
			#If the gap is negative use the correctionGap
			if g_k<0:
				g_k = correctionGap 
			if g_u0==0:
				new_eps = np.inf
			elif g_u0<0:
				new_eps = g_k/(-4*g_u0)
			else:
				new_eps = alg_params['M_eps']
			#Halve epsilon if the gap is *still* negative (shouldn't happen)
			#and if below precision -> set to 0 
			if new_eps < 0:
				new_eps = alg_params['M_eps']/2.
				if new_eps<1e-15:
					new_eps = 0
			if new_eps<alg_params['M_eps']:
				#Modified version to ensuring halving
				new_eps = min(new_eps,alg_params['M_eps']/2.)
				print "************ UPDATING DYN. EPSILON TO ",new_eps," *******************"
				old_eps = alg_params['M_eps']
				for i in xrange(1,len(alpha)):
					alpha[i] = alpha[i]*((1-old_eps)/(1-new_eps))
				alpha[0] = alpha[0] - (1-alpha[0])*(((1-old_eps)/(1-new_eps))*new_eps - old_eps)
				alg_params['M_eps'] = new_eps 
				#Check what an away step would look like 
				step_size,min_fxn_val = optutil.getStepDir(f_obj,fxn_params, mus-alg_params['uniform'],mus,0,alpha[0]/(1-alpha[0]))
				statistics['primal_push'].append(min_fxn_val)
				statistics['iterate_push'].append(mus+step_size*(mus-alg_params['uniform']))
				statistics['eps_val'].append(new_eps)
				statistics['marker'].append(it)
				alg_params['correction_tol'] = 0.5
				G.init_vec = mus
				mus,val,bound,correctionGap = optutil.corrective(G,alg_params,V,alpha,np.inf,logf)

		if alg_params['pairwise']:
			mus,gap,direction,step_size,extra = optutil.PFWstep(mus,grad,vertex,V,alpha,alg_params,f_obj,fxn_params)
		else:
			mus,gap,direction,step_size,extra = optutil.FWstep(mus,grad,vertex,V,alpha,alg_params,f_obj,fxn_params)
			#if is eps -> uses extra['gap_FW'] otherwise uses gap wchich is the FW gap anyways

		#Fully Corrective Variant
		if np.mod(it,alg_params['correctionFreq'])==0 and alg_params['doCorrection']:
			G.init_vec = mus
			if gap < 1:
				alg_params['correction_tol'] = 0.05
			else:
				alg_params['correction_tol'] = 0.5
			mus,val,bound,correctionGap = optutil.corrective(G,alg_params,V,alpha,np.inf,logf)
		#Local Search
		if alg_params['doICM']:
			mus,vertex_last,val = optutil.localsearch(vertex,mus,G,alg_params,f_obj,
				fxn_params,V,alpha,logf)

		#Track statistics
		entropy = np.sum(-1*np.log(mus)*mus)
		val_do_not_use,grad_final = f_obj(mus,fxn_params,True)
		grad_norm = np.max(grad_final)
		dir_norm = np.linalg.norm(direction)		
		if np.min(mus)<smallest_marginal:
			smallest_marginal = np.min(mus)
		updateStatistics(statistics,it,time.time()-start_time,len(V),mus,val,gap,step_size,entropy,grad_norm,dir_norm,np.min(mus))

		#Also track statisitcs for 
		if alg_params['M_truncated'] or alg_params['M_truncated_dynamic']:
			statistics['gap_FW'].append(extra['gap_FW'])
			statistics['gap_full'].append(extra['gap_full'])
			writeStatistics(statistics,it,matf,alsoWrite = ['gap_FW','gap_FW'])
			print 'Gap FW: ',extra['gap_FW']
		else:
			writeStatistics(statistics,it,matf)

		#Error : Objective not decreasing
		if it>2 and statistics['Obj_Val'][it]>statistics['Obj_Val'][it-1] and np.abs(statistics['Obj_Val'][it]-statistics['Obj_Val'][it-1])>1e-7:
			logf.write('####### WARNING. OBJECTIVE NOT DECREASING ######\n')
			logf.write('Step Size: '+str(step_size)+' Diff: '+str(np.abs(statistics['Obj_Val'][it]-statistics['Obj_Val'][it-1]))+'\n')
			print "WARNING: ",'Step Size: '+str(step_size)+'Obj: '+str(statistics['Obj_Val'][it])+' Diff: '+str(np.abs(statistics['Obj_Val'][it]-statistics['Obj_Val'][it-1]))+'\n'
			objWarning +=1
			if objWarning>3:
				logf.write('#### EXITING due to increasing objective #####\n')
				break
			assert False,'Objective should be decreasing'

		################## Print/Write to console ##################
		entropy_mus = np.sum(obj_fxn.computeEntropy(mus[1:np.sum(G.Cardinality)]))
		print "\n"
		status_1 = 	'It: %d, Primal: %.8g, Gap: %.8g, Bound: %.5g ' % (it,val,gap,statistics['Bound'][-1])
		status_2 = 	'Time(s): %.3g, StepSize: %.8g  ' % (statistics['Runtime'][-1],step_size)
		status_3 = 	'Entropy(mus)= %.3g, Norm (p) : %.3g, Norm (g) : %.3g '% (entropy_mus,dir_norm,grad_norm)
		status_4 =  'Min mu (overall) = %.5g, Min mu (current) = %.5g Sum(alphas) = %.3g' %(smallest_marginal,np.min(mus),np.sum(alpha))
		status = status_1+status_2+status_3+status_4
		print status
		logf.write(status+'\n')

		##################  Check stopping criterion ################
		gap_l[np.mod(it,len_gap)] = gap
		#M_eps variants have different stopping criterion 
		if alg_params['M_truncated'] or alg_params['M_truncated_dynamic']:
			#All variants break if global gap is less than tolerance 
			if extra['gap_FW']<alg_params['tol']:
				break 
			#Modify eps 
			if alg_params['M_truncated'] and alg_params['M_eps_iterations']>1 and gap < alg_params['tol']: #Manually modify eps
				old_eps = alg_params['M_eps']
				new_eps = alg_params['M_eps']/2.0
				print "************ UPDATING EPSILON TO ",new_eps," *******************"
				for i in xrange(1,len(alpha)):
					alpha[i] = alpha[i]*((1-old_eps)/(1-new_eps))
				alpha[0] = alpha[0] - (1-alpha[0])*(((1-old_eps)/(1-new_eps))*new_eps - old_eps)
				alg_params['M_eps'] = new_eps 
				#Check what an away step would look like 
				step_size,min_fxn_val = optutil.getStepDir(f_obj,fxn_params, mus-alg_params['uniform'],mus,0,alpha[0]/(1-alpha[0]))
				statistics['primal_push'].append(min_fxn_val)
				statistics['iterate_push'].append(mus+step_size*(mus-alg_params['uniform']))
				statistics['eps_val'].append(new_eps)
				statistics['marker'].append(it)
				alg_params['M_eps_iterations'] = alg_params['M_eps_iterations'] - 1
		else:
			if np.max(gap_l)<alg_params['tol']:
				print "Duality Gap condition reached: ",np.mean(gap_l)
				break

	print "\n----- Done Marginal Inference ------ \n"
	################ Cleanup Code ###############
	if alg_params['M_truncated'] or alg_params['M_truncated_dynamic']:
		old_eps = alg_params['M_eps']
		new_eps = alg_params['M_eps']/2.0
		for i in xrange(1,len(alpha)):
			alpha[i] = alpha[i]*((1-old_eps)/(1-new_eps))
		alpha[0] = alpha[0] - (1-alpha[0])*(((1-old_eps)/(1-new_eps))*new_eps - old_eps)
		alg_params['M_eps'] = new_eps 
		#Check what an away step would look like 
		#print alpha[0]
		step_size,min_fxn_val = optutil.getStepDir(f_obj,fxn_params, mus-alg_params['uniform'],mus,0,alpha[0]/(1-alpha[0]))
		statistics['primal_push'].append(min_fxn_val)
		statistics['iterate_push'].append(mus+step_size*(mus-alg_params['uniform']))
		statistics['eps_val'].append(new_eps)
		statistics['marker'].append(it)

		mat = writeStatistics(statistics,it,matf,returnMAT = True,alsoWrite = ['gap_FW','gap_full','primal_push','eps_val','marker'])
		mat['iterate_push'] = np.vstack(statistics['iterate_push'])
	else:
		mat = writeStatistics(statistics,it,matf,returnMAT = True)

	print "LogZ: ",val*-1+gap
	print "Marginals: ",
	#for m in statistics['IterSet'][it,:np.sum(G.Cardinality)].tolist():
	#	print ('%.4f')%(m),
	n_m = statistics['IterSet'][it,:np.sum(G.Cardinality)]
	print n_m.reshape(G.nVertices,2)
	e_m = statistics['IterSet'][it,np.sum(G.Cardinality):]
	print e_m.reshape(G.nEdges,4)

	dev_null_f.close()
	logf.close()

	if quiet:
		sys.stdout=stdout_initial
	if stepSizeComputation:
		return val*-1+gap
	else:
		return mat
# Example 2
def optSpanningTree(G,spanning_tree_params):
	quiet = spanning_tree_params['quiet']
	dev_null_f = open(os.devnull,'w')
	if quiet:
		stdout_initial = sys.stdout
		sys.stdout = dev_null_f
	print "\n----- Starting Marginal Inference - Optimizing Spanning Tree Polytope ------ \n"

	#Dealing with parameters
	alg_params = copy.deepcopy(spanning_tree_params['alg_params'])
	spanning_tree_iter = spanning_tree_params['spanning_tree_iter']
	matf = spanning_tree_params['mat_file_name']
	logfname = spanning_tree_params['log_file_name']

	assert 'usePreprocess' in spanning_tree_params,'usePreprocess not set'
	assert 'FWStrategy' in spanning_tree_params and 'Kval' in spanning_tree_params,'FWStrategy/Kval not set'
	assert 'stepSizeComputation' in spanning_tree_params,'stepSizeComputation not set'

	model = None
	if not alg_params['useMAP']:
		model = ILPsolver.defineModel(G,alg_params)

	#Keep aside a different log/mat file for every iteration of optimizing over the spanning tree
	alg_matf = alg_params['mat_file_name'].replace('.mat','')
	alg_logf = alg_params['log_file_name'].replace('.txt','')

	logf = open(logfname,'w',0)

	#Tracking unique vertices of the marginal polytope
	all_data = []
	vec_len = max(G.init_vec.shape)

	V = []
	alpha = []
	prevUb = np.inf

	########### Running iterations of Frank-Wolfe #################
	for it in xrange(spanning_tree_iter):
		print "\nIteration ",(it+1)

		#Update the name of matlab/log file for every iteration
		alg_params['mat_file_name'] = alg_matf+'-sp'+str(it)+'.mat'
		alg_params['log_file_name'] = alg_logf+'-sp'+str(it)+'.txt'

	################# Strategies for marginal inference with FW #############################
		if spanning_tree_params['FWStrategy'] == 'runK' and it>0:
			#Truncate the number of ILP calls
			alg_params['max_steps'] = spanning_tree_params['Kval']
		elif spanning_tree_params['FWStrategy'] == 'uniform':
			pass
		else:
			pass

		if spanning_tree_params['usePreprocess'] and it>0:
			alg_params['PreCorrection'] = True 
		else:
			alg_params['PreCorrection'] = False 
		
		mat = inf.runMarginalFW(G,alg_params,model,V,alpha)
		if len(alpha) != len(mat['alpha']):
			alpha = mat['alpha']
		val = mat['Obj_Val'][-1]
		mus = mat['IterSet'][-1,:]
		#postprocess
		start_time = time.time()
		mat['Runtime'][-1] = mat['Runtime'][-1]+(time.time()-start_time)
	########################   Update Status ####################################

		#print Status
		status = 'Stats: #Itns: %d, Primal Obj: %.6f,\
				Final Gap: %.6f, Avg Runtime(seconds): %.3f,\
				Last Step Size: %f, #Unique Vertices : %d' % \
				(mat['Obj_Val'].shape[0],mat['Obj_Val'][-1],
				mat['Dual_Gap'][-1],np.mean(mat['Runtime']),
				mat['Step_Size'][-1],len(V))
		#print status
		if quiet:
			stdout_initial.write(str(os.getpid())+" "+str(status)+"\n")

		logf.write(status+'\n')

	#####################  Updating rho vectors ###############################
		start_time = time.time()
		G.init_vec  = mus
		spanning_tree_polytope_vertex,grad = spanUtil.computeMST(G,mus)
		direction = np.reshape(spanning_tree_polytope_vertex,(spanning_tree_polytope_vertex.shape[0],1))-G.rhos_edge
		rhos = G.rhos_edge
		status = "Computing directed spanning tree took "+str(time.time()-start_time)+' seconds'
		#print status
		logf.write(status+'\n')
		#Strategies for computing the step size
		if spanning_tree_params['stepSizeComputation'] == 'linesearch':
			G_copy = copy.deepcopy(G)
			alpha_rho = sciopt.fminbound(lambda a:inf.runMarginalFW(G_copy,alg_params,model,V,alpha,stepSizeComputation=True,rho_vec = rhos+a*direction),0,1,xtol=0.0005)#,disp=3)
			print "Step Size (Vertices) :",alpha_rho
		elif spanning_tree_params['stepSizeComputation'] == 'standard':
			alpha_rho = float(2)/(it+3)
			print "Step Size (Standard) :",alpha_rho
		else:
			assert False,'Invalid stepSizeComputation: '+spanning_tree_params['stepSizeComputation']
		#Update
		G.rhos_edge = G.rhos_edge+alpha_rho*(direction)
		#Update rhos_node and K_C
		G.computeNodeRhos()
		print "Edge Appearance\n",G.rhos_edge," \nDirection\n",spanning_tree_polytope_vertex
		#print "LogZ estimate: ",inf.runMarginalFW(G_copy,alg_params,model,V,alpha,stepSizeComputation=True,rho_vec = rhos+alpha_rho*direction)
	############################# Collect Data and Save ###############################
		#Collect data on this iteration
		data = {}
		data['fw_result'] = mat
		data['rhos'] = G.rhos_edge
		data['alpha_rho'] = alpha_rho
		data['timeTaken'] = time.time()-start_time + np.sum(mat['Runtime'])
		data['dualityGap'] = -1*np.dot(grad,direction) 
		print "Rho Gap: ",data['dualityGap']

		#Postprocessing using the vertices of the marginal polytope
		if spanning_tree_params['usePreprocess'] and it<spanning_tree_iter-1:
			start_time = time.time()
			mus,val,prevUb,gapFW = optutil.corrective(G,alg_params,V,alpha,bound=prevUb)
			G.init_vec = mus
			data['timeTaken'] += (time.time()-start_time)

		#Append stats at the very end
		all_data.append(data)
		all_data_mat = {}
		all_data_mat['final_mus'] = mus
		all_data_mat['final_logz'] = val
		all_data_mat['rho_data'] = all_data
		all_data_mat['MARG_vertices'] = scipy.sparse.vstack(V)
		savemat(matf,all_data_mat)

	print "\n----- Done Marginal Inference ------ \n"
	logf.close()
	if quiet:
		sys.stdout=stdout_initial
	dev_null_f.close()