Example #1
0
def speed_test(EXP_NAME):
	"""Time the compression stage for a few selected operating points.

	Evaluates batches 66-70 with the compression scheme named by EXP_NAME
	(replaying stored Pareto-front configs for the tiled schemes, or
	sweeping a scalar quality parameter for the codec baselines), then
	writes the mean/std compression time reported by the Transformer to
	'<EXP_NAME>_spdtest.log'.
	"""
	np.random.seed(123)
	torch.manual_seed(2)

	sim = Simulator(train=False)
	TF = Transformer(name=EXP_NAME)
	datarange = [66,70]
	# Line-buffered log; the context manager closes the handle
	# deterministically (the original leaked it).
	with open(EXP_NAME+'_spdtest.log', "w", 1) as eval_file:
		if EXP_NAME in ['Tiled','TiledLegacy']:
			# Replay hand-picked rows from the stored MOBO Pareto front.
			if EXP_NAME == 'Tiled':
				selected_ranges = [32,42,51,58,72,197]
			else:
				selected_ranges = [50, 58, 69, 85, 108,170]
			with open(EXP_NAME+'_MOBO_pf.log','r') as f:
				for lidx,line in enumerate(f.readlines()):
					if lidx not in selected_ranges:continue
					print(EXP_NAME,lidx)
					tmp = line.strip().split(' ')
					acc,cr = float(tmp[0]),float(tmp[1])
					C_param = np.array([float(n) for n in tmp[2:]])
					acc1,cr1 = sim.get_one_point(datarange, TF=TF, C_param=C_param)
		else:
			# Codec baselines take a scalar rate/quality parameter.
			if EXP_NAME == 'JPEG':
				rate_ranges = [7,11,15,21,47,100]
			elif EXP_NAME == 'JPEG2000':
				rate_ranges = range(6)
			elif EXP_NAME == 'WebP':
				rate_ranges = [0,5,37,100]
			else:
				# Previously an unrecognized name crashed later with a
				# NameError on rate_ranges; fail with a clear message.
				raise ValueError('Unknown EXP_NAME: '+EXP_NAME)
			for r in rate_ranges:
				print(EXP_NAME,r)
				acc,cr = sim.get_one_point(datarange, TF=TF, C_param=r)
		m,s = TF.get_compression_time()
		eval_file.write(f"{m:.3f} {s:.3f}\n")
Example #2
0
def evaluation(EXP_NAME):
	"""Evaluate one compression method across the full dataset.

	Writes one 'accuracy compression_ratio' line per operating point to
	'<EXP_NAME>_eval.log'. Tiled/ROI methods replay stored Pareto-front
	configurations (with the stored acc/cr appended for comparison),
	'RAW' evaluates without compression, 'Scale' sweeps 1-32, and any
	other codec sweeps quality 0-100.
	"""
	np.random.seed(123)
	torch.manual_seed(2)

	sim = Simulator(train=False)
	TF = Transformer(name=EXP_NAME)
	datarange = [0,sim.num_batches]
	# Context manager closes the log (the original leaked the handle).
	with open(EXP_NAME+'_eval.log', "w", 1) as eval_file:
		if EXP_NAME in ['Tiled', 'TiledLegacy','ROI']:
			# Replay stored Pareto-front configs, last line first.
			with open(EXP_NAME+'_MOBO_pf.log','r') as f:
				for line in f.readlines()[::-1]:
					tmp = line.strip().split(' ')
					acc,cr = float(tmp[0]),float(tmp[1])
					C_param = np.array([float(n) for n in tmp[2:]])
					acc1,cr1 = sim.get_one_point(datarange, TF=TF, C_param=C_param)
					eval_file.write(f"{acc1:.3f} {cr1:.3f} {acc:.3f} {cr:.3f}\n")
		elif EXP_NAME == 'RAW':
			# Uncompressed baseline: no transformer, no parameter.
			acc,cr = sim.get_one_point(datarange, TF=None, C_param=None)
			eval_file.write(f"{acc:.3f} {cr:.3f}\n")
		elif EXP_NAME == 'Scale':
			for i in range(1,33):
				acc,cr = sim.get_one_point(datarange, TF=TF, C_param=i)
				eval_file.write(f"{acc:.3f} {cr:.3f}\n")
		else:
			# Generic codec: sweep the scalar quality parameter 0-100.
			for i in range(101):
				print(EXP_NAME,i)
				acc,cr = sim.get_one_point(datarange, TF=TF, C_param=i)
				eval_file.write(f"{acc:.3f} {cr:.3f}\n")
Example #3
0
def pareto_front_approx(comp_name,EXP_NAME):
	"""Explore compression configurations to approximate a Pareto front.

	Runs `num_cfg` iterations of the generator named by EXP_NAME: each
	iteration proposes a configuration, evaluates it on the first 100
	training batches, feeds the (accuracy, compression-ratio) result back
	to the generator, and appends config/accuracy/ratio to per-run logs.
	Total wall-clock time is written to '<EXP_NAME>_time.log'.
	"""
	# Line-buffered logs; the context manager closes all three handles
	# (the original leaked them).
	with open(comp_name+'_'+EXP_NAME+'_cfg.log', "w", 1) as cfg_file, \
		open(comp_name+'_'+EXP_NAME+'_acc.log', "w", 1) as acc_file, \
		open(comp_name+'_'+EXP_NAME+'_cr.log', "w", 1) as cr_file:

		# test with 500 iter
		start = time.perf_counter()

		# setup target network
		# so that we only do this once
		sim = Simulator(train=True)
		cgen = C_Generator(name=EXP_NAME,explore=True)
		num_cfg = 500 # number of cfgs to be explored
		datarange = [0,100]
		print(EXP_NAME,'num configs:',num_cfg, 'total batches:', sim.num_batches)

		TF = Transformer(comp_name)
		# the pareto front can be restarted, need to try

		for bi in range(num_cfg):
			print(bi)
			# DDPG-based generator
			C_param = cgen.get()
			# apply the compression param chosen by the generator
			map50,cr = sim.get_one_point(datarange=datarange, TF=TF, C_param=np.copy(C_param))
			# optimize generator
			cgen.optimize((map50,cr),False)
			# write logs
			cfg_file.write(' '.join([str(n) for n in C_param])+'\n')
			acc_file.write(str(float(map50))+'\n')
			cr_file.write(str(cr)+'\n')
		# test with 500 iter
		end = time.perf_counter()
	with open(EXP_NAME+'_time.log','w',1) as f:
		f.write(str(end-start)+'s')
Example #4
0
	def objective(x):
		# Evaluate one configuration `x` against the cloud model, append
		# the config / accuracy / compression ratio to the shared log
		# files held in the closure dict `d`, bump the iteration counter,
		# and return the two objectives as a numpy vector.
		sim = Simulator(train=True)
		TF = Transformer(comp_name)
		datarange = [0,100]
		print('Iter:',d['iter'],x)
		acc,cr = sim.get_one_point(datarange=datarange, TF=TF, C_param=x)
		# One log line per file, in insertion order.
		records = {
			'cfg_file': ' '.join(str(v) for v in x),
			'acc_file': str(float(acc)),
			'cr_file': str(cr),
		}
		for name, text in records.items():
			d[name].write(text + '\n')
		d['iter'] += 1
		return np.array([float(acc),cr])
Example #5
0
def generate_image_samples(EXP_NAME):
	"""Replay one stored Pareto-front configuration on a single batch.

	Scans '<EXP_NAME>_MOBO_pf.log' for the first line whose index is in
	`selected_lines`, evaluates that configuration with snapshotting
	enabled, and prints the measured accuracy/compression ratio plus the
	transformer's compression-time statistics.
	"""
	sim = Simulator(train=True)
	TF = Transformer(name=EXP_NAME,snapshot=True)
	datarange = [0,1]  # one batch only; full sweep would use sim.num_batches
	selected_lines = [92,152]
	# replace pf file later
	with open(EXP_NAME+'_MOBO_pf.log','r') as f:
		for idx, record in enumerate(f.readlines()):
			if idx in selected_lines:
				fields = record.strip().split(' ')
				acc,cr = float(fields[0]),float(fields[1])
				C_param = np.array([float(v) for v in fields[2:]])
				acc1,cr1 = sim.get_one_point(datarange, TF=TF, C_param=C_param)
				print(acc1,cr1,C_param)
				# Only the first selected line is replayed.
				break
	m,s = TF.get_compression_time()
	print(m,s)
Example #6
0
	class MyProblem(Problem):
		"""pymoo problem wrapping the simulator: six decision variables in
		[-0.5, 0.5], two objectives (accuracy, compression ratio), with
		every evaluated configuration appended to the NSGA2 log files."""

		def __init__(self):
			half = np.array([.5]*6)
			super().__init__(n_var=6, n_obj=2, n_constr=0, xl=-half, xu=half)
			self.sim = Simulator(train=True)
			self.TF = Transformer(comp_name)
			self.datarange = [0,100]
			# Line-buffered logs, one record per evaluated configuration.
			self.cfg_file = open(comp_name+'_NSGA2_cfg.log', "w", 1)
			self.acc_file = open(comp_name+'_NSGA2_acc.log', "w", 1)
			self.cr_file = open(comp_name+'_NSGA2_cr.log', "w", 1)
			self.iter = 0

		def _evaluate(self, x, out, *args, **kwargs):
			# `x` is the population matrix: one candidate config per row.
			objectives = []
			for cfg in x:
				acc,cr = self.sim.get_one_point(datarange=self.datarange, TF=self.TF, C_param=cfg)
				objectives.append([float(acc),cr])
				self.cfg_file.write(' '.join(str(v) for v in cfg)+'\n')
				self.acc_file.write(str(float(acc))+'\n')
				self.cr_file.write(str(cr)+'\n')
				print('Iter:',self.iter)
				self.iter += 1
			out["F"] = np.array(objectives)
Example #7
0
def dual_train(net):
	"""Jointly explore compression parameters and train `net` to predict accuracy.

	Per epoch: a C_Generator proposes compression parameters, the Simulator
	measures the resulting accuracy (and the Transformer the compression
	ratio), then `net` is fit by MSE regression from parameters to measured
	accuracy. Progress is appended to 'training.log' and weights are saved
	to PATH after every epoch.

	NOTE(review): depends on module-level names `batch_size` and `PATH`
	that are not visible in this chunk — confirm they are defined.
	"""
	np.random.seed(123)
	criterion = nn.MSELoss(reduction='sum')
	optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
	# Line-buffered log; handle is intentionally kept open for the whole run.
	log_file = open('training.log', "w", 1)
	log_file.write('Training...\n')

	# setup target network
	# so that we only do this once
	sim = Simulator(10)
	cgen = C_Generator()
	num_cfg = 1#sim.point_per_sim//batch_size
	print('Num batches:',num_cfg,sim.point_per_sim)

	for epoch in range(10):
		running_loss = 0.0
		# Fresh Transformer each epoch so compression bookkeeping restarts.
		TF = Transformer('Tiled')
		# the pareto front can be restarted, need to try

		for bi in range(num_cfg):
			inputs,labels = [],[]
			# DDPG-based generator
			C_param = cgen.get()
			# batch result of mAP and compression ratio
			batch_acc, batch_cr = [],[]
			for k in range(batch_size):
				di = bi*batch_size + k # data index
				# start counting the compressed size
				TF.reset()
				# apply the compression param chosen by the generator
				fetch_start = time.perf_counter()
				# the function to get results from cloud model
				sim_result = sim.get_one_point(index=di, TF=TF, C_param=np.copy(C_param))
				fetch_end = time.perf_counter()
				# get the compression ratio
				cr = TF.get_compression_ratio()
				batch_acc += [sim_result]
				batch_cr += [cr]
				print_str = str(di)+str(C_param)+'\t'+str(sim_result)+'\t'+str(cr)+'\t'+str(fetch_end-fetch_start)
				print(print_str)
				log_file.write(print_str+'\n')
				inputs.append(C_param)
				labels.append(sim_result) # accuracy of IoU=0.5
			# optimize generator
			cgen.optimize((np.mean(batch_acc),np.mean(batch_cr)),False)
			# NOTE(review): re-logs the last per-sample line — looks like
			# unintentional duplication; confirm before removing.
			log_file.write(print_str+'\n')
			# transform to tensor
			inputs = torch.FloatTensor(inputs)#.cuda()
			labels = torch.FloatTensor(labels)#.cuda()

			# zero gradient
			optimizer.zero_grad()

			# forward + backward + optimize
			outputs = net(inputs)
			loss = criterion(outputs, labels)
			loss.backward()
			optimizer.step()

			# print statistics
			running_loss += loss.item()
			# |mean(labels - outputs)| as a rough validation metric
			# (NOTE(review): outputs is still attached to the graph here)
			val_loss = abs(torch.mean(labels.cpu()-outputs.cpu()))
			print_str = '{:d}, {:d}, loss {:.6f}, val loss {:.6f}'.format(epoch + 1, bi + 1, loss.item(), val_loss)
			print(print_str)
			log_file.write(print_str + '\n')
		# Dump the Pareto-front keys the generator has accumulated so far.
		print_str = str(cgen.paretoFront.data.keys())
		print(print_str)
		# End-of-epoch call (second arg True) — semantics defined by
		# C_Generator.optimize; presumably an epoch reset/exploit step.
		cgen.optimize(None,True)
		# Checkpoint after every epoch.
		torch.save(net.state_dict(), PATH)