class ResourceManager(object):
	"""
	The resource manager is in charge of acquiring or releasing resources based on
	information from the scheduler and the workers.
	"""
	def __init__(self, parent=None):
		self.parent = parent
		worker_types = set(config.WORKERS.values())

		# Set bandwidth lock.
		self.set_bandwidth_lock = threading.Lock()

		# Track bandwidth requests.
		self.bandwidth_tracker = BandwidthRequestTracker()

		# Worker messages that deal with releasing or acquiring more bandwidth have to
		# be processed sequentially.
		self.bandwidth_lock = threading.Lock()

		# Will hold all information regarding purchased bandwidth.
		self.bandwidth_values = {}
		self.prev_bandwidth_values = {}

		# Will hold any scheduled bandwidth changes. Keys are worker types and values are
		# Timer objects.
		self.scheduled_bandwidth_changes = {}

		# Will hold a reference to MemFS.
		self.memfs = None

		# Start recording the time for accounting purposes.
		self.start_time = time.time()

		# Start MemFS.
		if config.START_MEMFS:
			self.start_memfs()
			self.memfs_logger = simple_logger.SimpleLogger(config.LOGDIR + '/memfs.log')

		# Used to gather worker statistics.
		self.waiting_for_workers_lock = threading.Lock()
		self.waiting_for_workers = {}
		self.waiting_current_max_bandwidth_lock = threading.Lock()
		self.waiting_current_max_bandwidth = {}
		self.worker_statistics_lock = threading.Lock()
		self.worker_statistics = {}
		for worker_type in worker_types:
			self.waiting_for_workers[worker_type] = 0
			self.worker_statistics[worker_type] = []

		# Start all the workers off with the maximum amount of bandwidth.
		max_bandwidth = max(config.AVAILABLE_BANDWIDTH_VALUES)
		self.bandwidth_loggers = {}
		self.bandwidth_purchased = {}
		for worker_type in worker_types:
			self.bandwidth_loggers[worker_type] = simple_logger.SimpleLogger(config.LOGDIR + '/' + str(worker_type) + '_bandwidth.log')
			self.bandwidth_purchased[worker_type] = []
			self.bandwidth_values[worker_type] = {
				'purchased_at': None,
				'bandwidth': 0
			}
			self.set_bandwidth(worker_type, max_bandwidth, False, True)
		self.prev_bandwidth_values = self.bandwidth_values.copy()

		# Holds information regarding the currently executing workflow.
		self.dag_statistics = {}
		self.dag_statistics_lock = threading.Lock()
		self.sent_task_executables = set()

		# Used to keep track of task ids.
		self.task_id = 1

		# Used to log the cost.
		self.cost_logger = simple_logger.SimpleLogger(config.LOGDIR + '/cost.log')

		# Used to log the new task.
		self.new_task_logger = simple_logger.SimpleLogger(config.LOGDIR + '/new_task.log')

		self.request_bandwidth_lock = threading.Lock()
		self.request_bandwidth = None

	def start_memfs(self):
		"""
		Starts MemFS
		:return:
		"""
		# First create the config file.
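		# Config layout: the worker count (written twice), the MemFS beta parameter (written twice),
		# then one worker address per line.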
		print('Starting MemFS')
		memfs_config_file = open(config.MEMFS_CONFIG_FILE, 'w')
		memfs_config_file.write(str(len(config.WORKERS)) + '\n' + str(len(config.WORKERS)) + '\n')
		memfs_config_file.write(str(config.MEMFS_BETA) + '\n' + str(config.MEMFS_BETA) + '\n')
		for worker_address in config.WORKERS:
			memfs_config_file.write(worker_address + '\n')
		memfs_config_file.close()

		# Now start MemFS.
		self.memfs = memfs.MemFS(config.MEMFS_CONFIG_FILE)
		self.memfs.initialize()
		print('MemFS initialized')

	def adapt_memfs(self):
		"""
		Redistributes MemFS partitions if necessary.
		:return:
		"""
		# If the bandwidth values were uniform across worker types before and are still uniform now,
		# the relative distribution is unchanged and MemFS partitions do not need to be redistributed.
		if self.equal_bandwidth_values(self.bandwidth_values) and self.equal_bandwidth_values(self.prev_bandwidth_values):
			print('No need to adapt MemFS')
			return

		# Otherwise we prepare the bandwidth file to adapt MemFS.
		print("Going to adapt MemFS")
		bandwidth_file = open('/home/' + getpass.getuser() + '/bandwidth.txt', 'w')
		for worker_address in config.WORKERS:
			worker_type = config.WORKERS[worker_address]
			bandwidth_file.write(worker_address + ':' + str(self.bandwidth_values[worker_type]['bandwidth']) + 'mbit\n')
		bandwidth_file.close()

		# Let MemFS adapt itself.
		self.memfs.scale_bandwidth()

	def equal_bandwidth_values(self, bandwidth_values):
		"""
		Determines if all given bandwidth values are the same.
		:param bandwidth_values:
		:return:
		"""
		if len(bandwidth_values) < 1:
			return True
		last = None
		for worker_type in bandwidth_values:
			if last is not None and bandwidth_values[worker_type]['bandwidth'] != last:
				return False
			else:
				last = bandwidth_values[worker_type]['bandwidth']
		return True

	def stop_memfs(self):
		"""
		Stops MemFS
		:return:
		"""
		self.memfs.kill_all()

	def initialize_dag(self, tasks_data):
		"""
		Parse DAG information. We will maintain the total number of tasks for each phase
		of the workflow as well as which tasks still need to complete.
		:param tasks_data:
		:return:
		"""
		self.dag_statistics_lock.acquire()
		for task in tasks_data:
			executable = task['exec']
			if executable not in self.dag_statistics:
				self.dag_statistics[executable] = {'total': 0, 'remaining': []}
				self.dag_statistics[executable]['workers'] = {}

				# Initialize the finished arrays for all workers.
				for worker in config.WORKERS.keys():
					self.dag_statistics[executable]['workers'][worker] = {}
					self.dag_statistics[executable]['workers'][worker]['finished'] = []
					self.dag_statistics[executable]['workers'][worker]['started_at'] = None

			self.dag_statistics[executable]['total'] += 1
			self.dag_statistics[executable]['remaining'].append(self.task_id)
			self.task_id += 1
		self.dag_statistics_lock.release()

	def sent_tasks_to_worker(self, worker, tasks):
		"""
		Indicates that the given tasks have been sent to the given worker. We record when the
		first task of each type is sent, as well as which task types are currently running.
		:param worker:
		:param tasks:
		:return:
		"""
		self.dag_statistics_lock.acquire()
		now = time.time()
		for task in tasks:
			executable = task['exec']

			# Record the first sending of this task.
			if executable not in self.sent_task_executables:
				self.sent_task_executables.add(executable)
				self.new_task_logger.log(str(now - self.start_time) + ',' + str(executable))

				# Kill any outstanding bandwidth request.
				if config.ADAPT_BANDWIDTH:
					self.request_bandwidth_lock.acquire()
					if self.request_bandwidth is not None:
						self.request_bandwidth.cancel()
						self.request_bandwidth = None

					# Determine if this is a special phase. If so, request bandwidth in 20 seconds.
					if self.dag_statistics[executable]['total'] == 1:
						request_timer = threading.Timer(20, self.request_bandwidth_from_worker, (worker, executable))
						self.request_bandwidth = request_timer
						request_timer.start()
					self.request_bandwidth_lock.release()

			if self.dag_statistics[executable]['workers'][worker]['started_at'] is None:
				self.dag_statistics[executable]['workers'][worker]['started_at'] = now
		self.dag_statistics_lock.release()

	def finished_tasks(self, worker, tasks):
		"""
		Marks the given tasks for the given worker as finished.
		:param worker:
		:param tasks:
		:return:
		"""
		self.dag_statistics_lock.acquire()
		current_exec = None
		for task in tasks:
			# To speed up the search we keep track of the current task executable.
			if current_exec is not None:
				if task in self.dag_statistics[current_exec]['remaining']:
					# Mark the task as finished by the worker.
					if task not in self.dag_statistics[current_exec]['workers'][worker]['finished']:
						self.dag_statistics[current_exec]['workers'][worker]['finished'].append(task)

					# Remove from remaining tasks
					self.dag_statistics[current_exec]['remaining'].remove(task)
					if len(self.dag_statistics[current_exec]['remaining']) == 0 and current_exec in self.sent_task_executables:
						self.sent_task_executables.remove(current_exec)
					continue

			# Otherwise we have no choice but to loop over the DAG to find the task.
			for executable in self.dag_statistics:
				if task in self.dag_statistics[executable]['remaining']:
					current_exec = executable

					# Mark the task as finished by the worker.
					if task not in self.dag_statistics[current_exec]['workers'][worker]['finished']:
						self.dag_statistics[current_exec]['workers'][worker]['finished'].append(task)

					# Remove from remaining tasks.
					self.dag_statistics[executable]['remaining'].remove(task)
					if len(self.dag_statistics[current_exec]['remaining']) == 0 and current_exec in self.sent_task_executables:
						self.sent_task_executables.remove(current_exec)
		self.dag_statistics_lock.release()

	def process_worker_message(self, message):
		"""
		Processes a message from a worker.
		:param message:
		:return:
		"""
		message_type = message[5]
		worker = message[0]
		data = pickle.loads(message[6])

		# The network of the worker is fully utilized.
		if message_type == 'fully_utilized':
			print('==============================================')
			self.bandwidth_lock.acquire()
			can_report = self.process_worker_fully_utilized(worker, data)
			self.bandwidth_lock.release()
			print('==============================================')

			self.parent.send_message([message[0], PROTOCOL_HEADERS['RSMNG'], 'resource', pickle.dumps({'fully_utilized': can_report})])

		# The network of the worker is under utilized, we can increase.
		elif message_type == 'under_utilized':
			print('==============================================')
			self.bandwidth_lock.acquire()
			can_report = self.process_worker_under_utilized(worker, data)
			self.bandwidth_lock.release()
			print('==============================================')

			self.parent.send_message([message[0], PROTOCOL_HEADERS['RSMNG'], 'resource', pickle.dumps({'under_utilized': can_report})])

		# A worker has sent statistics.
		elif message_type == 'worker_statistics':
			self.worker_statistics_lock.acquire()
			print('Received statistics from worker ' + worker)
			worker_type = config.WORKERS[worker]
			self.worker_statistics[worker_type].append(data)
			has_enough = (len(self.worker_statistics[worker_type]) == self.waiting_for_workers[worker_type])
			self.worker_statistics_lock.release()

			# Check if these were the last statistics to come in.
			if has_enough:
				print('Got all needed statistics, going to determine if we can increase bandwidth')
				self.parse_worker_statistics(worker_type)

	def request_bandwidth_from_worker(self, worker, executable):
		"""
		Sends a request to a worker to submit his statistics.
		:param worker:
		:param executable:
		:return:
		"""
		print('Asking ' + str(worker) + ' to send statistics for running ' + str(executable))
		sys.stdout.flush()
		self.parent.send_message([worker, PROTOCOL_HEADERS['RSMNG'], 'get_bandwidth', pickle.dumps(executable)])

		# Ask again in a while.
		self.request_bandwidth_lock.acquire()
		if self.request_bandwidth is not None:
			self.request_bandwidth = threading.Timer(20, self.request_bandwidth_from_worker, (worker, executable))
			self.request_bandwidth.start()
		self.request_bandwidth_lock.release()

	def process_worker_under_utilized(self, worker, data):
		"""
		Processes a request from a worker reporting that it is under-utilizing its bandwidth.
		:param worker:
		:param data:
		:return:
		"""
		# Pull out the reported statistics and the bandwidth cap the worker measured against.
		worker_type = config.WORKERS[worker]
		statistics = data['statistics']
		sent_max_bandwidth = statistics['bandwidth']['out']['max']

		minimal_bandwidth = self.get_minimal_bandwidth(statistics)
		if not minimal_bandwidth:
			return False

		# Count the request.
		self.bandwidth_tracker.got_bandwidth_request(data['executable'], minimal_bandwidth)

		# Another worker might have lowered the bandwidth already.
		if sent_max_bandwidth != self.bandwidth_values[worker_type]['bandwidth']:
			return False

		now = time.time()
		last_purchase = self.bandwidth_values[worker_type]['purchased_at']
		time_in_period = (now - last_purchase) % config.BANDWIDTH_BILLING_PERIOD

		# Add 5% buffer time before the end of the billing period if possible.
		buffer_time = (config.BANDWIDTH_BILLING_PERIOD / 100.0) * 5
		schedule_downgrade = config.BANDWIDTH_BILLING_PERIOD - (time_in_period + buffer_time)
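		# Example (assuming a 600 second billing period with 400 seconds already elapsed):
		# the downgrade would be scheduled in 600 - (400 + 30) = 170 seconds.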
		if schedule_downgrade < 0:
			schedule_downgrade = config.BANDWIDTH_BILLING_PERIOD - time_in_period

		# If we can honor the downgrade, cancel any existing downgrade timer before setting a new one.
		if self.bandwidth_tracker.can_honor_request(data['executable'], minimal_bandwidth):
			if worker_type in self.scheduled_bandwidth_changes:
				downgrade = self.scheduled_bandwidth_changes[worker_type]
				if downgrade['to'] != minimal_bandwidth:
					# Cancel the old timer.
					print('Cancelling downgrade timer')
					downgrade['timer'].cancel()
					self.scheduled_bandwidth_changes.pop(worker_type)

					# Set the new timer.
					downgrade_timer = threading.Timer(schedule_downgrade, self.set_bandwidth, (worker_type, minimal_bandwidth))
					self.scheduled_bandwidth_changes[worker_type] = {
						'to': minimal_bandwidth,
						'timer': downgrade_timer
					}
					downgrade_timer.start()
					print('Scheduled downgrade for workers of type ' + str(worker_type) + ' to ' + str(minimal_bandwidth) + 'mbit/s in ' + str(schedule_downgrade) + ' seconds')
				else:
					print('Keeping existing downgrade timer')
			else:
				# Set a new timer.
				downgrade_timer = threading.Timer(schedule_downgrade, self.set_bandwidth, (worker_type, minimal_bandwidth))
				self.scheduled_bandwidth_changes[worker_type] = {
					'to': minimal_bandwidth,
					'timer': downgrade_timer
				}
				downgrade_timer.start()
				print('Scheduled downgrade for workers of type ' + str(worker_type) + ' to ' + str(minimal_bandwidth) + 'mbit/s in ' + str(schedule_downgrade) + ' seconds')
		return False

	def process_worker_fully_utilized(self, worker, data):
		"""
		Processes a request from a worker reporting that it is fully utilizing its bandwidth.
		:param worker:
		:param data:
		:return:
		"""
		print('Got increase bandwidth request from ' + worker)
		worker_type = config.WORKERS[worker]
		current_bandwidth = self.bandwidth_values[worker_type]['bandwidth']
		statistics = data['statistics']

		# Get the max bandwidth.
		max_bandwidth = self.get_maximal_bandwidth(worker, worker_type, data)
		if not max_bandwidth:
			return False

		# Now we count the request.
		self.bandwidth_tracker.got_bandwidth_request(data['executable'], max_bandwidth)

		# Another worker might have already upped the bandwidth, in that case we do not
		# change it now.
		if statistics['bandwidth']['out']['max'] != current_bandwidth:
			print('Max bandwidth for ' + worker + ' (' + str(statistics['bandwidth']['out']['max']) + ') does not equal listed max of ' + str(current_bandwidth))
			return False

		# Determine if enough requests have come in to up the bandwidth.
		if not self.bandwidth_tracker.can_honor_request(data['executable'], max_bandwidth):
			return False

		# If there is a scheduled downgrade for workers of this type, cancel it.
		if worker_type in self.scheduled_bandwidth_changes:
			print('Cancelling scheduled downgrade for workers of type ' + str(worker_type))
			sys.stdout.flush()
			downgrade_timer = self.scheduled_bandwidth_changes[worker_type]['timer']
			downgrade_timer.cancel()
			self.scheduled_bandwidth_changes.pop(worker_type)

		print('Statistics for increasing bandwidth: ' + str(statistics))
		print('Increasing bandwidth for workers of type ' + str(worker_type) + ' to ' + str(max_bandwidth) + ' mbit/s')
		self.set_bandwidth(worker_type, max_bandwidth)

		return False

	def parse_worker_statistics(self, worker_type):
		"""
		When all statistics for a certain worker type have come in, we can check if it is
		cost effective to switch to a certain bandwidth value.
		:param worker_type:
		:return:
		"""
		self.worker_statistics_lock.acquire()
		self.waiting_for_workers_lock.acquire()
		self.waiting_current_max_bandwidth_lock.acquire()

		# Combine colocated statistics.
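		# Each worker history maps a colocated-task count to [runtime, bandwidth out, bandwidth in].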
		combined_history = {}
		for history in self.worker_statistics[worker_type]:
			for colocated in history:

				# We do not have figures for this number of colocated yet.
				if colocated not in combined_history:
					combined_history[colocated] = {
						'runtime': [history[colocated][0]],
						'bw_out': [history[colocated][1]],
						'bw_in': [history[colocated][2]]
					}
				else:
					# Add the figures, will average later.
					combined_history[colocated]['runtime'].append(history[colocated][0])
					combined_history[colocated]['bw_out'].append(history[colocated][1])
					combined_history[colocated]['bw_in'].append(history[colocated][2])

		# Average out all figures (combine bandwidth in and bandwidth out).
		averaged = {}
		for colocated in combined_history:
			averaged[colocated] = {
				'runtime': sum(combined_history[colocated]['runtime']) / float(len(combined_history[colocated]['runtime'])),
				'bw': (sum(combined_history[colocated]['bw_out']) / float(len(combined_history[colocated]['bw_out']))) +
					(sum(combined_history[colocated]['bw_in']) / float(len(combined_history[colocated]['bw_in'])))
			}

		# See if setting the max bandwidth makes sense by looking at the throughput.
		nr_colocated_for_max = self.get_nr_colocated_tasks_for_bandwidth(averaged, max(config.AVAILABLE_BANDWIDTH_VALUES))
		runtime_for_max = self.get_runtime_for_nr_colocated_tasks(averaged, nr_colocated_for_max)
		throughput_for_max = nr_colocated_for_max / float(runtime_for_max)

		# Clear the collected values for this worker type.
		self.waiting_for_workers[worker_type] = 0
		self.worker_statistics[worker_type] = []

		# Tell the workers they can resume reporting bandwidth.
		for w in config.WORKERS:
			if config.WORKERS[w] == worker_type:
				print('Worker ' + w + ' can resume reporting bandwidth')
				self.parent.send_message([w, PROTOCOL_HEADERS['RSMNG'], 'resource', pickle.dumps(True)])

		self.waiting_current_max_bandwidth_lock.release()
		self.waiting_for_workers_lock.release()
		self.worker_statistics_lock.release()

	def get_nr_colocated_tasks_for_bandwidth(self, data, target_bandwidth):
		"""
		Applies a linear function to the given data to get the number of colocated
		tasks for the given target bandwidth.
		:param data:
		:param target_bandwidth:
		:return:
		"""
		colocated = list(data.keys())
		bandwidth = [data[c]['bw'] for c in colocated]

		# Apply a linear function.
		def curve(x, a, b):
			return a * x + b

		popt, pcov = curve_fit(curve, colocated, bandwidth)
		a = popt[0]
		b = popt[1]
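		# Invert the fitted line (bandwidth = a * colocated + b) to solve for the colocated count.
		# Example (with made-up coefficients a = 100 and b = 50): a target of 1050 mbit/s maps to
		# (1050 - 50) / 100 = 10 colocated tasks.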
		return (target_bandwidth - b) / a

	def get_runtime_for_nr_colocated_tasks(self, data, target_colocated):
		"""
		Applies a linear function to the given data to get the runtime for the
		given number of colocated tasks.
		:param data:
		:param target_colocated:
		:return:
		"""
		colocated = list(data.keys())
		runtime = [data[c]['runtime'] for c in colocated]

		# Apply a linear function.
		def curve(x, a, b):
			return a * x + b

		popt, pcov = curve_fit(curve, colocated, runtime)
		a = popt[0]
		b = popt[1]
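		# Evaluate the fitted line at the target number of colocated tasks.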
		return curve(target_colocated, a, b)

	def get_maximal_bandwidth(self, worker, worker_type, data):
		"""
		Returns the new maximum bandwidth based on the current strategy and the current bandwidth.
		:param worker:
		:param worker_type:
		:param data:
		:return:
		"""
		if config.MAX_BANDWIDTH_STRATEGY == 'Performance':
			return max(config.AVAILABLE_BANDWIDTH_VALUES)
		else:
			send_workers = []
			for w in config.WORKERS:
				if config.WORKERS[w] == worker_type and w != worker:
					send_workers.append(w)

			# Indicate that we are waiting for workers.
			self.waiting_for_workers_lock.acquire()
			if self.waiting_for_workers[worker_type] > 0:
				# We are already waiting for statistics to come in.
				self.waiting_for_workers_lock.release()
				return False
			else:
				self.waiting_for_workers[worker_type] = len(send_workers) + 1
			self.waiting_for_workers_lock.release()

			# Save the current max, so we know where to increase from when we have gathered statistics.
			self.waiting_current_max_bandwidth_lock.acquire()
			self.waiting_current_max_bandwidth[worker_type] = data['statistics']['bandwidth']['out']['max']
			self.waiting_current_max_bandwidth_lock.release()

			self.worker_statistics_lock.acquire()
			self.worker_statistics[worker_type].append(data['history'])
			self.worker_statistics_lock.release()

			# Send the actual messages. Once the last one is received, we will parse the information.
			for w in send_workers:
				# The workers cannot report bandwidth in the meantime.
				self.parent.send_message([w, PROTOCOL_HEADERS['RSMNG'], 'resource', pickle.dumps(False)])
				self.parent.send_message([w, PROTOCOL_HEADERS['RSMNG'], 'statistics', pickle.dumps(data['executable'])])
			return False

	def get_minimal_bandwidth(self, statistics):
		"""
		Tries to find the minimal bandwidth value that satisfies the minimum and maximum allowed
		network utilization based on the current bandwidth.
		:param statistics:
		:return:
		"""
		bandwidth_values = sorted(config.AVAILABLE_BANDWIDTH_VALUES.keys())

		# The maximum of the out/in bandwidth determines the new value.
		current_out = statistics['bandwidth']['out']['current']
		current_in = statistics['bandwidth']['in']['current']
		if current_out > current_in:
			current_bandwidth = current_out
		else:
			current_bandwidth = current_in

		# The ideal bandwidth lies between the minimum and maximum allowed percentage.
		for bandwidth in bandwidth_values:

			# We want the smallest bandwidth that is larger than the current value.
			# Because the list is sorted, we can return the first one we find that
			# will not lead to contention. Also add a little buffer of 10% below the contention
			# limit.
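			# Example (assuming MAX_BANDWIDTH_PERCENT = 90): a 500 mbit/s tier is accepted once the
			# current rate is below 500 * 0.8 = 400 mbit/s.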
			if (bandwidth * ((config.MAX_BANDWIDTH_PERCENT - 10) / 100.0)) > int(current_bandwidth):
				sys.stdout.flush()
				return bandwidth

		# If there is no bandwidth larger than the current value, do nothing.
		sys.stdout.flush()
		return False

	def set_bandwidth(self, worker_type, bandwidth, adapt_memfs=True, first=False):
		"""
		Sets the bandwidth of workers with the given type to a certain value.
		:param worker_type:
		:param bandwidth:
		:param adapt_memfs:
		:param first:
		:return:
		"""
		# Don't do anything if there is no bandwidth control binary or we are already at the target bandwidth.
		if config.BANDWIDTH_CONTROL_BINARY == '' or (self.bandwidth_values[worker_type]['bandwidth'] == bandwidth):
			print('Not setting bandwidth for workers of type ' + str(worker_type) + ' to ' + str(bandwidth) + ' Mbit/s')
			return

		self.set_bandwidth_lock.acquire()
		print('Setting bandwidth of workers with type ' + str(worker_type) + ' to: ' + str(bandwidth) + ' Mbit/s')
		workers = []
		for worker_address in config.WORKERS:
			if config.WORKERS[worker_address] == worker_type:
				workers.append(worker_address)
		tmp_file_path = config.LOGDIR + '/tmp_bandwidth_file'
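		# One '<address>:<bandwidth>mbit' line per worker; the bandwidth control binary applies these
		# limits via the command built below.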
		tmp_file = open(tmp_file_path, 'w')
		for worker in workers:
			tmp_file.write(worker + ':' + str(bandwidth) + 'mbit\n')
		tmp_file.close()

		# Save the purchase time.
		if first:
			purchase_time = self.start_time
		else:
			purchase_time = time.time()
		command = config.BANDWIDTH_CONTROL_BINARY + ' --set-all-file ' + tmp_file_path + ' --port ' + str(config.BANDWIDTH_CONTROL_PORT) + ' > /dev/null'
		proc = psutil.Popen(command, shell=True)
		proc.wait()
		os.remove(tmp_file_path)

		# Save the purchased bandwidth.
		self.prev_bandwidth_values = self.bandwidth_values.copy()
		self.bandwidth_values[worker_type] = {
			'purchased_at': purchase_time,
			'bandwidth': bandwidth
		}
		logger = self.bandwidth_loggers[worker_type]
		logger.log(str(purchase_time - self.start_time) + ',' + str(bandwidth))
		self.bandwidth_purchased[worker_type].append((purchase_time - self.start_time, config.AVAILABLE_BANDWIDTH_VALUES[bandwidth]))

		# Adapt MemFS if needed.
		if adapt_memfs and config.START_MEMFS:
			self.adapt_memfs()

		# Remove from scheduled changes.
		if worker_type in self.scheduled_bandwidth_changes:
			self.scheduled_bandwidth_changes.pop(worker_type)

		self.set_bandwidth_lock.release()

	def calculate_cost(self):
		"""
		Calculates the total cost for running up until now and logs it.
		:return:
		"""
		total = 0
		end = time.time() - self.start_time
		for worker_type in self.bandwidth_purchased:
			# Get the number of workers of this type.
			nr_workers = 0
			for worker_address in config.WORKERS:
				if config.WORKERS[worker_address] == worker_type:
					nr_workers += 1
			purchased_for_type = self.bandwidth_purchased[worker_type]
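			# Each entry is a (relative purchase time, price per billing period) tuple appended by set_bandwidth().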
			for idx, purchased in enumerate(purchased_for_type):
				# The purchase was in effect until the next purchase time (or until now for the last one).
				if idx < (len(purchased_for_type) - 1):
					reference_time = purchased_for_type[idx + 1][0]
				else:
					reference_time = end
				running_time = reference_time - purchased[0]

				# Make sure to round up.
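				# Example: 90 seconds of use under a 60 second billing period is billed as two full periods.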
				total += (math.ceil(running_time / float(config.BANDWIDTH_BILLING_PERIOD)) * purchased[1] * nr_workers)
		self.cost_logger.log(str(total))
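

# --- Illustrative sketch (not part of the original module) ---
# Shows, with made-up figures, how the linear fit used by
# get_nr_colocated_tasks_for_bandwidth() can be inverted to estimate how many colocated
# tasks saturate a target bandwidth. The colocated counts, bandwidth measurements and the
# 1000 mbit/s target below are assumptions for illustration only.
def _demo_fit_inversion():
	import numpy as np
	from scipy.optimize import curve_fit

	def curve(x, a, b):
		return a * x + b

	# Hypothetical averaged measurements: roughly 100 mbit/s per extra colocated task.
	colocated = np.array([1.0, 2.0, 3.0, 4.0])
	bandwidth = np.array([120.0, 215.0, 330.0, 410.0])

	popt, _ = curve_fit(curve, colocated, bandwidth)
	a, b = popt

	# Invert bandwidth = a * colocated + b for the colocated count.
	target_bandwidth = 1000.0
	print((target_bandwidth - b) / a)


if __name__ == '__main__':
	_demo_fit_inversion()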