Example #1
def peer_pass(peer, peer_dir, tar_dir, sched_size, sample_size, iteration,
		schedule_method='smart'):

	uuid = peer.get_uuid()

	os.mkdir(peer_dir)

	dao = MongoDAO()

	old_stats = dao.get_all_stats(uuid)

	# Result file
	result_path = os.path.join(peer_dir, "ping.json")

	sched = call_get_schedule(uuid, sched_size, method=schedule_method)
	#print(sched)

	uuid_sched = [target['uuid'] for target in sched['result']]
	puf = mp.PingUploadConstructor(uuid)

	print("Got schedule:\n{}".format(pformat(uuid_sched)))
	for target in sched['result']:
		target_uuid = target['uuid']
		target_ip = target['ip']

		samples = peer.sample_link(target_uuid, sample_size, iteration)
		#print("Generated samples for target {}\n{}".format(target_uuid,
			#pformat(samples)))
		puf.add_result(samples, target_uuid, target_ip)

	dict_object = puf.construct()

	# Write the data to file
	with open(result_path, "w") as fd:
		json.dump(dict_object, fd)

	print("Uploading results for {}".format(uuid))
	result = upload_results(uuid, result_path, tar_dir)
	print(result)

	new_stats = dao.get_all_stats(uuid)

	peer = update_stats_for_links(peer, iteration, old_stats, new_stats)

	# TODO This is where deltas could be calculated!!!
	#print(old_stats)
	#print(new_stats)

	dao.close()

	return peer
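
The TODO above notes that deltas could be computed from the old_stats and new_stats snapshots. A minimal sketch of that idea, assuming the snapshots can be flattened into plain {target_uuid: mean_delay} dicts (the real StatisticsSet API is not shown in this example, so delay_deltas is a hypothetical helper):

def delay_deltas(old_stats_dict, new_stats_dict):
    # Hypothetical helper: return {target_uuid: new_mean - old_mean} for
    # every target present in both snapshots.
    return {
        uuid: new_stats_dict[uuid] - old_stats_dict[uuid]
        for uuid in new_stats_dict
        if uuid in old_stats_dict
    }

# Usage with plain dicts standing in for the flattened snapshots:
old = {"peer-a": 12.5, "peer-b": 30.1}
new = {"peer-a": 11.9, "peer-b": 33.4, "peer-c": 8.0}
print(delay_deltas(old, new))  # peer-c is skipped: it has no old value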
Example #2
def full_coverage_pass(peers, sample_size, iteration):
	print("Running full coverage pass")
	start_dir = os.path.join(DUMPDIR, "{}_full".format(str(iteration)))
	tar_dir = os.path.join(start_dir, "tar")

	os.mkdir(start_dir)
	os.mkdir(tar_dir)

	new_peers = []

	for peer in peers:
		uuid = peer.get_uuid()
		puf = mp.PingUploadConstructor(uuid)

		peer_dir = os.path.join(start_dir, uuid)
		os.mkdir(peer_dir)
		result_path = os.path.join(peer_dir, "ping.json")

		for link in peer._links.itervalues():
			target_uuid = link._target_uuid
			samples = link.sample_dist(sample_size)
			print("Generated samples for target {}\n{}".format(target_uuid,
				pformat(samples)))
			puf.add_result(samples, target_uuid, "127.0.0.1")


		dict_object = puf.construct()

		# Write the data to file
		with open(result_path, "w") as fd:
			json.dump(dict_object, fd)

		upload_results(uuid, result_path, tar_dir)

		# Update stats
		dao = MongoDAO()
		old_stats = StatisticsSet()
		new_stats = dao.get_all_stats(uuid)

		peer = update_stats_for_links(peer, iteration, old_stats, new_stats)
		new_peers.append(peer)

		dao.close()

	return new_peers
Example #3
def full_coverage_pass(peers, sample_size, iteration):
    print("Running full coverage pass")
    start_dir = os.path.join(DUMPDIR, "{}_full".format(str(iteration)))
    tar_dir = os.path.join(start_dir, "tar")

    os.mkdir(start_dir)
    os.mkdir(tar_dir)

    new_peers = []

    for peer in peers:
        uuid = peer.get_uuid()
        puf = mp.PingUploadConstructor(uuid)

        peer_dir = os.path.join(start_dir, uuid)
        os.mkdir(peer_dir)
        result_path = os.path.join(peer_dir, "ping.json")

        for link in peer._links.itervalues():
            target_uuid = link._target_uuid
            samples = link.sample_dist(sample_size)
            print("Generated samples for target {}\n{}".format(
                target_uuid, pformat(samples)))
            puf.add_result(samples, target_uuid, "127.0.0.1")

        dict_object = puf.construct()

        # Write the data to file
        with open(result_path, "w") as fd:
            json.dump(dict_object, fd)

        upload_results(uuid, result_path, tar_dir)

        # Update stats
        dao = MongoDAO()
        old_stats = StatisticsSet()
        new_stats = dao.get_all_stats(uuid)

        peer = update_stats_for_links(peer, iteration, old_stats, new_stats)
        new_peers.append(peer)

        dao.close()

    return new_peers
Example #4
def peer_pass(peer,
              peer_dir,
              tar_dir,
              sched_size,
              sample_size,
              iteration,
              schedule_method='smart'):

    uuid = peer.get_uuid()

    os.mkdir(peer_dir)

    dao = MongoDAO()

    old_stats = dao.get_all_stats(uuid)

    # Result file
    result_path = os.path.join(peer_dir, "ping.json")

    sched = call_get_schedule(uuid, sched_size, method=schedule_method)
    #print(sched)

    uuid_sched = [target['uuid'] for target in sched['result']]
    puf = mp.PingUploadConstructor(uuid)

    print("Got schedule:\n{}".format(pformat(uuid_sched)))
    for target in sched['result']:
        target_uuid = target['uuid']
        target_ip = target['ip']

        samples = peer.sample_link(target_uuid, sample_size, iteration)
        #print("Generated samples for target {}\n{}".format(target_uuid,
        #pformat(samples)))
        puf.add_result(samples, target_uuid, target_ip)

    dict_object = puf.construct()

    # Write the data to file
    with open(result_path, "w") as fd:
        json.dump(dict_object, fd)

    print("Uploading results for {}".format(uuid))
    result = upload_results(uuid, result_path, tar_dir)
    print(result)

    new_stats = dao.get_all_stats(uuid)

    peer = update_stats_for_links(peer, iteration, old_stats, new_stats)

    # TODO This is where deltas could be calculated!!!
    #print(old_stats)
    #print(new_stats)

    dao.close()

    return peer
Example #5
def start_control_server():
    import argparse

    from twisted.internet import reactor
    from twisted.logger import Logger, globalLogPublisher, STDLibLogObserver

    from cheesepi.server.control import (CheeseRPCServerFactory,
                                         CheeseRPCServer)
    from cheesepi.server.storage.mongo import MongoDAO

    # Argument parsing
    parser = argparse.ArgumentParser()
    parser.add_argument('--port',
                        type=int,
                        default=18080,
                        help='Port to listen on')
    args = parser.parse_args()

    init_logging()

    # Make Twisted logging write to Python's logging module
    globalLogPublisher.addObserver(
        STDLibLogObserver(name="cheesepi.server.control"))

    # Use the Twisted logger when running inside Twisted
    log = Logger()

    # Logging
    #log = Logger()
    #globalLogPublisher.addObserver(PrintingObserver())

    #dao = MongoDAO()
    dao = MongoDAO('localhost', 27017)
    control_server = CheeseRPCServer(dao).getStreamFactory(
        CheeseRPCServerFactory)

    reactor.listenTCP(args.port, control_server)
    log.info("Starting control server on port %d..." % args.port)
    reactor.run()
Example #6
	def process(self):
		if not self._extracted:
			raise Exception("Data not extracted.")

		#self.log.info("Processing files in {}".format(self._extract_path))

		from cheesepi.server.storage.mongo import MongoDAO
		from pprint import pformat

		# Process every file in the extracted folder
		files = [os.path.join(self._extract_path, f)
				for f in os.listdir(self._extract_path)]
		for filename in files:
			try:
				dao = MongoDAO('localhost', 27017)

				#parser = ResultParser.fromFile(filename)
				with ResultParser.fromFile(filename) as parser:
					results = parser.parse()
					#self.log.info("Results {}".format(results))
					peer_id = parser.get_peer_id()
					self.log.info("Peer id {}".format(peer_id))

					stats = dao.get_stats_set_for_results(peer_id, results)
					#self.log.info("Fetched old stats")
					#self.log.info("Fetched:\n{}".format(pformat(stats.toDict())))

					upload_count = dao.get_result_count(peer_id)
					stats.absorb_results(results, upload_index=upload_count+1)
					#self.log.info("\n\nRESULT COUNT = {} for peer {}\n\n".format(result_count, peer_id))
					#self.log.info("Absorbed new results")
					#self.log.info("Absorbed:\n{}".format(pformat(stats.toDict())))

					bulk_writer = dao.get_bulk_writer()

					bulk_writer = dao.bulk_write_stats_set(bulk_writer, peer_id, stats)

					# Write results
					bulk_writer = dao.bulk_write_results(bulk_writer, peer_id, results)

					res = bulk_writer.execute()
					self.log.info("Bulk wrote to database with result: {}".format(res))

					#parser.write_to_db()

					#for result in results:
						#res = dao.write_result(peer_id, result)
						#self.log.info(res)

				#from pprint import PrettyPrinter
				#printer = PrettyPrinter(indent=2)
				#printer.pprint(output)

			except UnsupportedResultType as e:
				# TODO This suppresses the full stack trace for the moment, but
				# should be removed once all parsers have been implemented. It
				# is here to declutter the log during development.
				self.log.warn("{}".format(e))
			except Exception as e:
				self.log.exception("Error parsing file {}".format(filename))
			finally:
				dao.close()
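
ResultParser.fromFile is used as a context manager above, presumably so the underlying resources are released even if parsing raises. A self-contained sketch of that pattern, with ToyParser as a hypothetical stand-in for the real ResultParser:

import json

class ToyParser(object):
    # Illustrative file-backed parser meant to be used via 'with'.
    def __init__(self, filename):
        self._fd = open(filename)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self._fd.close()
        return False  # do not suppress exceptions

    def parse(self):
        return json.load(self._fd)

# Usage: the file is closed whether or not parse() raises.
# with ToyParser("ping.json") as parser:
#     results = parser.parse()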
Example #7
	def __init__(self, uuid):
		self.dao = MongoDAO('localhost', 27017)
		self._uuid = uuid
Example #8
class PingScheduler(Scheduler):

	log = logging.getLogger("cheesepi.server.scheduling.PingScheduler")

	def __init__(self, uuid):
		self.dao = MongoDAO('localhost', 27017)
		self._uuid = uuid

	def get_random_schedule(self, num=1, ignore_uuids=None):
		"""
		Get a random schedule that excludes self and contains no duplicates.
		"""
		schedule = []

		if ignore_uuids is None:
			ignore_uuids = [self._uuid]
		else:
			ignore_uuids.append(self._uuid)

		for i in range(0, num):
			entity = self.dao.get_random_entity(ignore_uuids=ignore_uuids)

			if entity is None:
				# We've reached the limit of entities available for scheduling
				break

			schedule.append(entity)
			ignore_uuids.append(entity.get_uuid())

		return schedule

	def get_round_robin_schedule(self, num=1):

		schedule = self.dao.get_sequential_entities(self._uuid, num)

		return schedule

	def get_schedule(self, num=1, ignore_uuids=None):

		if ignore_uuids is None:
			ignore_uuids = [self._uuid]
		else:
			ignore_uuids.append(self._uuid)

		self.log.info("Scheduling for {}".format(self._uuid))
		from pprint import pformat

		#self.log.info("Generating schedule with blind ratio = {}".format(
			#BLIND_SCHEDULE_RATIO))

		# If num is 1, randomly decide whether this single slot is blind
		# (random) or not, so full coverage can still be achieved over time
		if num == 1:
			x = random.uniform(0.0, 1.0)
			if x <= BLIND_SCHEDULE_RATIO:
				non_blind_num = 0
			else:
				non_blind_num = 1
		else:
			non_blind_num = int(num - (num*BLIND_SCHEDULE_RATIO))

		blind_num = num - non_blind_num

		#self.log.info("Non blinds = {}".format(non_blind_num))

		schedule = []
		stats = self.dao.get_all_stats(self._uuid)
		#self.log.info(pformat(stats.toDict()))

		priority_sorted_targets = []

		for s in stats:
			target = s.get_target()
			target_uuid = target.get_uuid()

			if target_uuid not in ignore_uuids:
				#print(pformat(s.toDict()))
				delay = s.get_delay()
				# self.log.info("\ndm1: {}\ndm2: {}\ndm3: {}\ndm4: {}\nsumdm: {}".format(
				# 	delay._dm1, delay._dm2, delay._dm3, delay._dm4,
				# 	delay._dm1 + delay._dm2 + delay._dm3 + delay._dm4)
				# )

				delay_variance = delay.get_exp_variance()
				#print(target)
				#print(delay_variance)

				# Primitive bias towards variance
				heapq.heappush(priority_sorted_targets, (-delay_variance, target))

		#print(priority_sorted_targets)

		for i in range(0, min(non_blind_num,len(priority_sorted_targets))):
			target = heapq.heappop(priority_sorted_targets)
			target_uuid = target[1].get_uuid()

			schedule.append(target[1])

			ignore_uuids.append(target_uuid)

		if len(schedule) < non_blind_num:
			# Fewer non-blind targets were available than planned; fill the gap with blinds
			blind_num = blind_num + (non_blind_num - len(schedule))

		random_schedule = self.get_random_schedule(blind_num,
				ignore_uuids=ignore_uuids)

		schedule.extend(random_schedule)
		#for i in range(0, blind_num):
			##self.log.info("Adding blind to schedule")
			#entity = self.dao.get_random_entity(ignore_uuid=self._uuid)
			#schedule.append(entity)

		return schedule
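
get_schedule pushes (-delay_variance, target) onto the heap so that heapq's min-heap behaves as a max-heap: the target with the largest variance is popped first. (If two variances are equal, heapq falls back to comparing the second tuple element, so the real target objects would need to be orderable.) A self-contained illustration of the trick, with (uuid, variance) tuples standing in for the real target objects:

import heapq

targets = [("peer-a", 2.5), ("peer-b", 9.1), ("peer-c", 0.4)]

heap = []
for uuid, variance in targets:
    heapq.heappush(heap, (-variance, uuid))  # negate so highest variance wins

while heap:
    neg_variance, uuid = heapq.heappop(heap)
    print(uuid, -neg_variance)  # peer-b first, then peer-a, then peer-c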
Example #9
    def __init__(self, uuid):
        self.dao = MongoDAO('localhost', 27017)
        self._uuid = uuid
Example #10
class PingScheduler(Scheduler):

    log = logging.getLogger("cheesepi.server.scheduling.PingScheduler")

    def __init__(self, uuid):
        self.dao = MongoDAO('localhost', 27017)
        self._uuid = uuid

    def get_random_schedule(self, num=1, ignore_uuids=None):
        """
        Get a random schedule that excludes self and contains no duplicates.
        """
        schedule = []

        if ignore_uuids is None:
            ignore_uuids = [self._uuid]
        else:
            ignore_uuids.append(self._uuid)

        for i in range(0, num):
            entity = self.dao.get_random_entity(ignore_uuids=ignore_uuids)

            if entity is None:
                # We've reached the limit of entities available for scheduling
                break

            schedule.append(entity)
            ignore_uuids.append(entity.get_uuid())

        return schedule

    def get_round_robin_schedule(self, num=1):

        schedule = self.dao.get_sequential_entities(self._uuid, num)

        return schedule

    def get_schedule(self, num=1, ignore_uuids=None):

        if ignore_uuids is None:
            ignore_uuids = [self._uuid]
        else:
            ignore_uuids.append(self._uuid)

        self.log.info("Scheduling for {}".format(self._uuid))
        from pprint import pformat

        #self.log.info("Generating schedule with blind ratio = {}".format(
        #BLIND_SCHEDULE_RATIO))

        # If num is 1, randomly decide whether this single slot is blind
        # (random) or not, so full coverage can still be achieved over time
        if num == 1:
            x = random.uniform(0.0, 1.0)
            if x <= BLIND_SCHEDULE_RATIO:
                non_blind_num = 0
            else:
                non_blind_num = 1
        else:
            non_blind_num = int(num - (num * BLIND_SCHEDULE_RATIO))

        blind_num = num - non_blind_num

        #self.log.info("Non blinds = {}".format(non_blind_num))

        schedule = []
        stats = self.dao.get_all_stats(self._uuid)
        #self.log.info(pformat(stats.toDict()))

        priority_sorted_targets = []

        for s in stats:
            target = s.get_target()
            target_uuid = target.get_uuid()

            if target_uuid not in ignore_uuids:
                #print(pformat(s.toDict()))
                delay = s.get_delay()
                # self.log.info("\ndm1: {}\ndm2: {}\ndm3: {}\ndm4: {}\nsumdm: {}".format(
                # 	delay._dm1, delay._dm2, delay._dm3, delay._dm4,
                # 	delay._dm1 + delay._dm2 + delay._dm3 + delay._dm4)
                # )

                delay_variance = delay.get_exp_variance()
                #print(target)
                #print(delay_variance)

                # Primitive bias towards variance
                heapq.heappush(priority_sorted_targets,
                               (-delay_variance, target))

        #print(priority_sorted_targets)

        for i in range(0, min(non_blind_num, len(priority_sorted_targets))):
            target = heapq.heappop(priority_sorted_targets)
            target_uuid = target[1].get_uuid()

            schedule.append(target[1])

            ignore_uuids.append(target_uuid)

        if len(schedule) < non_blind_num:
            # Fewer non-blind targets were available than planned; fill the gap with blinds
            blind_num = blind_num + (non_blind_num - len(schedule))

        random_schedule = self.get_random_schedule(blind_num,
                                                   ignore_uuids=ignore_uuids)

        schedule.extend(random_schedule)
        #for i in range(0, blind_num):
        ##self.log.info("Adding blind to schedule")
        #entity = self.dao.get_random_entity(ignore_uuid=self._uuid)
        #schedule.append(entity)

        return schedule
Example #11
    def process(self):
        if not self._extracted:
            raise Exception("Data not extracted.")

        #self.log.info("Processing files in {}".format(self._extract_path))

        from cheesepi.server.storage.mongo import MongoDAO
        from pprint import pformat

        # Process every file in the extracted folder
        files = [
            os.path.join(self._extract_path, f)
            for f in os.listdir(self._extract_path)
        ]
        for filename in files:
            try:
                dao = MongoDAO('localhost', 27017)

                #parser = ResultParser.fromFile(filename)
                with ResultParser.fromFile(filename) as parser:
                    results = parser.parse()
                    #self.log.info("Results {}".format(results))
                    peer_id = parser.get_peer_id()
                    self.log.info("Peer id {}".format(peer_id))

                    stats = dao.get_stats_set_for_results(peer_id, results)
                    #self.log.info("Fetched old stats")
                    #self.log.info("Fetched:\n{}".format(pformat(stats.toDict())))

                    upload_count = dao.get_result_count(peer_id)
                    stats.absorb_results(results,
                                         upload_index=upload_count + 1)
                    #self.log.info("\n\nRESULT COUNT = {} for peer {}\n\n".format(result_count, peer_id))
                    #self.log.info("Absorbed new results")
                    #self.log.info("Absorbed:\n{}".format(pformat(stats.toDict())))

                    bulk_writer = dao.get_bulk_writer()

                    bulk_writer = dao.bulk_write_stats_set(
                        bulk_writer, peer_id, stats)

                    # Write results
                    bulk_writer = dao.bulk_write_results(
                        bulk_writer, peer_id, results)

                    res = bulk_writer.execute()
                    self.log.info(
                        "Bulk wrote to database with result: {}".format(res))

                    #parser.write_to_db()

                    #for result in results:
                    #res = dao.write_result(peer_id, result)
                    #self.log.info(res)

                #from pprint import PrettyPrinter
                #printer = PrettyPrinter(indent=2)
                #printer.pprint(output)

            except UnsupportedResultType as e:
                # TODO This suppresses the full stack trace for the moment, but
                # should be removed once all parsers have been implemented. It
                # is here to declutter the log during development.
                self.log.warn("{}".format(e))
            except Exception as e:
                self.log.exception("Error parsing file {}".format(filename))
            finally:
                dao.close()
Example #12
def main_loop(peers, iterations=1, sched_size=1, sample_size=10,
		schedule_method="smart", full_coverage_start=False):

	print("Running test with {} iterations ".format(iterations) +
		"with schedules of size {} ".format(sched_size) +
		"and sample size of {}".format(sample_size))

	# Make sure the peers are present as entities in the database
	results = register_peers(peers)
	#print(results)

	iteration_index = 0

	if full_coverage_start:
		peers = full_coverage_pass(peers, sample_size, iteration_index)
		iteration_index = iteration_index + 1
		#print(results)

	# Maybe initialize with one complete-coverage iteration?

	for i in range(0, iterations):
		# Create directory
		ITER_DIR = os.path.join(DUMPDIR, str(iteration_index))

		peers = measurement_pass(peers, ITER_DIR, iteration_index, schedule_method=schedule_method)
		iteration_index = iteration_index + 1

	# If we don't sleep, the last data written may not yet be visible when we
	# query the database. There shouldn't be any race conditions on the server,
	# but this is a precaution.
	print("DONE. Sleeping so database can catch up...")
	time.sleep(1)

	from cheesepi.server.storage.mongo import MongoDAO
	from cheesepi.server.storage.models.PingStatistics import PingStatistics

	from statsmodels.sandbox.distributions.extras import pdf_mvsk

	dao = MongoDAO()

	for peer_index, peer in enumerate(peers):
		print(peer_index)

		peer_uuid = peer.get_uuid()
		print("SOURCE: {}".format(peer_uuid))

		stats = dao.get_all_stats(peer.get_uuid())
		for stat_index, stat in enumerate(stats):
			assert isinstance(stat, PingStatistics)

			target_uuid = stat.get_target().get_uuid()

			print("TARGET: {}".format(target_uuid))

			delay_model = stat.get_delay()

			# num_samples = delay_model._n

			# print("m={}, v={}, s={}, k={}".format(delay_model._m1,
			# 	delay_model._new_variance, delay_model._skew, delay_model._kurtosis))

			# pdf = pdf_mvsk([delay_model._m1, delay_model._new_variance,
			# 		delay_model._skew, delay_model._kurtosis])

			link = peer.get_link(target_uuid)

			orig_dists = link.get_dist_params()
			print(orig_dists)

			# # Boundaries
			# xmax = max(link._all_samples)
			# xmin = min(link._all_samples)
			# xmax = xmax + float(xmax)/10
			# xmin = float(xmin)/2
			# x_plot = np.linspace(xmin, xmax, xmax-xmin)

			# # Distribution y-values
			# y_model_plot = np.array([pdf(x) for x in x_plot])
			# y_orig_plot = orig_dist.pdf(x_plot)

			# # Histogram
			# hist_y, hist_x = np.histogram(link._all_samples, bins=np.linspace(xmin, xmax,
			# 	xmax-xmin), density=True)

			# Save datasets
			#dm = ds.DistributionModelData(delay_model)
			ds.DistData(peer_uuid, target_uuid, orig_dists, delay_model,
					link._all_samples).pickle(os.path.join(ROOTDIR,
						"dist_{}_{}.pickle".format(peer_index, stat_index)))

			#print(link._historical_delta_mean)
			#print(link._historical_delta_variance)
			#print(link._historical_delta_skew)
			#print(link._historical_delta_kurtosis)
			ds.DeltaData(peer_uuid, target_uuid, link._historical_delta_uni_mean,
					link._historical_delta_uni_variance, link._historical_delta_exp_mean,
					link._historical_delta_exp_variance).pickle(os.path.join(ROOTDIR,
						"delta_{}_{}.pickle".format(peer_index, stat_index)))


			real_means = []
			real_variances = []
			real_skews = []
			real_kurtosiss = []
			for d in link._dists:
				dist = d[1]
				real_means.append((d[0], dist.get_mean()))
				real_variances.append((d[0], dist.get_variance()))
				real_skews.append((d[0], dist.get_skew()))
				real_kurtosiss.append((d[0], dist.get_kurtosis()))

			ds.ValuesData(peer_uuid, target_uuid, real_means,
					real_variances, real_skews,
					real_kurtosiss,
					link._historical_uni_mean,
					link._historical_uni_variance, link._historical_exp_mean,
					link._historical_exp_variance).pickle(os.path.join(ROOTDIR,
						"values_{}_{}.pickle".format(peer_index, stat_index)))
Example #13
def main_loop(peers,
              iterations=1,
              sched_size=1,
              sample_size=10,
              schedule_method="smart",
              full_coverage_start=False):

    print("Running test with {} iterations ".format(iterations) +
          "with schedules of size {} ".format(sched_size) +
          "and sample size of {}".format(sample_size))

    # Make sure the peers are present as entities in the database
    results = register_peers(peers)
    #print(results)

    iteration_index = 0

    if full_coverage_start:
        peers = full_coverage_pass(peers, sample_size, iteration_index)
        iteration_index = iteration_index + 1
        #print(results)

    # Maybe initialize with one complete-coverage iteration?

    for i in range(0, iterations):
        # Create directory
        ITER_DIR = os.path.join(DUMPDIR, str(iteration_index))

        peers = measurement_pass(peers,
                                 ITER_DIR,
                                 iteration_index,
                                 schedule_method=schedule_method)
        iteration_index = iteration_index + 1

    # If we don't sleep, the last data written may not yet be visible when we
    # query the database. There shouldn't be any race conditions on the server,
    # but this is a precaution.
    print("DONE. Sleeping so database can catch up...")
    time.sleep(1)

    from cheesepi.server.storage.mongo import MongoDAO
    from cheesepi.server.storage.models.PingStatistics import PingStatistics

    from statsmodels.sandbox.distributions.extras import pdf_mvsk

    dao = MongoDAO()

    for peer_index, peer in enumerate(peers):
        print(peer_index)

        peer_uuid = peer.get_uuid()
        print("SOURCE: {}".format(peer_uuid))

        stats = dao.get_all_stats(peer.get_uuid())
        for stat_index, stat in enumerate(stats):
            assert isinstance(stat, PingStatistics)

            target_uuid = stat.get_target().get_uuid()

            print("TARGET: {}".format(target_uuid))

            delay_model = stat.get_delay()

            # num_samples = delay_model._n

            # print("m={}, v={}, s={}, k={}".format(delay_model._m1,
            # 	delay_model._new_variance, delay_model._skew, delay_model._kurtosis))

            # pdf = pdf_mvsk([delay_model._m1, delay_model._new_variance,
            # 		delay_model._skew, delay_model._kurtosis])

            link = peer.get_link(target_uuid)

            orig_dists = link.get_dist_params()
            print(orig_dists)

            # # Boundaries
            # xmax = max(link._all_samples)
            # xmin = min(link._all_samples)
            # xmax = xmax + float(xmax)/10
            # xmin = float(xmin)/2
            # x_plot = np.linspace(xmin, xmax, xmax-xmin)

            # # Distribution y-values
            # y_model_plot = np.array([pdf(x) for x in x_plot])
            # y_orig_plot = orig_dist.pdf(x_plot)

            # # Histogram
            # hist_y, hist_x = np.histogram(link._all_samples, bins=np.linspace(xmin, xmax,
            # 	xmax-xmin), density=True)

            # Save datasets
            #dm = ds.DistributionModelData(delay_model)
            ds.DistData(peer_uuid, target_uuid, orig_dists, delay_model,
                        link._all_samples).pickle(
                            os.path.join(
                                ROOTDIR, "dist_{}_{}.pickle".format(
                                    peer_index, stat_index)))

            #print(link._historical_delta_mean)
            #print(link._historical_delta_variance)
            #print(link._historical_delta_skew)
            #print(link._historical_delta_kurtosis)
            ds.DeltaData(peer_uuid, target_uuid,
                         link._historical_delta_uni_mean,
                         link._historical_delta_uni_variance,
                         link._historical_delta_exp_mean,
                         link._historical_delta_exp_variance).pickle(
                             os.path.join(
                                 ROOTDIR, "delta_{}_{}.pickle".format(
                                     peer_index, stat_index)))

            real_means = []
            real_variances = []
            real_skews = []
            real_kurtosiss = []
            for d in link._dists:
                dist = d[1]
                real_means.append((d[0], dist.get_mean()))
                real_variances.append((d[0], dist.get_variance()))
                real_skews.append((d[0], dist.get_skew()))
                real_kurtosiss.append((d[0], dist.get_kurtosis()))

            ds.ValuesData(
                peer_uuid, target_uuid, real_means, real_variances, real_skews,
                real_kurtosiss, link._historical_uni_mean,
                link._historical_uni_variance, link._historical_exp_mean,
                link._historical_exp_variance).pickle(
                    os.path.join(
                        ROOTDIR,
                        "values_{}_{}.pickle".format(peer_index, stat_index)))