Example #1
import os
import json

from pprint import pformat

from cheesepi.server.storage.mongo import MongoDAO

# Project-local helpers used in these examples (mp, call_get_schedule,
# upload_results, update_stats_for_links, StatisticsSet, DUMPDIR) are assumed
# to be provided by the surrounding cheesepi code base.


def peer_pass(peer,
              peer_dir,
              tar_dir,
              sched_size,
              sample_size,
              iteration,
              schedule_method='smart'):
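    """Run one scheduled measurement pass for a single peer.

    Fetch a ping schedule, sample every scheduled link, upload the results,
    and fold the resulting statistics back into the returned peer.
    """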

    uuid = peer.get_uuid()

    os.mkdir(peer_dir)

    dao = MongoDAO()

    old_stats = dao.get_all_stats(uuid)

    # Result file
    result_path = os.path.join(peer_dir, "ping.json")

    sched = call_get_schedule(uuid, sched_size, method=schedule_method)

    uuid_sched = [target['uuid'] for target in sched['result']]
    puf = mp.PingUploadConstructor(uuid)

    print("Got schedule:\n{}".format(pformat(uuid_sched)))
    for target in sched['result']:
        target_uuid = target['uuid']
        target_ip = target['ip']

        samples = peer.sample_link(target_uuid, sample_size, iteration)
        puf.add_result(samples, target_uuid, target_ip)

    dict_object = puf.construct()

    # Write the data to file
    with open(result_path, "w") as fd:
        json.dump(dict_object, fd)

    print("Uploading results for {}".format(uuid))
    result = upload_results(uuid, result_path, tar_dir)
    print(result)

    new_stats = dao.get_all_stats(uuid)

    peer = update_stats_for_links(peer, iteration, old_stats, new_stats)

    # TODO: this is where deltas between old_stats and new_stats could be
    # calculated.

    dao.close()

    return peer
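
The caller is responsible for preparing the per-pass directories before each call. A minimal sketch of a driver loop follows; the scheduled_pass name is hypothetical, and the DUMPDIR constant and directory layout are assumptions borrowed from full_coverage_pass below:

# Hypothetical driver for peer_pass; DUMPDIR and the directory layout are
# assumptions modeled on full_coverage_pass below.
def scheduled_pass(peers, sched_size, sample_size, iteration):
    start_dir = os.path.join(DUMPDIR, "{}_sched".format(iteration))
    tar_dir = os.path.join(start_dir, "tar")
    os.mkdir(start_dir)
    os.mkdir(tar_dir)

    new_peers = []
    for peer in peers:
        # peer_pass creates peer_dir itself, so only the parent is made here
        peer_dir = os.path.join(start_dir, peer.get_uuid())
        peer = peer_pass(peer, peer_dir, tar_dir, sched_size,
                         sample_size, iteration)
        new_peers.append(peer)
    return new_peers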
Example #2
def full_coverage_pass(peers, sample_size, iteration):
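    """Sample every known link of every peer once, bypassing the scheduler,
    and return the peers with their link statistics updated."""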
    print("Running full coverage pass")
    start_dir = os.path.join(DUMPDIR, "{}_full".format(iteration))
    tar_dir = os.path.join(start_dir, "tar")

    os.mkdir(start_dir)
    os.mkdir(tar_dir)

    new_peers = []

    for peer in peers:
        uuid = peer.get_uuid()
        puf = mp.PingUploadConstructor(uuid)

        peer_dir = os.path.join(start_dir, uuid)
        os.mkdir(peer_dir)
        result_path = os.path.join(peer_dir, "ping.json")

        for link in peer._links.values():
            target_uuid = link._target_uuid
            samples = link.sample_dist(sample_size)
            print("Generated samples for target {}\n{}".format(
                target_uuid, pformat(samples)))
            puf.add_result(samples, target_uuid, "127.0.0.1")

        dict_object = puf.construct()

        # Write the data to file
        with open(result_path, "w") as fd:
            json.dump(dict_object, fd)

        upload_results(uuid, result_path, tar_dir)

        # Update stats, starting from an empty baseline since a full pass
        # covers every link from scratch
        dao = MongoDAO()
        old_stats = StatisticsSet()
        new_stats = dao.get_all_stats(uuid)

        peer = update_stats_for_links(peer, iteration, old_stats, new_stats)
        new_peers.append(peer)

        dao.close()

    return new_peers
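
Taken together, the two pass functions suggest an experiment loop of the following shape. This is an illustrative sketch only: the run length, parameter values, and the scheduled_pass helper from the sketch after Example #1 are all assumptions.

# Hypothetical experiment driver: one full-coverage baseline pass, followed
# by schedule-driven passes. All parameter values are illustrative.
def run_experiment(peers, num_passes=10, sched_size=5, sample_size=20):
    peers = full_coverage_pass(peers, sample_size, iteration=0)
    for iteration in range(1, num_passes):
        peers = scheduled_pass(peers, sched_size, sample_size, iteration)
    return peers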
Example #3
    def process(self):
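        """Parse every file extracted from an upload and bulk-write both the
        raw results and the updated statistics set to MongoDB."""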
        if not self._extracted:
            raise Exception("Data not extracted.")

        #self.log.info("Processing files in {}".format(self._extract_path))

        from cheesepi.server.storage.mongo import MongoDAO
        from pprint import pformat

        # Process every file in the extracted folder
        files = [
            os.path.join(self._extract_path, f)
            for f in os.listdir(self._extract_path)
        ]
        for filename in files:
            dao = None
            try:
                dao = MongoDAO('localhost', 27017)

                with ResultParser.fromFile(filename) as parser:
                    results = parser.parse()
                    peer_id = parser.get_peer_id()
                    self.log.info("Peer id {}".format(peer_id))

                    stats = dao.get_stats_set_for_results(peer_id, results)

                    upload_count = dao.get_result_count(peer_id)
                    stats.absorb_results(results,
                                         upload_index=upload_count + 1)

                    bulk_writer = dao.get_bulk_writer()

                    bulk_writer = dao.bulk_write_stats_set(
                        bulk_writer, peer_id, stats)

                    # Write results
                    bulk_writer = dao.bulk_write_results(
                        bulk_writer, peer_id, results)

                    res = bulk_writer.execute()
                    self.log.info(
                        "Bulk wrote to database with result: {}".format(res))

            except UnsupportedResultType as e:
                # TODO This suppresses the full stack trace for the moment, but
                # should be removed once all parsers have been implemented. This
                # is here to declutter the log while developing
                self.log.warning("{}".format(e))
            except Exception:
                self.log.exception("Error parsing file {}".format(filename))
            finally:
                if dao is not None:
                    dao.close()
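
process assumes the uploaded archive has already been unpacked into self._extract_path. A caller might look roughly like this; the ResultDataHandler class name and its extract method are hypothetical stand-ins, since only process is shown above:

# Hypothetical caller; only process() appears above, so the handler class
# and extract() are assumed stand-ins for whatever sets _extracted.
handler = ResultDataHandler("/tmp/upload.tar.gz")
handler.extract()    # unpack the archive and set _extracted/_extract_path
handler.process()    # parse each file and bulk-write to MongoDB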