Example #1
def find_user(username, domain_name):
    '''
    Returns user id (str).

            Parameters:
                    username (str): user name.
                    domain_name (str): domain name.

            Returns:
                    user_id (str): User id.

            In case of error:
                    Returns -1 (int).
    '''
    url = f'https://{domain_name}/api/v1/accounts/{username}'

    json_user, status_code = shared.fetch(url)
    if json_user == -1:
        return -1
    json_user = json_user.text
    dict_data = shared.load_json(json_user)
    if not isinstance(dict_data, dict):
        sys.stderr.write('Error parsing JSON data.\n')
        return -1

    if 'id' in dict_data:
        user_id = dict_data['id']
        return user_id
    else:
        sys.stderr.write('No user id.\n')
        return -1
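A note on usage: because find_user() mixes a string result with an integer error sentinel, a caller has to test for -1 before using the value. A minimal, hypothetical caller (the account name and domain below are placeholders, not taken from the example):

import sys

user_id = find_user('someuser', 'example.social')
if user_id == -1:
    sys.exit('Could not resolve the account to a user id.')
print(f'User id: {user_id}')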
Example #2
def radio_play(play_status, json_dir, audio_dir):
    global schedule_dict
    if not play_status:
        #        list_programs(schedule_dict)
        year, month, day = sh.set_date()
        logging.debug("Current date set")
        raw_schedule_dict = sh.load_json(year, month, day, json_dir)
        logging.debug("JSON imported")
        schedule_dict = sh.convert_dict_dates(raw_schedule_dict)
        logging.debug("Dict times converted: %s records",
                      str(len(schedule_dict)))
        play_file_index, play_file = find_audio_file(schedule_dict)
        logging.debug("Found file to play: %s", play_file)
        start_time_str = find_start_time(schedule_dict, play_file_index)
        logging.debug("Found start time: %s", start_time_str)
        popen = subprocess.Popen([
            'omxplayer', '-o', 'local', audio_dir + play_file,
            '--pos=' + start_time_str
        ],
                                 stdout=subprocess.PIPE,
                                 universal_newlines=True)
        play_status = True
        logging.info("Started playing")
        time.sleep(0.25)
        return play_status
    else:
        subprocess.call(['killall', 'omxplayer.bin'])
        play_status = False
        logging.info("Stopped playing")
        time.sleep(0.25)
        return play_status
Example #3
def scrape(url, domain_name, user_id):
    '''
    Returns a list of posts (list) and the next URL to scrape (str) as a tuple.

            Parameters:
                    url (str): API URL.
                    domain_name (str): domain name.
                    user_id (str): User id.

            Returns:
                    lst_out (list), url (str): List of posts and next url.

            In case of error:
                    Returns -1, -1 (integers).
    '''
    # Example URLs:
    # See the most recent posts.
    # https://{domain_name}/api/v1/accounts/{user_id}/statuses?with_muted=true&limit=40&exclude_reblogs=true
    # {max_id}: Only posts older than this post's id will be shown.
    # https://{domain_name}/api/v1/accounts/{user_id}/statuses?max_id={max_id}&with_muted=true&limit=40&exclude_reblogs=true

    lst_posts = []
    lst_out = []

    json_posts, status_code = shared.fetch(url)
    if json_posts == -1:
        return -1, -1
    json_posts = json_posts.text
    lst_posts = shared.load_json(json_posts)
    if not isinstance(lst_posts, list):
        sys.stderr.write('Broken list of posts.\n')
        return -1, -1

    if not lst_posts:
        url = ''
        return lst_posts, url

    for post in lst_posts:
        if isinstance(post, dict):
            dict_post = {}
            if 'created_at' in post.keys():
                dict_post['datetime'] = post['created_at']
            if 'content' in post.keys():
                dict_post['content'] = post['content']
            if 'url' in post.keys():
                dict_post['url'] = post['url']

            lst_out.append(dict_post)

    # Use the id of the last post to keep fetching older posts.
    last_post = lst_posts[-1]
    if 'id' not in last_post:
        sys.stderr.write('Last post has no id.\n')
        return lst_out, ''
    max_id = last_post['id']

    url = f'https://{domain_name}/api/v1/accounts/{user_id}/statuses?max_id={max_id}&with_muted=true&limit=40&exclude_reblogs=true'
    return lst_out, url
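Since scrape() hands back the next URL (or an empty string once there are no older posts), fetching a whole account is just a loop that calls it until the URL runs out. A hypothetical driver, assuming find_user() from Example #1 lives in the same module:

def scrape_all(username, domain_name):
    # Collect every post of an account by following max_id pagination.
    user_id = find_user(username, domain_name)
    if user_id == -1:
        return -1
    url = f'https://{domain_name}/api/v1/accounts/{user_id}/statuses?with_muted=true&limit=40&exclude_reblogs=true'
    all_posts = []
    while url:
        posts, url = scrape(url, domain_name, user_id)
        if posts == -1:
            return -1
        all_posts.extend(posts)
    return all_posts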
Example #4
def get_tasks(files):
	gateway_count = 1

	# make the same "random" choices every time 
	shared.seed_random(12356)

	tasks = []
	for path in sorted(glob.glob(files)):
		state = shared.load_json(path)
		tasks.append((path, ping.get_random_nodes(state, gateway_count)))
	return tasks
Example #5
def scrape(domain_name, payload):
    '''
    Returns list of posts (list) and post id for the next posts to fetch (str) as a tuple.

            Parameters:
                    domain_name (str): domain name.
                    payload (dict): Request payload.

            Returns:
                    lst_out (list), untilId (str): List of posts and id of the last post.

            In case of error:
                    Returns -1, -1 (integers).
    '''
    url = f'https://{domain_name}/api/users/notes'
    lst_posts = []
    lst_out = []
    untilId = ''

    json_posts, status_code = shared.fetch(url,
                                           http_method='POST',
                                           payload=payload)
    if json_posts == -1:
        return -1, -1
    json_posts = json_posts.text
    lst_posts = shared.load_json(json_posts)
    if not isinstance(lst_posts, list):
        sys.stderr.write('Broken list of posts.\n')
        return -1, -1

    if not lst_posts:
        return lst_posts, untilId

    for post in lst_posts:
        if isinstance(post, dict):
            dict_post = {}
            if 'createdAt' in post.keys():
                dict_post['datetime'] = post['createdAt']
            if 'text' in post.keys():
                dict_post['content'] = post['text']
            if 'id' in post.keys():
                dict_post['url'] = f'https://{domain_name}/notes/{post["id"]}'

            lst_out.append(dict_post)

    # Use the id of the last post to keep fetching older posts.
    last_post = lst_posts[-1]
    if 'id' in last_post.keys():
        untilId = last_post['id']

    return lst_out, untilId
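This variant paginates through the request payload rather than the URL: the untilId that scrape() returns is fed back into the next request. A hypothetical loop; the payload fields (userId, limit, untilId) are assumptions about the endpoint, not taken from the example:

def scrape_all(domain_name, user_id):
    # Keep requesting older notes until an empty page comes back.
    payload = {'userId': user_id, 'limit': 40}
    all_posts = []
    while True:
        posts, until_id = scrape(domain_name, payload)
        if posts == -1:
            return -1
        if not posts:
            break
        all_posts.extend(posts)
        payload['untilId'] = until_id
    return all_posts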
Example #6
def run(protocol, files, csvfile):
    shared.seed_random(1234)

    for path in sorted(glob.glob(files)):
        state = shared.load_json(path)
        (node_count, link_count) = shared.json_count(state)

        print(f'run {protocol} on {path}')

        network.apply(state=state,
                      link_command=get_tc_command,
                      remotes=remotes)

        shared.sleep(10)

        for offset in range(0, 60, 2):
            tmp_ms = shared.millis()
            traffic_beg = traffic.traffic(remotes)
            traffic_ms = shared.millis() - tmp_ms

            tmp_ms = shared.millis()
            software.start(protocol)
            software_ms = shared.millis() - tmp_ms

            # Let the protocol run for offset seconds before measuring
            shared.sleep(offset)

            paths = ping.get_random_paths(state, 2 * 200)
            paths = ping.filter_paths(state, paths, min_hops=2, path_count=200)
            ping_result = ping.ping(paths=paths,
                                    duration_ms=2000,
                                    verbosity='verbose',
                                    remotes=remotes)

            traffic_end = traffic.traffic(remotes)

            sysload_result = shared.sysload(remotes)

            software.clear(remotes)

            # add data to csv file
            extra = (['node_count', 'traffic_ms', 'software_ms', 'offset_ms'],
                     [node_count, traffic_ms, software_ms, offset * 1000])
            shared.csv_update(csvfile, '\t', extra,
                              (traffic_end - traffic_beg).getData(),
                              ping_result.getData(), sysload_result)

        network.clear(remotes)
Example #7
def run(protocol, files, csvfile):
    for path in sorted(glob.glob(files)):
        state = shared.load_json(path)
        (node_count, link_count) = shared.json_count(state)

        # Limit node count to 300
        if node_count > 300:
            continue

        print(f'run {protocol} on {path}')

        network.apply(state=state,
                      link_command=get_tc_command,
                      remotes=remotes)

        shared.sleep(10)

        software_start_ms = shared.millis()
        software.start(protocol, remotes)
        software_startup_ms = shared.millis() - software_start_ms

        shared.sleep(300)

        start_ms = shared.millis()
        traffic_beg = traffic.traffic(remotes)

        paths = ping.get_random_paths(state, 2 * 200)
        paths = ping.filter_paths(state, paths, min_hops=2, path_count=200)
        ping_result = ping.ping(remotes=remotes,
                                paths=paths,
                                duration_ms=300000,
                                verbosity='verbose')

        traffic_ms = shared.millis() - start_ms
        traffic_end = traffic.traffic(remotes)

        sysload_result = shared.sysload(remotes)

        software.clear(remotes)
        network.clear(remotes)

        # add data to csv file
        extra = (['node_count', 'traffic_ms', 'software_startup_ms'],
                 [node_count, traffic_ms, software_startup_ms])
        shared.csv_update(csvfile, '\t', extra,
                          (traffic_end - traffic_beg).getData(),
                          ping_result.getData(), sysload_result)
Example #8
    def download_metadata(self, n_threads=5):
        web_request_queue = Queue()
        json_serialization_queue = Queue()

        urls = self.paginated_media_urls
        if len(urls) > 1:
            for url in urls:
                web_request_queue.put(url)

            web_thread = lambda: ThreadMetadataRequest(
                web_request_queue,
                json_serialization_queue,
                self.session
            )

            pool_size = min(len(urls), n_threads)
            web_pool = [web_thread() for x in range(pool_size)]
            json_serializer = ThreadJSONWriter(
                json_serialization_queue,
                self.metadata_filepath
            )

            for thread in web_pool:
                thread.daemon = True
                thread.start()
            json_serializer.start()

            web_request_queue.join()
            json_serialization_queue.join()
        else:
            json_response = self._grab_json(urls[0])
            media_entries = json_response['media']

            media_dict = list_of_dicts_to_dict(
                media_entries, promote_to_key='_id')

            exists = osp.isfile(self.metadata_filepath)
            filemode = 'r+' if exists else 'w'
            with open(self.metadata_filepath, filemode) as f:
                try:
                    cached_meta = load_json(f) if exists else {}
                except ValueError:
                    cached_meta = {}

                cached_meta.update(media_dict)
                # Rewrite the merged metadata from the start of the file.
                f.seek(0)
                f.truncate()
                dump_json(cached_meta, f)
                self._metadata = cached_meta
Example #9
def downloader(hours_ahead, audio_dir, json_dir):
    year, month, day = sh.set_date()
    logging.debug('Current date set')
    rec_start_time, rec_end_time = get_record_times(year, month, day,
                                                    hours_ahead)
    logging.debug('Got record times')
    raw_schedule_dict = sh.load_json(year, month, day, json_dir)
    logging.debug('JSON imported')
    schedule_dict = sh.convert_dict_dates(raw_schedule_dict)
    logging.debug('Dict times converted: %s records', str(len(schedule_dict)))
    download_list = get_download_list(schedule_dict, rec_start_time,
                                      rec_end_time, audio_dir)
    logging.info('Download list compiled')
    init_download(download_list, audio_dir)
    logging.info('Downloads completed')
    # TODO: Check success of downloads
    return
Example #10
def run(protocol, csvfile):
    for path in sorted(glob.glob('../../data/grid4/*.json')):
        state = shared.load_json(path)
        (node_count, link_count) = shared.json_count(state)

        print(f'run {protocol} on {path}')

        network.apply(state=state,
                      link_command=get_tc_command,
                      remotes=remotes)
        shared.sleep(10)

        software_start_ms = shared.millis()
        software.start(protocol, remotes)
        software_startup_ms = shared.millis() - software_start_ms

        shared.sleep(30)

        paths = ping.get_random_paths(state, 2 * link_count)
        paths = ping.filter_paths(state,
                                  paths,
                                  min_hops=2,
                                  path_count=link_count)
        ping_result = ping.ping(remotes=remotes,
                                paths=paths,
                                duration_ms=30000,
                                verbosity='verbose')

        sysload_result = shared.sysload(remotes)

        software.clear(remotes)

        # add data to csv file
        extra = (['node_count',
                  'software_startup_ms'], [node_count, software_startup_ms])
        shared.csv_update(csvfile, '\t', extra, ping_result.getData(),
                          sysload_result)

        network.clear(remotes)

        # abort the benchmark when fewer than 40% of the pings arrive
        if ping_result.transmitted == 0 or (ping_result.received /
                                            ping_result.transmitted) < 0.4:
            break
Example #11
def run(protocol, csvfile):
	shared.seed_random(1377)

	for path in sorted(glob.glob(f'../../data/freifunk/*.json')):
		state = shared.load_json(path)

		(node_count, link_count) = shared.json_count(state)
		dataset_name = '{}-{:04d}'.format(os.path.basename(path)[9:-5], node_count)

		# limit to what the host can handle
		if node_count > 310:
			continue

		print(f'run {protocol} on {path}')

		state = network.apply(state=state, link_command=get_tc_command, remotes=remotes)
		shared.sleep(10)

		software.start(protocol, remotes)

		shared.sleep(300)

		start_ms = shared.millis()
		traffic_beg = traffic.traffic(remotes)

		paths = ping.get_random_paths(state, 2 * node_count)
		paths = shared.filter_paths(state, paths, min_hops=2, path_count=node_count)
		ping_result = shared.ping(remotes=remotes, paths=paths, duration_ms=300000, verbosity='verbose')

		sysload_result = shared.sysload(remotes)

		traffic_ms = shared.millis() - start_ms
		traffic_end = traffic.traffic(remotes)
		software.clear(remotes)

		# add data to csv file
		extra = (['dataset_name', 'node_count', 'traffic_ms'], [dataset_name, node_count, traffic_ms])
		shared.csv_update(csvfile, '\t', extra, (traffic_end - traffic_beg).getData(), ping_result.getData(), sysload_result)

		network.clear(remotes)
Example #12
def run(protocol, tasks, csvfile):
	for path, gateways in tasks:
		state = shared.load_json(path)
		(node_count, link_count) = shared.json_count(state)

		# Limit node count to 300
		if node_count > 300:
			continue

		print(f'run {protocol} on {path}')

		network.apply(state=state, remotes=remotes)

		shared.sleep(10)

		software_start_ms = shared.millis()
		software.start(protocol, remotes)
		software_startup_ms = shared.millis() - software_start_ms

		shared.sleep(30)

		start_ms = shared.millis()
		traffic_beg = traffic.traffic(remotes)

		paths = ping.get_paths_to_gateways(state, gateways)
		ping_result = ping.ping(remotes=remotes, paths=paths, duration_ms=300000, verbosity='verbose')

		traffic_ms = shared.millis() - start_ms
		traffic_end = traffic.traffic(remotes)

		sysload_result = shared.sysload(remotes)

		software.clear(remotes)
		network.clear(remotes)

		# add data to csv file
		extra = (['node_count', 'traffic_ms', 'software_startup_ms'], [node_count, traffic_ms, software_startup_ms])
		shared.csv_update(csvfile, '\t', extra, (traffic_end - traffic_beg).getData(), ping_result.getData(), sysload_result)
Example #13
#!/usr/bin/python3

from shared import load_json, plot_many, get_v_function_arg

v_functions = get_v_function_arg()

if v_functions == "small":
    h1 = load_json("networks/legacy.training.json")
    h2 = load_json("networks/n_tuple_small.training.json")

    plot_many([("Legacy", h1), ("N-Tuple Small", h2)])
elif v_functions == "medium":
    h = load_json("networks/n_tuple_medium.training.json")

    plot_many([("N-Tuple Medium", h)])
Example #14
    def deserialize_metadata(self, return_iterator=False):
        if self._metadata_exists():
            with open(self.metadata_filepath, 'r') as f:
                metadata = load_json(f)

            return metadata
Example #15
                          (traffic_end - traffic_begin).getData(),
                          ping_result.getData(), sysload_result)

        return (100.0 * ping_result.received / ping_result.send)


# Keep track of tests that exceed the machine's resources and skip bigger networks
drop_test = set()
protocols = [
    'babel', 'batman-adv', 'bmx6', 'bmx7', 'cjdns', 'olsr1', 'olsr2', 'ospf',
    'yggdrasil'
]

for topology in ['line', 'grid4', 'grid8', 'rtree']:
    for path in sorted(glob.glob(f'../../data/{topology}/*.json')):
        state = shared.load_json(path)
        (node_count, link_count) = shared.json_count(state)

        # No test to be done for this topology
        if all((f'{p}_{topology}' in drop_test) for p in protocols):
            continue

        # Create network
        network.apply(state=state, remotes=remotes)

        for protocol in protocols:
            if f'{protocol}_{topology}' in drop_test:
                continue

            pc = run(topology, path, state)