Example #1
def do_diff_langfile(args):
    """Calculate a pack file given two lang files."""
    from_json = common.load_json(args.fileorig)
    to_json = common.load_json(args.filetrans)
    if "filename" in args and args.filename is not None:
        file_path = args.filename.split('/')
    else:
        file_path = ["lang", "sc", os.path.basename(args.fileorig)]
    result = {}
    # This is arbitrary. "foobar" or "en_US" would also work.
    from_locale = args.from_locale
    iterator = common.walk_langfile_json({from_locale: from_json}, [], [])
    for langlabel, dict_path, _ in iterator:
        text = common.get_data_by_dict_path(to_json, dict_path)
        if text is None:
            continue
        trans = {"orig": langlabel[from_locale], "text": text}

        result[common.serialize_dict_path(file_path, dict_path)] = trans
    # we are already sorted by game order
    if args.sort_order == "alpha":
        result = common.sort_dict(result)
    if args.resultfile == '-':
        common.save_json_to_fd(sys.stdout, result)
    else:
        common.save_json(args.resultfile, result)
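Note: every example on this page relies on a load_json helper (and often a save_json counterpart) defined in the project's own common module, which is never shown. A minimal sketch of what such helpers typically look like, assuming plain UTF-8 JSON files (the real signatures vary per project):

# Hypothetical stand-ins for the load_json / save_json helpers used in
# these examples; each project ships its own variant.
import json

def load_json(path):
    # read and parse a UTF-8 encoded JSON file
    with open(path, 'r', encoding='utf-8') as f:
        return json.load(f)

def save_json(path, data):
    # serialize data back to disk, keeping non-ASCII characters readable
    with open(path, 'w', encoding='utf-8') as f:
        json.dump(data, f, ensure_ascii=False, indent=4)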
Example #2
File: plot.py Project: amsaha/don
    def __init__(self, in_json_filename,
                  compute_dot_file, compute_svg_file,
                  network_dot_file, network_svg_file,
                  combined_dot_file, combined_svg_file,
                  highlight_file):
        self.json_filename = in_json_filename

        self.compute_dot_file = compute_dot_file
        self.compute_svg_file = compute_svg_file
        self.network_dot_file = network_dot_file
        self.network_svg_file = network_svg_file
        self.combined_dot_file = combined_dot_file
        self.combined_svg_file = combined_svg_file
        self.highlight_file = highlight_file

        settings['debug'] = True

        self.highlight_info = None
        if highlight_file:
            self.highlight_info = load_json(self.highlight_file)
            if not self.highlight_info.get('net_info'):
                self.highlight_info['net_info'] = {'pass': [], 'fail': []}

        self.info = load_json(self.json_filename)
        self.outfile = None

        self.colors = {
                'vms'       :   '#ff9933',
                'tap'       :   '#99ffff',
                'qbr'       :   '#9966ff',
                'br-int'    :   '#ff6666',
                'br-tun'    :   '#ff6666',
                'qvb'       :   '#ffcc00',
                'qvo'       :   '#ffcc00',
                'tun'       :   '#ffcc00',
                'int'       :   '#ffcc00',
                'routers'   :   '#ff9933',

                'vlan'      :   [],

                'error'     :   '#f00000',
                'edge'      :   '#0066cc',

                'dontcare'  :   '#909090',
                'pass'      :   '#b2f379',
                'fail'      :   '#f00000',
                'edge_pass' :   '#009900',
                'floating_ip':  '#b3ffb3',
                }
        self.__set_vlan_color_table()
        pprint.pprint(self.info)
Example #3
def get_all_threads(folder_path):
    """
        Returns list of all threads in shared folder.
    """
    result = []

    # search through all directories
    for tmpdir, subdirs, subfiles in os.walk(folder_path + '/.Comments'):
        # ignore hidden files
        if '.' in tmpdir and '.Comments' not in tmpdir:
            continue

        for subdir in subdirs:
            if '.' not in subdir and comment_file_name_pattern.match(subdir):
                if not os.listdir(os.path.join(tmpdir, subdir)):
                    continue

                # if the thread doesn't contain a meta file, ignore it
                comments = sorted(os.listdir(os.path.join(tmpdir, subdir)), reverse=True)
                if 'meta' not in comments:
                    continue

                # 'meta' sorts after numeric timestamp filenames, so after the
                # reverse sort comments[1] is the freshest comment
                lastcomment = load_json(os.path.join(tmpdir, subdir, comments[1]))

                metadata = load_json(os.path.join(tmpdir, subdir, 'meta'))
                data = {
                    'fullpath': os.path.join(tmpdir, subdir),
                    'timestamp': metadata['timestamp'],
                    'name': metadata['topic'],
                    'type': 'thread',
                    'path': tmpdir.replace(folder_path + '/.Comments',
                                           '/' if tmpdir == folder_path + '/.Comments' else ''),
                    'numberofcomments': len(os.listdir(os.path.join(tmpdir, subdir))) - 1,
                    'unreadcomment': False,
                    'lastcomment': lastcomment['timestamp']
                }

                # searching for unread comments
                for comment in comments:
                    if comment == 'meta':
                        continue

                    comm = load_json(os.path.join(tmpdir, subdir, comment))
                    if config['uid'] not in comm['readby']:
                        data['unreadcomment'] = True
                        break

                result.append(data)

    return result
Example #4
    def __init__(self, src_vm, src_ip, dst_vm, dst_ip, json_file):
        self.info = load_json(json_file)
        vm_list = pprint.pformat(sorted(self.info['vms'].keys()))
        if src_vm not in self.info['vms']:
            error('VM [%s] does not exist in %s !' % (src_vm, vm_list))
            return None
        if dst_vm not in self.info['vms']:
            error('VM [%s] does not exist in %s !' % (dst_vm, vm_list))
            return None

        self.src_vm = src_vm
        self.src_ip = src_ip
        self.dst_vm = dst_vm
        self.dst_ip = dst_ip

        (self.src_port_id,
         self.src_port_tag) = get_port_ovs_id_tag(self.info, src_vm, src_ip)
        (self.dst_port_id,
         self.dst_port_tag) = get_port_ovs_id_tag(self.info, dst_vm, dst_ip)

        if not self.src_port_id:
            error('%s does not have port with IP %s' % (src_vm, src_ip))
            return None
        if not self.dst_port_id:
            error('%s does not have port with IP %s' % (dst_vm, dst_ip))
            return None

        debug(src_ip + ': ' + str(self.src_port_id))
        debug(dst_ip + ': ' + str(self.dst_port_id))

Example #5
    def __init__(self, src_vm, src_ip, dst_vm, dst_ip, json_file):
        self.info = load_json(json_file)
        vm_list = pprint.pformat(sorted(self.info['vms'].keys()))
        if src_vm not in self.info['vms']:
            error('VM [%s] does not exist in %s !' % (src_vm, vm_list))
            return None
        if dst_vm not in self.info['vms']:
            error('VM [%s] does not exist in %s !' % (dst_vm, vm_list))
            return None

        self.src_vm = src_vm
        self.src_ip = src_ip
        self.dst_vm = dst_vm
        self.dst_ip = dst_ip

        (self.src_port_id, self.src_port_tag) = get_port_ovs_id_tag(self.info, src_vm, src_ip)
        (self.dst_port_id, self.dst_port_tag) = get_port_ovs_id_tag(self.info, dst_vm, dst_ip)

        if not self.src_port_id:
            error('%s does not have port with IP %s' % (src_vm, src_ip))
            return None
        if not self.dst_port_id:
            error('%s does not have port with IP %s' % (dst_vm, dst_ip))
            return None

        debug(src_ip + ': ' + str(self.src_port_id))
        debug(dst_ip + ': ' + str(self.dst_port_id))

Example #6
def get_vehicle_schema():
    """
    Create the schema for the Agency GET /vehicles endpoint.
    """
    # load schema template and insert definitions
    schema = common.load_json("./templates/agency/get_vehicle.json")
    definitions = common.load_definitions(
        "propulsion_types",
        "string",
        "timestamp",
        "vehicle_type",
        "uuid"
    )
    schema["definitions"].update(definitions)

    # merge the state machine definitions and transition combinations rule
    state_machine_defs, transitions = common.vehicle_state_machine("state", "prev_events")
    schema["definitions"].update(state_machine_defs)
    schema["allOf"].append(transitions)

    # merge common vehicle information, with Agency tweaks
    vehicle = common.vehicle_definition(provider_name=False)
    schema["required"] = vehicle["required"] + schema["required"]
    schema["properties"] = { **vehicle["properties"], **schema["properties"] }

    # verify and return
    return common.check_schema(schema)
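Note: the schema functions on this page return ordinary JSON Schema documents, so their output can be fed to any JSON Schema validator. A hypothetical usage sketch with the jsonschema package (the payload is illustrative, not taken from the spec):

# Validate a candidate response body against the generated schema.
import jsonschema

schema = get_vehicle_schema()
payload = {"vehicle_id": "..."}  # illustrative, deliberately incomplete
try:
    jsonschema.validate(instance=payload, schema=schema)
    print("payload is valid")
except jsonschema.exceptions.ValidationError as err:
    print("invalid payload:", err.message)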
Example #7
def main():
    #parser = argparse.ArgumentParser(description="PWR scheduling using genetic algorithm")
    #parser.add_argument('problem', help="JSON file of problem")

    #args = parser.parse_args()
    input_filepath = './artifacts/art15.json'

    raw_dict = load_json(input_filepath)
    prepared_dict = parse_raw_course_dict(raw_dict)
    scoring_dict = raw_dict.get("scoring", {})

    pop_sizes = [10, 20, 50, 100, 200]
    pop_size_def = 50
    cross_probs = [0.6, 0.7, 0.8]
    cross_prob_def = 0.7
    mutate_probs = [0.02, 0.05, 0.1, 0.15, 0.2]
    mutate_prob_def = 0.1
    stale_vals = [5, 10, 15, 20, 25]
    stale_val_def = 15
    #alg_gen_report = genetic_algorithm(
    #            prepared_dict, 200, 0.8,
    #            0.2, 20, scoring_dict)
    #print(alg_gen_report.printable_summary())
    test_nothing(pop_size_def, cross_prob_def, mutate_prob_def, stale_val_def,
                 prepared_dict, scoring_dict)
Example #8
def edit_comment(request):
    """
        Edit existing comment. Returns nothing.
        Keys in request.body:
            comment - dict with new comment's data. Keys:
                comment - new content which will replace the old one
                timestamp - timestamp of the moment first version of comment was created
                uid - ID of comment's author
            threadfullpath - full path of the thread directory where the comment files are stored
    """
    try:
        if not check_connection():
            return HttpResponseServerError('Brak połączenia z Internetem.')

        body = json.loads(request.body)
        edited_comment = body['comment']
        comment_path = os.path.join(body['threadfullpath'],
                                    edited_comment['timestamp'] + file_name_separator + edited_comment['uid'])

        # get old comment and perform changes
        comment = load_json(comment_path)
        comment['history'].append({'timestamp': get_timestamp(),
                                   'comment': edited_comment['comment']})
        comment['comment'] = edited_comment['comment']

        save_json(comment_path, comment)

        return JsonResponse({}, safe=False)
    except Exception:
        return HttpResponseServerError('Wystąpił nieznany błąd podczas edycji komentarza.')
Example #9
def analyze(json_filename, params):
    settings['debug'] = True
    BASE_DIR = os.path.dirname(os.path.dirname(__file__))
    print('analyze BASE_DIR ---> ', BASE_DIR)
    CUR_DIR = os.getcwd()
    os.chdir(BASE_DIR + '/ovs')
    NEW_DIR = os.getcwd()
    #return BASE_DIR + ':' + CUR_DIR + ':' + NEW_DIR
    debug('This is what I am going to analyze')
    my_info = load_json(json_filename)

    for test in test_suite.keys():
        flag = 'test:' + test
        if params[flag] or params['test:all']:
            (result, cmds) = test_suite[test]['func'](my_info)
            if result:
                test_suite[test]['result'] = 'PASS'
            else:
                test_suite[test]['result'] = 'FAIL'
            lines = test_suite[test]['formatter'](cmds,
                                                  result,
                                                  my_info,
                                                  test_suite[test]['help'])
            test_suite[test]['html'] = lines

    debug(params['test:report_file'])
    f = report_file_open(params['test:report_file'])
    for test in test_suite.keys():
        if test_suite[test]['html']:
            for line in test_suite[test]['html']:
                f.write(line)
    report_file_close(f)
    os.chdir(CUR_DIR)
Example #10
def start_btsync(request):
    """
        Run BitTorrent Sync application
    """
    global pid

    if is_btsync_active():
        return HttpResponseRedirect('/')

    # If wrong structure of config file, return an error
    if 'btsync_conf_file' not in config:
        return HttpResponse('Klucz btsync_conf_file nie istnieje w pliku konfiguracyjnym')

    # If the BTSync config file doesn't exist, create a new one
    if not path.isfile(config['btsync_conf_file']):
        create_empty_btsync_config_file()

    # If the storage folder doesn't exist, create it
    btsync_conf = load_json(config['btsync_conf_file'])
    if not os.path.exists(btsync_conf['storage_path']):
        os.makedirs(btsync_conf['storage_path'])

    # Start BTSync process
    if platform.system() == 'Windows':
        pass                                # for the future
    elif platform.system() == 'Linux':
        pid = subprocess.Popen([config['btsync_exe_file'], '--config', config['btsync_conf_file']])
        # busy-wait until the BTSync process is up
        while not is_btsync_active():
            pass

        if 'uid' not in config:
            config['uid'] = get_uid(config['btsync_server_address'])
            save_json(os.path.join(config['application_path'], 'config.json'), config)

    return HttpResponseRedirect('/')
Example #11
def analyze(json_filename, params):
    settings['debug'] = True
    BASE_DIR = os.path.dirname(os.path.dirname(__file__))
    print('analyze BASE_DIR ---> ', BASE_DIR)
    CUR_DIR = os.getcwd()
    os.chdir(BASE_DIR + '/ovs')
    NEW_DIR = os.getcwd()
    #return BASE_DIR + ':' + CUR_DIR + ':' + NEW_DIR
    debug('This is what I am going to analyze')
    my_info = load_json(json_filename)

    for test in test_suite.keys():
        flag = 'test:' + test
        if params[flag] or params['test:all']:
            (result, cmds) = test_suite[test]['func'](my_info)
            if result:
                test_suite[test]['result'] = 'PASS'
            else:
                test_suite[test]['result'] = 'FAIL'
            lines = test_suite[test]['formatter'](cmds, result, my_info,
                                                  test_suite[test]['help'])
            test_suite[test]['html'] = lines

    debug(params['test:report_file'])
    f = report_file_open(params['test:report_file'])
    for test in test_suite.keys():
        if test_suite[test]['html']:
            for line in test_suite[test]['html']:
                f.write(line)
    report_file_close(f)
    os.chdir(CUR_DIR)
Example #12
def do_split(args):
    """Split a large packfile to multiple ones according to a mapfile"""
    sorter = get_sorter(args)

    big_pack = common.load_json(args.bigpack)
    map_file = common.load_json(args.mapfile)
    unused_map_files = set(map_file.keys())
    results = {}
    missings = {}
    error = False
    for file_dict_path_str, trans in big_pack.items():
        file_path, _ = common.unserialize_dict_path(file_dict_path_str)
        file_path_str = "/".join(file_path)
        to_file_str = map_file.get(file_path_str)
        if to_file_str is None:
            missings[file_path_str] = missings.get(file_path_str, 0) + 1
        else:
            unused_map_files.discard(file_path_str)

        results.setdefault(to_file_str, {})[file_dict_path_str] = trans

    for missing_file, count in missings.items():
        error = True
        print("missing pack reference for", missing_file,
              "(%d occurences)" % count)

    if error:
        print("Aborting...")
        sys.exit(1)

    for to_file_str, smaller_pack in results.items():
        to_file = to_file_str.split('/')[args.strip:]
        if not to_file:
            print("strip parameter", args.strip, "is too large for path",
                  to_file_str)
            print("Aborting...")
            sys.exit(1)

        actual_dir = os.path.join(args.outputpath, os.sep.join(to_file[:-1]))
        os.makedirs(actual_dir, exist_ok=True)
        smaller_pack = sorter(smaller_pack)
        common.save_json(os.path.join(actual_dir, to_file[-1]), smaller_pack)

    if unused_map_files:
        print(len(unused_map_files),
              "keys where not used in the map file, e.g.:",
              "\n".join(f for i, f in zip(range(10), unused_map_files)))
Example #13
def get_comments_from_path(request):
    """
        Return all the comments from specified directory INSIDE the shared folder.
        Keys in request.body:
            insidepath - relative path inside shared folder
            folderpath - path of shared folder
    """
    try:
        data = json.loads(request.body)
        result = []

        full_path = os.path.join(data['folderpath'], '.Comments', data['insidepath'][1:])

        if not os.path.isdir(full_path):
            return JsonResponse([], safe=False)

        for comment_folder in os.listdir(full_path):
            if not comment_file_name_pattern.match(comment_folder):
                continue

            # get info about thread
            temp_path = os.path.join(full_path, comment_folder)
            meta = load_json(os.path.join(temp_path, 'meta'))
            thread_data = {
                'fullpath': temp_path,
                'timestamp': meta['timestamp'],
                'name': meta['topic'],
                'type': 'thread',
                'path': temp_path.replace(data['folderpath'], ''),
                'numberofcomments': len(os.listdir(temp_path)) - 1,
                'unreadcomment': False,
            }

            # get comments
            for comment_file in os.listdir(temp_path):
                if comment_file == 'meta':
                    continue

                comment = load_json(os.path.join(temp_path, comment_file))
                comment['topic'] = thread_data

                result.append(comment)

        return JsonResponse({'comments': result}, safe=False)
    except Exception:
        return HttpResponseServerError('Wystąpił nieznany błąd podczas pobierania komentarzy.')
Example #14
def return_res_time(result_path, result_json_file, start_time):
    res = common.load_json(
        result_path.replace('"', '') + '/' + result_json_file)
    common.remove_res_point_arr(res)
    start_time = time.mktime(
        datetime.datetime.strptime(
            start_time, "%Y-%m-%d-%H:%M:%S").timetuple())
    return (res, start_time)
Example #15
def main():
    with open(sys.argv[1], 'r') as raw_html:
        soup = BeautifulSoup(raw_html, 'lxml')

    uwagi_hide_list = soup.find_all('tr', class_='uwagi_hide')
    for uwagi_hide in uwagi_hide_list:
        uwagi_hide.extract()

    group_tag = soup.find_all('b', string=re.compile("Grupy zajęciowe"))[0]
    group_tag_parent = group_tag.parent
    info_table = group_tag_parent.find_all('table', recursive=False)[-2]
    info_tr_list = info_table.find_all('tr', recursive=False)[3:-1]

    group_repo = load_json(sys.argv[2])

    for line_1, line_2, line_3 in chunks(info_tr_list, 3):
        line_1_td_list = line_1.find_all('td', recursive=False)

        #print("Kod grupy: {}".format(line_1_td_list[0].text.strip()))
        group_code = line_1_td_list[0].text.strip()
        #print("Kod kursu: {}".format(line_1_td_list[1].text.strip()))
        course_code = line_1_td_list[1].text.strip()

        line_2_td_list = line_2.find_all('td', recursive=False)
        # print("Prowadzący: {}".format(
        #    re.sub(r'\s+', ' ', line_2_td_list[0].text.strip())))
        #profesor_name = re.sub(r'\s+', ' ', line_2_td_list[0].text.strip())

        line_3_table_td_list = line_3.find('table').find_all('td')
        time_list = [td.text for td in line_3_table_td_list]
        formatted_time_list = []

        for time_element in time_list:
            hours = re.findall(r'\d\d:\d\d', time_element)
            dow_text = time_element.strip()[:2]
            dow_num = DOW_TEXT_NUM_DICT[dow_text]
            ftw_dict = {
                'start': hours[0].replace(':', ''),
                'end': hours[1].replace(':', ''),
                'dow': dow_num
            }
            par_ind = time_element.strip()[3:5]
            if par_ind in ('TP', 'TN'):
                ftw_dict['par'] = 1 if par_ind == 'TN' else 2
            formatted_time_list.append(ftw_dict)

        if 'courses' not in group_repo:
            group_repo['courses'] = {}

        if course_code not in group_repo['courses']:
            course_dict = {}
            group_repo['courses'][course_code] = course_dict
        else:
            course_dict = group_repo['courses'][course_code]

        course_dict[group_code] = formatted_time_list

    save_json(sys.argv[2], group_repo)
Example #16
def geographies_schema():
    """
    Create the schema for the Geographies endpoint.
    """
    # load schema template and insert definitions from geography
    geography = geography_schema()
    schema = common.load_json("./templates/geography/geographies.json")
    schema["definitions"].update(geography["definitions"])

    return common.check_schema(schema)
Example #17
def post_stops_schema():
    """
    Create the schema for the Agency POST /stops endpoint.
    """
    # load schema template and insert definitions
    schema = common.load_json("./templates/agency/post_stops.json")
    stops = common.stop_definitions()
    schema["definitions"].update(stops)

    # verify and return
    return common.check_schema(schema)
Example #18
def do_make_mapfile(args):
    """Create a default mapfile with a one file to one file mapping."""
    json = common.load_json(args.bigpack)
    result = {}
    prefix = args.prefix
    if prefix and not prefix.endswith('/'):
        prefix += '/'
    for file_dict_path_str in json.keys():
        file_path, _ = common.unserialize_dict_path(file_dict_path_str)
        path = "/".join(file_path)
        result[path] = prefix + path
    common.save_json(args.mapfile, result)
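Note: the mapfile produced above is a flat JSON object mapping every file referenced by the big pack onto itself under the prefix. A runnable toy sketch of that shape (the key format and the split are assumptions standing in for unserialize_dict_path):

# Toy illustration of do_make_mapfile's output; paths are made up.
pack_keys = ["lang/sc/gui.json/labels/title", "lang/sc/map.json/area/name"]
prefix = "base/"
result = {}
for key in pack_keys:
    file_path = key.split(".json/")[0] + ".json"  # crude stand-in
    result[file_path] = prefix + file_path
print(result)
# {'lang/sc/gui.json': 'base/lang/sc/gui.json',
#  'lang/sc/map.json': 'base/lang/sc/map.json'}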
Example #19
def post_vehicle_telemetry_schema():
    """
    Create the schema for the Agency POST /vehicles/telemetry endpoint.
    """
    # load schema template and insert definitions
    schema = common.load_json("./templates/agency/post_vehicle_telemetry.json")
    definitions = common.load_definitions("timestamp", "uuid")
    definitions["vehicle_telemetry"] = vehicle_telemetry()
    schema["definitions"].update(definitions)

    # verify and return
    return common.check_schema(schema)
Example #20
def get_all_comments(request):
    """
        Returns all comments written in whole shared folder.
        Request.body contains only 'path' key.
    """
    try:
        path = json.loads(request.body)['path'] + '/.Comments'
        result = []

        for tmpdir, subdirs, subfiles in os.walk(path):
            for subdir in subdirs:
                if not comment_file_name_pattern.match(subdir):
                    continue

                # get info about thread (needed in gui to take shortcut to thread or gather stats)
                meta = load_json(os.path.join(tmpdir, subdir, 'meta'))
                thread_data = {
                    'fullpath': os.path.join(tmpdir, subdir),
                    'timestamp': meta['timestamp'],
                    'name': meta['topic'],
                    'type': 'thread',
                    'path': os.path.join(tmpdir, subdir).replace(path, ''),
                    'numberofcomments': len(os.listdir(os.path.join(tmpdir, subdir))) - 1,
                    'unreadcomment': False,
                }

                # finally get comments
                for commentfile in os.listdir(os.path.join(tmpdir, subdir)):
                    if commentfile == 'meta':
                        continue

                    comment = load_json(os.path.join(tmpdir, subdir, commentfile))
                    comment['topic'] = thread_data

                    result.append(comment)

        return JsonResponse({'comments': result, 'stats': get_stats(result)}, safe=False)
    except Exception:
        return HttpResponseServerError('Wystąpił nieznany błąd podczas pobierania komentarzy.')
Example #21
def do_migrate(args):
    """Migrate one or more pack file according to a migration file."""
    sorter = get_sorter(args)
    sparse_reader = get_sparse_reader(args)
    plan = common.load_json(args.migration_plan)
    plan = types.SimpleNamespace(to_delete=set(plan["delete"]),
                                 unchanged=set(plan["unchanged"]),
                                 migrate=plan["migrate"])
    iterator = common.transform_file_or_dir(args.inputpath, args.outputpath)
    for input_file, output_file, _ in iterator:
        try:
            src_pack = common.load_json(input_file)
        except OSError as error:
            print("Cannot read", input_file, ":", str(error))
            continue
        except ValueError as error:
            print("File", input_file, "contains invalid JSON:", str(error))
            continue

        dst_pack = migrate_pack(args, plan, sparse_reader, src_pack)

        common.save_json(output_file, sorter(dst_pack))
Example #22
    def load_from_file(self, filename, unknown_option=lambda key, val: False):
        json = common.load_json(filename)
        for key, value in json.items():
            if key not in self.default_options:
                if unknown_option(key, value):
                    continue
                raise ValueError("Unknown configuration option: %s" % key)
            value_type = self.default_options[key].__class__
            if value.__class__ is not value_type:
                raise ValueError("Bad value type for option %s, "
                                 "expected %s got %s" %
                                 (key, value_type, value.__class__))
            setattr(self, key, value)
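Note: load_from_file validates a JSON config file against the keys and value types of self.default_options. A hypothetical context for the method (class and option names are illustrative):

# Sketch of an options holder the method above could be attached to.
class Config:
    def __init__(self):
        # known options with their defaults; the defaults also fix the types
        self.default_options = {"packfile": "", "debug": False}
        for key, value in self.default_options.items():
            setattr(self, key, value)

    # def load_from_file(...) as shown above

# cfg = Config()
# cfg.load_from_file("config.json")  # ValueError on unknown key or bad type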
Example #23
def do_filter(args):
    """Filter a pack and write the result to another one"""
    sorter = get_sorter(args)
    walker = get_walker(args)

    walker.set_file_path_filter(args.filter_file_path)
    walker.set_dict_path_filter(args.filter_dict_path)
    walker.set_tags_filter(args.filter_tags)
    walker.set_orig_filter(args.filter_orig)

    pack = common.load_json(args.inputpack)
    new_pack = dict(walker.walk_pack(pack))
    new_pack = sorter(new_pack)
    common.save_json(args.outputpack, new_pack)
Example #24
def geography_schema():
    """
    Create the schema for the Geography endpoint.
    """
    # load schema template and insert definitions
    schema = common.load_json("./templates/geography/geography.json")
    definitions = common.load_definitions("string", "timestamp", "uuid",
                                          "version")
    definitions.update(
        common.load_definitions("timestamp", "uuid_array", allow_null=True))
    schema["definitions"].update(definitions)

    # verify and return
    return common.check_schema(schema)
Example #25
def path(params):
    global src_info
    global dst_info
    global net_info

    src_info = None
    dst_info = None
    net_info = None

    settings['debug'] = True
    BASE_DIR = os.path.dirname(os.path.dirname(__file__))
    CUR_DIR = os.getcwd()
    if not re.search('/openstack_dashboard/don/', CUR_DIR):
        os.chdir(BASE_DIR + '/ovs')
    NEW_DIR = os.getcwd()
    debug(BASE_DIR + ':' + CUR_DIR + ':' + NEW_DIR)

    src_ip = params['src_ip']
    dst_ip = params['dst_ip']
    json_file = params['json_file']
    router = params['router']

    debug('Json_file: ' + json_file)

    info = load_json(json_file)
    qrouter = router_to_namespace(info, router)
    params['qrouter'] = qrouter

    src_info = get_port_info(info, src_ip)
    dst_info = get_port_info(info, dst_ip)

    if src_info is None:
        return "Source ip not found on the network"
    if dst_info is None:
        return "Destination ip not found on the network"
    if qrouter is None:
        return "No such router information found on the network"

    # src and dst are in the same network
    if src_info['tag'] == dst_info['tag']:
        path_same_network(params)
    else:
        status_update('The source and destination are in different networks')
        next_hop_list = get_next_hop(src_info, dst_info, qrouter, params)
        if len(next_hop_list) == 0:
            error('Could not find next hop list from %s to %s' %
                  (src_ip, dst_ip))
        path_same_network(params, next_hop_list)

Example #26
def get_next_hop(src_info, dst_info, qrouter, params):
    next_hop_list = []
    next_hop = None

    username = params['username']
    passwd = params['passwd']
    src_ip = src_info['ip']
    dst_ip = dst_info['ip']

    remote_cmd = ' ip route get %s' % dst_ip

    cmd = 'sudo ip netns exec ' + qrouter
    cmd += ' python run_nms_cmd.py --host_ip %s --username "%s" --passwd "%s" --cmd "%s" ' % \
                            (src_ip, username, passwd, remote_cmd)

    output = run_remote_cmd(cmd)
    a = json.loads(output)

    if not a['pass']:
        return []

    json_file = params['json_file']
    info = load_json(json_file)

    next_hop = {}
    for cmd in a['command_list']:
        if re.search('ip route get', cmd['cmd']):
            m = re.search(r'\S+\s+via\s+(\S+)', cmd['output'][0])
            if m:
                next_hop['ip'] = m.group(1)
                next_hop['dev'] = 'qr-' + ip_to_intf(info, next_hop['ip'])
                next_hop['nms'] = intf_to_namespace(info, next_hop['dev'])
                break

    if not next_hop:
        # 'ip route get' output did not match; cannot follow the route further
        return next_hop_list

    next_hop_list.append(next_hop)

    cmd = 'sudo ip netns exec ' + next_hop['nms']
    cmd += remote_cmd

    output = run_remote_cmd(cmd).split('\n')

    prev_nms = next_hop['nms']
    next_hop = {}
    m = re.search(r'\S+\s+dev\s+(\S+)', output[0])
    if m:
        next_hop['dev'] = m.group(1)
        next_hop['nms'] = prev_nms

    next_hop_list.append(next_hop)
    return next_hop_list
Example #27
def path(params):
    global src_info
    global dst_info
    global net_info

    src_info = None
    dst_info = None
    net_info = None

    settings['debug'] = True
    BASE_DIR = os.path.dirname(os.path.dirname(__file__))
    CUR_DIR = os.getcwd()
    if not re.search('/openstack_dashboard/don/', CUR_DIR):
        os.chdir(BASE_DIR + '/ovs')
    NEW_DIR = os.getcwd()
    debug(BASE_DIR + ':' + CUR_DIR + ':' + NEW_DIR)

    src_ip = params['src_ip']
    dst_ip = params['dst_ip']
    json_file = params['json_file']
    router = params['router']

    debug('Json_file: ' + json_file)

    info = load_json(json_file)
    qrouter = router_to_namespace(info, router)
    params['qrouter'] = qrouter

    src_info = get_port_info(info, src_ip)
    dst_info = get_port_info(info, dst_ip)

    if src_info is None:
        return "Source ip not found on the network"
    if dst_info is None:
        return "Destination ip not found on the network"
    if qrouter is None:
        return "No such router information found on the network"

    # src and dst are in the same network
    if src_info['tag'] == dst_info['tag']:
        path_same_network(params)
    else:
        status_update('The source and destination are in different networks')
        next_hop_list = get_next_hop(src_info, dst_info, qrouter, params)
        if len(next_hop_list) == 0:
            error('Could not find next hop list from %s to %s' %
                  (src_ip, dst_ip))
        path_same_network(params, next_hop_list)

Example #28
def get_folders(request):
    """
        Get list of all shared folders with additional info, such as the list of users in each folder
    """
    try:
        # getting data from BTSync
        data = requests.get(
            "http://" + config["btsync_server_address"] + "/api",
            params={"method": "get_folders"},
            auth=("team", "sync"),
        ).json()

        # modifying data and adding other info
        for dat in data:
            # get folder name
            dat["name"] = dat["dir"].split("/")[-1]

            # if folder .Users is present, get info about users in shared folder
            if os.path.isdir(dat["dir"] + "/.Users"):
                dat["uid"] = config["uid"]
                dat["identity"] = load_json(dat["dir"] + "/.Users/" + config["uid"] + ".json")["identity"]

                files = os.listdir(dat["dir"] + "/.Users")
                users = []

                for fil in files:
                    user = load_json(dat["dir"] + "/.Users/" + fil)
                    # add the suffix ' (Ty)' ('You') to mark which username in the list is the current user's
                    if user["identity"] == dat["identity"]:
                        user["identity"] += " (Ty)"
                    users.append(user)

                dat["users"] = users

        return JsonResponse(data, safe=False)
    except Exception:
        return HttpResponseServerError("Wystąpił błąd podczas pobierania listy folderów.")
Example #29
def endpoint_schema(endpoint, extra_definitions=None):
    """
    Generate the Provider payload schema for the given endpoint.
    """
    # load common schema template and update metadata
    schema = common.load_json("./templates/provider/endpoint.json")
    schema["$id"] = schema["$id"].replace("endpoint.json", f"{endpoint}.json")
    schema["title"] = schema["title"].replace("endpoint", endpoint)

    # merge custom definitions with relevant common definitions
    definitions = common.load_definitions("string", "timestamp", "uuid",
                                          "version")
    definitions.update(common.point_definition())
    definitions.update(common.mds_feature_point_definition())
    definitions.update(extra_definitions or {})

    endpoint_schema = common.load_json(f"./templates/provider/{endpoint}.json")

    # for all but stops, merge standard vehicle info with items schema
    if endpoint not in ["stops"]:
        items = endpoint_schema[endpoint]["items"]
        vehicle = common.vehicle_definition()
        items["required"] = vehicle["required"] + items["required"]
        items["properties"] = {**vehicle["properties"], **items["properties"]}
        definitions.update(
            common.load_definitions("propulsion_type", "propulsion_types",
                                    "vehicle_type"))

    # merge endpoint schema into the endpoint template
    data_schema = schema["properties"]["data"]
    data_schema["required"] = [endpoint]
    data_schema["properties"] = endpoint_schema

    # insert definitions
    schema["definitions"].update(definitions)

    return schema
Example #30
def __gen_navigation():
    """Generate app navigation code."""
    print('Generating navigation')

    # iterate over json pages and build react-native navigation routes
    routes_list = []
    for page_data in pages_array:
        # compute navigation component (e.g. "pages.Glossary")
        comp_class = __get_page_component_classname_from_page_data(page_data)
        comp_class = 'pages.generated.{}'.format(comp_class)

        # store route code
        routes_list.append("'{}': {{ title: \"{}\", component: {} }}".format(
            '#{}'.format(page_data['id']),
            page_data.get('title', ''), comp_class))

    # assemble routes data
    routes_code = 'const generatedRoutes = {{\n  {},\n}};'.format(
        ',\n  '.join(routes_list))

    # Load "glossary bindings" json file which contains correspondences
    # between glossary words and app pages ids.
    # We translate the correspondences to a Javascript object.
    glossary_bindings = common.load_json(common.content_glossary_bindings_fn)
    bindings = []
    for (word, page_id) in glossary_bindings.items():
        bindings.append("  '{}': '#{}'".format(word, page_id))

    bindings_code = 'const glossaryBindings = {{\n{}\n}};'.format(
        ',\n'.join(bindings))

    # compute jinja template replacements
    replacements = {
        'generatedRoutes': routes_code,
        'glossaryBindings': bindings_code
    }

    # generate navigator_data.js file
    target_file = os.path.join(common.target_navigation_dir,
                               'navigator_data.js')

    with open(target_file, 'w', encoding='utf-8') as f:
        tmpl = j2_env.get_template('navigator_data.tmpl.js')
        rendered_tmpl = tmpl.render(**replacements)
        f.write(rendered_tmpl)

    return True
Example #31
def get_dominated_threads(request):
    """
        Get threads which fulfil user's requirements
        Keys in request.body:
            path - directory from which threads will be returned
            threshold - (>|<) (10|30|50|70|90)% for example '> 30%' or '< 90%'
            users - dictionary of users in shared folder. Keys of this dict:
                isDominating - bool indicating whether user is checked
                uid - user ID
    """
    try:
        data = json.loads(request.body)

        files = get_all_threads(data['path'])
        threshold = float(data['threshold'][2:4]) / 100     # threshold as fraction
        threads = []
        dominating_users_uid = []

        for user in data['users']:
            if user['isDominating']:
                dominating_users_uid.append(user['uid'])

        for fil in files:
            if fil['type'] in ['folder', 'file']:
                continue

            # main loop for increasing counters of users
            comments = os.listdir(fil['fullpath'])
            users_comments = 0
            limit = fil['numberofcomments'] * threshold

            for comment_path in comments:
                if comment_path == 'meta':
                    continue

                comment = load_json(os.path.join(fil['fullpath'], comment_path))
                if comment['uid'] in dominating_users_uid:
                    users_comments += 1

            if (data['threshold'][0] == '>' and float(users_comments) > limit) or \
                    (data['threshold'][0] == '<' and float(users_comments) < limit):
                threads.append(fil)

        return JsonResponse(threads, safe=False)
    except Exception:
        return HttpResponseServerError('Wystąpił nieznany błąd podczas filtrowania wątków.')
Example #32
def get_thread_comments(request):
    """
        Returns all comments in specified thread and statistics data.
        Keys in request.body:
            fullthreadpath - full directory path to folder with comment files
            sortinguid - if non-empty, sort the comments by that user's 'readby' timestamp instead of the creation timestamp
    """
    result = []

    try:
        data = json.loads(request.body)
        full_thread_path = data['fullthreadpath']

        # get comment files and mark them as read by user
        for commentfile in os.listdir(full_thread_path):
            if commentfile == 'meta':
                continue

            comment = load_json(os.path.join(full_thread_path, commentfile))

            # marking as read
            if config['uid'] not in comment['readby']:
                comment['readby'][config['uid']] = get_timestamp()

            save_json(os.path.join(full_thread_path, commentfile), comment)

            # needed for UI purposes
            comment['editing'] = False
            comment['historing'] = False

            result.append(comment)

        # if sortinguid is set, drop comments that user hasn't read and sort
        # by that user's read timestamp instead of the creation timestamp
        if data['sortinguid']:
            result = [res for res in result if data['sortinguid'] in res['readby']]
            for res in result:
                res['timestamp'] = res['readby'][data['sortinguid']]

        # need to sort, because os.listdir doesn't return sorted list of files :(
        result = sorted(result, key=lambda comm: comm['timestamp'])

        return JsonResponse({'comments': result, 'stats': get_stats(result)}, safe=False)
    except Exception:
        return HttpResponseServerError('Wystąpił nieznany błąd podczas pobierania komentarzy.')
Example #33
def post_vehicle_schema():
    """
    Create the schema for the Agency POST /vehicles endpoint.
    """
    # load schema template and insert definitions
    schema = common.load_json("./templates/agency/post_vehicle.json")
    definitions = common.load_definitions("propulsion_types", "string",
                                          "vehicle_type", "uuid")
    schema["definitions"].update(definitions)

    # merge common vehicle information, with Agency tweaks
    vehicle = common.vehicle_definition(provider_name=False, provider_id=False)

    schema["required"] = vehicle["required"] + schema["required"]
    schema["properties"] = {**vehicle["properties"], **schema["properties"]}

    # verify and return
    return common.check_schema(schema)
Example #34
def put_stops_schema():
    """
    Create the schema for the Agency PUT /stops endpoint.
    """
    # load schema template and insert definitions

    # the PUT body allows a small subset of fields
    schema = common.load_json("./templates/agency/put_stops.json")

    stops = common.stop_definitions()
    needed_defs = ["stop_status", "uuid", "vehicle_type_counts"]
    for key in [k for k in stops.keys() if k not in needed_defs]:
        del stops[key]

    schema["definitions"].update(stops)

    # verify and return
    return common.check_schema(schema)
Example #35
def process_ping(filename, ip=None, check_ssh_connectivity_only=False):
    if not os.path.isfile(filename):
        return False

    status_update('Trying to read ' + filename)
    with open(filename) as f:
        lines = f.readlines()
    pprint.pprint(lines)

    info = load_json(filename)
    if not check_ssh_connectivity_only:
        return info.get('pass', False)

    cmd_list = info['command_list']
    for cmd in cmd_list:
        m = re.search(r'ssh (\S+) with provided username and passwd', cmd['cmd'])
        if m:
            if ip == m.group(1):
                return cmd['pass']
    return False
Example #36
def generate_page_data(page_id, input_page_dir, descriptor):
    print('\tGenerating page "{}"'.format(page_id))

    # The editor saves a json file inside each page directory.
    # Such file describe the various page tokens so we abort the generation
    # if this file is not found.
    input_json_fn = os.path.join(input_page_dir, 'page.json')
    page_descriptor = common.load_json(input_json_fn)
    if page_descriptor is None:
        return None

    # Remove and recreate output directory if exists
    output_page_dir = common.get_content_page_dir(page_id)
    if os.path.isdir(output_page_dir):
        shutil.rmtree(output_page_dir)
    os.mkdir(output_page_dir)

    # Call custom import function to complete page generation
    importfun = descriptor['importfun']
    return importfun(page_id, page_descriptor, input_page_dir, output_page_dir)
Example #37
def do_merge(args):
    """Merge multiple pack files into one big pack file."""
    sorter = get_sorter(args)

    big_result = {}
    error = False
    for usable_path, _ in common.walk_files(args.inputpath):
        for file_dict_path_str, value in common.load_json(usable_path).items():
            if big_result.setdefault(file_dict_path_str, value) != value:
                print("Multiple different value found for", file_dict_path_str)
                error = True
    if error:
        if args.allow_mismatch:
            print("Continuing anyway...")
        else:
            print("Aborting...")
            sys.exit(1)
    big_result = sorter(big_result)

    common.save_json(args.bigpack, big_result)
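Note: the setdefault call in do_merge doubles as the mismatch check: it inserts the value only when the key is new and returns the stored value either way, so a differing return value means two packs disagree. A minimal standalone illustration:

# Demonstrates the setdefault-based conflict detection used above.
merged = {}
for pack in ({"a": 1}, {"a": 1}, {"a": 2}):
    for key, value in pack.items():
        if merged.setdefault(key, value) != value:
            print("Multiple different values found for", key)  # fires on the third pack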
Example #38
def post_vehicle_event_schema():
    """
    Create the schema for the Agency POST /vehicles/:id/event endpoint.
    """
    # load schema template and insert definitions
    schema = common.load_json("./templates/agency/post_vehicle_event.json")
    definitions = common.load_definitions("timestamp", "uuid")
    definitions["vehicle_telemetry"] = vehicle_telemetry()
    schema["definitions"].update(definitions)

    # merge the state machine definitions and transition combinations rule
    state_machine_defs, transitions = common.vehicle_state_machine(
        "vehicle_state", "event_types")
    schema["definitions"].update(state_machine_defs)
    schema["allOf"].append(transitions)

    # add the conditionally-required trip_id rule
    trip_id_ref = common.load_definitions("trip_id_reference")
    schema["allOf"].append(trip_id_ref)

    # verify and return
    return common.check_schema(schema)
Example #39
def policy_schema():
    """
    Create the schema for the Policy endpoint.
    """
    # load schema template and insert definitions
    schema = common.load_json("./templates/policy/policy.json")
    definitions = common.load_definitions("currency", "day", "propulsion_type",
                                          "string", "timestamp", "uuid",
                                          "uuid_array", "vehicle_event",
                                          "vehicle_state", "vehicle_type",
                                          "version")
    definitions.update(
        common.load_definitions("days",
                                "iso_time",
                                "propulsion_types",
                                "timestamp",
                                "uuid_array",
                                "vehicle_types",
                                allow_null=True))
    schema["definitions"].update(definitions)

    # verify and return
    return common.check_schema(schema)
Example #40
def import_structure_json(structure_id, structure_dir, structures_json):
    # Make sure json descriptor exists
    json_descriptor_fn = os.path.join(structure_dir, 'page.json')
    json_descriptor = common.load_json(json_descriptor_fn)
    if json_descriptor is None:
        return

    # Append structure description to the output json object
    s_type = json_descriptor['structuretype']
    if s_type not in structures_json:
        structures_json[s_type] = []

    # Copy structure markdown file (that contains structure's free text
    # description) to the content "structures" directory
    common.import_editor_markdown_file(structure_dir,
                                       common.content_structures_dir,
                                       structure_id,
                                       create_empty_mdfile=False)

    # Append the structure id to the json data.
    # The structure id is used to associate a structure with its free-text
    # description.
    json_descriptor['structureid'] = structure_id
    structures_json[s_type].append(json_descriptor)
Example #41
import re
from common import load_json

j = load_json()
for line in j.split('\n'):
    m = re.match(r'^\[\[Category:(.+)\]\]$', line)
    if m:
        print(m.group(1).split('|')[0])
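Note: for a wiki-markup line such as [[Category:Widgets|sort key]], the loop above prints the category name without the sort key. A standalone check with a made-up line:

import re

line = "[[Category:Widgets|sort key]]"  # hypothetical sample line
m = re.match(r'^\[\[Category:(.+)\]\]$', line)
if m:
    print(m.group(1).split('|')[0])  # -> Widgets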
Example #42
import pprint
import sys

from common import load_json

if len(sys.argv) != 2:
    print('Usage: ' + sys.argv[0] + ' <json file to display>')
    sys.exit(1)

info = load_json(sys.argv[1])
pprint.pprint(info)
Example #43
def get_file_list_from_location(folder_path, location):
    """
        Returns list of files inside specified location.
    """
    result = []
    location = location[1:]
    location_full_path = folder_path if location == '/' else os.path.join(folder_path, location)

    # getting sorted folders only
    for temp in [f for f in sorted(os.listdir(location_full_path))
                 if os.path.isdir(os.path.join(location_full_path, f)) and '.' not in f]:
        full_path = os.path.join(folder_path, location, temp)
        result.append({
            'fullpath': full_path,
            'name': temp,
            'type': 'folder',
            'insidepath': full_path.replace(folder_path, '')
        })

    # getting sorted files only
    for temp in [f for f in sorted(os.listdir(location_full_path))
                 if os.path.isfile(os.path.join(location_full_path, f))]:
        full_path = os.path.join(folder_path, location, temp)
        result.append({
            'fullpath': full_path,
            'name': temp,
            'type': 'file',
            'unrolled': False,
            'threads': [],
            'insidepath': full_path.replace(folder_path, '')
        })

    # if location contains no comments, then return result
    if not os.path.exists(os.path.join(folder_path, '.Comments', location)) or not os.path.isdir(
            os.path.join(folder_path, '.Comments', location)):
        return result

    # getting threads only
    for temp in os.listdir(os.path.join(folder_path, '.Comments', location)):
        full_path = os.path.join(folder_path, '.Comments', location, temp)

        if not comment_file_name_pattern.match(temp) or not os.path.isdir(full_path):
            continue

        # getting metadata and the last comment for its timestamp
        metadata = load_json(os.path.join(full_path, 'meta'))
        comments = sorted(os.listdir(full_path), reverse=True)
        lastcomment = load_json(os.path.join(full_path, comments[1]))

        # main thread data
        data = {
            'timestamp': metadata['timestamp'],
            'name': metadata['topic'],
            'type': 'thread',
            'numberofcomments': len(os.listdir(full_path)) - 1,
            'unreadcomment': False,
            'lastcomment': lastcomment['timestamp'],
            'fullpath': full_path,
            'insidepath': full_path.replace(folder_path, '')
        }

        # searching for unread comments
        for comment in comments[1:]:
            comm = load_json(os.path.join(full_path, comment))
            if config['uid'] not in comm['readby']:
                data['unreadcomment'] = True
                break

        # updating the files this thread is about
        if metadata['fileabout']:
            for res in result:
                if res['type'] != 'file':
                    continue

                if res['insidepath'] == metadata['fileabout']:
                    if data['unreadcomment']:
                        res['unreadcomment'] = True
                    res['threads'].append(copy.deepcopy(data))
        else:
            result.append(data)

    return result
Example #44
    def check_text(self, file_path, dict_path, text, orig, tags, get_text):
        if text in self.whitelist:
            return
        warn_func = self.create_warn_function(file_path, dict_path, text)

        plain_text = ""
        for rendered_text in self.parse_text(text, orig, warn_func, get_text):
            plain_text += rendered_text.plain

        plain_text = self.do_all_replacements(plain_text, warn_func)

        for paragraph in grammalecte.text.getParagraph(plain_text):
            self.check_paragraph(paragraph, warn_func)


settings = common.load_json("config.json")

sparse_reader = common.string_cache('en_US')
sparse_reader.load_from_file(settings["string_cache_file"])

grammar_checker = GrammalecteChecker(sparse_reader, settings.get('check', {}),
                                     False)

french = common.PackFile()
french.load(settings["packfile"])
grammar_checker.check_pack(french)

print("Translations with errors: ", len(grammar_checker.all_spells))
print("Grammar mistakes: ", grammar_checker.grammar_count)
print("Spelling mistakes: ", grammar_checker.spell_count)
print("Most common bad words:")