Code Example #1
    def read_data(self, termin):

        query = "SELECT wahlkreis.id, bezirk.bnr, sprengel.snr, sprengel.berechtigte, " \
                "sprengel.abgegeben, sprengel.ungueltige, parteistimmen.pbez, parteistimmen.stimmanzahl " \
                "FROM wahlkreis " \
                "INNER JOIN bezirk ON wahlkreis.id = bezirk.wkid " \
                "INNER JOIN sprengel ON bezirk.bnr = sprengel.bnr " \
                "AND sprengel.termin = '" + termin + "' " \
                "INNER JOIN parteistimmen ON parteistimmen.termin = '" + termin + "' " \
                "AND parteistimmen.bnr = bezirk.bnr " \
                "AND parteistimmen.snr = sprengel.snr;"

        r = self.s.execute(query).fetchall()

        header = OrderedSet(["WK", "BZ", "SPR", "WBER", "ABG", "UNG"])
        data = []
        l = {}
        first_party = None
        for i in range(0, len(r)):
            current_party = r[i]["pbez"]
            # A new output row starts whenever the party sequence wraps back to
            # the first party seen (the query returns one row per party per Sprengel)
            if first_party is None or current_party == first_party:
                l = {}
                first_party = current_party
                l["WK"] = r[i]["id"]
                l["BZ"] = r[i]["bnr"]
                l["SPR"] = r[i]["snr"]
                l["WBER"] = r[i]["berechtigte"]
                l["ABG"] = r[i]["abgegeben"]
                l["UNG"] = r[i]["ungueltige"]
                data.append(l)
            l[current_party] = r[i]["stimmanzahl"]
            header.add(current_party)

        return data, list(header)
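Note: Examples #1-#3 splice termin into the SQL by string concatenation, which is fragile and open to SQL injection. A minimal sketch of the same query with a bound parameter instead, assuming a SQLAlchemy session (which accepts text() clauses) and keeping the names from Example #1:

from sqlalchemy import text

# Sketch only: ":termin" is bound safely by the driver instead of being
# concatenated into the SQL string
query = text(
    "SELECT wahlkreis.id, bezirk.bnr, sprengel.snr, sprengel.berechtigte, "
    "sprengel.abgegeben, sprengel.ungueltige, parteistimmen.pbez, parteistimmen.stimmanzahl "
    "FROM wahlkreis "
    "INNER JOIN bezirk ON wahlkreis.id = bezirk.wkid "
    "INNER JOIN sprengel ON bezirk.bnr = sprengel.bnr AND sprengel.termin = :termin "
    "INNER JOIN parteistimmen ON parteistimmen.termin = :termin "
    "AND parteistimmen.bnr = bezirk.bnr AND parteistimmen.snr = sprengel.snr;")
r = self.s.execute(query, {"termin": termin}).fetchall()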
Code Example #2
File: dbaccess.py Project: mritter-tgm/Wahlanalyse
    def load_into_csv_list(self):
        session = self.connector.get_session()

        query = "SELECT Wahlkreis.wahlkreisnr, Bezirk.bezirknr, Sprengel.sprengelnr, Sprengel.wahlberechtigte, " \
                "Sprengel.abgegebene, Sprengel.ungueltige, Stimmabgabe.abkuerzung, Stimmabgabe.anzahl " \
                "FROM Wahlkreis " \
                "INNER JOIN Bezirk ON Wahlkreis.wahlkreisnr = Bezirk.wahlkreisnr " \
                "INNER JOIN Sprengel ON Bezirk.bezirknr = Sprengel.bezirknr " \
                "AND Sprengel.termin = '" + self.wahltermin + "' " \
                                                              "INNER JOIN Stimmabgabe ON Stimmabgabe.termin = '" + self.wahltermin + "' " \
                                                                                                                                     "AND Stimmabgabe.Bezirknr = Bezirk.bezirknr " \
                                                                                                                                     "AND Stimmabgabe.sprengelnr = Sprengel.sprengelnr;"
        result = session.execute(query).fetchall()

        header = OrderedSet(["WK", "BZ", "SPR", "WBER", "ABG.", "UNG."])
        datalist = []
        line = {}
        first_party = None
        for i in range(0, len(result)):
            current_party = result[i]["abkuerzung"]
            if first_party is None or current_party == first_party:
                if line:
                    datalist.append(line)
                line = {}
                first_party = current_party
                line["WK"] = result[i]["wahlkreisnr"]
                line["BZ"] = result[i]["bezirknr"]
                line["SPR"] = result[i]["sprengelnr"]
                line["WBER"] = result[i]["wahlberechtigte"]
                line["ABG."] = result[i]["abgegebene"]
                line["UNG."] = result[i]["ungueltige"]
            line[current_party] = result[i]["anzahl"]
            header.add(current_party)

        if line:
            datalist.append(line)  # flush the final row; the loop only appends on the next group change

        return datalist, list(header)
Code Example #3
    def load(self):

        session = self.connector.get_session()

        query = "SELECT Wahlkreis.wahlkreisnr, Bezirk.bezirknr, Sprengel.sprengelnr, Sprengel.wahlberechtigte, Sprengel.abgegebene, Sprengel.ungueltige, Stimmabgabe.abkuerzung, Stimmabgabe.anzahl FROM Wahlkreis INNER JOIN Bezirk ON Wahlkreis.wahlkreisnr = Bezirk.wahlkreisnr INNER JOIN Sprengel ON Bezirk.bezirknr = Sprengel.bezirknr AND Sprengel.termin = '" + self.wahltermin + "' INNER JOIN Stimmabgabe ON Stimmabgabe.termin = '" + self.wahltermin + "' AND Stimmabgabe.Bezirknr = Bezirk.bezirknr AND Stimmabgabe.sprengelnr = Sprengel.sprengelnr;"
        result = session.execute(query).fetchall()

        header = OrderedSet(["WK", "BZ", "SPR", "WBER", "ABG", "UNG"])
        rawdata = []
        line = {}
        erste_partei = None
        for i in range(0, len(result)):
            aktuelle_partei = result[i]["abkuerzung"]
            if erste_partei is None or aktuelle_partei == erste_partei:
                if line:
                    rawdata.append(line)
                line = {}
                erste_partei = aktuelle_partei
                line["WK"] = result[i]["wahlkreisnr"]
                line["BZ"] = result[i]["bezirknr"]
                line["SPR"] = result[i]["sprengelnr"]
                line["WBER"] = result[i]["wahlberechtigte"]
                line["ABG"] = result[i]["abgegebene"]
                line["UNG"] = result[i]["ungueltige"]
            line[aktuelle_partei] = result[i]["anzahl"]
            header.add(aktuelle_partei)

        if line:
            rawdata.append(line)  # flush the final row; the loop only appends on the next group change

        return rawdata, list(header)
Code Example #4
def fake_ordering_service(config_ledger, config_state, db_manager):
    ordering_service = FakeSomething(
        db_manager=db_manager,
        post_batch_rejection=lambda *args, **kwargs: True,
        _logger=FakeSomething(info=lambda *args, **kwargs: True),
        requestQueues={
            0: OrderedSet(),
            1: OrderedSet(),
            2: OrderedSet()
        })
    ordering_service._revert = functools.partial(OrderingService._revert,
                                                 ordering_service)
    return ordering_service
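The functools.partial call above grafts the real OrderingService._revert onto the stub by pre-binding the fake object as self. The same trick in isolation (Greeter and the stub are illustrative, not from the source):

import functools
from types import SimpleNamespace

class Greeter:
    def greet(self):
        return "hi " + self.name

stub = SimpleNamespace(name="fake")
# Pre-bind the stub as `self`, so the real method runs against the fake object
stub.greet = functools.partial(Greeter.greet, stub)
print(stub.greet())  # -> hi fake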
Code Example #5
File: hosts.py Project: PhilipVis/Addeter
def update_hosts_file(hosts_urls, password):
    update_hosts_set = OrderedSet()

    reset_hosts_file(password)
    hosts_file_text = read_hosts_file(password)
    for line in hosts_file_text.splitlines():
        update_hosts_set.add(line + "\n")

    update_hosts_set.add(ADDETER_START + "\n")
    update_hosts_set.update(load_remote_hosts(hosts_urls))
    update_hosts_set.add(ADDETER_END + "\n")

    write_hosts_file(password, update_hosts_set)
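Because update_hosts_set is an OrderedSet, repeated lines are stored once in first-seen order, so duplicate lines (including repeated blank lines) from the original hosts file are collapsed. A quick illustration, assuming the ordered_set package:

from ordered_set import OrderedSet

lines = OrderedSet()
for line in ["127.0.0.1 localhost\n", "\n", "0.0.0.0 ads.example\n", "\n"]:
    lines.add(line)
print(list(lines))  # the second "\n" is dropped; three entries remain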
Code Example #6
def test_freshness_pre_prepare_only_when_no_requests_for_ledger(
        tconf, replica_with_requests, ordered, refreshed):
    replica, requests = replica_with_requests
    for ordered_ledger_id in ordered:
        replica.requestQueues[ordered_ledger_id] = OrderedSet(
            [requests[ordered_ledger_id].key])

    # send 3PC batch for requests
    assert len(replica.outBox) == 0
    set_current_time(replica, tconf.Max3PCBatchWait + 1)
    replica.send_3pc_batch()
    assert len(replica.outBox) == len(ordered)

    # wait for freshness timeout
    set_current_time(replica, FRESHNESS_TIMEOUT + 1)

    # order requests
    for i in range(len(ordered)):
        replica.order_3pc_key((0, i + 1))
    assert len(replica.outBox) == 2 * len(ordered)
    check_and_pop_ordered(replica, ordered)

    # refresh state for unordered
    replica.send_3pc_batch()
    assert len(replica.outBox) == len(refreshed)
    for refreshed_ledger_id in refreshed:
        check_and_pop_freshness_pre_prepare(replica, refreshed_ledger_id)
Code Example #7
File: conftest.py Project: sebastillar/indy-plenum
def orderer(consensus_data, internal_bus, external_bus, name, write_manager,
            txn_roots, state_roots, bls_bft_replica, tconf, stasher,
            validators):
    orderer = OrderingService(
        data=consensus_data(name),
        timer=QueueTimer(),
        bus=internal_bus,
        network=external_bus,
        write_manager=write_manager,
        bls_bft_replica=bls_bft_replica,
        freshness_checker=FreshnessChecker(
            freshness_timeout=tconf.STATE_FRESHNESS_UPDATE_INTERVAL),
        primaries_selector=RoundRobinConstantNodesPrimariesSelector(
            validators),
        stasher=stasher)
    orderer._data.node_mode = Mode.participating
    orderer._data.primary_name = "Alpha:0"
    orderer.get_txn_root_hash = lambda ledger, to_str=False: txn_roots[ledger]
    orderer.get_state_root_hash = lambda ledger, to_str=False: state_roots[ledger]
    orderer.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    orderer._revert = lambda *args, **kwargs: None
    orderer.db_manager.stores[LAST_SENT_PP_STORE_LABEL] = \
        FakeSomething(store_last_sent_pp_seq_no=lambda b, c: None)
    return orderer
Code Example #8
def orderer(consensus_data, internal_bus, external_bus, name, write_manager,
            txn_roots, state_roots, bls_bft_replica, tconf, stasher):
    orderer = OrderingService(
        data=consensus_data(name),
        timer=QueueTimer(),
        bus=internal_bus,
        network=external_bus,
        write_manager=write_manager,
        bls_bft_replica=bls_bft_replica,
        freshness_checker=FreshnessChecker(
            freshness_timeout=tconf.STATE_FRESHNESS_UPDATE_INTERVAL),
        stasher=stasher)
    orderer._data.node_mode = Mode.participating
    orderer._data.primary_name = "Alpha:0"
    orderer.get_txn_root_hash = lambda ledger, to_str=False: txn_roots[ledger]
    orderer.get_state_root_hash = lambda ledger, to_str=False: state_roots[ledger]
    orderer.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    orderer._revert = lambda *args, **kwargs: None
    orderer.db_manager.stores[LAST_SENT_PP_STORE_LABEL] = \
        FakeSomething(store_last_sent_pp_seq_no=lambda b, c: None)
    future_primaries_handler = FuturePrimariesBatchHandler(
        write_manager.database_manager, FakeSomething(nodeReg={}, nodeIds=[]))
    future_primaries_handler.get_primaries = lambda *args, **kwargs: orderer._data.primaries
    write_manager.register_batch_handler(future_primaries_handler)
    return orderer
Code Example #9
def set_status_filters(filter_args):
    status_filters = filter_args.get('status', [])
    return list(
        OrderedSet(
            chain((status_filters or REQUESTED_STATUSES),
                  DELIVERED_STATUSES if 'delivered' in status_filters else [],
                  SENDING_STATUSES if 'sending' in status_filters else [],
                  FAILURE_STATUSES if 'failed' in status_filters else [])))
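The chain + OrderedSet combination concatenates the selected status groups and deduplicates them while preserving first-seen order. A sketch with made-up stand-ins for the project's status constants, assuming the ordered_set package:

from itertools import chain
from ordered_set import OrderedSet

REQUESTED = ["created", "sending", "delivered"]  # illustrative values only
DELIVERED = ["delivered"]

merged = list(OrderedSet(chain(REQUESTED, DELIVERED)))
print(merged)  # ['created', 'sending', 'delivered'] - the duplicate is dropped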
Code Example #10
File: hosts.py Project: PhilipVis/Addeter
def reset_hosts_file(password):
    reset_hosts_set = OrderedSet()
    reading_addeter_lines = False

    hosts_file_text = read_hosts_file(password)

    if ADDETER_START in hosts_file_text:

        for line in hosts_file_text.splitlines():
            if reading_addeter_lines:
                # Skip the Addeter-managed block until its end marker
                if line == ADDETER_END:
                    reading_addeter_lines = False
            elif line == ADDETER_START:
                reading_addeter_lines = True
            else:
                reset_hosts_set.add(line + "\n")

        write_hosts_file(password, reset_hosts_set)
Code Example #11
def set_status_filters(filter_args):
    status_filters = filter_args.get("status", [])
    return list(
        OrderedSet(
            chain(
                (status_filters or REQUESTED_STATUSES),
                DELIVERED_STATUSES if "delivered" in status_filters else [],
                SENDING_STATUSES if "sending" in status_filters else [],
                FAILURE_STATUSES if "failed" in status_filters else [],
            )))
Code Example #12
def replica(tconf, viewNo, inst_id, ledger_ids, mock_timestamp, fake_requests,
            txn_roots, state_roots, request):
    node = ReplicaFakeNode(viewNo=viewNo,
                           quorums=Quorums(
                               getValueFromModule(request,
                                                  'nodeCount',
                                                  default=4)),
                           ledger_ids=ledger_ids)
    bls_bft_replica = FakeSomething(
        gc=lambda *args: None,
        update_pre_prepare=lambda params, l_id: params,
        validate_pre_prepare=lambda a, b: None,
        validate_prepare=lambda a, b: None,
        update_prepare=lambda a, b: a,
        process_prepare=lambda a, b: None,
        process_pre_prepare=lambda a, b: None,
        process_order=lambda *args: None)
    replica = Replica(node,
                      instId=inst_id,
                      isMaster=inst_id == 0,
                      config=tconf,
                      bls_bft_replica=bls_bft_replica,
                      get_current_time=mock_timestamp,
                      get_time_for_3pc_batch=mock_timestamp)
    node.add_replica(replica)
    ReplicaFakeNode.master_last_ordered_3PC = replica.last_ordered_3pc

    replica._ordering_service.last_accepted_pre_prepare_time = replica.get_time_for_3pc_batch()
    replica.primaryName = "Alpha:{}".format(replica.instId)
    replica.primaryNames[replica.viewNo] = replica.primaryName

    replica._ordering_service.get_txn_root_hash = lambda ledger, to_str=False: txn_roots[ledger]
    replica._ordering_service.get_state_root_hash = lambda ledger, to_str=False: state_roots[ledger]
    replica._ordering_service._revert = lambda ledgerId, stateRootHash, reqCount: None
    replica._ordering_service.post_batch_creation = lambda three_pc_batch: None

    replica._ordering_service.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()

    replica._ordering_service._get_primaries_for_ordered = lambda pp: [
        replica.primaryName
    ]
    replica._ordering_service._get_node_reg_for_ordered = lambda pp: [
        "Alpha", "Beta", "Gamma", "Delta"
    ]

    def reportSuspiciousNodeEx(ex):
        assert False, ex

    replica.node.reportSuspiciousNodeEx = reportSuspiciousNodeEx

    return replica
Code Example #13
def replica(replica):
    replica.node.requests = Requests()
    replica.isMaster = True
    replica.node.replica = replica
    replica.node.doDynamicValidation = functools.partial(randomDynamicValidation, replica.node)
    replica.node.applyReq = lambda self, *args, **kwargs: True
    replica.stateRootHash = lambda self, *args, **kwargs: base58.b58encode(randomString(32)).decode()
    replica.txnRootHash = lambda self, *args, **kwargs: base58.b58encode(randomString(32)).decode()
    replica.node.onBatchCreated = lambda self, *args, **kwargs: True
    replica.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    return replica
Code Example #14
def reduceList(nestedList):
    MainList = []

    def subList(x):
        # Recursively flatten nested lists, appending leaf items in order
        if isinstance(x, list):
            for item in x:
                subList(item)
        else:
            MainList.append(x)

    subList(nestedList)
    return list(OrderedSet(MainList))
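reduceList flattens an arbitrarily nested list depth-first, then deduplicates while keeping first-seen order, e.g.:

print(reduceList([1, [2, [3, 1]], 2]))  # -> [1, 2, 3]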
Code Example #15
def ord_delay(nodes):
    delay_msgs = dict()
    processing_methods = dict()
    for n in nodes:
        delay_msgs.setdefault(n.name, OrderedSet())
        processing_methods[n.name] = n.try_processing_ordered
        # Bind n.name at definition time: a bare lambda would close over the loop
        # variable and route every node's messages to the last node's set
        n.try_processing_ordered = lambda msg, name=n.name: delay_msgs[name].add(msg)

    yield

    for n in nodes:
        n.try_processing_ordered = processing_methods[n.name]
        for msg in delay_msgs[n.name]:
            n.try_processing_ordered(msg)
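ord_delay yields once in the middle, so it is presumably wrapped with contextlib.contextmanager where it is defined; a usage sketch under that assumption (the test action is hypothetical):

from contextlib import contextmanager

with contextmanager(ord_delay)(nodes):
    run_some_ordering_traffic(nodes)  # hypothetical test action
# On exit the original methods are restored and the buffered
# Ordered messages are replayed in arrival order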
Code Example #16
File: conftest.py Project: scroogeT/indy-plenum
def orderer(consensus_data, internal_bus, external_bus, name, write_manager, txn_roots, state_roots, bls_bft_replica,
            is_master):  # is_master is used below but missing from this excerpt's signature
    orderer = OrderingService(data=consensus_data(name),
                              timer=QueueTimer(),
                              bus=internal_bus,
                              network=external_bus,
                              write_manager=write_manager,
                              bls_bft_replica=bls_bft_replica,
                              is_master=is_master)
    orderer._data.is_participating = True
    orderer.primary_name = "Alpha:0"
    orderer.l_txnRootHash = lambda ledger, to_str=False: txn_roots[ledger]
    orderer.l_stateRootHash = lambda ledger, to_str=False: state_roots[ledger]
    orderer.requestQueues[DOMAIN_LEDGER_ID] = OrderedSet()
    orderer.l_revert = lambda *args, **kwargs: None
    return orderer
Code Example #17
    def graph(self,
              filename,
              options,
              graph_variables: List[Run] = None,
              title=False,
              series=None):
        """series is a list of triplet (script,build,results) where
        result is the output of a script.execute_all()"""
        if series is None:
            series = []

        versions = []

        ymin, ymax = (float('inf'), 0)

        # If no graph variables, use the runs of the first series
        if graph_variables is None:
            graph_variables = []
            for run, results in series[0][2].items():
                graph_variables.append(run)

        # Get all scripts, find versions
        for i, (testie, build, all_results) in enumerate(series):
            self.scripts.add(testie)

        # Combine variables as per the graph_combine_variables config parameter
        for tocombine in self.configlist('graph_combine_variables', []):
            tomerge = tocombine.split('+')
            newgraph_variables = []
            run_map = {}
            for run in graph_variables:
                newrun = run.copy()
                vals = []
                for var, val in run.variables.items():
                    if var in tomerge:
                        del newrun.variables[var]
                        vals.append(str(val[1] if type(val) is tuple else val))
                newrun.variables[tocombine] = ', '.join(OrderedSet(vals))
                newgraph_variables.append(newrun)
                run_map[run] = newrun

            graph_variables = newgraph_variables

            newseries = []
            for i, (testie, build, all_results) in enumerate(series):
                new_all_results = {}
                for run, run_results in all_results.items():
                    newrun = run_map.get(run, None)
                    if newrun is not None:
                        new_all_results[newrun] = run_results
                newseries.append((testie, build, new_all_results))
            series = newseries

        # Data transformation: reject outliers, turn result lists into arrays, filter according to graph_variables, and collect vars_all and vars_values
        filtered_series = []
        vars_values = {}
        for i, (testie, build, all_results) in enumerate(series):
            new_results = {}
            for run, run_results in all_results.items():
                if run in graph_variables:
                    for result_type, results in run_results.items():
                        if options.graph_reject_outliers:
                            results = self.reject_outliers(
                                np.asarray(results), testie)
                        else:
                            results = np.asarray(results)
                        new_results.setdefault(run, {})[result_type] = results
                    for k, v in run.variables.items():
                        vars_values.setdefault(k, set()).add(v)

            if new_results:
                filtered_series.append((testie, build, new_results))
                versions.append(build.pretty_name())
            else:
                print("No valid data for %s" % build)
        series = filtered_series

        # Transform results into variables as the graph_result_as_variable option asks
        for result_types, var_name in self.configdict(
                'graph_result_as_variable', {}).items():
            result_to_variable_map = set()
            for result_type in result_types.split('+'):
                result_to_variable_map.add(result_type)
            vars_values[var_name] = result_to_variable_map

            transformed_series = []
            for i, (testie, build, all_results) in enumerate(series):
                new_results = {}

                for run, run_results in all_results.items():
                    for stripout in result_to_variable_map:
                        variables = run.variables.copy()
                        new_run_results = {}
                        nodata = True
                        for result_type, results in run_results.items():
                            if result_type in result_to_variable_map:
                                if result_type == stripout:
                                    variables[var_name] = result_type
                                    nodata = False
                                    new_run_results[var_name] = results
                            else:
                                new_run_results[result_type] = results

                        if not nodata:
                            new_results[Run(variables)] = new_run_results

                if new_results:
                    transformed_series.append((testie, build, new_results))
            series = transformed_series
        vars_all = set()
        for i, (testie, build, all_results) in enumerate(series):
            for run, run_results in all_results.items():
                vars_all.add(run)
        vars_all = list(vars_all)
        vars_all.sort()

        dyns = []
        statics = {}
        for k, v in vars_values.items():
            if len(v) > 1:
                dyns.append(k)
            else:
                statics[k] = list(v)[0]

        ndyn = len(dyns)
        nseries = len(series)

        if nseries == 1 and ndyn > 0 and not (ndyn == 1 and all_num(
                vars_values[dyns[0]]) and len(vars_values[dyns[0]]) > 2):
            """Only one serie: expand one dynamic variable as serie, but not if it was plotable as a line"""
            script, build, all_results = series[0]
            if self.config("var_series") and self.config("var_series") in dyns:
                key = self.config("var_series")
            else:
                key = None
                # First pass: use the non-numerical variable with the most points
                n_val = 0
                nonums = []
                for i in range(ndyn):
                    k = dyns[i]
                    if not all_num(vars_values[k]):
                        nonums.append(k)
                        if len(vars_values[k]) > n_val:
                            key = k
                            n_val = len(vars_values[k])
                if key is None:
                    # Second pass if that missed: use the numerical variable with the fewest points when ndyn <= 2 (line plot), else the one with the most points
                    n_val = 0 if ndyn > 2 else 999
                    for i in range(ndyn):
                        k = dyns[i]
                        if (ndyn > 2 and len(vars_values[k]) > n_val) or (
                                ndyn <= 2 and len(vars_values[k]) < n_val):
                            key = k
                            n_val = len(vars_values[k])

            # if graph_serie:
            #     key=graph_serie
            dyns.remove(key)
            ndyn -= 1
            series = []
            versions = []
            values = list(vars_values[key])
            values.sort()
            new_varsall = set()
            for value in values:
                newserie = {}
                for run, run_results in all_results.items():
                    #                    if (graph_variables and not run in graph_variables):
                    #                        continue
                    if (run.variables[key] == value):
                        newrun = run.copy()
                        del newrun.variables[key]
                        newserie[newrun] = run_results
                        new_varsall.add(newrun)

                series.append((script, build, newserie))
                if type(value) is tuple:
                    value = value[1]
                versions.append(value)
                legend_title = self.var_name(key)
            nseries = len(series)
            vars_all = list(new_varsall)
            vars_all.sort()
        else:
            legend_title = None

        if ndyn == 0:
            key = "version"
        elif ndyn == 1:
            key = dyns[0]
        else:
            key = "Variables"

        data_types = dataset.convert_to_xye(
            [(all_results, script) for script, build, all_results in series],
            vars_all, key)

        if options.output is not None:
            for result_type, data in data_types.items():
                type_filename = npf.build_filename(testie, build,
                                                   options.output, statics,
                                                   'csv', result_type)
                with open(type_filename, 'w') as csvfile:
                    wr = csv.writer(csvfile,
                                    delimiter=' ',
                                    quotechar='"',
                                    quoting=csv.QUOTE_MINIMAL)
                    for i, (x, y, e) in enumerate(data):
                        if (i == 0):
                            wr.writerow(x)
                        wr.writerow(y)
                print("Output written to %s" % type_filename)

        plots = OrderedDict()
        for result_type, data in data_types.items():
            if result_type in self.configlist('graph_subplot_results', []):
                plots.setdefault('common', []).append(result_type)
            else:
                plots[result_type] = [result_type]

        ret = {}
        for whatever, figure in plots.items():
            for isubplot, result_type in enumerate(figure):
                data = data_types[result_type]

                if len(figure) > 1:
                    plt.subplot(len(figure), 1, isubplot + 1)

                if ndyn == 0:
                    """No dynamic variables : do a barplot X=version"""
                    self.do_simple_barplot(versions, result_type, data)
                elif ndyn == 1 and len(vars_all) > 2:
                    """One dynamic variable used as X, series are version line plots"""
                    self.do_line_plot(versions, key, result_type, data)
                else:
                    """Barplot. X is all seen variables combination, series are version"""
                    self.do_barplot(series, vars_all, dyns, versions,
                                    result_type, data)

                type_config = "" if not result_type else "-" + result_type

                if ndyn > 0 and bool(self.config_bool('graph_legend', True)):
                    plt.legend(loc=self.config("legend_loc"),
                               title=legend_title)

                if "result-" + result_type in self.config(
                        'var_log', {}) or "result" in self.config(
                            'var_log', {}):
                    plt.yscale('log')

                if key in self.config('var_log', {}):
                    plt.xscale('log')

                plt.xlabel(self.var_name(key))

                yname = self.var_name("result", result_type=result_type)
                if yname != "result":
                    plt.ylabel(yname)

                var_lim = self.scriptconfig("var_lim", "result" + type_config,
                                            None)
                if var_lim:
                    n = var_lim.split('-')
                    if len(n) == 2:
                        ymin, ymax = (float(x) for x in n)
                        plt.ylim(ymin=ymin, ymax=ymax)
                    else:
                        plt.ylim(ymin=float(n[0]))
                else:
                    if (ymin >= 0 > plt.ylim()[0]):
                        plt.ylim(0, plt.ylim()[1])

                    if (ymin < ymax / 5):
                        plt.ylim(ymin=0)

                if options.graph_size:
                    fig = plt.gcf()
                    fig.set_size_inches(options.graph_size[0],
                                        options.graph_size[1])

                if title and isubplot == 0:
                    plt.title(title)

                try:
                    plt.tight_layout()
                except ValueError:
                    print("WARNING: Too many points or variables to graph")
                    print("Try reducing the number of dynamic variables : ")
                    for dyn in dyns:
                        print(dyn)
                    return None

                if len(figure) > 1:
                    if isubplot < len(figure) - 1:
                        continue
                    else:
                        result_type = 'common'
                if not filename:
                    buf = io.BytesIO()
                    plt.savefig(buf, format='png')
                    buf.seek(0)
                    ret[result_type] = buf.read()
                else:
                    type_filename = npf.build_filename(testie, build,
                                                       options.graph_filename,
                                                       statics, 'pdf',
                                                       result_type)
                    plt.savefig(type_filename)
                    ret[result_type] = None
                    print("Graph of test written to %s" % type_filename)
                plt.clf()
        return ret
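One OrderedSet use buried in graph() is ', '.join(OrderedSet(vals)), which merges the values of combined variables without repeats while keeping their original order. In isolation, assuming the ordered_set package:

from ordered_set import OrderedSet

vals = ["10", "fast", "10"]
print(", ".join(OrderedSet(vals)))  # -> 10, fast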
Code Example #18
File: Solution.py Project: gnitsua/hashcode2020
    @staticmethod  # no self parameter, so presumably a static method in the original class
    def parse_dataset(filepath):
        with open(filepath, "r") as file:
            header_line = file.readline().strip("\n").split(" ")
            assert len(header_line) == 3

            result = Solution(int(header_line[2]))

            # Read book scores
            books_line = file.readline().strip("\n").split(" ")
            for book_id, book_score in enumerate(books_line):
                result.book_scores[book_id] = int(book_score)

            # Read each library
            while True:
                library_info_line = file.readline().strip("\n").split(" ")
                if library_info_line == [""]:  # readline() returns "" at EOF, so the file should end with a blank line
                    break
                assert len(library_info_line) == 3
                library_book_line = map(int, file.readline().strip("\n").split(" "))
                result.libraries.append(Library(len(result.libraries),
                                                int(library_info_line[1]),
                                                int(library_info_line[2]),
                                                OrderedSet(library_book_line)))

        return result
Code Example #19
    def _add_ledgers(self):
        for lid in self._write_manager.ledger_ids:
            self._orderer.requestQueues[lid] = OrderedSet()
Code Example #20
File: propagator.py Project: aigoncharov/plenum
class Propagator:
    MAX_REQUESTED_KEYS_TO_KEEP = 1000

    def __init__(self):
        self.requests = Requests()
        self.requested_propagates_for = OrderedSet()

    # noinspection PyUnresolvedReferences
    def propagate(self, request: Request, clientName):
        """
        Broadcast a PROPAGATE to all other nodes

        :param request: the REQUEST to propagate
        """
        if self.requests.has_propagated(request, self.name):
            logger.trace("{} already propagated {}".format(self, request))
        else:
            self.requests.add_propagate(request, self.name)
            propagate = self.createPropagate(request, clientName)
            logger.info("{} propagating request {} from client {}".format(
                self, (request.identifier, request.reqId), clientName),
                        extra={
                            "cli": True,
                            "tags": ["node-propagate"]
                        })
            self.send(propagate)

    @staticmethod
    def createPropagate(request: Union[Request, dict],
                        client_name) -> Propagate:
        """
        Create a new PROPAGATE for the given REQUEST.

        :param request: the client REQUEST
        :return: a new PROPAGATE msg
        """
        if not isinstance(request, (Request, dict)):
            logger.error(
                "{}Request not formatted properly to create propagate".format(
                    THREE_PC_PREFIX))
            return
        logger.trace("Creating PROPAGATE for REQUEST {}".format(request))
        request = request.as_dict if isinstance(request, Request) else \
            request
        if isinstance(client_name, bytes):
            client_name = client_name.decode()
        return Propagate(request, client_name)

    # noinspection PyUnresolvedReferences
    def canForward(self, request: Request):
        """
        Determine whether to forward client REQUESTs to replicas, based on the
        following logic:

        - If exactly f+1 PROPAGATE requests are received, then forward.
        - If fewer than f+1 are received, there may be no consensus on the
            REQUEST yet, so don't forward.
        - If more than f+1, the REQUEST was already forwarded to the
            replicas, so don't forward again.

        Even if the node hasn't received the client REQUEST itself, the
        REQUEST can be forwarded once it has received enough PROPAGATE
        messages for it.

        :param request: the client REQUEST
        """

        if self.requests.forwarded(request):
            return 'already forwarded'

        # If not enough PROPAGATEs, don't bother comparing
        if not self.quorums.propagate.is_reached(self.requests.votes(request)):
            return 'not finalised'

        req = self.requests.req_with_acceptable_quorum(request,
                                                       self.quorums.propagate)
        if req:
            self.requests.set_finalised(req)
            return None
        else:
            return 'not finalised'

    # noinspection PyUnresolvedReferences
    def forward(self, request: Request):
        """
        Forward the specified client REQUEST to the other replicas on this node

        :param request: the REQUEST to propagate
        """
        key = request.key
        logger.debug('{} forwarding request {} to {} replicas'.format(
            self, key, self.replicas.sum_inbox_len))

        self.replicas.pass_message(ReqKey(*key))
        self.monitor.requestUnOrdered(*key)
        self.requests.mark_as_forwarded(request, self.replicas.num_replicas)

    # noinspection PyUnresolvedReferences
    def recordAndPropagate(self, request: Request, clientName):
        """
        Record the request in the list of requests and propagate.

        :param request:
        :param clientName:
        """
        self.requests.add(request)
        self.propagate(request, clientName)
        self.tryForwarding(request)

    def tryForwarding(self, request: Request):
        """
        Try to forward the request if the required conditions are met.
        See the method `canForward` for the conditions to check before
        forwarding a request.
        """
        cannot_reason_msg = self.canForward(request)
        if cannot_reason_msg is None:
            # The node may not have the client REQUEST itself, but it has
            # enough PROPAGATEs for it to move ahead
            self.forward(request)
        else:
            logger.debug("{} not forwarding request {} to its replicas "
                         "since {}".format(self, request, cannot_reason_msg))

    def request_propagates(self, req_keys):
        """
        Request PROPAGATEs for the given request keys. Since replicas can
        request PROPAGATEs independently of each other, check whether each
        key has already been requested recently.
        :param req_keys:
        :return:
        """
        i = 0
        for (idr, req_id) in req_keys:
            if (idr, req_id) not in self.requested_propagates_for:
                self.request_msg(PROPAGATE, {
                    f.IDENTIFIER.nm: idr,
                    f.REQ_ID.nm: req_id
                })
                self._add_to_recently_requested((idr, req_id))
                i += 1
            else:
                logger.debug(
                    '{} already requested PROPAGATE recently for {}'.format(
                        self, (idr, req_id)))
        return i

    def _add_to_recently_requested(self, key):
        while len(self.requested_propagates_for
                  ) > self.MAX_REQUESTED_KEYS_TO_KEEP:
            self.requested_propagates_for.pop(last=False)
        self.requested_propagates_for.add(key)
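_add_to_recently_requested keeps requested_propagates_for as a bounded, insertion-ordered window: once the set exceeds MAX_REQUESTED_KEYS_TO_KEEP, the oldest keys are evicted via pop(last=False). The same pattern in miniature, assuming an OrderedSet with an OrderedDict-style pop(last=...) as used above:

from orderedset import OrderedSet  # assumption: the C 'orderedset' package, which supports pop(last=...)

recent = OrderedSet()
MAX_KEEP = 3

def remember(key):
    # Evict the oldest entries first, then record the new key
    while len(recent) > MAX_KEEP:
        recent.pop(last=False)
    recent.add(key)

for k in range(6):
    remember(k)
print(list(recent))  # -> [2, 3, 4, 5]: the oldest keys were evicted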
Code Example #21
File: propagator.py Project: aigoncharov/plenum
    def __init__(self):
        self.requests = Requests()
        self.requested_propagates_for = OrderedSet()