Example no. 1
    def reducer_phase3(self, key, values):
        items = list(values)

        movie1, movie2 = key

        values1 = []
        values2 = []

        for item in items:
            (rating1, numberOfRaters1, rating2, numberOfRaters2) = item

            values1.append(rating1)
            values2.append(rating2)

        min1 = min(values1)
        min2 = min(values2)
        max1 = max(values1)
        max2 = max(values2)

        avg1 = statistics.mean(values1)
        avg2 = statistics.mean(values2)
        g1 = statistics.geometric_mean(values1)
        g2 = statistics.geometric_mean(values2)
        h1 = statistics.harmonic_mean(values1)
        h2 = statistics.harmonic_mean(values2)

        features1 = [min1, max1, avg1, g1, h1]
        features2 = [min2, max2, avg2, g2, h2]

        euclidian = self.calculate_euclidian(features1, features2)
        cosine = self.cosine_similarity(features1, features2)

        yield (movie1, movie2), (euclidian, cosine)
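The helper methods `calculate_euclidian` and `cosine_similarity` are not part of the snippet; a minimal standalone sketch of the same feature-vector comparison might look like this (the helpers and the sample ratings below are assumptions, not the originals):

import math
import statistics

def euclidean_distance(a, b):
    # Straight-line distance between two equal-length feature vectors
    return math.sqrt(sum((x - y) ** 2 for x, y in zip(a, b)))

def cosine_similarity(a, b):
    # Cosine of the angle between the two vectors (1.0 = same direction)
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return dot / norm if norm else 0.0

ratings1, ratings2 = [4.0, 3.5, 5.0], [3.0, 4.5, 4.0]
features1 = [min(ratings1), max(ratings1), statistics.mean(ratings1),
             statistics.geometric_mean(ratings1), statistics.harmonic_mean(ratings1)]
features2 = [min(ratings2), max(ratings2), statistics.mean(ratings2),
             statistics.geometric_mean(ratings2), statistics.harmonic_mean(ratings2)]
print(euclidean_distance(features1, features2), cosine_similarity(features1, features2))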
Example no. 2
    def __init__(self, name, obj_pattern, char_spacing, line_spacing, line_z_offset, letters):
        """
        `name` - English description of the font; only used in mod comments.
        `obj_pattern` - Format string to generate the StaticMesh object paths.
        `char_spacing` - Extra spacing to put in between characters.
        `line_spacing` - Extra spacing to put in between lines.
        `line_z_offset` - How much we need to lower the text so that it's oriented properly
            along the Z axis, in units of line height.  This varies depending on the font
            (just due to how the StaticMesh objects are structured, I guess).  Just determined
            via trial-and-error.
        `letters` - A list of `Letter` objects belonging to this font.
        """
        self.name = name
        self.obj_pattern = obj_pattern
        self.char_spacing = char_spacing
        self.line_spacing = line_spacing
        self.line_z_offset = line_z_offset
        self.letters = {}
        for letter in letters:
            letter.set_mesh_path(obj_pattern)
            self.letters[letter.letter] = letter

        # Compute the width of our "space" char, and also our character-derived line height.
        # We're going to omit any letters which have a mesh_override specified, since those
        # may be special chars like commas, periods, etc.
        relevant_chars = []
        for l in self.letters.values():
            if not l.mesh_override:
                relevant_chars.append(l)
        self.space_width = statistics.geometric_mean([l.width for l in relevant_chars])
        self.line_height = statistics.geometric_mean([l.height for l in relevant_chars])
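For reference, the space-width and line-height derivation above reduces to a geometric mean over the "plain" letters; a minimal sketch using a hypothetical stand-in for the `Letter` class (not the real one):

import statistics
from collections import namedtuple

Letter = namedtuple('Letter', ['letter', 'width', 'height', 'mesh_override'])

letters = [
    Letter('A', 10.0, 14.0, None),
    Letter('B', 9.0, 14.5, None),
    Letter(',', 3.0, 4.0, '/Hypothetical/Comma/Mesh'),  # skipped: has a mesh_override
]

relevant = [l for l in letters if not l.mesh_override]
space_width = statistics.geometric_mean([l.width for l in relevant])
line_height = statistics.geometric_mean([l.height for l in relevant])
print(space_width, line_height)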
Example no. 3
def stats():
    listNum = int(input('How many numbers in your list?:'))
    nums = []
    for i in range(listNum):
        nums.append(int(input("")))

    op = input('What kind of operation would you like to do?\
        \nChoose between "mode, mean, gmean, hmean, median, range, stdev" : ')

    if op == 'mode':
        return statistics.multimode(nums)

    elif op == 'mean':
        return statistics.mean(nums)

    elif op == 'gmean':
        return statistics.geometric_mean(nums)

    elif op == 'hmean':
        return statistics.harmonic_mean(nums)

    elif op == 'median':
        return statistics.median(nums)

    elif op == 'stdev':
        return statistics.stdev(nums)

    elif op == 'range':
        return max(nums) - min(nums)
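For a quick, non-interactive reference, the same statistics calls on a fixed list (illustrative values only):

import statistics

nums = [2, 4, 4, 8]
print(statistics.multimode(nums))       # [4]
print(statistics.mean(nums))            # 4.5
print(statistics.geometric_mean(nums))  # ~4.0
print(statistics.harmonic_mean(nums))   # ~3.56
print(statistics.median(nums))          # 4.0
print(statistics.stdev(nums))           # ~2.52
print(max(nums) - min(nums))            # 6 (range)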
Example no. 4
def hecSerialTable(graphs, data, outFile):
    with open(outFile, "a+") as f:
        print("CPU HEC Map Parallel v Serial Table", file=f)
        cpuvgpuratio = []
        for graph, graphSanitized in graphOrder:
            l = [graphSanitized]
            values = data[graph]
            cpu = values[("spec", "hecCPU")]
            gpu = values[("spec", "hec")]
            serial = values[("spec", "hecCPU_real_serial")]
            cpu_coarsen = cpu["coarsen-map-duration-seconds"]["median"] + cpu[
                "coarsen-build-duration-seconds"]["median"]
            gpu_coarsen = gpu["coarsen-map-duration-seconds"]["median"] + gpu[
                "coarsen-build-duration-seconds"]["median"]
            l.append("{:.2f}".format(serial["number-coarse-levels"]["median"] /
                                     cpu["number-coarse-levels"]["median"]))
            l.append("{:.2f}".format(gpu["number-coarse-levels"]["median"] /
                                     cpu["number-coarse-levels"]["median"]))
            l.append("{:.0f}".format(
                abs(serial["number-coarse-levels"]["median"] -
                    cpu["number-coarse-levels"]["median"])))
            l.append("{:.2f}".format(
                serial["coarsen-map-duration-seconds"]["median"] /
                cpu["coarsen-map-duration-seconds"]["median"]))
            l.append("{:.2f}".format(cpu_coarsen / gpu_coarsen))
            cpuvgpuratio.append(cpu_coarsen / gpu_coarsen)
            print(" & ".join(l) + " \\\\", file=f)
        print(" & {:.2f}".format(geometric_mean(cpuvgpuratio)) + " \\\\",
              file=f)
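The closing row above prints a geometric mean of the per-graph CPU/GPU coarsening-time ratios; in isolation the pattern is simply the following (the ratios below are hypothetical):

from statistics import geometric_mean

cpuvgpuratio = [1.8, 2.4, 0.9, 3.1]  # hypothetical per-graph cpu_coarsen / gpu_coarsen ratios
print(" & {:.2f}".format(geometric_mean(cpuvgpuratio)) + " \\\\")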
Example no. 5
 def update_probability(self, infections: int) -> None:
     if self.state.cases > 0:
         self.probability_window.pop(0)
         self.probability_window.append(infections / self.state.cases)
         if all(self.probability_window):
             self.state.probability = geometric_mean(
                 self.probability_window)
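The same sliding-window smoothing, standalone (window length and ratios are illustrative; the real state object is not shown):

from statistics import geometric_mean

probability_window = [1.1, 0.9, 1.2]   # most recent infections/cases ratios
new_ratio = 0.8
probability_window.pop(0)
probability_window.append(new_ratio)
if all(probability_window):            # guard: geometric_mean requires positive values
    print(geometric_mean(probability_window))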
Example no. 6
    def get_brightness(video):
        """Return the average brightness value for a single video."""
        brightness = []
        cap = cv2.VideoCapture(video)
        while True:
            # Capture frame-by-frame; stop once no more frames can be read
            ret, frame = cap.read()
            if not ret:
                break

            # Convert the frame to grayscale and record its mean intensity
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            brightness.append(cv2.mean(gray)[0])

        # When everything is done, release the capture
        cap.release()

        # Shift values by +1 so completely black frames don't break the geometric mean
        brig_geom = round(statistics.geometric_mean([x + 1 for x in brightness]), 1)

        return brig_geom
Example no. 7
def cpuBuildTable(graphs, data, outFile):
    with open(outFile, "a+") as f:
        print("CPU Build Comparison Table", file=f)
        grco = []
        hashmap = []
        spgemm = []
        for graph, graphSanitized in graphOrder2:
            l = [graphSanitized]
            if graph == "gmean":
                l.append("0")
                l.append("{:.0f}".format(100 * geometric_mean(grco)))
                l.append("{:.2f}".format(geometric_mean(hashmap)))
                l.append("{:.2f}".format(geometric_mean(spgemm)))
                grco = []
                hashmap = []
                spgemm = []
            else:
                values = data[graph]
                exp = values[("spec", "hecCPU")]
                l.append("{:.2f}".format(
                    exp["coarsen-duration-seconds"]["median"]))
                l.append("{:.0f}".format(
                    100 * exp["coarsen-build-duration-seconds"]["median"] /
                    exp["coarsen-duration-seconds"]["median"]))
                grco.append(exp["coarsen-build-duration-seconds"]["median"] /
                            exp["coarsen-duration-seconds"]["median"])
                try:
                    exp_map = values[("spec", "hecCPU_hashmap")]
                    l.append("{:.2f}".format(
                        exp_map["coarsen-build-duration-seconds"]["median"] /
                        exp["coarsen-build-duration-seconds"]["median"]))
                    hashmap.append(
                        exp_map["coarsen-build-duration-seconds"]["median"] /
                        exp["coarsen-build-duration-seconds"]["median"])
                except KeyError:
                    l.append(dnf)
                try:
                    exp_gemm = values[("spec", "hecCPU_gemm")]
                    l.append("{:.2f}".format(
                        exp_gemm["coarsen-build-duration-seconds"]["median"] /
                        exp["coarsen-build-duration-seconds"]["median"]))
                    spgemm.append(
                        exp_gemm["coarsen-build-duration-seconds"]["median"] /
                        exp["coarsen-build-duration-seconds"]["median"])
                except KeyError:
                    l.append(dnf)
            print(" & ".join(l) + " \\\\", file=f)
Example no. 8
 def calc_percentiles(
     self, lats: typing.List[float]
 ) -> typing.Tuple[float, float, float, float, float, float]:
     lats_len = len(lats)
     lats.sort()
     # Approximately the p1, p25, p50, p75 and p99 latencies, plus the geometric mean
     return (lats[lats_len // 99], lats[lats_len // 4], lats[lats_len // 2],
             lats[lats_len * 3 // 4], lats[min(lats_len - lats_len // 99,
                                               lats_len - 1)],
             statistics.geometric_mean(lats))
Example no. 9
    def attack_success(self):
        units_success = [
            unit.attack_success for unit in self.units if not unit.recharge
        ]

        if units_success:
            return geometric_mean(units_success)
        else:
            return 0
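A property of the geometric mean worth noting here: it is the n-th root of the product of the per-unit success chances, so a single near-zero unit pulls the result down far more than an arithmetic average would. With made-up values:

from statistics import geometric_mean

units_success = [0.9, 0.8, 0.05]       # one unit is almost certain to fail
print(geometric_mean(units_success))   # ~0.33, versus an arithmetic mean of ~0.58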
Example no. 10
def calc_all(vals):
    return {
        "mean": mean(vals),
        "gmean": geometric_mean(vals),
        "hmean": harmonic_mean(vals),
        "median": median(vals),
        "mid-range": mid_range(vals),
        "rms": root_mean_square(vals),
    }
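`mid_range` and `root_mean_square` are not part of the `statistics` module and are not shown above; plausible definitions (assumptions, not the original helpers) would be:

import math

def mid_range(vals):
    # Midpoint between the smallest and largest value
    return (min(vals) + max(vals)) / 2

def root_mean_square(vals):
    # Square root of the mean of the squared values
    return math.sqrt(sum(v * v for v in vals) / len(vals))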
Example no. 11
 def get_attack_success_probability(self) -> float:
     all_attacks = []
     try:
         for unit in self.units:
             attack_probability = unit.get_attack_success_probability()
             all_attacks.append(attack_probability)
         res = geometric_mean(all_attacks)
     except ValueError:
         res = 0
     return res
Example no. 12
    def stat_v2(self):
        user_es = self._calculate_es()
        not_good_user = {}
        for u in user_es:

            self.user_ei[u] = float(user_es[u]) / (self.user_size[u] * 2)
            if self.user_ei[u] == 0:
                self.user_ei[u] = 0.1
            if self.user_ei[u] < 0.5:
                print([
                    self.user_factor[u]["reliability_factor"], self.user_ei[u]
                ])
                self.user_factor[u]["reliability_factor"] = geometric_mean([
                    self.user_factor[u]["reliability_factor"], self.user_ei[u]
                ])
            else:
                self.user_factor[u]["reliability_factor"] = harmonic_mean([
                    self.user_factor[u]["reliability_factor"], self.user_ei[u]
                ])
            if self.user_ei[u] < 0.5:
                self.user_reward[u] = self.user_size[u] * self.user_factor[u][
                    "reliability_factor"]
                if self.user_ei[u] < 0.25:
                    not_good_user[u] = self.user_factor[u][
                        "reliability_factor"]
            else:
                self.user_reward[u] = self.user_size[u]

            self.user_factor[u]['dissent_tolerance'] = math.log(
                len(self.all_icp)
            ) + self.all_user_factor[u][0]['dissent_tolerance']

            rc = random.gauss(self.user_reward[u] / self.user_size[u],
                              self.user_factor[u]['dissent_tolerance'])
            if rc < 1:
                self.user_factor[u][
                    'greed'] = self.user_factor[u]['greed'] * 0.9

            print(f'用户 id={u} {self.users[u][UNAME_KEY]}, '
                  f'实际 size={self.user_real_size[u]}, '
                  f'申报了 {self.user_size[u]}, 最终得到了 '
                  f'{self.user_reward[u]}, 他本次的 EI 值为 {self.user_ei[u]}'
                  f'他的可靠系数为 {self.user_factor[u]["reliability_factor"]}, '
                  f'由于他具有异议容忍度, 他心理估计值为 {rc}, '
                  f'他的贪婪度为 {self.user_factor[u]["greed"]}')
        if len(not_good_user) > 0:
            sorted_not_good = sorted(not_good_user.keys(),
                                     key=lambda x: not_good_user[x])
            print(sorted_not_good)
            print(
                f'用户 id={sorted_not_good[0]} {self.users[sorted_not_good[0]][UNAME_KEY]}, 被淘汰了'
            )
            del self.users[sorted_not_good[0]]
        else:
            print('没有用户被淘汰')
Example no. 13
 def _cal_result(self, df: DataFrame) -> dict:
     result = dict()
     df["return"] = np.log(df["balance"] / df["balance"].shift(1)).fillna(0)
     df["high_level"] = (df["balance"].rolling(min_periods=1,
                                               window=len(df),
                                               center=False).max())
     df["draw_down"] = df["balance"] - df["high_level"]
     df["dd_percent"] = df["draw_down"] / df["high_level"] * 100
     result['initial_capital / 初始资金'] = self.initial_capital
     result['start_date / 起始日期'] = df.index[0]
     result['end_date / 结束日期'] = df.index[-1]
     result['total_days / 交易天数'] = len(df)
     result['profit_days / 盈利天数'] = len(df[df["net_pnl"] > 0])
     result['loss_days / 亏损天数'] = len(df[df["net_pnl"] < 0])
     result['end_balance / 结束资金'] = round(df["balance"].iloc[-1], 2)
     result['max_draw_down / 最大回撤'] = round(df["draw_down"].min(), 2)
     result['max_dd_percent / 最大回撤百分比'] = str(
         round(df["dd_percent"].min(), 2)) + "%"
     result['total_pnl / 总盈亏'] = round(df["net_pnl"].sum(), 2)
     result['daily_pnl / 平均日盈亏'] = round(
         result['total_pnl / 总盈亏'] / result['total_days / 交易天数'], 2)
     result['total_commission / 总手续费'] = round(df["commission"].sum(), 2)
     result['daily_commission / 日均手续费'] = round(
         result['total_commission / 总手续费'] / result['total_days / 交易天数'], 2)
     # result['total_slippage'] = df["slippage"].sum()
     # result['daily_slippage'] = result['total_slippage'] / result['total_days']
     result['total_turnover / 开仓总资金'] = round(df["turnover"].sum(), 2)
     result['daily_turnover / 每日平均开仓资金'] = round(
         result['total_turnover / 开仓总资金'] / result['total_days / 交易天数'], 2)
     result['total_count / 总成交次数'] = df["count"].sum()
     result['daily_count / 日均成交次数'] = round(
         result['total_count / 总成交次数'] / result['total_days / 交易天数'], 2)
     result['total_return / 总收益率'] = str(
         round((result['end_balance / 结束资金'] / self.initial_capital - 1) *
               100, 2)) + "%"
     by_year_return_std = df["return"].std() * np.sqrt(245)
     df["return_x"] = df["return"] + 1
     try:
         profit_ratio = geometric_mean(df["return_x"].to_numpy())**245 - 1
     except ValueError:
         self.logger.error("计算存在负数, 本次回测作废")
         return {}
     result['annual_return / 年化收益率'] = str(round(profit_ratio * 100,
                                                 2)) + "%"
     result['return_std / 年化标准差'] = str(round(by_year_return_std * 100,
                                              2)) + "%"
     result['volatility / 波动率'] = str(round(df["return"].std() * 100,
                                            2)) + "%"
     if by_year_return_std != 0:
         result['sharpe / 年化夏普率'] = (profit_ratio -
                                     2.5 / 100) / by_year_return_std
     else:
         result['sharpe / 年化夏普率'] = "计算出错"
     return result
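The annualization step in isolation: convert daily returns to growth factors, take their geometric mean, and compound over roughly 245 trading days (the returns below are illustrative):

from statistics import geometric_mean

daily_returns = [0.001, -0.002, 0.0015, 0.0005]
growth_factors = [1 + r for r in daily_returns]            # the "return_x" column above
annual_return = geometric_mean(growth_factors) ** 245 - 1
print(f"{annual_return * 100:.2f}%")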
Example no. 14
    def validateStdoutWithDimensions(self, out: OutputCapture, benchmarks: List[str], bmSuiteArgs: List[str], retcode: int,
                                     dims: Dict[str, str]) -> List[Any]:

        datapoints = []
        assert len(benchmarks) == 1
        bench_name = benchmarks[0]

        if out.steps > MAX_BENCH_ITERATIONS:
            mx.abort(f"The benchmark {bench_name} did more steps ({out.steps}) than maximum number of iterations ({MAX_BENCH_ITERATIONS}).\n"
                     f"This means for example that the probability of the invocation of GC during the benchmark run is higher than usual.\n"
                     f"Try rerunning the benchmark with smaller warmup time or measure time, or adjust MAX_BENCH_ITERATIONS constant")

        # Filter-out warmup timestamps
        warmup_end = out.start_time + self._bench_args.warmup
        measure_timestamps = [timestamp for timestamp in out.timestamps if timestamp > warmup_end]

        if len(measure_timestamps) < 2:
            mx.abort(f"Less than 2 measurements were done. Run the benchmark with longer measure time (--measure)")

        # Compute final score
        measure_steps = len(measure_timestamps)
        step_time = geometric_mean(np.diff(measure_timestamps))
        # score is number of operations per second
        score = 1 / step_time

        # Find out various machine configuration
        host_vm, host_vm_config = self._get_host_vm_tuple()
        host_vm_backend = "NA"
        if self._run_in_fastr:
            if "FASTR_RFFI" in os.environ:
                host_vm_backend = os.environ["FASTR_RFFI"]
            else:
                host_vm_backend = "nfi"
        
        datapoints.append({
            "vm": "fastr" if self._run_in_fastr else "gnur",
            "config.name": "core" if mx.suite("compiler", fatalIfMissing=False) else "default",
            "config.data-length": self._bench_args.data_length,
            "config.warmup": self._bench_args.warmup,
            "config.measure": self._bench_args.measure,
            "config.iterations": self._bench_args.iterations,
            "host-vm": host_vm,
            "host-vm-config": host_vm_config,
            "host-vm-backend": host_vm_backend,
            "benchmark": bench_name,
            "metric.name": "throughput",
            "metric.value": score,
            "metric.score-function": "id",
            "metric.better": "higher",
            "metric.unit": "op/s",
            "metric.measure-count": str(measure_steps)
        })
        return datapoints
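The scoring step in isolation: the throughput score is the reciprocal of the geometric-mean gap between consecutive post-warmup timestamps (timestamps below are made up):

from statistics import geometric_mean
import numpy as np

measure_timestamps = [10.0, 10.5, 11.1, 11.6, 12.2]   # seconds
step_time = geometric_mean(np.diff(measure_timestamps))
score = 1 / step_time                                  # operations per second
print(score)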
Example no. 15
    def __init__(self, name, obj_pattern, char_spacing, line_spacing,
                 line_z_offset, supports_mi, letters):
        """
        `name` - English description of the font; only used in mod comments.
        `obj_pattern` - Format string to generate the StaticMesh object paths.
        `char_spacing` - Extra spacing to put in between characters.
        `line_spacing` - Extra spacing to put in between lines.
        `line_z_offset` - How much we need to lower the text so that it's oriented properly
            along the Z axis, in units of line height.  This varies depending on the font.
            This could actually be programmatically determined from the Letter `origin_w`
            and `origin_h` values -- This value should be 1 when the origin values are
            approximately zero, and 2 when the origin values are approximately half the
            total letter height.
        `supports_mi` - `True` if this font supports setting coloration info via
            MaterialInterface/MaterialInstance objects, or `False` otherwise
        `letters` - A list of `Letter` objects belonging to this font.
        """
        self.name = name
        self.obj_pattern = obj_pattern
        self.char_spacing = char_spacing
        self.line_spacing = line_spacing
        self.line_z_offset = line_z_offset
        self.supports_mi = supports_mi
        self.letters = {}
        for letter in letters:
            letter._finalize(self, obj_pattern)
            self.letters[letter.letter] = letter

        # Compute the width of our "space" char, and also our character-derived line height.
        # We're going to omit any letters which have a mesh_override specified, since those
        # may be special chars like commas, periods, etc.
        relevant_chars = []
        for l in self.letters.values():
            if not l.mesh_override:
                relevant_chars.append(l)
        self.space_width = statistics.geometric_mean(
            [l.width for l in relevant_chars])
        self.line_height = statistics.geometric_mean(
            [l.height for l in relevant_chars])
Example no. 16
def get_avg_load():
    load_percentages = psutil.cpu_percent(interval=averaging_period,
                                          percpu=True)

    if averaging_method == "arithmetic":
        return statistics.mean(load_percentages)
    elif averaging_method == "geometric":
        return statistics.geometric_mean(load_percentages)
    elif averaging_method == "harmonic":
        return statistics.harmonic_mean(load_percentages)
    elif averaging_method == "median":
        return statistics.median(load_percentages)
    else:
        raise RuntimeError("Avg CPU load error")
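The function relies on two module-level settings (and imports) that the snippet does not show; one assumed configuration that makes it runnable:

import statistics
import psutil

averaging_period = 1.0          # seconds sampled by psutil.cpu_percent
averaging_method = "geometric"  # "arithmetic", "geometric", "harmonic" or "median"

print(get_avg_load())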
Example no. 17
 def __init__(self):
     datos = global_datosContinuos.getListaDatos()
     self.media_arimetica = round(stats.mean(datos), 2)             # arithmetic mean
     self.media_geometrica = round(stats.geometric_mean(datos), 2)  # geometric mean
     self.mediana = round(stats.median(datos), 2)                   # median
     self.media_truncada = self.calcular_media_truncada(datos)      # trimmed mean
     self.moda = round(stats.mode(datos), 2)                        # mode
     self.varianza = round(stats.variance(datos), 2)                # sample variance
     self.desviacion = round(stats.stdev(datos), 2)                 # standard deviation
     self.sesgo = cal_sesgo(self.media_arimetica, self.moda,
                            self.desviacion)                        # skewness
Example no. 18
def process_screen_rolling_backtest_result(json: dict, start_dt, end_dt,
                                           precision):
    if precision is None:
        precision = 2
    length = len(json['rows'])
    data = [start_dt, end_dt, length]
    data.extend(val for val in json['average'][4:8])
    data.append(min(float(item[8]) for item in json['rows']))
    data.append(max(float(item[9]) for item in json['rows']))
    data.append(json['average'][10])
    data.append(
        round(statistics.fmean(float(item[5]) for item in json['rows'][0:13]
                               ), precision) if length >= 13 else None)
    data.append(
        round(statistics.fmean(float(item[5]) for item in json['rows'][0:65]
                               ), precision) if length >= 65 else None)
    data.append(
        round((statistics.geometric_mean(
            float(item[5]) / 100 + 1 for item in json['rows']) - 1) * 100,
              precision))
    data.append(
        round((statistics.geometric_mean(
            float(item[6]) / 100 + 1 for item in json['rows']) - 1) * 100,
              precision))
    data.append(
        round((statistics.geometric_mean(
            float(item[5]) / 100 + 1 for item in json['rows'][0:13]) - 1) *
              100, precision) if length >= 13 else None)
    data.append(
        round((statistics.geometric_mean(
            float(item[5]) / 100 + 1 for item in json['rows'][0:65]) - 1) *
              100, precision) if length >= 65 else None)
    data.append(
        round(statistics.fmean(float(item[4]) for item in json['rows'][0:65]
                               ), precision) if length >= 65 else None)
    return data
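The compounding pattern used repeatedly above, in isolation: turn percentage returns into growth factors, geometric-mean them, and convert back to a percentage (illustrative returns):

import statistics

period_returns_pct = [2.0, -1.5, 3.2, 0.4]
compounded_pct = (statistics.geometric_mean(r / 100 + 1 for r in period_returns_pct) - 1) * 100
print(round(compounded_pct, 2))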
Example no. 19
def _step(nums: Iterable[Number]) -> Tuple[float, float, float]:
    """Calculates the mean, geometric mean and median of a given iterable

    Parameters
    ----------
    nums: Iterable[Number]
        The iterable containing the set of numbers you want to calculate the
        many types of averages over

    Returns
    -------
    tuple(float)
        a tuple containing the mean, geometric_mean, and the median of numbers
        in nums
    """
    return (mean(nums), geometric_mean(nums), median(nums))
Example no. 20
def write_tex_content(f, benchmark, summary, data):
    # Write each row and collect statistics in the meantime
    stats = dict((conf, []) for conf in configs)
    for prog in data:
        if not summary:
            # Write the program name
            f.write('  ' + prog.replace('_', '\\_'))

        # Write the number of each configuration
        for conf in configs:
            number = data[prog][conf]
            if not configs[conf]['absolute']:
                # Generate normalized numbers
                baseline = float(data[prog]['baseline'])
                number = float(number) / baseline
            if not summary:
                f.write(' & ' + configs[conf]['format'].format(number))
            stats[conf].append(number)

        if not summary:
            # Write the newline
            f.write(' \\\\\n')

    if not summary:
        # Write \midrule
        f.write('\\midrule\n')

    # Write statistic summary: Min
    f.write('  {\\bf Min}')
    for conf in stats:
        f.write(' & ' + configs[conf]['format'].format(min(stats[conf])))
    f.write(' \\\\\n')
    # Write statistic summary: Max
    f.write('  {\\bf Max}')
    for conf in stats:
        f.write(' & ' + configs[conf]['format'].format(max(stats[conf])))
    f.write(' \\\\\n')
    # Write statistic summary: Geomean
    f.write('  {\\bf Geomean}')
    for conf in stats:
        if configs[conf]['absolute']:
            # Not meaningful to write a geometric mean of absolute numbers
            f.write(' & ---')
        else:
            f.write(' & ' + configs[conf]['format'].format(
                statistics.geometric_mean(stats[conf])))
    f.write(' \\\\\n')
Example no. 21
def main(args):
    graph_size, clique_size = args.graph_size, args.clique_size

    results = {}
    must_hold = []
    procs, procs_ret = [], []
    best_config, best_terms, best_acc = None, [], 0
    with multiprocessing.Manager() as manager:
        with multiprocessing.Pool(processes=32) as pool:
            for (i, j, k, l) in itertools.product(range(1, 5), range(1, 5),
                                                  range(1, 5), range(1, 5)):
                pargs = manager.dict()
                # MUST NOT DISCARD SOMETHING GIVEN TO US BY MANAGER
                # See: https://stackoverflow.com/a/60795334
                must_hold.append(pargs)
                pargs['terms'] = (i, j, k, l)
                pargs['pure_dir'] = f"data/pure/({clique_size}-{graph_size})"
                pargs[
                    'impure_dir'] = f"data/impure/({clique_size}-{graph_size})"
                pargs['clique_size'] = clique_size
                pargs['graph_size'] = graph_size
                pargs['batch_size'] = args.batch_size
                pargs['epochs'] = args.epochs
                pargs['device'] = args.device
                procs.append(pool.apply_async(blargh, (pargs, )))
            for p in procs:
                procs_ret.append(p.get())
            pool.close()
            pool.join()

    for (terms, all_a, ac, conf) in procs_ret:
        results[terms] = all_a
        if ac > best_acc: best_config, best_terms, best_acc = conf, terms, ac

    for k, v in results.items():
        g, m, s = round(statistics.geometric_mean(v),
                        3), round(statistics.median(v),
                                  3), round(statistics.pstdev(v), 3)
        print(f"{k} ::: GMean {g}, Median {m}, STD {s}")
        # Save items to disk
    parent = Path("data/models/nn/")
    parent.mkdir(parents=True, exist_ok=True)
    if (os.path.exists(parent / f"({clique_size}-{graph_size}).pth")):
        os.remove(parent / f"({clique_size}-{graph_size}).pth")
    torch.save(best_config, parent / f"({clique_size}-{graph_size}).pth")
Example no. 22
def update_plt1(plt_1_bars: BarContainer, state_info_list: List[StateInfo],
                mean_line: Line2D, txt_dict: PlotTextDict, frame: int) -> None:
    """Insert the new data on the plot.

    Re-plot all of the bars, move the mean line, and set the text of
    everything on plot 1 with newly calculated data.

    Parameters
    ----------
    plt_1_bars : `BarContainer`
        The objects describing the plotted bars
    state_info_list : `List[StateInfo]`
        Continually updated list of state calculation info
    mean_line : `Line2D`
        The object describing the mean-line in the first plot
    txt_dict : `PlotTextDict`
        A dictionary that links the name of each text property to its `Text` object
    frame : `int`
        The current frame number

    """
    pop_per_rep_list = extract_pop_per_rep(state_info_list)

    mean_pop_per_seat: float = np.mean(pop_per_rep_list)
    std_dev_pop_per_seat: float = np.std(pop_per_rep_list)
    range_pop_per_seat: float = max(pop_per_rep_list) - min(pop_per_rep_list)
    geo_mean_pop_per_seat: float = geometric_mean(pop_per_rep_list)

    max_state: str = max(state_info_list,
                         key=operator.itemgetter("priority"))["name"]

    txt_dict["seat_txt"].set_text(f"Seat# {frame + 1}")
    txt_dict["state_txt"].set_text(f"State: {max_state}")
    txt_dict["mean_txt"].set_text(f"Mean: {mean_pop_per_seat:,.2f}")
    txt_dict["std_dev_txt"].set_text(f"Std. Dev. {std_dev_pop_per_seat:,.2f}")
    txt_dict["range_txt"].set_text(f"Range: {range_pop_per_seat:,.2f}")
    txt_dict["geo_mean_txt"].set_text(
        f"Geo. Mean: {geo_mean_pop_per_seat:,.2f}")

    mean_line.set_xdata([0, 1.0])
    mean_line.set_ydata([mean_pop_per_seat])

    for state, state_info in zip(plt_1_bars, state_info_list):
        state.set_height(state_info["pop_per_rep"])
Example no. 23
    def _get_self_vs_self(
        self,
        profile: Iterable[str],
        sim_measure: Optional[Union[str, PairwiseSim]] = PairwiseSim.IC,
    ) -> List[List[float]]:
        """
        Get the optimal matrix to convert the score to a percentage
        """
        score_matrix = []
        for pheno in profile:
            if sim_measure == PairwiseSim.GEOMETRIC:
                score_matrix.append(
                    [geometric_mean([1, self.graph.get_ic(pheno)])])
            elif sim_measure == PairwiseSim.IC:
                score_matrix.append([self.graph.get_ic(pheno)])
            else:
                raise NotImplementedError

        return score_matrix
Example no. 24
def avg_segmentation_value(layer, segmentation):
    """Returns the geometric mean of the segmented part of the layer.

    Arguments:
        layer {np 2d array} -- Dicom image of an organ
        segmentation {np 2d binary array} -- Approximate contour of an organ inside layer

    Returns:
        float -- The geometric mean of the segmented organ's values.
    """
    rows, cols = layer.shape

    values = []
    for i in range(rows):
        for j in range(cols):
            if segmentation[i, j]:
                values.append(max(int(layer[i, j]), 1))

    return geometric_mean(values)
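An equivalent, typically faster formulation with numpy, assuming `layer` and `segmentation` are numpy arrays as the docstring says (a sketch, not the project's code):

import numpy as np
from statistics import geometric_mean

def avg_segmentation_value_np(layer, segmentation):
    # Select segmented pixels, clamp them to at least 1, then average geometrically
    values = np.maximum(layer[segmentation.astype(bool)].astype(int), 1)
    return geometric_mean(values.tolist())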
Example no. 25
    def update_plt_1(self, frame: int) -> None:
        """Insert the new data on the plot.

        Re-plot all of the bars, move the mean line, and set the text of
        everything on plot 1 with newly calculated data.

        Parameters
        ----------
        frame : `int`
            The current frame number

        """
        pop_per_rep_list = extract_pop_per_rep(self.state_info_list)

        mean_pop_per_seat: float = np.mean(pop_per_rep_list)
        std_dev_pop_per_seat: float = np.std(pop_per_rep_list)
        range_pop_per_seat: float = max(
            pop_per_rep_list) - min(pop_per_rep_list)
        geo_mean_pop_per_seat: float = geometric_mean(pop_per_rep_list)

        max_state: str = max(
            self.state_info_list, key=lambda obj: obj["priority"])["name"]

        self.txt_dict["seat_txt"].set_text(
            f"Seat# {frame + 1}")
        self.txt_dict["state_txt"].set_text(
            f"State: {max_state}")
        self.txt_dict["mean_txt"].set_text(
            f"Mean: {mean_pop_per_seat:,.2f}")
        self.txt_dict["std_dev_txt"].set_text(
            f"Std. Dev. {std_dev_pop_per_seat:,.2f}")
        self.txt_dict["range_txt"].set_text(
            f"Range: {range_pop_per_seat:,.2f}")
        self.txt_dict["geo_mean_txt"].set_text(
            f"Geo. Mean: {geo_mean_pop_per_seat:,.2f}")

        self.mean_line.set_xdata([0, 1.0])
        self.mean_line.set_ydata([mean_pop_per_seat])

        for state, state_info in zip(
                self.plt_bars_dict["plt_1_bars"], self.state_info_list):
            state.set_height(state_info["pop_per_rep"])
Example no. 26
def shape_cal(rt):
    """
    Compute, from the return series, the annualized return, annualized
    standard deviation, volatility, and Sharpe ratio.
    """
    by_year_return_std = rt.std() * np.sqrt(245)
    rtx = rt + 1
    try:
        profit_ratio = geometric_mean(rtx.to_numpy()) ** 245 - 1
    except ValueError:
        print("boom 计算几何平均数存在负数, 本次计算出错 作废")
        return 0, 0, 0, 0
    annual_return = str(round(profit_ratio * 100, 2)) + "%"
    volatility = str(round(rt.std() * 100, 2)) + "%"
    return_std = str(round(by_year_return_std * 100, 2)) + "%"
    if by_year_return_std != 0:
        shape = (profit_ratio - 2.5 / 100) / by_year_return_std
    else:
        shape = "计算出错"
    return annual_return, return_std, volatility, shape
Example no. 27
 def _statistics_():
     print("Statistics")
     print("--")
     print(statistics.fmean([3.5, 4.0, 5.25]))
     print("--")
     print(round(statistics.geometric_mean([54, 24, 36]), 1))
     print("--")
     temperature_feb = statistics.NormalDist.from_samples(
         [4, 12, -3, 2, 7, 14])
     print(temperature_feb.mean)
     print(temperature_feb.stdev)
     print(temperature_feb.cdf(3))
     print(temperature_feb.pdf(7) / temperature_feb.pdf(10))
     el_niño = statistics.NormalDist(4, 2.5)
     temperature_feb += el_niño
     print(temperature_feb)  # NormalDist(mu=10.0, sigma=6.830080526611674)
     print(temperature_feb * (9 / 5) + 32)  # Fahrenheit: NormalDist(mu=50.0, sigma=12.294144947901014)
     print(temperature_feb.samples(3))
     print("--------")
Example no. 28
def body_attr():
    title = 'Body Attractiveness Calculator'
    form = BodyAttrCalculatorForm()
    attractiveness = None
    if form.validate_on_submit():
        points = [
            normalize_height(form.height.data, 180.5, 195.5),
            normalize(form.wrist.data, form.height.data, 9 / 91),
            normalize(form.chest.data, form.wrist.data, 13 / 2),
            normalize(form.biceps.data, form.chest.data, 0.36),
            normalize(form.thigh.data, form.chest.data, 0.53),
            normalize(form.calf.data, form.chest.data, 0.34),
            normalize(form.waist.data, form.chest.data, 0.70),
            normalize(form.neck.data, form.chest.data, 0.37),
            normalize(form.hips.data, form.chest.data, 0.85),
            normalize(form.shoulder.data, form.waist.data, 1.61803),
        ]
        attractiveness = round(geometric_mean(points), 2)
    return render_template('body_attr.html',
                           title=title,
                           form=form,
                           attractiveness=attractiveness)
Example no. 29
 def update_tick(self,
                 pyglet_fps: float,
                 tick: Decimal):
     if pyglet_fps != 0:
         self.fps_list.append(pyglet_fps)
     else:
         if tick != 0:
             self.fps_list.append(float(1 / tick))
         else:
             self.fps_list.append(1)
     if len(self.fps_list) > self.count:
         self.fps_list = self.fps_list[-self.count + 1:]  # shift the whole list forward one slot
     if len(self.get_fps_list) > self.count:
         self.get_fps_list = self.get_fps_list[-self.count + 1:]  # shift the whole list forward one slot
     try:
         self._fps = statistics.geometric_mean(self.fps_list[-100:])  # geometric mean of the last 100 samples
         self.middle_fps = statistics.median(self.fps_list)  # take the median
     except Exception:
         print(self.fps_list)
         raise
     self._max_fps = max(self.fps_list)
     self._min_fps = min(self.fps_list)
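One practical effect of geometric-mean smoothing here: a single outlier FPS sample moves the result far less than an arithmetic average would. With made-up samples:

import statistics

fps_list = [60.0, 59.5, 61.0, 240.0, 60.5]          # one spike from a stalled timer
print(statistics.geometric_mean(fps_list[-100:]))   # ~79, versus an arithmetic mean of ~96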
Example no. 30
def simple_stats():
    mean_list = statistics.mean(list_of_values)
    print("mean_list : ", mean_list)
    geometric_mean_list = statistics.geometric_mean(list_of_values)
    print("geometric_mean_list : ", geometric_mean_list)
    harmonic_mean_list = statistics.harmonic_mean(list_of_values)
    print("harmonic_mean_list : ", harmonic_mean_list)
    median_list = statistics.median(list_of_values)
    print("median_list : ", median_list)
    median_low_list = statistics.median_low(list_of_values)
    print("median_low_list : ", median_low_list)
    median_high_list = statistics.median_high(list_of_values)
    print("median_high_list : ", median_high_list)
    median_grouped_list = statistics.median_grouped(list_of_values)
    print("median_grouped_list : ", median_grouped_list)
    mode_list = statistics.mode(list_of_values)
    print("mode_list : ", mode_list)
    multimode_list = statistics.multimode(list_of_values)
    print("multimode_list : ", multimode_list)
    quantiles_list = statistics.quantiles(list_of_values)
    print("quantiles_list : ", quantiles_list)
    return (mean_list, geometric_mean_list, harmonic_mean_list, median_list,
            median_low_list, median_high_list, median_grouped_list, mode_list,
            multimode_list, quantiles_list)
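The function assumes a module-level `list_of_values` and an `import statistics`; it can be exercised like this (the values are illustrative only):

import statistics

list_of_values = [1, 2, 2, 3, 4, 7, 9]
print(simple_stats())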