Exemple #1
0
    def evaluate(self, observed, estimated, zone, res=0.5):
        '''
        Returns evaluation between observed and estimated rainfall maps
        by computing the following metrics:
            ['bias', 'corr', 'nash', 'rmse', 'mae', 'mean_obs', 'mean_est']
        Inputs:
            observed  -  2D array.    Observed rainfall map.  NOTE: modified
                                      in place (unestimated cells set to -999).
            estimated -  2D array.    Estimated rainfall map.  NOTE: modified
                                      in place.
            zone      -  (2,2) tuple. Evaluation study zone ((x0, x1), (y0, y1))
                                      [km x km].
        Optional:
            res       -  scalar.      Resolution for comparison, [km].
        Outputs:
            metrics   -  dictionary.  Statistical metrics keyed:
                                     ['bias', 'corr', 'nash', 'rmse', 'mae',
                                                      'mean_obs', 'mean_est']
        '''
        # -999 is the missing-data sentinel.  Cells not estimated are marked
        # missing in BOTH maps so the comparison is fair.
        estimated[estimated <= -999] = -999
        observed[estimated <= -999] = -999

        ((x0, x1), (y0, y1)) = zone
        # Cut the zone for evaluation (convert km extents to grid indices).
        t1, t2, t3, t4 = int(y0/res), int(y1/res), int(x0/res), int(x1/res)
        observed = observed[t1:t2, t3:t4]
        estimated = estimated[t1:t2, t3:t4]

        # Keep only cells valid in BOTH maps so obs/est remain paired.
        # (The original masked each array independently, which could yield
        # arrays of different lengths, and used the Python-2-only `<>`
        # operator, a SyntaxError on Python 3.)
        valid = (observed != -999) & (estimated != -999)
        est = estimated[valid].flatten()
        obs = observed[valid].flatten()
        stats = dict()
        stats['bias'] = metrics.bias(obs, est)
        stats['corr'] = metrics.corr(obs, est)
        stats['nash'] = metrics.nash(obs, est)
        stats['rmse'] = metrics.rmse(obs, est)
        stats['mae'] = metrics.mae(obs, est)
        stats['mean_obs'] = metrics.average(obs)
        stats['mean_est'] = metrics.average(est)
        # additional metrics can be added
        ##stats['likelihood'] = metrics.likelihood(obs, est)
        ##stats['mape'] = metrics.mape(obs, est)
        ##stats['mse'] = metrics.mse(obs, est)
        ##stats['mspe'] = metrics.mspe(obs, est)
        ##stats['rmspe'] = metrics.rmspe(obs, est)
        return stats
Exemple #2
0
    def _calc(self):
        """Return the ARVA annuity payment over the remaining planning horizon."""
        # Siegel & Waring suggest the discount rate should be the TIPS
        # interest rate (present-value-weighted average interest rate across
        # the TIPS ladder) at the start of each year.  We don't yet support a
        # per-year discount_rate, so a constant rate is used throughout.
        discount = self.discount_rate

        # Planning horizon per Siegel & Waring: the average of 120 (the
        # maximum known human life span) and the Social Security table life
        # expectancy for the current age.
        horizon = average([ARVA.MAX_AGE, get_life_expectancy(self.current_age)])
        return pmt(discount, horizon, self.portfolio.value)
Exemple #3
0
    def _calc(self):
        """Compute the annuity-style withdrawal for the current portfolio value."""
        # NOTE: Siegel & Waring recommend re-deriving the discount rate each
        # year from the TIPS ladder (present-value-weighted average rate);
        # changing discount_rate per year is unsupported, so it stays constant.
        # Years remaining = average of ARVA.MAX_AGE (120, the maximum known
        # human life span) and the Social Security life expectancy at the
        # current age, per Siegel & Waring.
        expectancy = get_life_expectancy(self.current_age)
        remaining = average([ARVA.MAX_AGE, expectancy])
        return pmt(self.discount_rate, remaining, self.portfolio.value)
# Output file for the backup-scaling experiment; buffering=1 gives
# line-buffered writes so partial results survive a crash mid-run.
dat = open('%s/recovery/backup_scale.data' % top_path, 'w', 1)

# Sweep the number of backup servers and measure recovery performance.
for numBackups in range(3, len(config.hosts)):
    print('Running recovery with %d backup(s)' % numBackups)
    args = {}
    args['num_servers'] = numBackups
    args['backups_per_server'] = 1
    args['num_partitions'] = 1
    args['object_size'] = 1024
    args['num_objects'] = 592415 # 600MB
    args['master_ram'] = 8000
    args['replicas'] = 3
    r = recovery.insist(**args)
    print('->', r['ns'] / 1e6, 'ms', '(run %s)' % r['run'])
    # Average master CPU time spent in recovery, converted to milliseconds.
    masterCpuMs = metrics.average(
        [(master.master.recoveryTicks / master.clockFrequency)
         for master in r['metrics'].masters]) * 1e3
    # Aggregate backup storage traffic (read + write, halved) in MiB,
    # divided by recovery time -> bandwidth in MiB/s (ns -> s via 1e9).
    diskBandwidth = sum([(backup.backup.storageReadBytes +
                          backup.backup.storageWriteBytes) / 2**20
                         for backup in r['metrics'].backups]) * 1e9 / r['ns']
    # Per-backup time the disk spent reading, in milliseconds.
    diskActiveMsPoints = [backup.backup.storageReadTicks * 1e3 /
                          backup.clockFrequency
                          for backup in r['metrics'].backups]
    print(numBackups,
          r['ns'] / 1e6,
          masterCpuMs,
          diskBandwidth,
          metrics.average(diskActiveMsPoints),
          min(diskActiveMsPoints),
          max(diskActiveMsPoints),
          metrics.average([master.master.logSyncTicks * 1e3 /
    # NOTE(review): the snippet is truncated here -- the print(...) call and
    # the metrics.average(...) expression above are cut off, and the lines
    # below belong to a DIFFERENT loop body (they reference num_hosts and
    # numPartitions, which are not defined in this fragment).  Recover the
    # full original source before running.
    args['num_servers'] = num_hosts
    args['backups_per_server'] = 1
    args['num_partitions'] = numPartitions
    args['object_size'] = 1024
    args['replicas'] = 3
    args['num_objects'] = 626012 * 400 // 640
    args['timeout'] = 180
    print(num_hosts, 'backups')
    print(numPartitions, 'partitions')
    r = recovery.insist(**args)
    print('->', r['ns'] / 1e6, 'ms', '(run %s)' % r['run'])
    # Per-backup disk-read time in ms; per-backup segment read counts; and
    # per-master recovery time in ms.
    diskActiveMsPoints = [backup.backup.storageReadTicks * 1e3 /
                          backup.clockFrequency
                          for backup in r['metrics'].backups]
    segmentsPerBackup = [backup.backup.storageReadCount
                         for backup in r['metrics'].backups]
    masterRecoveryMs = [master.master.recoveryTicks / master.clockFrequency * 1000
                        for master in r['metrics'].masters]
    # Emit one data row per configuration to the backup_scale.data file.
    print(numPartitions, r['ns'] / 1e6,
          metrics.average(diskActiveMsPoints),
          min(diskActiveMsPoints),
          max(diskActiveMsPoints),
          (min(segmentsPerBackup) *
           sum(diskActiveMsPoints) / sum(segmentsPerBackup)),
          (max(segmentsPerBackup) *
           sum(diskActiveMsPoints) / sum(segmentsPerBackup)),
          metrics.average(masterRecoveryMs),
          min(masterRecoveryMs),
          max(masterRecoveryMs),
          file=dat)
def median(l):
    """Return the median of iterable *l*.

    For odd-length input this is the middle element of the sorted values;
    for even-length input, the mean of the two middle elements.
    Raises IndexError for empty input.
    """
    l = sorted(l)
    mid = len(l) // 2
    if len(l) % 2 == 0:
        # BUG FIX: the original sliced l[mid:mid+1] -- a single element --
        # so metrics.average returned the upper-middle value, not the true
        # median.  Average the TWO middle elements instead.
        return (l[mid - 1] + l[mid]) / 2
    else:
        return l[mid]
def median(l):
    """Return the median of iterable *l* (mean of the two middle values
    when the length is even; the middle value when it is odd).

    Raises IndexError for empty input.
    """
    l = sorted(l)
    half = len(l) // 2
    if len(l) % 2 == 0:
        # BUG FIX: the original averaged the one-element slice
        # l[half:half+1], i.e. it returned the upper-middle element.
        # The even-length median is the mean of BOTH middle elements.
        return (l[half - 1] + l[half]) / 2
    else:
        return l[half]
Exemple #8
0
    # NOTE(review): this fragment is the interior of a loop whose header is
    # not visible here -- num_hosts, numPartitions, recovery, metrics and
    # dat are defined in the enclosing (missing) scope.
    # Build the recovery experiment configuration for this iteration.
    args = {}
    args['num_servers'] = num_hosts
    args['num_partitions'] = numPartitions
    args['object_size'] = 1024
    args['replicas'] = 3
    args['num_objects'] = 626012 * 400 // 640
    args['timeout'] = 180
    print(num_hosts, 'backups')
    print(numPartitions, 'partitions')
    r = recovery.insist(**args)
    print('->', r['ns'] / 1e6, 'ms', '(run %s)' % r['run'])
    # Per-backup disk-read time in milliseconds (ticks / clock frequency).
    diskActiveMsPoints = [backup.backup.storageReadTicks * 1e3 /
                          backup.clockFrequency
                          for backup in r['metrics'].backups]
    # Number of segments each backup read during recovery.
    segmentsPerBackup = [backup.backup.storageReadCount
                         for backup in r['metrics'].backups]
    # Per-master recovery time in milliseconds.
    masterRecoveryMs = [master.master.recoveryTicks / master.clockFrequency * 1000
                        for master in r['metrics'].masters]
    # Write one whitespace-separated data row for this configuration.
    # The two middle expressions scale total disk-active time by the
    # min/max per-backup segment share.
    print(numPartitions, r['ns'] / 1e6,
          metrics.average(diskActiveMsPoints),
          min(diskActiveMsPoints),
          max(diskActiveMsPoints),
          (min(segmentsPerBackup) *
           sum(diskActiveMsPoints) / sum(segmentsPerBackup)),
          (max(segmentsPerBackup) *
           sum(diskActiveMsPoints) / sum(segmentsPerBackup)),
          metrics.average(masterRecoveryMs),
          min(masterRecoveryMs),
          max(masterRecoveryMs),
          file=dat)
Exemple #9
0
        args["num_objects"] = 592950
        args["timeout"] = 300
        r = recovery.insist(**args)
        print("->", r["ns"] / 1e6, "ms", "(run %s)" % r["run"])

        diskActiveMsPoints = [
            backup.backup.storageReadTicks * 1e3 / backup.clockFrequency for backup in r["metrics"].backups
        ]
        segmentsPerBackup = [backup.backup.storageReadCount for backup in r["metrics"].backups]
        masterRecoveryMs = [
            master.master.recoveryTicks / master.clockFrequency * 1000 for master in r["metrics"].masters
        ]

        stats = (
            r["ns"] / 1e6,
            metrics.average(diskActiveMsPoints),
            min(diskActiveMsPoints),
            max(diskActiveMsPoints),
            (min(segmentsPerBackup) * sum(diskActiveMsPoints) / sum(segmentsPerBackup)),
            (max(segmentsPerBackup) * sum(diskActiveMsPoints) / sum(segmentsPerBackup)),
            metrics.average(masterRecoveryMs),
            min(masterRecoveryMs),
            max(masterRecoveryMs),
            metrics.average(
                [
                    (master.master.replicationBytes * 8 / 2 ** 30)
                    / (master.master.replicationTicks / master.clockFrequency)
                    for master in r["metrics"].masters
                ]
            ),
            metrics.average(
Exemple #10
0
            # NOTE(review): fragment starts and ends mid-scope -- the
            # accumulator lists, the `metrics` tuple and the loop headers are
            # defined outside this snippet.  Here `metrics` is indexed like a
            # per-classifier results tuple (presumably FRR/FAR/EER/AUC
            # values -- TODO confirm against the producing classifier call).
            far_random_metrics_global_sd.append(metrics[6])
            eer_metrics_global_sd.append(metrics[7])

            auc_metrics.append(metrics[8])

           # metrics = classifier.mlp(np.array(train_sets_processed[i]), test, classifications[i], test_classification, genuine_quantity, option[0], option[1])
           # frr_metrics[3].append(metrics[0])
           # far_skilled_metrics[3].append(metrics[1])
           # far_random_metrics[3].append(metrics[2])
           # eer_metrics[3].append(metrics[3])
        print("results")
        # Summarize per-classifier results; only the SVM slot is processed
        # in this visible branch.
        for p in range(4):
            types = ["KNN", "Tree", "SVM", "MLP"]
            if(types[p] == "SVM"):

                # Averages of the accumulated per-run metrics.
                frr_avg = average(frr_metrics[p])
                far_skilled_avg = average(far_skilled_metrics[p])
                far_random_avg = average(far_random_metrics[p])
                eer_avg = average(eer_metrics[p])

                # Standard deviations of the same series.
                frr_sd = standard_deviation(frr_metrics[p])
                far_skilled_sd = standard_deviation(far_skilled_metrics[p])
                far_random_sd = standard_deviation(far_random_metrics[p])
                eer_sd = standard_deviation(eer_metrics[p])
                
                # Extend the "local" accumulators with this classifier's runs.
                frr_metrics_local += frr_metrics[p]
                far_skilled_local += far_skilled_metrics[p]
                far_random_local += far_random_metrics[p]
                eer_local += eer_metrics[p]

                frr_metrics_local_sd += frr_metrics[p]