Example #1
    def prepare_mix(self):
        sex = np.random.choice(['f', 'm'], 1)[0]
        speakers = np.random.choice(
            self.db.groupby('sex')['id'].unique()[sex], 2)
        self.random_picks = self.db[[
            'id', 'path'
        ]].loc[(self.db['sex'] == sex) & (self.db['mod'] == 'n') &
               (self.db['id'].isin(speakers))].sample(n=2)
        if self.random_picks.values[0][0] == self.random_picks.values[1][0]:
            self.same_speaker = True
        else:
            self.same_speaker = False

        _, sig1_int = read(self.corename + '\\' +
                           self.random_picks.values[0][1])
        _, sig2_int = read(self.corename + '\\' +
                           self.random_picks.values[1][1])

        sig1 = sig1_int / 2**15
        sig2 = sig2_int / 2**15

        noise = self.babble[sex][:len(sig1) + int(0.5 * self.rate) +
                                 len(sig2)] / 2**15

        sig1 *= (rms(noise) * 10**(self.target_SNR / 20)) / rms(sig1)
        sig2 *= (rms(noise) * 10**(self.target_SNR / 20)) / rms(sig2)

        self.mix = np.concatenate(
            (sig1, np.zeros(int(0.5 * self.rate), dtype='float32'), sig2))
        self.mix += noise
        self.mix *= 2**15
        self.mix = self.mix.astype(dtype='int16')
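Example #1 above, like the rest of the examples on this page, calls an rms helper from its own project (rms, U.rms, or utils.rms) that is not shown here. For reference, a minimal sketch of such a helper, assuming NumPy and hypothetical signatures: a one-argument form returning the root-mean-square of an array (as used in Example #1) and a two-argument form returning the RMS difference between two arrays (as used in Example #15):

import numpy as np

def rms(x, y=None, axis=None):
    """Root-mean-square of x, or RMS difference between x and y.

    Hypothetical sketch; each project shown on this page ships its own version.
    """
    x = np.asarray(x, dtype=float)
    if y is not None:
        # Two-argument form: RMS of the elementwise difference (RMS error)
        x = x - np.asarray(y, dtype=float)
    return np.sqrt(np.mean(np.square(x), axis=axis))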
Example #2
    def get_simple_muprop_gradient(self):
        """ Computes the simple muprop gradient.

    This muprop control variate does not include the linear term.
    """
        # Hard loss
        logQHard, hardSamples = self._recognition_network()
        hardELBO, reinforce_model_grad = self._generator_network(
            hardSamples, logQHard)

        # Soft loss
        logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
        muELBO, _ = self._generator_network(muSamples, logQ)

        scaling_baseline = self._create_eta(collection='BASELINE')
        learning_signal = (hardELBO - scaling_baseline * muELBO -
                           self._create_baseline())
        self.baseline_loss.append(tf.square(learning_signal))

        optimizerLoss = -(tf.stop_gradient(learning_signal) *
                          tf.add_n(logQHard) + reinforce_model_grad)
        optimizerLoss = tf.reduce_mean(optimizerLoss)

        simple_muprop_gradient = (
            self.optimizer_class.compute_gradients(optimizerLoss))
        debug = {
            'ELBO': hardELBO,
            'muELBO': muELBO,
            'RMS': U.rms(learning_signal),
        }

        return simple_muprop_gradient, debug
Example #3
File: rebar.py  Project: ALISCIFP/models
  def get_simple_muprop_gradient(self):
    """ Computes the simple muprop gradient.

    This muprop control variate does not include the linear term.
    """
    # Hard loss
    logQHard, hardSamples = self._recognition_network()
    hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)

    # Soft loss
    logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
    muELBO, _  = self._generator_network(muSamples, logQ)

    scaling_baseline = self._create_eta(collection='BASELINE')
    learning_signal = (hardELBO
                       - scaling_baseline * muELBO
                       - self._create_baseline())
    self.baseline_loss.append(tf.square(learning_signal))

    optimizerLoss = -(tf.stop_gradient(learning_signal) * tf.add_n(logQHard)
                      + reinforce_model_grad)
    optimizerLoss = tf.reduce_mean(optimizerLoss)

    simple_muprop_gradient = (self.optimizer_class.
                              compute_gradients(optimizerLoss))
    debug = {
        'ELBO': hardELBO,
        'muELBO': muELBO,
        'RMS': U.rms(learning_signal),
    }

    return simple_muprop_gradient, debug
Example #4
  def get_muprop_gradient(self):
    """
    random sample function that actually returns mean
    new forward pass that returns logQ as a list

    can get x_i from samples
    """

    # Hard loss
    logQHard, hardSamples = self._recognition_network()
    hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)

    # Soft loss
    logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
    muELBO, _ = self._generator_network(muSamples, logQ)

    # Compute gradients
    muELBOGrads = tf.gradients(tf.reduce_sum(muELBO),
                               [ muSamples[i]['activation'] for
                                i in xrange(self.hparams.n_layer) ])

    # Compute MuProp gradient estimates
    learning_signal = hardELBO
    optimizerLoss = 0.0
    learning_signals = []
    for i in xrange(self.hparams.n_layer):
      dfDiff = tf.reduce_sum(
          muELBOGrads[i] * (hardSamples[i]['activation'] -
                            muSamples[i]['activation']),
          axis=1)
      dfMu = tf.reduce_sum(
          tf.stop_gradient(muELBOGrads[i]) *
          tf.nn.sigmoid(hardSamples[i]['log_param']),
          axis=1)

      scaling_baseline_0 = self._create_eta(collection='BASELINE')
      scaling_baseline_1 = self._create_eta(collection='BASELINE')
      learning_signals.append(learning_signal - scaling_baseline_0 * muELBO - scaling_baseline_1 * dfDiff - self._create_baseline())
      self.baseline_loss.append(tf.square(learning_signals[i]))

      optimizerLoss += (
          logQHard[i] * tf.stop_gradient(learning_signals[i]) +
          tf.stop_gradient(scaling_baseline_1) * dfMu)
    optimizerLoss += reinforce_model_grad
    optimizerLoss *= -1

    optimizerLoss = tf.reduce_mean(optimizerLoss)

    muprop_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
    debug = {
        'ELBO': hardELBO,
        'muELBO': muELBO,
    }

    debug.update(dict([
        ('RMS learning signal layer %d' % i, U.rms(learning_signal))
        for (i, learning_signal) in enumerate(learning_signals)]))

    return muprop_gradient, debug
Example #5
File: rebar.py  Project: ALISCIFP/models
  def get_muprop_gradient(self):
    """
    random sample function that actually returns mean
    new forward pass that returns logQ as a list

    can get x_i from samples
    """

    # Hard loss
    logQHard, hardSamples = self._recognition_network()
    hardELBO, reinforce_model_grad = self._generator_network(hardSamples, logQHard)

    # Soft loss
    logQ, muSamples = self._recognition_network(sampler=self._mean_sample)
    muELBO, _ = self._generator_network(muSamples, logQ)

    # Compute gradients
    muELBOGrads = tf.gradients(tf.reduce_sum(muELBO),
                               [ muSamples[i]['activation'] for
                                i in xrange(self.hparams.n_layer) ])

    # Compute MuProp gradient estimates
    learning_signal = hardELBO
    optimizerLoss = 0.0
    learning_signals = []
    for i in xrange(self.hparams.n_layer):
      dfDiff = tf.reduce_sum(
          muELBOGrads[i] * (hardSamples[i]['activation'] -
                            muSamples[i]['activation']),
          axis=1)
      dfMu = tf.reduce_sum(
          tf.stop_gradient(muELBOGrads[i]) *
          tf.nn.sigmoid(hardSamples[i]['log_param']),
          axis=1)

      scaling_baseline_0 = self._create_eta(collection='BASELINE')
      scaling_baseline_1 = self._create_eta(collection='BASELINE')
      learning_signals.append(learning_signal - scaling_baseline_0 * muELBO - scaling_baseline_1 * dfDiff - self._create_baseline())
      self.baseline_loss.append(tf.square(learning_signals[i]))

      optimizerLoss += (
          logQHard[i] * tf.stop_gradient(learning_signals[i]) +
          tf.stop_gradient(scaling_baseline_1) * dfMu)
    optimizerLoss += reinforce_model_grad
    optimizerLoss *= -1

    optimizerLoss = tf.reduce_mean(optimizerLoss)

    muprop_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
    debug = {
        'ELBO': hardELBO,
        'muELBO': muELBO,
    }

    debug.update(dict([
        ('RMS learning signal layer %d' % i, U.rms(learning_signal))
        for (i, learning_signal) in enumerate(learning_signals)]))

    return muprop_gradient, debug
Example #6
  def _create_loss(self):
    # Hard loss
    logQHard, samples = self._recognition_network()
    reinforce_learning_signal, reinforce_model_grad = self._generator_network(samples, logQHard)
    logQHard = tf.add_n(logQHard)

    # REINFORCE
    learning_signal = tf.stop_gradient(center(reinforce_learning_signal))
    self.optimizerLoss = -(learning_signal*logQHard +
                           reinforce_model_grad)
    self.lHat = map(tf.reduce_mean, [
        reinforce_learning_signal,
        U.rms(learning_signal),
    ])

    return reinforce_learning_signal
Example #7
File: rebar.py  Project: ALISCIFP/models
  def _create_loss(self):
    # Hard loss
    logQHard, samples = self._recognition_network()
    reinforce_learning_signal, reinforce_model_grad = self._generator_network(samples, logQHard)
    logQHard = tf.add_n(logQHard)

    # REINFORCE
    learning_signal = tf.stop_gradient(U.center(reinforce_learning_signal))
    self.optimizerLoss = -(learning_signal*logQHard +
                           reinforce_model_grad)
    self.lHat = map(tf.reduce_mean, [
        reinforce_learning_signal,
        U.rms(learning_signal),
    ])

    return reinforce_learning_signal
Example #8
File: rebar.py  Project: ALISCIFP/models
  def get_nvil_gradient(self):
    """Compute the NVIL gradient."""
    # Hard loss
    logQHard, samples = self._recognition_network()
    ELBO, reinforce_model_grad = self._generator_network(samples, logQHard)
    logQHard = tf.add_n(logQHard)

    # Add baselines (no variance normalization)
    learning_signal = tf.stop_gradient(ELBO) - self._create_baseline()

    # Set up losses
    self.baseline_loss.append(tf.square(learning_signal))
    optimizerLoss = -(tf.stop_gradient(learning_signal)*logQHard +
                           reinforce_model_grad)
    optimizerLoss = tf.reduce_mean(optimizerLoss)

    nvil_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
    debug = {
        'ELBO': ELBO,
        'RMS of centered learning signal': U.rms(learning_signal),
    }

    return nvil_gradient, debug
Example #9
    def get_nvil_gradient(self):
        """Compute the NVIL gradient."""
        # Hard loss
        logQHard, samples = self._recognition_network()
        ELBO, reinforce_model_grad = self._generator_network(samples, logQHard)
        logQHard = tf.add_n(logQHard)

        # Add baselines (no variance normalization)
        learning_signal = tf.stop_gradient(ELBO) - self._create_baseline()

        # Set up losses
        self.baseline_loss.append(tf.square(learning_signal))
        optimizerLoss = -(tf.stop_gradient(learning_signal) * logQHard +
                          reinforce_model_grad)
        optimizerLoss = tf.reduce_mean(optimizerLoss)

        nvil_gradient = self.optimizer_class.compute_gradients(optimizerLoss)
        debug = {
            'ELBO': ELBO,
            'RMS of centered learning signal': U.rms(learning_signal),
        }

        return nvil_gradient, debug
Example #10
    def train(self):

        # import pdb; pdb.set_trace()

        irates = self.sample_rate(100, 0 * Hz, 50 * Hz)
        orates = self.sample_rate(100, 20 * Hz, 100 * Hz)

        self.define_network(irates)
        self.restore_model()

        self.set_plasticity(False)
        # commenced = False
        self.network.run(defaultclock.dt * 50)
        self.set_plasticity(True)

        doter = np.ones(self.args.nepochs) * -1 * Hz
        rewards = np.zeros(self.args.nepochs)

        try:

            for i in range(1, self.args.nepochs + 1):

                print("Epoch: {eno:d}".format(eno=i), self.network.t)
                # import pdb; pdb.set_trace()

                kt_1 = self.olayer.k
                dot_1 = rms(self.olayer.k, orates)

                # self.network.run(defaultclock.dt)
                self.network.run(defaultclock.dt * 100)

                kt = self.olayer.k
                dot = rms(self.olayer.k, orates)

                self.olayer.r = np.sign(dot_1 - dot)

                # if not commenced and rms(kt, kt_1) < EPSILON*Hz:
                # 	commenced = False
                # 	self.set_plasticity(True)

                rewards[i - 1] = np.sign(dot_1 - dot)
                doter[i - 1] = dot

                if self.args.verbose:
                    print("Spikes: {} {}".format(self.kmon_ilayer.num_spikes,
                                                 self.kmon_olayer.num_spikes))
                    print("Distance:", dot)

                    # import pdb; pdb.set_trace()
                    print("Synapses:", np.mean(self.smon_sio.w[:, -1]))

                # if i % (self.args.nepochs_per_save*100) == 0:
                # 	self.save_model()

            plot_cum_reward(rewards, doter,
                            "outputs/exp{}_{}_rs".format(2, self.args.rule),
                            "outputs/exp{}_{}_ds".format(2, self.args.rule))

        except KeyboardInterrupt as e:
            print("Training Interrupted. Refer to model saved in {}".format(
                self.args.model))
Example #11
def calc_load_anomaly (grid, out_file, option='constant', ini_temp_file=None, ini_salt_file=None, ini_temp=None, ini_salt=None, constant_t=-1.9, constant_s=34.4, eosType='MDJWF', rhoConst=1035, tAlpha=None, sBeta=None, Tref=None, Sref=None, hfac=None, prec=64, check_grid=True):

    errorTol = 1e-13  # convergence criteria

    # Build the grid if needed
    if check_grid:
        grid = choose_grid(grid, None)
    # Decide which hfac to use
    if hfac is None:
        hfac = grid.hfac

    # Set temperature and salinity
    if ini_temp is not None and ini_salt is not None:
        # Deep copy of the arrays
        temp = np.copy(ini_temp)
        salt = np.copy(ini_salt)
    elif ini_temp_file is not None and ini_salt_file is not None:
        # Read from file
        temp = read_binary(ini_temp_file, [grid.nx, grid.ny, grid.nz], 'xyz', prec=prec)
        salt = read_binary(ini_salt_file, [grid.nx, grid.ny, grid.nz], 'xyz', prec=prec)
    else:
        print 'Error (calc_load_anomaly): Must either specify ini_temp and ini_salt OR ini_temp_file and ini_salt_file'
        sys.exit()

    # Fill in the ice shelves
    # The bathymetry will get filled too, but that doesn't matter because pressure is integrated from the top down
    closed = hfac==0
    if option == 'constant':
        # Fill with constant values
        temp[closed] = constant_t
        salt[closed] = constant_s
    elif option == 'nearest':
        # Select the layer immediately below the ice shelves and tile to make it 3D
        temp_top = xy_to_xyz(select_top(np.ma.masked_where(closed, temp), return_masked=False), grid)
        salt_top = xy_to_xyz(select_top(np.ma.masked_where(closed, salt), return_masked=False), grid)
        # Fill the mask with these values
        temp[closed] = temp_top[closed]
        salt[closed] = salt_top[closed]    
    elif option == 'precomputed':
        for data in [temp, salt]:
            # Make sure there are no missing values
            if (data[~closed]==0).any():
                print 'Error (calc_load_anomaly): you selected the precomputed option, but there are appear to be missing values in the land mask.'
                sys.exit()
            # Make sure it's not a masked array as this will break the rms
            if isinstance(data, np.ma.MaskedArray):
                # Fill the mask with zeros
                data[data.mask] = 0
                data = data.data
    else:
        print 'Error (calc_load_anomaly): invalid option ' + option
        sys.exit()

    # Get vertical integrands considering z at both centres and edges of layers
    dz_merged = np.zeros(2*grid.nz)
    dz_merged[::2] = abs(grid.z - grid.z_edges[:-1])  # dz of top half of each cell
    dz_merged[1::2] = abs(grid.z_edges[1:] - grid.z)  # dz of bottom half of each cell
    # Tile to make 3D
    z = z_to_xyz(grid.z, grid)
    dz_merged = z_to_xyz(dz_merged, grid)

    # Initial guess for pressure (dbar) at centres of cells
    press = abs(z)*gravity*rhoConst*1e-4

    # Iteratively calculate pressure load anomaly until it converges
    press_old = np.zeros(press.shape)  # Dummy initial value for pressure from last iteration
    rms_error = 0
    while True:
        rms_old = rms_error
        rms_error = rms(press, press_old)
        print 'RMS error = ' + str(rms_error)
        if rms_error < errorTol or np.abs(rms_error-rms_old) < 0.1*errorTol:
            print 'Converged'
            break
        # Save old pressure
        press_old = np.copy(press)
        # Calculate density anomaly at centres of cells
        drho_c = density(eosType, salt, temp, press, rhoConst=rhoConst, Tref=Tref, Sref=Sref, tAlpha=tAlpha, sBeta=sBeta) - rhoConst
        # Use this for both centres and edges of cells
        drho = np.zeros(dz_merged.shape)
        drho[::2,...] = drho_c
        drho[1::2,...] = drho_c
        # Integrate pressure load anomaly (Pa)
        pload_full = np.cumsum(drho*gravity*dz_merged, axis=0)
        # Update estimate of pressure
        press = (abs(z)*gravity*rhoConst + pload_full[1::2,...])*1e-4

    # Extract pload at each level edge (don't care about centres anymore)
    pload_edges = pload_full[::2,...]

    # Now find pload at the ice shelf base
    # For each xy point, calculate three variables:
    # (1) pload at the base of the last fully dry ice shelf cell
    # (2) pload at the base of the cell beneath that
    # (3) hFacC for that cell
    # To calculate (1) we have to shift pload_3d_edges upward by 1 cell
    pload_edges_above = neighbours_z(pload_edges)[0]
    pload_above = select_top(np.ma.masked_where(closed, pload_edges_above), return_masked=False)
    pload_below = select_top(np.ma.masked_where(closed, pload_edges), return_masked=False)
    hfac_below = select_top(np.ma.masked_where(closed, hfac), return_masked=False)
    # Now we can interpolate to the ice base
    pload = pload_above + (1-hfac_below)*(pload_below - pload_above)

    # Write to file
    write_binary(pload, out_file, prec=prec)
Example #12
def getFirstTeamCalcDataKeys(calc):
    sumCategoryADataPointDict = lambda team: utils.dictSum(
        team.calculatedData.avgNumTimesUnaffected,
        utils.dictSum(team.calculatedData.avgNumTimesBeached, team.
                      calculatedData.avgNumTimesSlowed))

    return {
        "avgTorque":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.rankTorque),  # Checked
        "avgSpeed":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.rankSpeed),
        "avgAgility":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.rankAgility),  # Checked
        "avgDefense":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.rankDefense),  # Checked
        "avgBallControl":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.rankBallControl),  # Checked
        "avgDrivingAbility":
        lambda team: calc.drivingAbility(team),
        "disabledPercentage":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: utils.convertFirebaseBoolean(timd.didGetDisabled
                                                            )),
        "incapacitatedPercentage":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: utils.convertFirebaseBoolean(
                timd.didGetIncapacitated)),
        "disfunctionalPercentage":
        lambda team: team.calculatedData.disabledPercentage + team.
        calculatedData.incapacitatedPercentage,

        # Auto
        "autoAbility":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.autoAbility),
        "autoAbilityExcludeD":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: calc.autoAbility(
                calc.timdHasDefenseExclusion(timd, calc.defenseDictionary['d'])
            )),
        "autoAbilityExcludeLB":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: calc.autoAbility(
                calc.timdHasDefenseExclusion(timd, calc.defenseDictionary['e'])
            )),
        "avgHighShotsAuto":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.numHighShotsMadeAuto),  # Checked
        "avgLowShotsAuto":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.numLowShotsMadeAuto),  # Checked   
        "reachPercentage":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: utils.convertFirebaseBoolean(timd.didReachAuto)
        ),
        "highShotAccuracyAuto":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: calc.TIMDShotAccuracy(
                timd.numHighShotsMadeAuto, timd.numHighShotsMissedAuto)
        ),  # Checked
        "lowShotAccuracyAuto":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: calc.TIMDShotAccuracy(
                timd.numLowShotsMadeAuto, timd.numLowShotsMissedAuto)
        ),  # Checked
        "avgMidlineBallsIntakedAuto":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.
            numBallsIntakedOffMidlineAuto),
        "sdMidlineBallsIntakedAuto":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.
            numBallsIntakedOffMidlineAuto),
        "sdHighShotsAuto":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.numHighShotsMadeAuto),  # Checked
        "sdLowShotsAuto":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.numLowShotsMadeAuto),  # Checked
        "sdBallsKnockedOffMidlineAuto":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.numBallsKnockedOffMidlineAuto),

        #Tele
        "scalePercentage":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: int(
                utils.convertFirebaseBoolean(timd.didScaleTele))),
        "challengePercentage":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: int(
                utils.convertFirebaseBoolean(timd.didChallengeTele))),
        "avgGroundIntakes":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.numGroundIntakesTele),  # Checked
        "avgBallsKnockedOffMidlineAuto":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.numBallsKnockedOffMidlineAuto),  # Checked
        "avgShotsBlocked":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.numShotsBlockedTele),  # Checked
        "avgHighShotsTele":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.numHighShotsMadeTele),  # Checked
        "avgLowShotsTele":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.numLowShotsMadeTele),  # Checked
        "highShotAccuracyTele":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: calc.TIMDShotAccuracy(
                timd.numHighShotsMadeTele, timd.numHighShotsMissedTele)
        ),  # Checked
        "lowShotAccuracyTele":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: calc.TIMDShotAccuracy(
                timd.numLowShotsMadeTele, timd.numLowShotsMissedTele)),
        "teleopShotAbility":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.teleopShotAbility
        ),  # Checked
        "siegeConsistency":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: utils.convertFirebaseBoolean(
                timd.didChallengeTele) or utils.convertFirebaseBoolean(
                    timd.didScaleTele)),  # Checked
        "siegeAbility":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.siegeAbility),  # Checked
        "sdHighShotsTele":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.numHighShotsMadeTele),  # Checked
        "sdLowShotsTele":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.numLowShotsMadeTele),  # Checked
        "sdGroundIntakes":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.numGroundIntakesTele),  # Checked
        "sdShotsBlocked":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.numShotsBlockedTele),  # Checked
        "sdTeleopShotAbility":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.teleopShotAbility),
        "sdSiegeAbility":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.siegeAbility),
        "sdAutoAbility":
        lambda team: calc.getStandardDeviationForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.autoAbility),
        "numScaleAndChallengePoints":
        lambda team: calc.numScaleAndChallengePointsForTeam(team),  # Checked
        "breachPercentage":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: utils.
            convertFirebaseBoolean(lambda team: calc.teamDidBreachInMatch(
                team, lambda team: calc.su.getMatchForNumber(timd.matchNumber))
                                   )),
        "avgHighShotsAttemptedTele":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.highShotsAttemptedTele),
        "avgLowShotsAttemptedTele":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda timd: timd.calculatedData.lowShotsAttemptedTele),
        "twoBallAutoTriedPercentage":
        lambda team: calc.twoBallAutoTriedPercentage(team),
        "twoBallAutoAccuracy":
        lambda team: calc.twoBallAutoAccuracy(team),
        "avgNumTimesBeached":
        lambda team: calc.categoryAAverageDictForDataFunction(
            team, lambda timd: timd.numTimesBeached),
        "avgNumTimesSlowed": {
            "pc": lambda team: calc.avgNumTimesSlowed(team, "pc"),
            "cdf": lambda team: calc.avgNumTimesSlowed(team, "cdf")
        },
        "avgNumTimesUnaffected":
        lambda team: calc.categoryAAverageDictForDataFunction(
            team, lambda timd: timd.numTimesUnaffected),
        "beachedPercentage":
        lambda team: utils.dictQuotient(team.calculatedData.avgNumTimesBeached,
                                        sumCategoryADataPointDict(team)),
        "slowedPercentage":
        lambda team: utils.dictQuotient(team.calculatedData.avgNumTimesSlowed,
                                        sumCategoryADataPointDict(team)),
        "unaffectedPercentage":
        lambda team: utils.dictQuotient(
            team.calculatedData.avgNumTimesUnaffected,
            sumCategoryADataPointDict(team)),
        "avgNumTimesCrossedDefensesAuto":
        lambda team: calc.getAverageForDataFunctionForTeam(
            team, lambda tm: tm.calculatedData.totalNumTimesCrossedDefensesAuto
        ),
        "defenses": [
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.
                avgSuccessfulTimesCrossedDefensesTele, lambda tm: tm.
                timesSuccessfulCrossedDefensesTele, lambda x: np.mean(x)
                if x != None and len(x) > 0 else 0, lambda y: len(y)
                if y != None else 0),
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.
                avgSuccessfulTimesCrossedDefensesAuto, lambda tm: tm.
                timesSuccessfulCrossedDefensesAuto, lambda x: np.mean(x)
                if x != None and len(x) > 0 else 0, lambda y: len(y)
                if y != None else 0),
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.avgFailedTimesCrossedDefensesTele,
                lambda tm: tm.timesFailedCrossedDefensesTele, lambda x: np.
                mean(x) if x != None and len(x) > 0 else 0, lambda y: len(y)
                if y != None else 0),
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.avgFailedTimesCrossedDefensesAuto,
                lambda tm: tm.timesFailedCrossedDefensesAuto, lambda x: np.
                mean(x) if x != None and len(x) > 0 else 0, lambda y: len(y)
                if y != None else 0),
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.avgTimeForDefenseCrossTele, lambda
                tm: tm.timesSuccessfulCrossedDefensesTele, lambda x: np.mean(x)
                if x != None and len(x) > 0 else 0, lambda y: np.mean(y)
                if y != None and len(y) > 0 else 0),
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.avgTimeForDefenseCrossAuto, lambda
                tm: tm.timesSuccessfulCrossedDefensesAuto, lambda x: np.mean(x)
                if x != None and len(x) > 0 else 0, lambda y: np.mean(y)
                if y != None and len(y) > 0 else 0),
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.sdSuccessfulDefenseCrossesAuto,
                lambda tm: tm.timesSuccessfulCrossedDefensesAuto, lambda x:
                utils.rms(x)
                if x != None and len(x) > 0 else 0, lambda y: len(y)
                if y != None else 0),
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.sdSuccessfulDefenseCrossesTele,
                lambda tm: tm.timesSuccessfulCrossedDefensesTele, lambda x:
                utils.rms(x)
                if x != None and len(x) > 0 else 0, lambda y: len(y)
                if y != None else 0),
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.sdFailedDefenseCrossesAuto, lambda
                tm: tm.timesFailedCrossedDefensesAuto, lambda x: utils.rms(x)
                if x != None and len(x) > 0 else 0, lambda y: len(y)
                if y != None else 0),
            lambda team: calc.setDefenseValuesForTeam(
                team, team.calculatedData.sdFailedDefenseCrossesTele, lambda
                tm: tm.timesFailedCrossedDefensesTele, lambda x: utils.rms(x)
                if x != None and len(x) > 0 else 0, lambda y: len(y)
                if y != None else 0)
        ]
    }
Example #13
def flux(region):

    # import the catalog file, get names of bands
    filename = glob('./cat/mastercat_region{}*'.format(region))[0]
    catalog = Table(Table.read(filename, format='ascii'), masked=True)
    catalog.sort('_idx')

    bands = np.array(filename.split('bands_')[1].split('.dat')[0].split('_'),
                     dtype=int)
    n_bands = len(bands)
    n_rows = len(catalog)

    ellipse_npix_col = MaskedColumn(length=len(catalog),
                                    name='ellipse_npix',
                                    mask=True)
    circ1_npix_col = MaskedColumn(length=len(catalog),
                                  name='circ1_npix',
                                  mask=True)
    circ2_npix_col = MaskedColumn(length=len(catalog),
                                  name='circ2_npix',
                                  mask=True)
    circ3_npix_col = MaskedColumn(length=len(catalog),
                                  name='circ3_npix',
                                  mask=True)

    n_rejected = 0

    for i in range(n_bands):

        band = bands[i]

        # Load image file for this band
        print("\nLoading image file for region {} in band {} (Image {}/{})".
              format(region, band, i + 1, n_bands))
        imfile = grabfileinfo(region, band)[0]
        contfile = fits.open(imfile)
        data = contfile[0].data.squeeze()

        # Set up wcs, beam, and pixel scale for this image
        mywcs = wcs.WCS(contfile[0].header).celestial
        beam = radio_beam.Beam.from_fits_header(contfile[0].header)
        pixel_scale = np.abs(
            mywcs.pixel_scale_matrix.diagonal().prod())**0.5 * u.deg
        ppbeam = (beam.sr / (pixel_scale**2)).decompose().value
        print('ppbeam: ', ppbeam)
        data = data / ppbeam

        # Set up columns for each aperture
        peak_flux_col = MaskedColumn(length=len(catalog),
                                     name='peak_flux_band{}'.format(band),
                                     mask=True)
        annulus_median_col = MaskedColumn(
            length=len(catalog),
            name='annulus_median_band{}'.format(band),
            mask=True)
        annulus_rms_col = MaskedColumn(length=len(catalog),
                                       name='annulus_rms_band{}'.format(band),
                                       mask=True)
        ellipse_flux_col = MaskedColumn(
            length=len(catalog),
            name='ellipse_flux_band{}'.format(band),
            mask=True)
        circ1_flux_col = MaskedColumn(length=len(catalog),
                                      name='circ1_flux_band{}'.format(band),
                                      mask=True)
        circ2_flux_col = MaskedColumn(length=len(catalog),
                                      name='circ2_flux_band{}'.format(band),
                                      mask=True)
        circ3_flux_col = MaskedColumn(length=len(catalog),
                                      name='circ3_flux_band{}'.format(band),
                                      mask=True)

        ellipse_rms_col = MaskedColumn(length=len(catalog),
                                       name='ellipse_rms_band{}'.format(band),
                                       mask=True)
        circ1_rms_col = MaskedColumn(length=len(catalog),
                                     name='circ1_rms_band{}'.format(band),
                                     mask=True)
        circ2_rms_col = MaskedColumn(length=len(catalog),
                                     name='circ2_rms_band{}'.format(band),
                                     mask=True)
        circ3_rms_col = MaskedColumn(length=len(catalog),
                                     name='circ3_rms_band{}'.format(band),
                                     mask=True)

        circ1_r, circ2_r, circ3_r = 5e-6 * u.deg, 1e-5 * u.deg, 1.5e-5 * u.deg

        print('Photometering sources')
        pb = ProgressBar(len(catalog[np.where(catalog['rejected'] == 0)]))

        masks = []
        datacube = []
        rejects = []
        snr_vals = []
        names = []

        # Iterate over sources, extracting ellipse parameters
        for j in range(n_rows):

            if catalog['rejected'][j] == 1:
                continue

            source = catalog[j]
            x_cen = source['x_cen'] * u.deg
            y_cen = source['y_cen'] * u.deg
            major = source['major_fwhm'] * u.deg
            minor = source['minor_fwhm'] * u.deg
            pa = source['position_angle'] * u.deg

            annulus_width = 1e-5 * u.deg
            center_distance = 1e-5 * u.deg

            # Convert to pixel coordinates
            position = coordinates.SkyCoord(x_cen,
                                            y_cen,
                                            frame='icrs',
                                            unit=(u.deg, u.deg))
            pix_position = np.array(position.to_pixel(mywcs))
            pix_major = major / pixel_scale
            pix_minor = minor / pixel_scale

            # Create cutout
            size = np.max([
                circ3_r.value,
                major.value + center_distance.value + annulus_width.value
            ]) * 2.2 * u.deg
            try:
                cutout = Cutout2D(data, position, size, mywcs, mode='partial')
            except NoOverlapError:
                catalog['rejected'][j] = 1
                pb.update()
                continue
            cutout_center = regions.PixCoord(cutout.center_cutout[0],
                                             cutout.center_cutout[1])
            datacube.append(cutout.data)

            # create all aperture shapes
            ellipse_reg = regions.EllipsePixelRegion(cutout_center,
                                                     pix_major * 2.,
                                                     pix_minor * 2.,
                                                     angle=pa)
            circ1_reg = regions.CirclePixelRegion(cutout_center,
                                                  circ1_r / pixel_scale)
            circ2_reg = regions.CirclePixelRegion(cutout_center,
                                                  circ2_r / pixel_scale)
            circ3_reg = regions.CirclePixelRegion(cutout_center,
                                                  circ3_r / pixel_scale)

            innerann_reg = regions.CirclePixelRegion(
                cutout_center, center_distance / pixel_scale + pix_major)
            outerann_reg = regions.CirclePixelRegion(
                cutout_center, center_distance / pixel_scale + pix_major +
                annulus_width / pixel_scale)

            annulus_mask = mask(outerann_reg, cutout) - mask(
                innerann_reg, cutout)

            # get flux information from regions
            ellipse_flux, ellipse_rms, peak_flux, ellipse_mask, ellipse_npix = apsum(
                ellipse_reg, cutout)
            circ1_flux, circ1_rms, _, circ1_mask, circ1_npix = apsum(
                circ1_reg, cutout)
            circ2_flux, circ2_rms, _, circ2_mask, circ2_npix = apsum(
                circ2_reg, cutout)
            circ3_flux, circ3_rms, _, circ3_mask, circ3_npix = apsum(
                circ3_reg, cutout)

            annulus_rms = rms(cutout.data[annulus_mask.astype('bool')])
            annulus_median = np.median(
                cutout.data[annulus_mask.astype('bool')])

            # Add grid plot mask to list
            masklist = [
                ellipse_mask, annulus_mask, circ1_mask, circ2_mask, circ3_mask
            ]
            masks.append(masklist)

            # add fluxes to appropriate columns
            peak_flux_col[j] = peak_flux
            ellipse_flux_col[j], ellipse_rms_col[j] = ellipse_flux, ellipse_rms
            circ1_flux_col[j], circ1_rms_col[j] = circ1_flux, circ1_rms
            circ2_flux_col[j], circ2_rms_col[j] = circ2_flux, circ2_rms
            circ3_flux_col[j], circ3_rms_col[j] = circ3_flux, circ3_rms

            ellipse_npix_col[j] = ellipse_npix
            circ1_npix_col[j] = circ1_npix
            circ2_npix_col[j] = circ2_npix
            circ3_npix_col[j] = circ3_npix

            annulus_median_col[j] = annulus_median
            annulus_rms_col[j] = annulus_rms

            catalog['snr_band' + str(band)][j] = peak_flux / annulus_rms
            snr_vals.append(peak_flux / annulus_rms)
            names.append(catalog['_idx'][j])

            # Secondary rejection
            rejected = 0
            lowest_flux = np.min(
                [ellipse_flux, circ1_flux, circ2_flux, circ3_flux])
            #if lowest_flux <= annulus_median*ellipse_npix or lowest_flux < 0:
            if lowest_flux < 0:
                catalog['rejected'][j] = 1
                n_rejected += 1
                rejected = 1
            rejects.append(rejected)
            pb.update()

        # Plot the grid of sources
        plot_grid(datacube, masks, rejects, snr_vals, names)
        plt.suptitle('region={}, band={}'.format(region, band))
        plt.show(block=False)

        # add columns to catalog
        catalog.add_columns([
            peak_flux_col,
            ellipse_flux_col,
            ellipse_rms_col,
            circ1_flux_col,
            circ1_rms_col,
            circ2_flux_col,
            circ2_rms_col,
            circ3_flux_col,
            circ3_rms_col,
        ])

    catalog.add_columns([
        ellipse_npix_col, circ1_npix_col, circ2_npix_col, circ3_npix_col,
        annulus_median_col, annulus_rms_col
    ])
    print("\n{} sources flagged for secondary rejection".format(n_rejected))

    # save catalog
    catalog = catalog[sorted(catalog.colnames)]
    catalog.write(filename.split('.dat')[0] + '_photometered.dat',
                  format='ascii')
    print("\nMaster catalog saved as '{}'".format(
        filename.split('.dat')[0] + '_photometered.dat'))
Example #14
File: reject.py  Project: mccbc/nrao
def reject(imfile, catfile, threshold):
    """Reject noisy detections.
    
    Parameters
    ----------
    imfile : str
        The path to the radio image file
    catfile : str
        The path to the source catalog, as obtained from detect.py
    threshold : float
        The signal-to-noise threshold below which sources are rejected
    """
    # Extract information from filename
    outfile = os.path.basename(catfile).split('cat_')[1].split('.dat')[0]
    region = outfile.split('region')[1].split('_band')[0]
    band = outfile.split('band')[1].split('_val')[0]
    min_value = outfile.split('val')[1].split('_delt')[0]
    min_delta = outfile.split('delt')[1].split('_pix')[0]
    min_npix = outfile.split('pix')[1]
    print("\nSource rejection for region {} in band {}".format(region, band))

    print("Loading image file")
    contfile = fits.open(imfile)
    data = contfile[0].data.squeeze()
    mywcs = wcs.WCS(contfile[0].header).celestial

    catalog = Table(Table.read(catfile, format='ascii'), masked=True)

    beam = radio_beam.Beam.from_fits_header(contfile[0].header)
    pixel_scale = np.abs(
        mywcs.pixel_scale_matrix.diagonal().prod())**0.5 * u.deg
    ppbeam = (beam.sr / (pixel_scale**2)).decompose().value

    data = data / ppbeam

    # Remove existing region files
    if os.path.isfile('./reg/reg_' + outfile + '_annulus.reg'):
        os.remove('./reg/reg_' + outfile + '_annulus.reg')
    if os.path.isfile('./reg/reg_' + outfile + '_filtered.reg'):
        os.remove('./reg/reg_' + outfile + '_filtered.reg')

    # Load in manually accepted and rejected sources
    override_accepted = []
    override_rejected = []
    if os.path.isfile('./.override/accept_' + outfile + '.txt'):
        override_accepted = np.loadtxt('./.override/accept_' + outfile +
                                       '.txt').astype('int')
    if os.path.isfile('./.override/reject_' + outfile + '.txt'):
        override_rejected = np.loadtxt('./.override/reject_' + outfile +
                                       '.txt').astype('int')
    print("\nManually accepted sources: ", set(override_accepted))
    print("Manually rejected sources: ", set(override_rejected))

    print('\nCalculating RMS values within aperture annuli')
    pb = ProgressBar(len(catalog))

    data_cube = []
    masks = []
    rejects = []
    snr_vals = []
    mean_backgrounds = []

    for i in range(len(catalog)):
        x_cen = catalog['x_cen'][i] * u.deg
        y_cen = catalog['y_cen'][i] * u.deg
        major_fwhm = catalog['major_fwhm'][i] * u.deg
        minor_fwhm = catalog['minor_fwhm'][i] * u.deg
        position_angle = catalog['position_angle'][i] * u.deg
        dend_flux = catalog['dend_flux_band{}'.format(band)][i]

        annulus_width = 1e-5 * u.deg
        center_distance = 1e-5 * u.deg

        # Define some ellipse properties in pixel coordinates
        position = coordinates.SkyCoord(x_cen,
                                        y_cen,
                                        frame='icrs',
                                        unit=(u.deg, u.deg))
        pix_position = np.array(position.to_pixel(mywcs))
        pix_major_fwhm = major_fwhm / pixel_scale
        pix_minor_fwhm = minor_fwhm / pixel_scale

        # Cutout section of the image we care about, to speed up computation time
        size = (center_distance + annulus_width + major_fwhm) * 2.2
        cutout = Cutout2D(data, position, size, mywcs, mode='partial')
        cutout_center = regions.PixCoord(cutout.center_cutout[0],
                                         cutout.center_cutout[1])

        # Define the aperture regions needed for SNR
        ellipse_reg = regions.EllipsePixelRegion(
            cutout_center,
            pix_major_fwhm * 2.,
            pix_minor_fwhm * 2.,
            angle=position_angle
        )  # Make sure you're running the dev version of regions, otherwise the position angles will be in radians!

        innerann_reg = regions.CirclePixelRegion(
            cutout_center, center_distance / pixel_scale + pix_major_fwhm)
        outerann_reg = regions.CirclePixelRegion(
            cutout_center, center_distance / pixel_scale + pix_major_fwhm +
            annulus_width / pixel_scale)

        # Make masks from aperture regions
        ellipse_mask = mask(ellipse_reg, cutout)
        annulus_mask = mask(outerann_reg, cutout) - mask(innerann_reg, cutout)

        # Plot annulus and ellipse regions
        data_cube.append(cutout.data)
        masks.append([annulus_mask, ellipse_mask])

        # Calculate the SNR and aperture flux sums
        bg_rms = rms(cutout.data[annulus_mask.astype('bool')])
        peak_flux = np.max(cutout.data[ellipse_mask.astype('bool')])
        flux_rms_ratio = peak_flux / bg_rms
        snr_vals.append(flux_rms_ratio)

        # Reject bad sources below some SNR threshold
        rejected = False
        if flux_rms_ratio <= threshold:
            rejected = True

        # Process manual overrides
        if catalog['_idx'][i] in override_accepted:
            rejected = False
        if catalog['_idx'][i] in override_rejected:
            rejected = True
        rejects.append(int(rejected))

        # Add non-rejected source ellipses to a new region file
        fname = './reg/reg_' + outfile + '_filtered.reg'
        with open(fname, 'a') as fh:
            if os.stat(fname).st_size == 0:
                fh.write("icrs\n")
            if not rejected:
                fh.write("ellipse({}, {}, {}, {}, {}) # text={{{}}}\n".format(
                    x_cen.value, y_cen.value, major_fwhm.value,
                    minor_fwhm.value, position_angle.value, i))
        pb.update()

    # Plot the grid of sources
    plot_grid(data_cube, masks, rejects, snr_vals, catalog['_idx'])
    plt.suptitle(
        'region={}, band={}, min_value={}, min_delta={}, min_npix={}, threshold={:.4f}'
        .format(region, band, min_value, min_delta, min_npix, threshold))
    plt.show(block=False)

    # Get overrides from user
    print(
        'Manual overrides example: type "r319, a605" to manually reject source #319 and accept source #605.'
    )
    overrides = input(
        "\nType manual override list, or press enter to continue:\n").split(
            ', ')
    accepted_list = [
        s[1:] for s in list(filter(lambda x: x.startswith('a'), overrides))
    ]
    rejected_list = [
        s[1:] for s in list(filter(lambda x: x.startswith('r'), overrides))
    ]

    # Save the manually accepted and rejected sources
    fname = './.override/accept_' + outfile + '.txt'
    with open(fname, 'a') as fh:
        for num in accepted_list:
            fh.write('\n' + str(num))
    fname = './.override/reject_' + outfile + '.txt'
    with open(fname, 'a') as fh:
        for num in rejected_list:
            fh.write('\n' + str(num))
    print(
        "Manual overrides written to './.override/' and saved to source catalog. New overrides will be displayed the next time the rejection script is run."
    )

    # Process the new overrides, to be saved into the catalog
    rejects = np.array(rejects)
    acc = np.array([a[-2:] for a in accepted_list], dtype=int)
    rej = np.array([r[-2:] for r in rejected_list], dtype=int)
    rejects[acc] = 0
    rejects[rej] = 1

    # Save the catalog with new columns for SNR
    catalog.add_column(Column(snr_vals), name='snr_band' + band)
    catalog.add_column(np.invert(catalog.mask['snr_band' + band]).astype(int),
                       name='detected_band' + band)
    catalog.add_column(Column(rejects), name='rejected')
    catalog.write('./cat/cat_' + outfile + '_filtered.dat', format='ascii')
Example #15
    X = tf.cast(X, tf.complex64)
    pad_amount = 2 * (n_fft - n_hopsize)
    audio_tf = tf.contrib.signal.inverse_stft(
        tf.transpose(X),
        n_fft,
        n_hopsize,
        window_fn=tf.contrib.signal.inverse_stft_window_fn(n_hopsize))
    if center and pad_amount > 0:
        audio_tf = audio_tf[pad_amount // 2:-pad_amount // 2]

    if out_type == "tf":
        return audio_tf
    elif out_type == "numpy":
        return audio_tf.numpy()

    return audio_tf


def spectrogram(X, power):
    return tf.abs(X)**power


if __name__ == "__main__":
    s = utils.sine()
    #    s = np.stack([s, s, s, s])
    X = stft(s)
    x = istft(X)
    #    print(s)
    #    print(x)
    print(utils.rms(s, x))
Example #16
def report_sim_difference(sim0: rebound.Simulation, sim1: rebound.Simulation,
                          object_names: List[str], verbose: bool=False) -> \
                          Tuple[np.array, np.array]:
    """Report the difference between two simulations on a summary basis"""
    # Extract configuration arrays for the two simulations
    cfg0: np.array = sim_cfg_array(sim0, object_names)
    cfg1: np.array = sim_cfg_array(sim1, object_names)

    # Convert both arrays to heliocentric coordinates
    cfg0 = cfg0 - cfg0[0:1, :]
    cfg1 = cfg1 - cfg1[0:1, :]

    # Displacement of each body to earth
    earth_idx: int = object_names.index('Earth')
    q0: np.array = cfg0[:, 0:3] - cfg0[earth_idx, 0:3]
    q1: np.array = cfg1[:, 0:3] - cfg1[earth_idx, 0:3]

    # Right Ascension and Declination
    r0, asc0, dec0 = cart_to_sph(q0)
    r1, asc1, dec1 = cart_to_sph(q1)

    # Error in asc and dec; convert from radians to arcseconds
    asc_err: np.array = np.degrees(np.abs(asc1 - asc0)) * 3600
    dec_err: np.array = np.degrees(np.abs(dec1 - dec0)) * 3600

    # Take differences
    cfg_diff: np.array = (cfg1 - cfg0)
    pos_diff: np.array = cfg_diff[:, 0:3]
    vel_diff: np.array = cfg_diff[:, 3:6]

    # Error in position and velocity in heliocentric coordinates; skip the sun
    pos_err: np.array = np.linalg.norm(pos_diff, axis=1)
    pos_err_den: np.array = np.linalg.norm(cfg0[:, 0:3], axis=1)
    pos_err_den[0] = 1.0
    pos_err_rel: np.array = pos_err / pos_err_den
    vel_err: np.array = np.linalg.norm(vel_diff, axis=1)
    vel_err_den: np.array = np.linalg.norm(cfg0[:, 3:6], axis=1)
    vel_err_den[0] = 1.0
    vel_err_rel: np.array = vel_err / vel_err_den

    if verbose:
        print(f'\nPosition difference - absolute & relative')
        print(f'(Angle errors in arcseconds, position in AU)')
        print(f'Body       : Phi     : Theta   : Pos AU  : Pos Rel : Vel Rel')
        object_names_short: List[str] = [
            nm.replace(' Barycenter', '') for nm in object_names
        ]
        for i, nm in enumerate(object_names_short):
            print(
                f'{nm:10} : {asc_err[i]:5.2e}: {dec_err[i]:5.2e}: {pos_err[i]:5.2e}: '
                f'{pos_err_rel[i]:5.2e}: {vel_err_rel[i]:5.2e}')
        print(
            f'Overall    : {rms(asc_err):5.2e}: {rms(dec_err):5.2e}: {rms(pos_err):5.2e}: '
            f'{rms(pos_err_rel):5.2e}: {rms(vel_err_rel):5.2e}')

    # Extract orbital element arrays from the two simulations
    elt0: np.array = sim_elt_array(sim0, object_names[1:])
    elt1: np.array = sim_elt_array(sim1, object_names[1:])

    # Take differences
    elt_diff: np.array = (elt1 - elt0)
    # Angle differences are mod two pi
    two_pi: float = 2.0 * np.pi
    elt_diff[:, 2:] = (elt_diff[:, 2:] + np.pi) % two_pi - np.pi

    # Compute RMS difference by orbital element
    elt_rms: np.array = rms(elt_diff, axis=0)
    elt_err: np.array = np.abs(elt_diff)

    # Names of selected elements
    elt_names: List[str] = [
        'a', 'e', 'inc', 'Omega', 'omega', 'f', 'M', 'pomega', 'long'
    ]

    # Report RMS orbital element differences
    if verbose:
        print(f'\nOrbital element errors:')
        print(f'elt    : RMS      : worst      : max_err  : HRZN        : REB')
        for j, elt in enumerate(elt_names):
            idx = np.argmax(elt_err[:, j])
            worse = object_names_short[idx + 1]
            print(
                f'{elt:6} : {elt_rms[j]:5.2e} : {worse:10} : {elt_err[idx, j]:5.2e} : '
                f'{elt0[idx, j]:11.8f} : {elt1[idx, j]:11.8f}')
        print(f'RMS (a, e, inc) =          {rms(elt_diff[:,0:3]):5.2e}')
        print(f'RMS (f, M, pomega, long) = {rms(elt_diff[:,5:9]):5.2e}')

    # One summary error statistic
    ang_err: np.array = rms(np.array([asc_err, dec_err]))

    # Return the RMS position error and angle errors
    return pos_err, ang_err
Example #17
import test_scipy
import test_librosa
import test_torch
import utils
import numpy as np

stfts = [test_torch, test_scipy, test_librosa]
istfts = [test_torch, test_scipy, test_librosa]

n_fft = 2048
n_hopsize = 1024

if __name__ == "__main__":
    s = utils.sine(dtype=np.float32)
    for forward_method in stfts:
        stft = getattr(forward_method, 'stft')
        for inverse_method in istfts:
            istft = getattr(inverse_method, 'istft')
            X = stft(s, n_fft=n_fft, n_hopsize=n_hopsize)
            x = istft(X, n_fft=n_fft, n_hopsize=n_hopsize)

            print(forward_method.__name__, "-->", inverse_method.__name__,
                  utils.rms(s, x))
Example #18
    def fit_curve(self, function='gauss'):
        """Returns (height, x, y, width_x, width_y)
        the gaussian parameters of a 2D distribution found by a fit"""
        if self.squared_data is None:
            try:
                self.squared_data = self.fill_to_square(11, 11)
            except IndexError:
                raise IndexError("Border object, ignore")

        if function == 'gauss':
            # params = self.moments(self.squared_data)
            # errorfunction = lambda p: np.ravel(self.gaussian(*p)(*np.indices(self.squared_data.shape)) - self.squared_data)
            # try:
            #     p, success = optimize.leastsq(errorfunction, params, maxfev=5000)
            #     p2, success = optimize.leastsq(errorfunction, params, maxfev=500000, xtol=1e-1)
            #     p3, success = optimize.leastsq(errorfunction, params, maxfev=50000000, ftol=1e-10)
            # except TypeError as e:
            #     return 'Error during fitting'
            # x = np.arange(0, self.squared_data.shape[1], 1)
            # y = np.arange(0, self.squared_data.shape[0], 1)
            # predicted = np.zeros(self.squared_data.shape)
            # predicted2 = np.zeros(self.squared_data.shape)
            # predicted3 = np.zeros(self.squared_data.shape)
            # for y, row in enumerate(self.squared_data):
            #     for x, val in enumerate(row):
            #         predicted[y][x] = self.gaussian(*p)(x,y)
            #         predicted2[y][x] = self.gaussian(*p2)(x,y)
            #         predicted3[y][x] = self.gaussian(*p3)(x,y)
            # show_3d_data(self.squared_data, secondary_data=[predicted, predicted2, predicted3])
            # print('#################################')
            # print('Root mean error:', rms(self.squared_data, predicted))
            # # print(self.gaussian(*p)(1,1))
            # self.params = p
            # return p
            x = np.linspace(0, 10, 11)
            y = np.linspace(0, 10, 11)
            x, y = np.meshgrid(x, y)

            # initial_guess = (3,100,100,20,40,0,10)
            popt, pcov = curve_fit(self.twoD_Gaussian, (x, y),
                                   self.squared_data.flatten(),
                                   maxfev=50000000)

            predicted = self.twoD_Gaussian(
                (x, y), *popt).reshape(*self.squared_data.shape)
            print('Root mean error:', rms(self.squared_data, predicted))
            show_3d_data(self.squared_data, secondary_data=[predicted])

        if function == 'astropy_gauss':
            x = np.arange(0, self.squared_data.shape[1], 1)
            y = np.arange(0, self.squared_data.shape[0], 1)
            matrix_x, matrix_y = np.meshgrid(x, y)
            amp_init = np.matrix(self.squared_data).max()
            halfsize = 5
            stdev_init = 0.33 * halfsize

            # Fit the data using a box model.
            # Bounds are not really needed but included here to demonstrate usage.

            def tie_stddev(model):  # we need this for tying x_std and y_std
                xstddev = model.x_stddev
                return xstddev

            params = self.moments(self.squared_data)
            t_init = models.Gaussian2D(x_mean=halfsize + 0.5,
                                       y_mean=halfsize + 0.5,
                                       x_stddev=stdev_init,
                                       y_stddev=stdev_init,
                                       amplitude=amp_init,
                                       tied={'y_stddev': tie_stddev})

            fit_m = fitting.LevMarLSQFitter()
            m = fit_m(t_init, matrix_x, matrix_y, self.squared_data)
            print(fit_m.fit_info['message'])

            predicted = np.zeros(self.squared_data.shape, dtype=int)
            for y, row in enumerate(self.squared_data):
                for x, val in enumerate(row):
                    predicted[y][x] = m(x, y)

            rme = rms(self.squared_data, predicted)

            print('Root mean error:', rme)

            self.kurtosis = kurtosis(self.squared_data.flatten())
            self.skew = skew(self.squared_data.flatten())
            print('kurtosis: {}, skew: {}'.format(self.kurtosis, self.skew))

            show_3d_data(self.squared_data, secondary_data=[predicted])
        elif function == 'veres':
            self.length = 50  # from self.header_data
            self.width = 10  # from self.header_data
            self.total_flux = self.squared_data.sum()  # from self.header_data
            self.rotation = 5  # rotation from self.header_data
            # x0 = math.ceil(np.mean((self.min_x, self.max_x)))
            # y0 = math.ceil(np.mean((self.min_y, self.max_y)))
            x0 = len(self.squared_data[0]) // 2
            y0 = len(self.squared_data) // 2

            params = np.array([x0, y0, 50, 10, 5])
            print('fitting object with params {}'.format(params))
            # print(self.veres(*params)(2,2))
            print(self.squared_data)
            res = self.veres(*params)(2, 2)
            print(np.indices(self.squared_data.shape))
            print(res)
            assert False, 'end'
            errorfunction = lambda p: np.ravel(
                self.veres(*p)(*np.indices(self.squared_data.shape)) -
                self.squared_data)
            try:
                p, success = optimize.leastsq(errorfunction, params)
                assert False, 'SOMETHING WORKS'
            except TypeError as e:
                return 'Error during fitting'
            print('################')
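The twoD_Gaussian model passed to curve_fit is not included above. Judging by the commented-out seven-value initial_guess, it is presumably the common elliptical 2D Gaussian with rotation and constant offset, returned flattened so curve_fit can compare it against squared_data.flatten(). A sketch under that assumption:

import numpy as np

def twoD_Gaussian(xy, amplitude, xo, yo, sigma_x, sigma_y, theta, offset):
    # Elliptical 2D Gaussian with rotation `theta` and constant `offset`,
    # flattened so it can serve directly as a scipy.optimize.curve_fit model.
    x, y = xy
    a = (np.cos(theta)**2) / (2*sigma_x**2) + (np.sin(theta)**2) / (2*sigma_y**2)
    b = -(np.sin(2*theta)) / (4*sigma_x**2) + (np.sin(2*theta)) / (4*sigma_y**2)
    c = (np.sin(theta)**2) / (2*sigma_x**2) + (np.cos(theta)**2) / (2*sigma_y**2)
    g = offset + amplitude * np.exp(-(a*(x - xo)**2 + 2*b*(x - xo)*(y - yo) + c*(y - yo)**2))
    return g.ravel()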
def extract_dataframes():

	for pid in pids:
		print ()
		print ('pid: ', pid)
		tac_reading = pd.read_csv('clean_tac/' + pid + '_clean_TAC.csv')
		acc_data = pd.read_csv('accelerometer/accelerometer_' + pid + '.csv')

		tac_labels = []

		for feat_no, feature in enumerate(features):
			print ('   feature:', feature)
			array_long = []

			for ind, row in tac_reading.iterrows():
				
				if ind!=0:
				
					t1, t2 = prev_row['timestamp'], row['timestamp']
					long_data = acc_data[ (acc_data['time']/1000 >= t1) & (acc_data['time']/1000 < t2) ]

					if not long_data.empty:
						
						if feat_no==0:
							if prev_row['TAC_Reading'] >= 0.08:
								tac_labels.append(1)
							else:
								tac_labels.append(0) 

						if feature=='rms':
							lt = []
							for axis in ['x', 'y', 'z']:
								lt.append(utils.rms(long_data[axis]))

							lt = np.array(lt)
							array_long.append(lt)

						else:
							short_datas = np.array_split(long_data, 300)
							
							# stores the features for every 1 second in 10 second segment
							array_short = []

							for short_seg, short_data in enumerate(short_datas):

								# data_short = data_long[data_long['short_segment']==short_seg]

								lt = []
								for axis in ['x', 'y', 'z']:
									data_axis =	np.array(short_data[axis])

									if feature=='mean':
										lt.append(utils.mean_feature(data_axis))
									elif feature=='std':
										lt.append(utils.std(data_axis))
									elif feature=='median':
										lt.append(utils.median(data_axis))
									elif feature=='crossing_rate':
										lt.append(utils.crossing_rate(data_axis))
									elif feature=='max_abs':
										lt.append(utils.max_abs(data_axis))
									elif feature=='min_abs':
										lt.append(utils.min_abs(data_axis))
									elif feature=='max_raw':
										lt.append(utils.max_raw(data_axis))
									elif feature=='min_raw':
										lt.append(utils.min_raw(data_axis))
									elif feature=='spec_entrp_freq':
										lt.append(utils.spectral_entropy_freq(data_axis))
									elif feature=='spec_entrp_time':
										lt.append(utils.spectral_entropy_time(data_axis))
									elif feature=='spec_centroid':
										lt.append(utils.spectral_centroid(data_axis))
									elif feature=='spec_spread':
										lt.append(utils.spectral_spread(data_axis))
									elif feature=='spec_rolloff':
										lt.append(utils.spectral_rolloff(data_axis))
									elif feature=='max_freq':
										lt.append(utils.max_freq(data_axis))
									elif feature=='spec_flux':
										if short_seg==0:
											lt.append(utils.spectral_flux(data_axis, np.zeros(len(data_axis))))
											if axis=='x':
												x = data_axis
											elif axis=='y':
												y = data_axis
											elif axis=='z':
												z = data_axis
										else:
											if axis=='x':
												if len(data_axis) > len(x):
													zeros = np.zeros(len(data_axis) - len(x))
													x = np.append(x, zeros)
												elif len(data_axis) < len(x):
													zeros = np.zeros(len(x) - len(data_axis))
													data_axis = np.append(data_axis, zeros)

												lt.append(utils.spectral_flux(data_axis, x))
											elif axis=='y':
												if len(data_axis) > len(y):
													zeros = np.zeros(len(data_axis) - len(y))
													y = np.append(y, zeros)
												elif len(data_axis) < len(y):
													zeros = np.zeros(len(y) - len(data_axis))
													data_axis = np.append(data_axis, zeros)

												lt.append(utils.spectral_flux(data_axis, y))
											elif axis=='z':
												if len(data_axis) > len(z):
													zeros = np.zeros(len(data_axis) - len(z))
													z = np.append(z, zeros)
												elif len(data_axis) < len(z):
													zeros = np.zeros(len(z) - len(data_axis))
													data_axis = np.append(data_axis, zeros)

												lt.append(utils.spectral_flux(data_axis, z))


								array_short.append(np.array(lt))
							
							short_metric = np.array(array_short)
							array_long.append(short_metric)

				prev_row = row
		
			if feature=='rms':
				df = pd.DataFrame(columns=['Rms_x', 'Rms_y', 'Rms_z'])
				long_metric = np.array(array_long)

				df['Rms_x'] = long_metric[:,0:1].flatten()
				df['Rms_y'] = long_metric[:,1:2].flatten()
				df['Rms_z'] = long_metric[:,2:].flatten()

				df.to_csv('features/' + feature + '_feature.csv', index=False)
			else:
				long_metric = np.array(array_long)

				summary_stats(long_metric, feature, pid)
		
		print ('   tac_labels: ', len(tac_labels))
		rename_column_and_concat(pid, tac_labels)
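The length-matching logic for the 'spec_flux' feature exists because utils.spectral_flux presumably compares the current one-second window against the previous one, which requires equal lengths. A plausible sketch of such a helper (the definition is assumed; the project's utils module is not shown):

import numpy as np

def spectral_flux(current, previous):
    # Change in the magnitude spectrum from the previous window to the
    # current one; both inputs must already have the same length.
    cur = np.abs(np.fft.rfft(current))
    prev = np.abs(np.fft.rfft(previous))
    return float(np.sqrt(np.mean((cur - prev) ** 2)))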
Example #20
def test_integration(sa: rebound.SimulationArchive, test_objects: List[str],
                     sim_name: str, test_name: str,
                     verbose: bool = False,
                     make_plot: bool = False) -> \
                     Tuple[np.ndarray, np.ndarray]:
    """Test the integration of the planets against Horizons data"""
    # Start time of simulation
    dt0: datetime = datetime(2000, 1, 1)

    # Dates to be tested
    test_years: List[int] = list(range(2000, 2041))
    test_dates: List[datetime] = [datetime(year, 1, 1) for year in test_years]
    verbose_dates: List[datetime] = [test_dates[-1]]

    # Errors on these dates
    ang_errs: List[np.ndarray] = []
    pos_errs: List[np.ndarray] = []

    # Test the dates
    dt_t: datetime
    for dt_t in test_dates:
        # The date to be tested as a time coordinate
        t: int = (dt_t - dt0).days
        # The reference simulation from Horizons
        sim0: rebound.Simulation = make_sim_horizons(object_names=test_objects,
                                                     epoch=dt_t)
        # The test simulation from the simulation archive
        sim1: rebound.Simulation = sa.getSimulation(t=t, mode='exact')
        # Verbosity flag and screen print if applicable
        report_this_date: bool = (dt_t in verbose_dates) and verbose
        if report_this_date:
            print(f'\nDifference on {dt_t}:')
        # Run the test
        pos_err, ang_err = report_sim_difference(sim0=sim0,
                                                 sim1=sim1,
                                                 object_names=test_objects,
                                                 verbose=report_this_date)
        # Save position and angle errors
        pos_errs.append(pos_err)
        ang_errs.append(ang_err)

    # Summarize errors: RMS across the test objects for each date
    pos_err_rms: np.ndarray = np.array([rms(x) for x in pos_errs])
    ang_err_rms: np.ndarray = np.array([rms(x) for x in ang_errs])

    if make_plot:
        # Chart titles
        sim_name_chart = sim_name.title()
        test_name_chart = (test_name.title()
                           if test_name != 'planets_com' else 'Planets (COM)')

        # Error in the position
        fig, ax = plt.subplots(figsize=[16, 10])
        ax.set_title(
            f'Position Error of {test_name_chart} in {sim_name_chart} Integration'
        )
        ax.set_ylabel('RMS Position Error in AU')
        ax.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
        ax.plot(test_years, pos_err_rms, marker='o', color='red')
        ax.grid()
        fname: str = f'../figs/integration_test/sim_error_{sim_name}_{test_name}_pos.png'
        fig.savefig(fname=fname, bbox_inches='tight')

        # Error in the angle
        fig, ax = plt.subplots(figsize=[16, 10])
        ax.set_title(
            f'Angle Error of {test_name_chart} in {sim_name_chart} Integration'
        )
        ax.set_ylabel('RMS Angle Error vs. Earth in Arcseconds')
        ax.plot(test_years, ang_err_rms, marker='o', color='blue')
        ax.grid()
        fname: str = f'../figs/integration_test/sim_error_{sim_name}_{test_name}_angle.png'
        fig.savefig(fname=fname, bbox_inches='tight')

    if verbose:
        print(f'\nError by Date:')
        print('DATE       : ANG   : AU  ')
        for i, dt_t in enumerate(test_dates):
            print(
                f'{dt_t.date()} : {ang_err_rms[i]:5.3f} : {pos_err_rms[i]:5.3e}'
            )

    # Compute average error
    mean_ang_err = np.mean(ang_err_rms)
    mean_pos_err = np.mean(pos_err_rms)
    print(
        f'\nMean RMS error in {sim_name} integration of {test_name} test objects:'
    )
    print(f'AU   : {mean_pos_err:5.3e}')
    print(f'angle: {mean_ang_err:5.3f}')

    # Return summary of errors in position and angles
    return pos_err_rms, ang_err_rms
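A hedged usage sketch of test_integration; the archive path and object names below are hypothetical and depend on how the simulation archive was produced elsewhere in this project:

# Hypothetical driver code (file name and object list are assumptions).
sa = rebound.SimulationArchive('../data/planets_sim.bin')
pos_err_rms, ang_err_rms = test_integration(
    sa=sa,
    test_objects=['Sun', 'Mercury', 'Venus', 'Earth', 'Mars'],
    sim_name='planets',
    test_name='planets_com',
    verbose=True,
    make_plot=True)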
Example #21
mpl.rc_file(BIN + 'my_matplotlib_rcparams')
for kk in np.arange(4):
    plt.figure()
    n_hist_pred, bin_edges, _ = plt.hist(residuals[:, kk],
                                         label='Residuals',
                                         linestyle=line_style[0],
                                         alpha=alph,
                                         bins=200,
                                         range=range)
    plt.suptitle('Residuals of %s' % train.columns[kk])
    plt.xlabel(residual_strings[kk])  # (train.columns[kk], train.columns[kk], train.columns[kk])
    plt.ylabel('Number of jets')
    ms.sciy()
    #plt.yscale('log')
    rms = utils.rms(residuals[:, kk])
    ax = plt.gca()
    plt.text(.2,
             .5,
             'RMS = %f' % rms,
             bbox={
                 'facecolor': 'white',
                 'alpha': 0.7,
                 'pad': 10
             },
             horizontalalignment='center',
             verticalalignment='center',
             transform=ax.transAxes,
             fontsize=20)
    if save:
        plt.savefig(figures_path + prefix + '_residuals_' + train.columns[kk])
Example #22
    def fit_curve(self, function='gauss', square_size=(11,11)):
        try:
            self.squared_data = self.fill_to_square(*square_size)
        except IndexError:
            raise IndexError("Border object, ignore")

        if self.sobel:
            self.noise_median = np.median(self.background_data)

        if function == 'gauss':
            x = np.linspace(0, square_size[0]-1, square_size[0])
            y = np.linspace(0, square_size[1]-1, square_size[1])
            x, y = np.meshgrid(x, y)

            if self.squared_data.sum() == 0:
                self.correct_fit = False
                return
            moments = self.moments(self.squared_data)
            pred = [*moments, 10, 0]
            popt, pcov = curve_fit(self.gaussian_2d, (x, y), self.squared_data.flatten(), maxfev=100000, xtol=1e-10, ftol=1e-10, p0=pred)
            try:
                if popt is not None and popt[3] is not None and popt[3] != 0:
                    self.correct_fit = True
                else:
                    self.correct_fit = False
                    return
            except NameError:
                self.correct_fit = False
                return
            self.fwhm_x = 2*math.sqrt(2*math.log(2)) * abs(popt[3])
            self.fwhm_y = 2*math.sqrt(2*math.log(2)) * abs(popt[4])
            self.x0 = round(self.low_x + abs(popt[1]),2)
            self.y0 = round(self.low_y + abs(popt[2]),2)
            if self.x0 >= self.image.shape[1] or \
            self.y0 >= self.image.shape[0] or \
            math.isnan(self.fwhm_x) or \
            math.isnan(self.fwhm_y) or \
            self.fwhm_x >= self.image.shape[1] or \
            self.fwhm_y >= self.image.shape[0]:
                self.correct_fit = False
                return
            self.fwhm = "{}|{}".format(abs(round(self.fwhm_x, 2)),abs(round(self.fwhm_y, 2)))

            self.predicted = self.gaussian_2d((x, y), *popt).reshape(*self.squared_data.shape)
            self.rms_res = rms(self.squared_data, self.predicted)
            if self.show_object_fit or self.show_object_fit_separate:
                print('==============')
                print('Root mean error:', self.rms_res)
                print(self.fwhm)
                if self.show_object_fit_separate:
                    show_3d_data(self.squared_data)
                    show_3d_data(self.predicted, color='red')
                else:
                    show_3d_data(self.squared_data, secondary_data=[self.predicted])

        elif function == 'veres':
            self.length = 50 # from self.header_data
            self.width = 0.5 # from self.header_data
            self.rotation = 45 # rotation from self.header_data
            print('################')
            x = np.linspace(0, square_size[0]-1, square_size[0])
            y = np.linspace(0, square_size[1]-1, square_size[1])
            x, y = np.meshgrid(x, y)

            # gaussian
            moments = self.moments(self.squared_data)
            pred = [*moments, 10, 0]
            popt, pcov = curve_fit(self.gaussian_2d, (x, y), self.squared_data.flatten(), maxfev=500000000, xtol=1e-15, ftol=1e-15, p0=pred)
            predicted_gauss = self.gaussian_2d((x, y), *popt).reshape(*self.squared_data.shape)
            show_3d_data(self.squared_data, secondary_data=[predicted_gauss])
            self.x0 = popt[1]
            self.y0 = popt[2]
            # self.width = min(popt[3],popt[4])
            # gaussian
            if popt[1] < 0 or popt[2] < 0:
                raise IndexError("Incorrect gaussian fit")

            new_center = (int(popt[1]), int(popt[2]))
            try:
                self.squared_data = self.fill_to_square(*square_size, new_center)
            except IndexError:
                raise IndexError("Border object, ignore")

            total_flux = self.squared_data - (np.median(self.background_data)+100)
            total_flux = total_flux[total_flux>0].sum() // 3
            prediction = [square_size[0]//2, square_size[1]//2, 1.5, 55, 45, total_flux]
            # popt, pcov = curve_fit(self.veres, np.array((x, y), dtype=int), self.squared_data.flatten(), maxfev=500000000 )
            veres_bounds = ([0,0,0,0,0,0],[self.squared_data.shape[1], self.squared_data.shape[0], 10, 70, 90, total_flux])
            popt, pcov = curve_fit(self.veres, np.array((x, y), dtype=int), self.squared_data.flatten(), maxfev=500000000, p0=prediction, bounds=veres_bounds )
            # print(pcov)

            # self.predicted = self.veres((x, y), *prediction).reshape(*self.squared_data.shape)
            self.predicted = self.veres((x, y), *popt).reshape(*self.squared_data.shape)
            # self.fwhm = "{}|{}".format(abs(round(popt[2], 2)),abs(round(popt[3], 2)))
            self.fwhm = "unknown"

            self.rms_res = rms(self.squared_data, self.predicted)
            if self.show_object_fit:
                print('==============')
                print('Root mean error:', rms(self.squared_data, self.predicted))
                show_3d_data(self.squared_data, secondary_data=[self.predicted])

        self.cumulated_flux = round(self.squared_data.sum())
        skew_mid_x = round(skew(self.squared_data, 1)[square_size[1]//2], 2)
        skew_mid_y = round(skew(self.squared_data, 0)[square_size[0]//2], 2)
        kurtosis_mid_x = round(kurtosis(self.squared_data, 1, fisher=True)[square_size[1]//2], 2)
        kurtosis_mid_y = round(kurtosis(self.squared_data, 0, fisher=True)[square_size[0]//2], 2)
        self.skew = str(skew_mid_x) + "|" + str(skew_mid_y)
        self.kurtosis = str(kurtosis_mid_x) + "|" + str(kurtosis_mid_y)
        self.rms = round(self.rms_res, 3)
        self.psnr = psnr(self.squared_data, self.noise_median, 5)
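Both branches above score the fit with rms(self.squared_data, self.predicted). The two-argument helper is not part of the snippet; it is presumably the root-mean-square residual between the observed stamp and the fitted model, along the lines of:

import numpy as np

def rms(data, predicted) -> float:
    # Root-mean-square residual between the observed stamp and the model.
    d = np.asarray(data, dtype=float)
    p = np.asarray(predicted, dtype=float)
    return float(np.sqrt(np.mean((d - p) ** 2)))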
Example #23
    def getStandardDeviationOfDataFunctionAcrossCompetition(self, dataFunction):
        # Despite the name, this reduces the per-team values with utils.rms
        # (root mean square) rather than a sample standard deviation.
        return utils.rms(map(dataFunction, self.su.teamsWithCalculatedData()))
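Here utils.rms receives a lazy map object rather than an array, so under Python 3 the helper must materialise the iterable before doing any NumPy arithmetic, for example:

import numpy as np

def rms(values) -> float:
    # Accepts any iterable of numbers (list, generator, map object).
    x = np.fromiter(values, dtype=float)
    return float(np.sqrt(np.mean(np.square(x))))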