    def test_running_mean(self):
        x, N = [1, 1, 1], 1
        expected = np.array([1, 1, 1])

        np.testing.assert_allclose(running_mean(x, N), expected)

        x, N = [1, 1, 1], 4
        expected = np.array([1, 1, 1])

        np.testing.assert_allclose(running_mean(x, N), expected)

        x, N = [1, 1, 1], 0
        expected = np.array([1, 1, 1])

        np.testing.assert_allclose(running_mean(x, N), expected)

        x, N = [1, -1, 1, -1, 1, -1], 2
        expected = np.array([1, 0, 0, 0, 0, 0])

        np.testing.assert_allclose(running_mean(x, N), expected)

        x, N = [1, -1, 1, -1, 1, -1], 3
        expected = np.array([1, 0, 1 / 3, -1 / 3, 1 / 3, -1 / 3])

        np.testing.assert_allclose(running_mean(x, N), expected)
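
The test pins down the expected semantics: the output keeps the input's length, element i is the mean of the last min(i + 1, N) samples, and N = 0 or N = 1 degenerates to the identity. A minimal sketch that satisfies these cases (not necessarily the project's own implementation):

import numpy as np

def running_mean(x, N):
    # Trailing moving average over a window of at most N samples;
    # the window is shorter near the start so the output matches len(x).
    x = np.asarray(x, dtype=float)
    N = max(int(N), 1)  # N = 0 is treated as the identity window
    csum = np.cumsum(np.insert(x, 0, 0.0))
    out = np.empty_like(x)
    for i in range(len(x)):
        lo = max(0, i + 1 - N)
        out[i] = (csum[i + 1] - csum[lo]) / (i + 1 - lo)
    return out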
Example 2
def avgComplexCW(crossDict, length=0):
    """
    Plots a rolling average of window length "length" of the magnitude and
    phase of every dict item whose key does not start with 'd'.

    Parameters
    ----------
    crossDict : dictionary
                cross-correlation data (it plots phase, so auto-correlations
                would be boring)
    length :    int (default: length of the longest series in crossDict)
    """

    global fignum

    if length == 0:  # Only run if no useful length is supplied
        for key in crossDict:
            if key[0] != 'd':
                length = max(length, len(crossDict[key]))
    avgMagDict = {}
    avgPhaseDict = {}

    for key in crossDict:
        if key[0] != 'd':
            avgMagDict[key] = utils.running_mean(
                10.*np.log10(np.abs(crossDict[key])), length)
            avgPhaseDict[key] = utils.running_mean(
                np.angle(crossDict[key])*180/np.pi, length)

    fig = plt.figure(fignum)
    fig.suptitle('{}kHz CW - rolling averages of length {}'.format(
        du.get_frequency(), length))
    phase = plt.subplot(211)
    phase.set_title('Phase')
    phase.set_ylabel('Degrees')
    for key in avgPhaseDict:
        phase.plot(avgPhaseDict[key], label=key)
    mag = plt.subplot(212)
    mag.set_title('Magnitude')
    mag.set_ylabel('dB (Voltage)')
    for key in avgMagDict:
        mag.plot(avgMagDict[key], label=key)
    plt.legend()
    plt.show(block=False)

    print('Fignum: {}'.format(fignum))
    fignum += 1
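
A quick usage sketch for context; the keys and data below are illustrative assumptions, and the call relies on the module-level fignum counter and du.get_frequency() being available in the session:

import numpy as np

crossDict = {
    '26': np.exp(1j * np.linspace(0, np.pi, 500)),      # hypothetical pair 2-6
    '27': np.exp(1j * np.linspace(0, np.pi / 2, 500)),  # hypothetical pair 2-7
}
avgComplexCW(crossDict, length=50)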
Example 3
def avgFloatCW(floatDict, unitLabel='', length=0):
    """
    Plots a rolling average of window length "length" of all dict items
    without changing their unit: if the input is in radians, so is the output.

    Parameters
    ----------
    floatDict : dictionary
    unitLabel : string
                label for the y-axis
    length :    int (default: length of the longest series in floatDict)
    """

    global fignum

    fullAvg = False

    if length == 0:  # Only run if no useful length is supplied
        fullAvg = True
        for key in floatDict:
            length = max(length, len(floatDict[key]))
    avgfloatDict = {}
    for key in floatDict:
        avgfloatDict[key] = utils.running_mean(floatDict[key], length)

    plt.rcParams.update({'font.size': 18})

    plt.figure(fignum)
    plt.ylabel(unitLabel)

    if fullAvg:
        for key in avgfloatDict:
            plt.plot(np.repeat(avgfloatDict[key], 2), label=key)
        plt.xlabel('Samples')
        plt.title('{}kHz CW - experiment average'.format(
            du.get_frequency()))
        plt.tick_params(
            axis='x',           # changes apply to the x-axis
            which='both',       # both major and minor ticks are affected
            bottom=False,       # ticks along the bottom edge are off
            top=False,          # ticks along the top edge are off
            labelbottom=False)  # labels along the bottom edge are off
    else:
        for key in avgfloatDict:
            plt.plot(avgfloatDict[key], label=key)
        plt.xlabel('Samples')
        plt.title('{}kHz CW - rolling averages of length {}'.format(
            du.get_frequency(), length))

    plt.legend()
    plt.show(block=False)

    print('Fignum: {}'.format(fignum))
    fignum += 1
Example 4

def run_per_slice_eval(groundtruth, prediction, avg=True, sc=4.):
    downscaled = utils.downscale_manually(groundtruth, sc)
    bicubic = utils.bicubic_up(downscaled, sc, 0)
    prediction, [groundtruth, bicubic] = utils.cut_to_same_size(
        prediction, [groundtruth, bicubic])
    raw_error_arr = se_arr(prediction, groundtruth)
    bicubic_weighting = se_arr(bicubic, groundtruth)
    print(np.max(bicubic_weighting))
    bicubic_weighting = 0.5 + bicubic_weighting / (2 * np.max(bicubic_weighting))
    weighted_error_arr = raw_error_arr * bicubic_weighting

    raw_error_per_slice = evaluate_per_slice(raw_error_arr)
    weighted_error_per_slice = evaluate_per_slice(weighted_error_arr)
    if avg:
        raw_error_per_slice, _ = utils.running_mean(raw_error_per_slice, sc)
        weighted_error_per_slice, _ = utils.running_mean(
            weighted_error_per_slice, sc)
    plt.plot(raw_error_per_slice)
    plt.plot(weighted_error_per_slice)
    plt.show()
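
se_arr and evaluate_per_slice are used but not shown in this snippet; a plausible reading, stated purely as an assumption, is an element-wise squared-error volume reduced to one mean value per slice:

import numpy as np

def se_arr(prediction, groundtruth):
    # Element-wise squared error (assumed)
    return (np.asarray(prediction) - np.asarray(groundtruth)) ** 2

def evaluate_per_slice(error_arr):
    # Mean error per slice along the first axis (assumed)
    return error_arr.reshape(error_arr.shape[0], -1).mean(axis=1)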
Example 5
    def plot_decay(self, fig=None, ax=None, plot_file_path=None):
        """Plots decay graph.

        Args:
            fig: Figure instance. New will be created if None is passed.
            ax: Axis instance. New will be created if None is passed to fig.
            plot_file_path: Save plot figure to a file.

        Returns:
            - Figure
            - Axes
        """
        if fig is None:
            fig, ax = plt.subplots()

        peak_ind, knee_point_ind, noise_floor, window_size = self.decay_params()

        start = max(0, (peak_ind - 2 * (knee_point_ind - peak_ind)))
        end = min(len(self), (peak_ind + 2 * (knee_point_ind - peak_ind)))
        t = np.arange(start, end) / self.fs

        squared = self.data.copy()
        squared /= np.max(np.abs(squared))
        squared = squared[start:end]**2
        avg = running_mean(squared, window_size)
        squared = 10 * np.log10(squared + 1e-24)
        avg = 10 * np.log10(avg + 1e-24)

        ax.plot(t * 1000,
                squared,
                color=COLORS['lightblue'],
                label='Squared impulse response')
        ax.plot(t[window_size // 2:window_size // 2 + len(avg)] * 1000,
                avg,
                color=COLORS['blue'],
                label=f'{window_size / self.fs * 1000:.0f} ms moving average')

        ax.set_ylim([np.min(avg) * 1.2, 0])
        ax.set_xlim([start / self.fs * 1000, end / self.fs * 1000])
        ax.set_xlabel('Time (ms)')
        ax.set_ylabel('Amplitude (dBr)')
        ax.grid(True, which='major')
        ax.set_title('Decay')
        ax.legend(loc='upper right')

        if plot_file_path:
            fig.savefig(plot_file_path)

        return fig, ax
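
The plot aligns avg against t[window_size // 2 : window_size // 2 + len(avg)], which suggests this codebase's running_mean returns only the fully-windowed ('valid') part. A convolution-based sketch under that assumption:

import numpy as np

def running_mean(x, N):
    # 'Valid'-mode moving average: output length is len(x) - N + 1,
    # hence the N // 2 time shift when plotting against the full axis.
    return np.convolve(x, np.ones(N) / N, mode='valid')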
Example 6
    def _create_corrected_sales_and_trend_normalization_df(self):
        N = 28
        list_corrected_sales = []
        norm_factor = []

        for i in range(self.oos_flag_df.shape[0]):
            oos = self.oos_flag_df.iloc[i, :-1 - self.test_size].values.astype(int)
            sales = self.sales_raw_df.iloc[i, 6:].values.astype(int)

            sales_moving_avg = running_mean(sales, N)

            # Arrays without leading zeros (drop everything before the first sale)
            oos_without_beg = oos[np.argmax(sales != 0):]
            sales_mov_avg_without_beg = sales_moving_avg[np.argmax(sales != 0):]

            # Zero out the moving-average sales at timestamps flagged as out of stock
            sales_mov_avg_without_beg = sales_mov_avg_without_beg * (1 - oos_without_beg)
            # Fill zeros with the last known non-zero value
            sales_mov_avg_without_beg = self._fill_zeros_with_last(sales_mov_avg_without_beg)
            # Create corrected sales by substituting the moving average wherever out of stock
            sales_corrected = np.concatenate([
                sales[:np.argmax(sales != 0)],
                np.where(oos_without_beg == 0,
                         sales[np.argmax(sales != 0):],
                         sales_mov_avg_without_beg)
            ])
            list_corrected_sales.append(sales_corrected)
            
            # Normalization factor (28 more values than sales_df so it can also be used for inference)
            sales_mov_avg_corrected = np.concatenate([sales[:np.argmax(sales != 0)], sales_mov_avg_without_beg])
            sales_mov_avg_corrected = np.concatenate([sales_mov_avg_corrected[0] * np.ones(28), sales_mov_avg_corrected])
            normalization = np.where(sales_mov_avg_corrected == 0, 1, 1 / sales_mov_avg_corrected)
            norm_factor.append(normalization)

        self.corrected_sales_df = pd.DataFrame(list_corrected_sales, columns=self.sales_raw_df.columns[6:])
        self.corrected_sales_df = self._add_nan_for_test(self.corrected_sales_df)
        self.corrected_sales_df['id'] = self.sales_df['id']

        self.normalization_factor_df = pd.DataFrame(
            norm_factor, columns=['d_' + str(i + 1) for i in range(norm_factor[0].shape[0])])
        self.normalization_factor_df = self._add_nan_for_test(self.normalization_factor_df)
        self.normalization_factor_df['id'] = self.sales_df['id']
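
_fill_zeros_with_last is not shown here; a common NumPy forward-fill idiom that matches its name and use, offered as an assumption:

    def _fill_zeros_with_last(self, arr):
        # Replace each zero with the most recent non-zero value
        # (forward fill; leading zeros stay zero). Assumed implementation.
        idx = np.arange(len(arr))
        idx[arr == 0] = 0
        idx = np.maximum.accumulate(idx)
        return arr[idx]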
Example 7
                obs = next_obs
                rsum += reward
                j += 1
                if env.verbose:
                    env.render()
                if done:
                    print("Episode : " + str(i) + " rsum=" + str(rsum) + ", " +
                          str(j) + " actions")
                    break

            if experience_replay and len(agent._memory) > agent.batch_size:
                agent.experience_replay(epoch=10, update_target=True)

        rewards.append(envm.get_episode_rewards())

        env.close()

    import pickle as pk

    pk.dump((rewards, parameters), open("dqldata.dat", "wb"))
    for i, r in enumerate(rewards):
        reward = running_mean(r, 50)

        plt.plot(reward,
                 label=f'tau={parameters[i][0]} er={parameters[i][1]}')

    plt.legend(loc="upper left")

    plt.savefig(f'img/{name}gamma{gamma}alpha{alpha}.png')
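
The pickle written above can be reloaded for later analysis; a hypothetical follow-up session:

import pickle as pk

with open("dqldata.dat", "rb") as f:
    rewards, parameters = pk.load(f)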
Example 8
def ionospheric_height(phaseDict, key, mean=200, planar=True):
    """
    Plots the calculated ionospheric height (assuming planar wave arrival) for a single phaseDict element, smoothed with a rolling mean.

    Parameters
    ----------
    phaseDict : dictionary
    key : string
        which element to plot
    mean : int
        rolling average window length (default=200)
    planar : boolean
        if False, calculated using triangle method
    """

    global fignum

    plotDict = {}
    freq = du.get_frequency()
    if du.get_config() != 2:
        print('Continuing, but this calculation only makes sense for config 2; '
              'height calculation for config 1 is not supported.')

    plotDict[key] = utils.running_mean(phaseDict[key], mean)

    length = len(plotDict[key]) - mean + 1

    # Select baseline bl and offset td for the antenna pair named in the key
    if all(item in key for item in ['2', '6']):
        bl, td = 20, 0
    elif all(item in key for item in ['2', '7']):
        bl, td = 40, 0
    elif all(item in key for item in ['2', '8']):
        bl, td = 60, 0
    elif all(item in key for item in ['6', '7']):
        bl, td = 20, 20
    elif all(item in key for item in ['6', '8']):
        bl, td = 40, 20
    elif all(item in key for item in ['7', '8']):
        bl, td = 20, 40
    else:
        raise ValueError('Unrecognised antenna pair in key: {}'.format(key))
    print('bl={}, key={}'.format(bl, key))

    for i in range(length - 1):
        phase = VH.get_phase_diff_as_dist(plotDict[key][i], freq * 1000)
        if planar:
            plotDict[key][i] = VH.find_virtual_height_plane_wave(phase, 19841 + td, bl)
        else:
            plotDict[key][i] = VH.find_virtual_height_triangles(phase, 19841 + td, bl)

    time = np.arange(length) * 0.4  # 0.4 s per sample
    plt.figure(fignum)
    for key in plotDict:
        plt.plot(time, plotDict[key][:length] / 1000, label=key)

    plt.xlabel('Time (s)')
    plt.ylabel('Height (km)')
    plt.title('Ionospheric virtual reflection height at {}MHz (planar wave={})'.format(freq/1000, planar))
    plt.xlim(time[0], time[-1])
    # plt.ylim(0, 400)
    plt.legend()
    plt.show(block=False)

    print('Fignum: {}'.format(fignum))
    fignum += 1
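
VH.get_phase_diff_as_dist presumably converts a phase difference in degrees into a path-length difference via the wavelength at the given frequency; a sketch under that assumption only:

def get_phase_diff_as_dist(phase_deg, freq_hz, c=299792458.0):
    # Path-length difference implied by a phase difference at one frequency
    return (phase_deg / 360.0) * (c / freq_hz)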
Example 9
    def decay_times(self,
                    peak_ind=None,
                    knee_point_ind=None,
                    noise_floor=None,
                    window_size=None):
        """Calculates decay times EDT, RT20, RT30, RT60

        Args:
            peak_ind: Peak index as returned by `decay_params()`. Optional.
            knee_point_ind: Knee point index as returned by `decay_params()`. Optional.
            noise_floor: Noise floor as returned by `decay_params()`. Optional.
            window_size: Moving average window size as returned by `decay_params()`. Optional.

        Returns:
            - EDT, None if SNR < 10 dB
            - RT20, None if SNR < 35 dB
            - RT30, None if SNR < 45 dB
            - RT60, None if SNR < 75 dB

        """
        if peak_ind is None or knee_point_ind is None or noise_floor is None:
            peak_ind, knee_point_ind, noise_floor, window_size = self.decay_params()

        t = np.linspace(0, self.duration(), len(self))

        knee_point_ind -= peak_ind
        data = self.data.copy()
        data = data[peak_ind:]
        data /= np.max(np.abs(data))
        # analytical = np.abs(signal.hilbert(data))  # Hilbert doesn't work well with broadband signals
        analytical = np.abs(data)

        schroeder = np.cumsum(analytical[knee_point_ind::-1]**2 /
                              np.sum(analytical[:knee_point_ind]**2))[:0:-1]
        schroeder = 10 * np.log10(schroeder)

        # Moving average of the squared impulse response
        avg = self.data.copy()
        # Truncate data to avoid unnecessary computations
        # Ideally avg_head is the half window size but this might not be possible if the IR has been truncated already
        # and the peak is closer to the start than half window
        avg_head = min((window_size // 2), peak_ind)
        avg_tail = min((window_size // 2),
                       len(avg) - (peak_ind + knee_point_ind))
        # We need an index offset for average curve if the avg_head is not half window
        avg_offset = window_size // 2 - avg_head
        avg = avg[peak_ind - avg_head:peak_ind + knee_point_ind +
                  avg_tail]  # Truncate
        avg /= np.max(np.abs(avg))  # Normalize
        avg = avg**2
        avg = running_mean(avg, window_size)
        avg = 10 * np.log10(avg + 1e-18)
        # Find offset which minimizes difference between Schroeder backward integral and the moving average
        # ie. offset which moves Schroeder curve to same vertical position as the decay power curve
        # Limit the range 10% -> 90% of Schroeder and avg start and end
        fit_start = max(int(len(schroeder) * 0.1),
                        avg_offset)  # avg could start after 10% of Schroeder
        fit_end = min(int(len(schroeder) * 0.9), avg_offset +
                      (len(avg)))  # avg could end before 90% of Schroeder
        offset = np.mean(
            schroeder[fit_start:fit_end] -
            avg[fit_start - avg_offset:fit_end -
                avg_offset]  # Shift avg indexes by the offset length
        )

        decay_times = dict()
        limits = [(-1, -10, -10, 'EDT'), (-5, -25, -20, 'RT20'),
                  (-5, -35, -30, 'RT30'), (-5, -65, -60, 'RT60')]
        for start_target, end_target, decay_target, name in limits:
            decay_times[name] = None
            if end_target < noise_floor + offset + 10:
                # There has to be at least 10 dB of headroom between the end target point and noise floor,
                # in this case there is not. Current decay time shall remain undefined.
                continue
            try:
                start = np.argwhere(schroeder <= start_target)[0, 0]
                end = np.argwhere(schroeder <= end_target)[0, 0]
            except IndexError:
                # Targets not found on the Schroeder curve
                continue
            slope, intercept, _, _, _ = stats.linregress(
                t[start:end], schroeder[start:end])
            decay_times[name] = decay_target / slope

        return (decay_times['EDT'], decay_times['RT20'],
                decay_times['RT30'], decay_times['RT60'])
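
The schroeder expression above is the Schroeder backward integral: the tail energy remaining after each sample, normalised by the total energy and expressed in dB. The same idea in a standalone form (boundary handling aside), for comparison:

import numpy as np

def schroeder_edc(p):
    # Energy decay curve: cumulative tail energy / total energy, in dB
    tail_energy = np.cumsum(p[::-1] ** 2)[::-1]
    return 10 * np.log10(tail_energy / tail_energy[0])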