Example #1
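# Context assumed by these snippets (not shown in the scraped source):
# json, copy, numpy as np, tnrange from tqdm, and the project modules
# pykinetic, collision, riemann, plus the helpers rkcoeff, qinit, Euler1D.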
def run(dt=0.001, nt=100, scheme="Euler"):
    with open("./configs/vhs_2d.json") as f:
        config = json.load(f)

    vmesh = collision.VMesh(config)
    rp = riemann.advection_1D
    coll_op = collision.FSInelasticVHSCollision(config, vmesh)

    # tau = None

    def coll(x):
        return coll_op(x, device="gpu")

    if scheme == "BEuler":
        solver = pykinetic.PenalizationSolver1D(1e-3, 10, rp, coll)
        solver.time_integrator = "BEuler"
    else:
        solver = pykinetic.BoltzmannSolver1D(1e-4, rp, coll)
        solver.dt = dt
        if "RK" in scheme:
            solver.time_integrator = "RK"
            solver.a = rkcoeff[scheme]["a"]
            solver.b = rkcoeff[scheme]["b"]
            solver.c = rkcoeff[scheme]["c"]
        else:
            solver.time_integrator = scheme

    print("dt is {}".format(solver.dt))
    # solver.order = 1
    solver.lim_type = 2
    solver.bc_lower[0] = pykinetic.BC.periodic
    solver.bc_upper[0] = pykinetic.BC.periodic

    x = pykinetic.Dimension(0.0, 1.0, 100, name="x")
    domain = pykinetic.Domain([x])
    state = pykinetic.State(domain, vdof=vmesh.nv_s)
    state.problem_data["vx"] = vmesh.v_centers[0]
    qinit(state, vmesh)
    sol = pykinetic.Solution(state, domain)

    with open("./configs/config_1d_2d.json") as f:
        domain_config = json.load(f)
    euler = Euler1D(domain_config)
    euler.set_initial(vmesh.get_p(sol.q)[0], 0, 1.0)

    sol_frames = []
    macro_frames = []
    for _ in tnrange(nt):
        solver.evolve_to_time(sol)
        # l2_err = (
        #     np.sqrt(np.sum((sol.q - bkw_fn(vmesh, sol.t)) ** 2))
        # * vmesh.delta
        # )
        # sol_frames.append([copy.deepcopy(sol), l2_err])
        macro_frames.append(vmesh.get_p(sol.q))
        sol_frames.append(copy.deepcopy(sol))

    euler.solve(dt * nt)

    return macro_frames, euler.macros(2)
Example #2
def detect_lang2(NEG_DATAFRAME):
    for i in tnrange(len(NEG_DATAFRAME)):
        try:
            lang = detect(NEG_DATAFRAME['review'][i])
        except Exception:
            # langdetect raises LangDetectException on undetectable text
            continue
        if lang == 'id':
            # .loc avoids the chained-indexing assignment that
            # NEG_DATAFRAME['review'].iloc[i] = ... would trigger
            NEG_DATAFRAME.loc[i, 'review'] = translate_to_english(
                NEG_DATAFRAME['review'][i])
    return NEG_DATAFRAME
Example #3
def run(kn=1, tau=0.1, p=5, dt=0.01, nt=1000, scheme="Euler"):
    with open("./configs/vhs_2d.json") as f:
        config = json.load(f)

    vmesh = collision.VMesh(config)
    coll_op = collision.FSInelasticVHSCollision(config, vmesh)

    # heat-bath strength scales with the Knudsen number
    tau = tau * kn

    print("tau is {}".format(tau))

    def coll(x):
        return coll_op(x, heat_bath=tau, device="gpu")

    if scheme == "BEuler":
        solver = PenalizationSolver0D(
            kn=kn, penalty=p, collision_operator=coll
        )
        solver.time_integrator = "BEuler"
    else:
        solver = pykinetic.BoltzmannSolver0D(kn=kn, collision_operator=coll)
        if "RK" in scheme:
            print("OK")
            solver.time_integrator = "RK"
            solver.a = rkcoeff[scheme]["a"]
            solver.b = rkcoeff[scheme]["b"]
            solver.c = rkcoeff[scheme]["c"]
        else:
            solver.time_integrator = scheme
    solver.dt = dt

    domain = pykinetic.Domain([])
    state = pykinetic.State(domain, vdof=vmesh.nv_s)

    qinit(state, vmesh)

    sol = pykinetic.Solution(state, domain)
    sol_frames = []
    T_frames = []
    for _ in tnrange(nt):
        solver.evolve_to_time(sol)
        # l2_err = (
        #     np.sqrt(np.sum((sol.q - bkw_fn(vmesh, sol.t)) ** 2))
        # * vmesh.delta
        # )
        # sol_frames.append([copy.deepcopy(sol), l2_err])
        T_frames.append(vmesh.get_p(sol.q)[-1])
        sol_frames.append(copy.deepcopy(sol))

    return T_frames, sol_frames, vmesh, coll, solver
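These runners read Runge-Kutta coefficients from an rkcoeff table that is not shown in the snippets. A minimal sketch of what it plausibly contains, assuming the solver expects a Butcher tableau whose a, b, c entries match the attributes set above (the RK4 entry uses the classic fourth-order coefficients; entries for other "RK*" schemes would follow the same layout):

import numpy as np

# Hypothetical rkcoeff table: one Butcher tableau per scheme name.
rkcoeff = {
    "RK4": {
        "a": np.array([[0.0, 0.0, 0.0, 0.0],
                       [0.5, 0.0, 0.0, 0.0],
                       [0.0, 0.5, 0.0, 0.0],
                       [0.0, 0.0, 1.0, 0.0]]),        # stage couplings
        "b": np.array([1 / 6, 1 / 3, 1 / 3, 1 / 6]),  # quadrature weights
        "c": np.array([0.0, 0.5, 0.5, 1.0]),          # stage times
    }
}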
Example #4
File: rbm.py, Project: folrent1896/kipack
def run(kn=1, dt=0.01, nt=1000, eps=(1, 1), coll="fsm", scheme="Euler"):
    with open("./tests/configs/rbm.json") as f:
        config = json.load(f)

    vmesh = collision.VMesh(config)
    if coll == "fsm":
        coll_op = collision.FSInelasticVHSCollision(config, vmesh)

        def coll(x):
            return coll_op(x, device="gpu")

    elif coll == "rbm":
        coll_op = RandomBatchCollision(config, vmesh)
        a, b = eps
        coll_op.eps = a * vmesh.delta**b

        def coll(x):
            return coll_op(x, device="gpu")

    else:
        raise NotImplementedError(
            "Collision method {} is not implemented.".format(coll))

    solver = pykinetic.BoltzmannSolver0D(kn=kn, collision_operator=coll)
    if "RK" in scheme:
        solver.time_integrator = "RK"
        solver.a = rkcoeff[scheme]["a"]
        solver.b = rkcoeff[scheme]["b"]
        solver.c = rkcoeff[scheme]["c"]
    else:
        solver.time_integrator = scheme
    solver.dt = dt

    domain = pykinetic.Domain([])
    state = pykinetic.State(domain, vdof=vmesh.nv_s)

    qinit(state, vmesh)

    sol = pykinetic.Solution(state, domain)
    sol_frames = []
    macro_frames = []
    for _ in tnrange(nt):
        solver.evolve_to_time(sol)
        l2_err = (np.sqrt(np.sum(
            (sol.q - bkw_fn(vmesh, sol.t))**2)) * vmesh.delta)
        sol_frames.append([copy.deepcopy(sol), l2_err])
        macro_frames.append(vmesh.get_p(sol.q))
        # sol_frames.append(copy.deepcopy(sol))

    return macro_frames, sol_frames, vmesh.delta
Example #5
def detect_lang(DATAFRAME):
    '''
    Detect the language of every review using the langdetect library;
    rows whose language cannot be detected are dropped.
    '''
    list_lang = []
    row_x = []
    for i in tnrange(len(DATAFRAME)):
        try:
            list_lang.append(detect(DATAFRAME['review'][i]))
        except Exception:
            # langdetect raises LangDetectException on undetectable text
            row_x.append(i)
    DATAFRAME = DATAFRAME.drop(row_x).reset_index(drop=True)
    DATAFRAME['lang'] = pd.Series(list_lang)
    return DATAFRAME
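A minimal usage sketch for the two language-detection helpers, assuming the langdetect library and a DataFrame with a 'review' column (translate_to_english in Example #2 is a project helper and is not defined here):

import pandas as pd
from langdetect import detect
from tqdm import tnrange  # deprecated alias; tqdm.notebook.trange in newer tqdm

df = pd.DataFrame({'review': ['great product', 'barang bagus sekali', '!!!']})
df = detect_lang(df)   # adds a 'lang' column and drops undetectable rows
print(df[['review', 'lang']])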
Example #6
def run_simulation_lskr(snr_db, num_mc, param1, param2, ncol):
    # Storing the results:
    norm_square_error = np.zeros((num_mc, snr_db.size))
    # Monte Carlo Simulation:
    for realization in tnrange(num_mc):
        # Generating factor matrices (viewing float64 pairs as complex128
        # requires an even number of columns; np.complex_ was removed in
        # NumPy 2.0):
        A = rand(param1, ncol).view(np.complex128)
        B = rand(param2, ncol).view(np.complex128)
        mt_x = tensoralg.kr(A, B)
        for ids, snr in enumerate(snr_db):
            # Applying noise to the matrix X_0:
            x_noise = apply_noise(snr, mt_x)
            # Estimating factor matrices:
            a_hat, b_hat = tensoralg.lskrf(x_noise, param1, param2)
            # Calculating the estimate of X_0:
            x_hat = tensoralg.kr(a_hat, b_hat)
            # Calculating the normalized error:
            norm_square_error[realization, ids] = norm_mse(mt_x, x_hat)
    # Returning the NMSE:
    return norm_square_error.mean(axis=0)
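norm_mse is provided by the surrounding project and is not shown in these examples. A definition consistent with how it is used above (normalized squared error in the Frobenius norm between a reference array and its estimate) would be:

import numpy as np

def norm_mse(x_ref, x_hat):
    # assumed definition: ||X - X_hat||_F^2 / ||X||_F^2
    return (np.linalg.norm(x_ref - x_hat) / np.linalg.norm(x_ref)) ** 2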
Example #7
def arima(price, window, desc):

    pred = np.full(price.shape, np.nan)
    for i in tnrange(window, price.shape[0], desc=desc):

        train = price[i - window:i]

        if np.any(np.isnan(train)):
            continue

        with warnings.catch_warnings():
            # Uninvertible hessian
            warnings.filterwarnings('ignore', 'Inverting')
            # RuntimeWarning: invalid value encountered in true_divide
            warnings.filterwarnings('ignore', 'invalid')
            # RuntimeWarning: overflow encountered in exp
            warnings.filterwarnings('ignore', 'overflow')
            # ConvergenceWarning: Maximum Likelihood optimization failed to converge. Check mle_retvals
            # warnings.filterwarnings('ignore', 'Maximum')
            # RuntimeWarning: divide by zero encountered in true_divide
            warnings.filterwarnings('ignore', 'divide')

            # Initialize model
            model = auto_arima(train,
                               max_p=3,
                               max_q=3,
                               seasonal=False,
                               trace=False,
                               error_action='ignore',
                               suppress_warnings=True)

            # Determine model parameters
            model.fit(train)
            order = model.get_params()['order']

            # Fit and predict
            model = pm.ARIMA(order=order)
            model.fit(train)
            pred[i] = model.predict(n_periods=1)[0]

    return pred
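Example #7 calls pmdarima through two names, auto_arima directly and pm.ARIMA. A minimal preamble consistent with that usage, assuming pmdarima and tqdm are installed:

import warnings
import numpy as np
import pmdarima as pm
from pmdarima import auto_arima
from tqdm import tnrange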
Example #8
def run_simulation_mlskr(snr_db, num_mc, nrows, ncol, flag=False):
    # Storing the results:
    norm_square_error = np.zeros((num_mc, snr_db.size))
    # Monte Carlo Simulation:
    for realization in tnrange(num_mc):
        # Generating factor matrices (np.complex_ was removed in NumPy 2.0):
        mt_list = [rand(rows, ncol).view(np.complex128) for rows in nrows]
        # Matrix X_0
        mt_x = tensoralg.kr(*mt_list)
        for ids, snr in enumerate(snr_db):
            # Applying noise to the matrix X_0:
            x_noise = apply_noise(snr, mt_x)
            # Estimating factor matrices:
            if not flag:  # using HOSVD
                a_hat = tensoralg.mlskrf(x_noise, nrows)
            else:  # using HOOI
                a_hat = tensoralg.mlskrf(x_noise, nrows, flag)
            # Calculating the estimate of X_0:
            x_hat = tensoralg.kr(*a_hat)
            # Calculating the normalized error:
            norm_square_error[realization, ids] = norm_mse(mt_x, x_hat)
    # Returning the NMSE:
    return norm_square_error.mean(axis=0)
Example #9
def run_simulation_als(snr_db, num_mc, rows, rank, tol, it):
    # Storing the results:
    norm_square_error_a = np.zeros((num_mc, snr_db.size))
    norm_square_error_b = np.zeros((num_mc, snr_db.size))
    norm_square_error_c = np.zeros((num_mc, snr_db.size))
    norm_square_error_x = np.zeros((num_mc, snr_db.size))
    # Monte Carlo Simulation:
    for realization in tnrange(num_mc):
        # Generating matrices:
        # mtx = [randn(i, rank*2).view(complex) for i in rows]
        a = randn(rows[0], rank*2).view(complex)
        b = randn(rows[1], rank*2).view(complex)
        c = randn(rows[2], rank*2).view(complex)
        mtx = [a, b, c]
        # Tensor X_0:
        tx = tensoralg.cpd_tensor(mtx)
        for ids, snr in enumerate(snr_db):
            # Applying noise to the tensor X_0:
            tx_noise = apply_noise(snr, tx)
            # Estimating factor matrices:
            mtx_hat = tensoralg.cp_decomp(tx_noise, rank,
                                          eps=tol, num_iter=it)
            # Constructing tensor X_hat:
            tx_hat = tensoralg.cpd_tensor(mtx_hat)
            # Calculating the normalized errors:
            norm_square_error_x[realization, ids] = norm_mse(tx, tx_hat)
            norm_square_error_a[realization, ids] = norm_mse(mtx[0], mtx_hat[0])
            norm_square_error_b[realization, ids] = norm_mse(mtx[1], mtx_hat[1])
            norm_square_error_c[realization, ids] = norm_mse(mtx[2], mtx_hat[2])
    # Returning results:
    results = [norm_square_error_a.mean(axis=0),
               norm_square_error_b.mean(axis=0),
               norm_square_error_c.mean(axis=0),
               norm_square_error_x.mean(axis=0)]

    return results
Example #10
def collect_pediatric_data(n_records):
    '''
    Collect a sample of n_records pediatric adverse-event records from the
    openFDA drug event API.

    Parameters:
    n_records (int): the number of records to collect

    Returns:
    all_pediatric (dataframe): a dataframe containing the collected records
    '''

    # determine how many times to query the API
    n_iterations = math.ceil(n_records / 100)

    # loop over requests pulling 100 records each time,
    # skipping i*100 rows each time so as not to pull the same data twice
    frames = []
    for i in tnrange(n_iterations, desc='Progress'):
        url = f"https://api.fda.gov/drug/event.json?search=patient.patientagegroup:3&limit=100&skip={i*100}"
        json_data = requests.get(url).json()
        frames.append(pd.json_normalize(json_data.get('results')))
    # DataFrame.append was removed in pandas 2.0; concatenate once instead
    all_pediatric = pd.concat(frames, ignore_index=True)

    return all_pediatric
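A usage sketch, assuming math, requests, and pandas are imported and the openFDA endpoint is reachable; because the API is paged in blocks of 100, the result can hold slightly more rows than requested:

pediatric = collect_pediatric_data(250)   # issues 3 requests of 100 records
pediatric = pediatric.head(250)           # trim to the exact sample size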
Example #11
def fit(model, train_dl, valid_dl, loss_fn, opt, epochs=3, 
        pooling_mode='attention', device='cpu', 
        tboard_path=False, model_path=False, csv_sep="\t", map_flag=False, do_validation=1,
        early_stopping_patience=False, model_name="default"):

    num_batch_train = len(train_dl)
    num_batch_valid = len(valid_dl)
    cprint('[INFO]', bc.dgreen, 'Number of batches: {}'.format(num_batch_train))
    cprint('[INFO]', bc.dgreen, 'Number of epochs: {}'.format(epochs))
    
    if tboard_path:
        try:
            from torch.utils.tensorboard import SummaryWriter       
            tboard_writer = SummaryWriter(tboard_path) 
        except ImportError:
            cprint('[WARNING]', bc.dred, 'SummaryWriter could not be imported! Continue without creating a tensorboard.')
            tboard_writer = False
    else:
        tboard_writer = False

    # do_validation == 0 disables validation: a period larger than the
    # number of epochs means ((epoch+1) % do_validation) never hits zero
    if do_validation == 0:
        do_validation = epochs + 2

    print_summary = True
    wtrain_counter = 0
    wvalid_counter = 0

    # initialize early stopping parameters
    es_loss = False
    es_stop = False

    for epoch in tnrange(epochs):
        if train_dl:
            model.train()
            y_true_train = list()
            y_pred_train = list()
            total_loss_train = 0

            t_train = tqdm(iter(train_dl), leave=False, total=num_batch_train)
            t_train.set_description('Epoch {}/{}'.format(epoch+1, epochs))
            for x1, len1, x2, len2, y, train_indxs in t_train:
                # transpose x1 and x2
                x1 = x1.transpose(0, 1)
                x2 = x2.transpose(0, 1)

                # torch.autograd.Variable has been a no-op since PyTorch 0.4;
                # moving the tensors to the target device is enough
                x1 = x1.to(device)
                x2 = x2.to(device)
                y = y.to(device)
                len1 = len1.numpy()
                len2 = len2.numpy()

                # step 1. zero the gradients
                opt.zero_grad()
                # step 2. compute the output
                pred = model(x1, len1, x2, len2, pooling_mode=pooling_mode, device=device)
                if print_summary:
                    # print info about the model only in the first epoch
                    torch_summarize(model)
                    print_summary = False
                # step 3. compute the loss
                loss = loss_fn(pred, y)
                # step 4. use loss to produce gradients
                loss.backward()
                # step 5. use optimizer to take gradient step
                opt.step()

                pred_softmax = F.softmax(pred, dim=-1)
                t_train.set_postfix(loss=loss.data)
                pred_idx = torch.max(pred_softmax, dim=1)[1]

                y_true_train += list(y.cpu().data.numpy())
                y_pred_train += list(pred_idx.cpu().data.numpy())
                total_loss_train += loss.data

                # if tboard_writer:    
                #     # XXX not working at this point, but the results can be plotted here: https://projector.tensorflow.org/
                #     # XXX TODO: change the metadata to the string name, plot embeddings derived for evaluation or test dataset
                #     s1s2_strings = train_dl.dataset.df[train_dl.dataset.df["index"].isin(train_indxs.tolist())]["s1"].to_list()
                #     s1s2_strings.extend(train_dl.dataset.df[train_dl.dataset.df["index"].isin(train_indxs.tolist())]["s2"].to_list())
                #     x1x2_tensors = torch.cat((x1.T, x2.T))
                #     try:
                #         tboard_writer.add_embedding(x1x2_tensors,
                #                                     global_step=wtrain_counter, 
                #                                     metadata=s1s2_strings,
                #                                     tag="Embedding")
                #         tboard_writer.flush()
                #     except:
                #         continue

                wtrain_counter += 1

            train_acc = accuracy_score(y_true_train, y_pred_train)
            train_pre = precision_score(y_true_train, y_pred_train)
            train_rec = recall_score(y_true_train, y_pred_train)
            train_macrof1 = f1_score(y_true_train, y_pred_train, average='macro')
            train_weightedf1 = f1_score(y_true_train, y_pred_train, average='weighted')

            train_loss = total_loss_train / len(train_dl)
            epoch_log = '{} -- Epoch: {}/{}; Train; loss: {:.3f}; acc: {:.3f}; precision: {:.3f}, recall: {:.3f}, macrof1: {:.3f}, weightedf1: {:.3f}'.format(
                    datetime.now().strftime("%m/%d/%Y_%H:%M:%S"), epoch+1, epochs, train_loss, train_acc, train_pre, train_rec, train_macrof1,train_weightedf1)
            cprint('[INFO]', bc.orange, epoch_log)
            if model_path:
                log_message(epoch_log + "\n", mode="a+", filename=os.path.join(model_path, "log.txt"))
            else:
                log_message(epoch_log + "\n", mode="a+")

            if tboard_writer:
                # Record the epoch-average training loss
                tboard_writer.add_scalar('Train/Loss', train_loss, epoch)
                # Record accuracy
                tboard_writer.add_scalar('Train/Accuracy', train_acc, epoch)
                tboard_writer.flush()

        if valid_dl and (((epoch+1) % do_validation) == 0):
            valid_desc = 'Epoch: {}/{}; Valid'.format(epoch+1, epochs)
            valid_loss = test_model(model, 
                                    valid_dl, 
                                    eval_mode="valid", 
                                    valid_desc=valid_desc,
                                    pooling_mode=pooling_mode, 
                                    device=device,
                                    model_path=model_path, 
                                    tboard_writer=tboard_writer,
                                    csv_sep=csv_sep,
                                    epoch=epoch+1,
                                    map_flag=map_flag,
                                    output_loss=True)

            if (not es_loss) or (valid_loss <= es_loss):
                es_loss = valid_loss
                es_model = copy.deepcopy(model)
                es_checkpoint = epoch + 1
                es_counter = 0
            else:
                es_counter += 1
            
            if early_stopping_patience:
                if es_counter >= early_stopping_patience:
                    # --- save the model
                    checkpoint_path = os.path.join(model_path, 
                                                   model_name + '.model')
                    if not os.path.isdir(os.path.dirname(checkpoint_path)):
                        os.makedirs(os.path.dirname(checkpoint_path))
                    cprint('[INFO]', bc.lgreen, 
                           f'saving the model (early stopped) with least valid loss (checkpoint: {es_checkpoint}) at {checkpoint_path}')
                    torch.save(es_model, checkpoint_path)
                    torch.save(es_model.state_dict(), checkpoint_path + "_state_dict")
                    es_stop = True

        if model_path:
            # --- save the model
            cprint('[INFO]', bc.lgreen, 'saving the model')
            checkpoint_path = os.path.join(model_path, f'checkpoint{epoch+1:05d}.model')
            if not os.path.isdir(os.path.dirname(checkpoint_path)):
                os.makedirs(os.path.dirname(checkpoint_path))
            torch.save(model, checkpoint_path)
            torch.save(model.state_dict(), checkpoint_path + "_state_dict")
        
        if es_stop:
            cprint('[INFO]', bc.dgreen, 'Early stopping at epoch: {}, selected epoch: {}'.format(epoch+1, es_checkpoint))
            return 
    
    if model_path and epoch > 0 and es_loss:
        # --- save the model with the least validation loss
        # (es_model exists only if validation ran at least once)
        model_path_save = os.path.join(model_path,
                                       model_name + '.model')
        if not os.path.isdir(os.path.dirname(model_path_save)):
            os.makedirs(os.path.dirname(model_path_save))
        cprint('[INFO]', bc.lgreen,
               f'saving the model with least valid loss (checkpoint: {es_checkpoint}) at {model_path_save}')
        torch.save(es_model, model_path_save)
        torch.save(es_model.state_dict(), model_path_save + "_state_dict")
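The early-stopping bookkeeping in fit (track the best validation loss, reset a patience counter on improvement, stop once the counter reaches the patience) is easier to see in isolation. A minimal sketch of the same logic with hypothetical loss values:

def should_stop(valid_loss, state, patience):
    # update early-stopping state; return True when patience is exhausted
    if state['best_loss'] is None or valid_loss <= state['best_loss']:
        state['best_loss'] = valid_loss
        state['counter'] = 0      # improvement: reset the patience counter
    else:
        state['counter'] += 1     # no improvement: burn one unit of patience
    return state['counter'] >= patience

state = {'best_loss': None, 'counter': 0}
for loss in [0.9, 0.7, 0.71, 0.72, 0.73]:  # hypothetical validation losses
    if should_stop(loss, state, patience=2):
        print('early stop at loss', loss)
        break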
Example #12
def do_TDanalysis(Chipnum,
                  resultpath=None,
                  matname='TDresults',
                  ppd=30,
                  nrseg=32,
                  nrsgm=6,
                  sfreq=50e3):
    '''Main function of this module that executes the noise post-processing.
    It writes the number of rejected segments and output PSDs in the same format as 
    the algorithm by PdV (.mat-file).
    Takes:
    Chipnum -- Chip number to perform the analysis on
    resultpath -- output where the resulting .mat-file is written. Default is NoiseTDanalyse folder.
    matname -- name of the output .mat-file. Default is TDresults.
    ppd -- points per decade to downsample the PSDs (default 30).
    nrseg -- number of segments for the pulse rejection.
    nrsgm -- number of standard deviations to reject a segment in pulse rejection (default 6).
    sfreq -- sample frequency of the data (default 50 kHz, to be removed).
    
    Returns:
    Nothing, but writes the .mat-file in the resultpath under matname.'''

    if resultpath is None:
        resultpath = io.get_datafld() + f'{Chipnum}/NoiseTDanalyse/'

    # find all KIDs, read powers and temperatures from the file names
    KIDPrT = np.array([[int(i.split('\\')[-1].split('_')[0][3:]),
                        int(i.split('\\')[-1].split('_')[1][:-3]),
                        int(i.split('\\')[-1].split('_')[4][3:-4])]
              for i in glob.iglob(io.get_datafld() + \
                                  f'{Chipnum}/Noise_vs_T/TD_2D/*TDmed*.bin')])
    KIDs = np.unique(KIDPrT[:, 0])

    #initialize:
    TDparam = np.empty((1, len(KIDs)),
                       dtype=[('kidnr', 'O'), ('Pread', 'O'), ('Temp', 'O'),
                              ('nrrejectedmed', 'O'), ('fmtotal', 'O'),
                              ('SPRrealneg', 'O'), ('SPRrealpos', 'O'),
                              ('SPRimagneg', 'O'), ('SPRimagpos', 'O'),
                              ('SPPtotal', 'O'), ('SRRtotal', 'O')])

    for i in tnrange(len(KIDs), desc='KID', leave=False):
        TDparam['kidnr'][0, i] = np.array([[KIDs[i]]])

        Preads = np.unique(KIDPrT[KIDPrT[:, 0] == KIDs[i], 1])
        # maximum number of temperatures over all read powers of this KID
        max_nT = np.unique(KIDPrT[KIDPrT[:, 0] == KIDs[i], 1],
                           return_counts=True)[1].max()
        TDparam['Pread'][0, i], TDparam['Temp'][0, i] = np.zeros(
            (2, len(Preads), max_nT))

        for key in ('nrrejectedmed', 'fmtotal',
                    'SPRrealneg', 'SPRrealpos',
                    'SPRimagneg', 'SPRimagpos',
                    'SPPtotal', 'SRRtotal'):
            TDparam[key][0, i] = np.full((len(Preads), max_nT), np.nan,
                                         dtype='O')

        for j in tnrange(len(Preads), desc='Pread', leave=False):
            Temps = np.unique(
                KIDPrT[np.logical_and(KIDPrT[:, 0] == KIDs[i],
                                      KIDPrT[:, 1] == Preads[j]), 2])
            for k in tnrange(len(Temps), desc='Temp', leave=False):
                TDparam['Pread'][0, i][j, k] = Preads[j]
                TDparam['Temp'][0, i][j, k] = Temps[k]

                noisedata = io.get_noisebin(Chipnum, KIDs[i], Preads[j],
                                            Temps[k])
                amp, phase = to_ampphase(noisedata)
                spamp, rejectamp = rej_pulses(amp,
                                              nrsgm=nrsgm,
                                              nrseg=nrseg,
                                              sfreq=sfreq)
                spphase, rejectphase = rej_pulses(phase,
                                                  nrsgm=nrsgm,
                                                  nrseg=nrseg,
                                                  sfreq=sfreq)

                #amp:
                f, SRR = calc_avgPSD(spamp, rejectamp, sfreq=sfreq)
                lsfRR, lsSRR = logsmooth(f, SRR, ppd)
                #phase:
                f, SPP = calc_avgPSD(spphase, rejectphase, sfreq=sfreq)
                lsfPP, lsSPP = logsmooth(f, SPP, ppd)
                #cross:
                f, SPR = calc_avgPSD(spphase,
                                     rejectphase,
                                     spamp,
                                     rejectamp,
                                     sfreq=sfreq)
                lsfPR, lsSPR = logsmooth(f, SPR, ppd)

                #write to TDparam:
                if all(np.logical_and(lsfRR == lsfPP, lsfPP == lsfPR)):
                    TDparam['nrrejectedmed'][0,i][j,k] = \
                        np.logical_or(rejectamp,rejectphase).sum()
                    TDparam['fmtotal'][0, i][j, k] = lsfRR
                    with warnings.catch_warnings():
                        warnings.simplefilter('ignore', RuntimeWarning)
                        TDparam['SPRrealneg'][0,i][j,k] = \
                            10*np.log10(-1*np.clip(np.real(lsSPR),None,0))
                        TDparam['SPRrealpos'][0,i][j,k] = \
                            10*np.log10(np.clip(np.real(lsSPR),0,None))
                        TDparam['SPRimagneg'][0,i][j,k] = \
                            10*np.log10(-1*np.clip(np.imag(lsSPR),None,0))
                        TDparam['SPRimagpos'][0,i][j,k] = \
                            10*np.log10(np.clip(np.imag(lsSPR),0,None))
                    TDparam['SPPtotal'][0, i][j, k] = \
                        10 * np.log10(np.real(lsSPP))
                    TDparam['SRRtotal'][0, i][j, k] = \
                        10 * np.log10(np.real(lsSRR))
                else:
                    warnings.warn('different frequencies, writing nans')
                    for key in ('nrrejectedmed', 'fmtotal',
                                'SPRrealneg', 'SPRrealpos',
                                'SPRimagneg', 'SPRimagpos',
                                'SPPtotal', 'SRRtotal'):
                        TDparam[key][0, i][j, k] = np.nan

    savemat(resultpath + matname + '.mat', {'TDparam': TDparam})
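logsmooth is a project helper that down-samples a PSD to ppd points per decade; its source is not included in the snippet. A plausible sketch, assuming simple averaging over log-spaced frequency bins:

import numpy as np

def logsmooth(f, S, ppd):
    # average the PSD S(f) in log-spaced bins, ppd points per decade
    # (assumed behaviour of the project's helper)
    f, S = np.asarray(f), np.asarray(S)
    keep = f > 0                       # drop the DC bin before taking logs
    f, S = f[keep], S[keep]
    decades = np.log10(f.max() / f.min())
    edges = np.logspace(np.log10(f.min()), np.log10(f.max()),
                        int(decades * ppd) + 1)
    idx = np.digitize(f, edges)
    fs = np.array([f[idx == i].mean() for i in np.unique(idx)])
    Ss = np.array([S[idx == i].mean() for i in np.unique(idx)])
    return fs, Ss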