Example 1
def valid_epoch(loader, data, netD, batch_size, Y_true, L_true):
    netD.eval()
    Y_pred = []
    for inputs in loader.get_batches(data, batch_size, shuffle=False):
        X_p, X_f = inputs[0], inputs[1]
        batch_size = X_p.size(0)

        X_p_enc, _ = netD(X_p)
        X_f_enc, _ = netD(X_f)
        Y_pred_batch = mmd_util.batch_mmd2_loss(X_p_enc, X_f_enc, sigma_var)
        Y_pred.append(Y_pred_batch.data.cpu().numpy())
    Y_pred = np.concatenate(Y_pred, axis=0)

    L_pred = Y_pred
    fp_list, tp_list, thresholds = sklearn.metrics.roc_curve(L_true, L_pred)
    auc = sklearn.metrics.auc(fp_list, tp_list)
    eval_dict = {
        'Y_pred': Y_pred,
        'L_pred': L_pred,
        'Y_true': Y_true,
        'L_true': L_true,
        'mse': -1,
        'mae': -1,
        'auc': auc
    }
    return eval_dict
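These examples assume numpy (as np), torch, sklearn.metrics, scipy.signal.find_peaks, and the project module mmd_util are imported, and that a CUDA device is available. Examples 1 and 3 also reference sigma_var without defining it; it is presumably a module-level tensor of RBF kernel bandwidths. A minimal sketch of how such a tensor could be built, using the fixed bandwidth list hard-coded in Example 2 (the mmd_util.median_heuristic variant shown in Example 4 would be used the same way):

import torch

# Mixture of RBF bandwidths, as hard-coded in Example 2; a CUDA device is assumed.
sigma_list = [0.0001, 0.01, 0.1, 1, 10]
sigma_var = torch.FloatTensor(sigma_list).cuda()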
Example 2
def gel_all_cps_in_sequence(X, window_size, beta_val, threshold_cp, labels):
    '''Get change points (CPs) in a long sequence using a sliding-window MMD statistic.'''
    mmd = np.zeros(len(X))
    var_array_1 = np.zeros(len(X))
    var_array_2 = np.zeros(len(X))
    for i in range(window_size, len(X) - window_size):
        past_np_median = X[i - window_size:i, :]
        var_array_1[i] = np.median(np.var(X[i - window_size:i, :], axis=0))
        past = torch.from_numpy(X[i - window_size:i, :]).cuda().float()
        past = past.unsqueeze(0)

        fut = torch.from_numpy(X[i:i + window_size, :]).cuda().float()
        fut = fut.unsqueeze(0)
        # labels = labels[ i - p_win : i + f_win]

        # sigma_list = mmd_util.median_heuristic(past_np_median, beta=beta_val)
        sigma_list = [0.0001, 0.01, 0.1, 1, 10]  # fixed bandwidth grid (could be hoisted out of the loop)
        sigma_var = torch.FloatTensor(sigma_list).cuda()
        var_array_2[i] = np.median(np.var(X[i:i + window_size, :], axis=0))
        mmd[i] = mmd_util.batch_mmd2_loss(past, fut,
                                          sigma_var).cpu().numpy()[0]
    # mmd[var_array_1 < 0.1] = 0
    # mmd[var_array_2 < 0.1] = 0
    # change_indices = find_peaks(mmd, distance=window_size, height=threshold_cp)[0]  # for Mackey-Glass
    change_indices = find_peaks(mmd, distance=900, height=0.0250)[0]  # for the Actitracker data
    plot_cps(X, labels, mmd, change_indices)
    '''
    sio.savemat('./../data_files/gen_simulations/mat_files/mackay_switch/cp_detected_switch_mackay.mat',
                {'X': X, 'Y': labels,
                 'Cp_stat': mmd, 'var_arr_1': var_array_1,'var_arr_2': var_array_2,
                 'c_indices': change_indices})
                 '''

    return change_indices
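plot_cps is called here but not shown in these examples. A hypothetical minimal version (the name and argument order are taken from the call above; matplotlib is assumed) could overlay the MMD statistic and the detected indices on the signal:

import matplotlib.pyplot as plt

def plot_cps(X, labels, mmd, change_indices):
    # Hypothetical helper: plot the first signal dimension and the labels on top,
    # the MMD statistic below, and mark the detected change points.
    fig, (ax1, ax2) = plt.subplots(2, 1, sharex=True)
    ax1.plot(X[:, 0], label='signal (dim 0)')
    ax1.plot(labels, label='labels')
    ax2.plot(mmd, label='MMD statistic')
    for idx in change_indices:
        ax2.axvline(idx, color='r', linestyle='--')
    ax1.legend()
    ax2.legend()
    plt.show()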
Example 3
def valid_epoch(loader,
                data,
                model,
                batch_size,
                Y_true,
                L_true,
                model_type='MMD_codespace'):
    model.eval()
    Y_pred = []
    for inputs in loader.get_batches(data, batch_size, shuffle=False):
        X_p, X_f = inputs[0], inputs[1]
        batch_size = X_p.size(0)

        X_p_enc = model.get_hidden(X_p)
        X_f_enc = model.get_hidden(X_f)
        if model_type == 'MMD_codespace':
            Y_pred_batch = mmd_util.batch_mmd2_loss(X_p_enc, X_f_enc,
                                                    sigma_var)
        elif model_type == 'MMD_dataspace':
            Y_pred_batch = mmd_util.batch_mmd2_loss(X_p, X_f, sigma_var)
        else:
            raise NotImplementedError(
                'unknown model %s [MMD_dataspace|MMD_codespace]' %
                (model_type))
        Y_pred.append(Y_pred_batch.data.cpu().numpy())
    Y_pred = np.concatenate(Y_pred, axis=0)

    L_pred = Y_pred
    fp_list, tp_list, thresholds = sklearn.metrics.roc_curve(L_true, L_pred)
    auc = sklearn.metrics.auc(fp_list, tp_list)
    eval_dict = {
        'Y_pred': Y_pred,
        'L_pred': L_pred,
        'Y_true': Y_true,
        'L_true': L_true,
        'mse': -1,
        'mae': -1,
        'auc': auc
    }
    return eval_dict
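As an aside, the roc_curve + auc pair used in Examples 1 and 3 is equivalent to sklearn's roc_auc_score; a self-contained toy check:

import numpy as np
from sklearn.metrics import roc_auc_score

L_true = np.array([0, 0, 1, 1, 0, 1])                # binary change-point labels
L_pred = np.array([0.1, 0.2, 0.8, 0.7, 0.3, 0.9])    # MMD-based scores
print(roc_auc_score(L_true, L_pred))                  # 1.0 for this toy example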
Example 4
def check_cp_at_index(X, index, thresh_sim, thresh_dissim):
    '''Check whether there is a change point in signal X around the provided index.'''
    p_win = 800
    f_win = 800
    win_pair = 600
    buff = 30
    cp = 0
    #X_temp = X[index - 2*p_win : index+2*p_win,:]
    X_temp = X
    #Y_temp = Y[index - 2*p_win : index+2*p_win,:]
    mmd = np.zeros(len(X_temp))
    var_array = np.zeros(len(X_temp))
    for i in range(p_win, len(X_temp) - f_win):
        past_np_median = X_temp[i - p_win:i, :]
        var_array[i] = np.var(np.mean(X_temp[i - p_win:i, :], axis=1))
        past = torch.from_numpy(X_temp[i - p_win:i, :]).cuda().float()
        past = past.unsqueeze(0)

        fut = torch.from_numpy(X_temp[i:i + f_win, :]).cuda().float()
        fut = fut.unsqueeze(0)
        # labels = labels[ i - p_win : i + f_win]

        sigma_list = mmd_util.median_heuristic(past_np_median, beta=5)
        sigma_var = torch.FloatTensor(sigma_list).cuda()

        mmd[i] = mmd_util.batch_mmd2_loss(past, fut,
                                          sigma_var).cpu().numpy()[0]
    mmd[-100:] = 0
    cp_mag = max(mmd)
    change_indices = np.argmax(mmd)
    #plot_cps(X_temp, 0 , mmd, change_indices)
    sim = 0
    X_pair_1 = X[index - win_pair - buff:index - buff, :]
    X_pair_2 = X[index + buff:index + buff + win_pair, :]
    X1_var = np.median(np.var(X_pair_1[0:500, :], axis=0))
    X2_var = np.median(np.var(X_pair_2[-300:, :], axis=0))
    if X1_var < 0.005 or X2_var < 0.005:  # near-constant ("0 and 0") segments
        thresh_dissim = 2

    if max(mmd[:i]) > thresh_dissim:  # i holds the last index filled by the loop
        sim = -1
    elif max(mmd) < thresh_sim:
        sim = 1
    else:
        sim = 0

    return sim
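mmd_util.batch_mmd2_loss belongs to the project and is not shown on this page. For reference, a biased MMD^2 estimate between two windows under a mixture of RBF kernels can be sketched as follows; both the kernel parameterization and the reduction are assumptions about what the project function computes, not its actual implementation:

import torch

def mmd2_rbf_mixture(X, Y, sigma_list):
    # Biased (V-statistic) MMD^2 between samples X (n x d) and Y (m x d) under a
    # mixture of RBF kernels k(a, b) = sum_s exp(-||a - b||^2 / (2 s^2)).
    Z = torch.cat([X, Y], dim=0)
    d2 = torch.cdist(Z, Z) ** 2                      # pairwise squared distances
    n = X.size(0)
    K = sum(torch.exp(-d2 / (2.0 * s ** 2)) for s in sigma_list)
    return K[:n, :n].mean() + K[n:, n:].mean() - 2.0 * K[:n, n:].mean()

# e.g. mmd2_rbf_mixture(past.squeeze(0), fut.squeeze(0), [0.0001, 0.01, 0.1, 1, 10])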
Example 5
def detect_cp_and_get_pairs(signal, win_size):
    # Reshape the signal so that dim 0 indexes time steps and dim 1 the signal dimension.
    series = signal.reshape(-1, 1)
    mmd = np.zeros(series.shape[0])
    sigma_list = mmd_util.median_heuristic(series, beta=0.1)
    sigma_var = torch.FloatTensor(sigma_list).cuda()
    p_win = win_size
    f_win = win_size
    feat_temp = series
    for i in range(win_size, series.shape[0] - f_win):
        past = torch.from_numpy(feat_temp[i - p_win: i, :]).cuda().float()
        past = past.unsqueeze(0)
        fut = torch.from_numpy(feat_temp[i: i + f_win, :]).cuda().float()
        fut = fut.unsqueeze(0)
        mmd[i] = mmd_util.batch_mmd2_loss(past, fut, sigma_var).cpu().numpy()[0]
    change_indices = find_peaks(mmd, height=1.0)[0]
    X1_sim, X2_sim, X1_dis, X2_dis = get_paris_from_cp(series, change_indices, 5)
    return X1_sim, X2_sim, X1_dis, X2_dis
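A hedged usage sketch for Example 5, assuming a 1-D NumPy signal with a single mean shift and a CUDA-enabled PyTorch build (get_paris_from_cp and mmd_util come from the project and are not shown here):

import numpy as np

# Synthetic 1-D signal with a mean shift at t = 1000.
signal = np.concatenate([np.random.randn(1000), np.random.randn(1000) + 3.0])
X1_sim, X2_sim, X1_dis, X2_dis = detect_cp_and_get_pairs(signal, win_size=200)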
Example 6
            batch_size = X_p.size(0)
            bidx += 1

            # real data
            X_p_enc, X_p_dec = netD(X_p)
            X_f_enc, X_f_dec = netD(X_f)

            # fake data
            noise = torch.cuda.FloatTensor(1, batch_size,
                                           args.RNN_hid_dim).normal_(0, 1)
            noise = Variable(noise, volatile=True)  # legacy PyTorch: volatile=True disables gradients, freezing netG
            Y_f = Variable(netG(X_p, X_f, noise).data)
            Y_f_enc, Y_f_dec = netD(Y_f)

            # batchwise MMD2 loss between X_f and Y_f
            D_mmd2 = mmd_util.batch_mmd2_loss(X_f_enc, Y_f_enc, sigma_var)

            # batchwise MMD2 loss between X_p and X_f (real data)
            mmd2_real = mmd_util.batch_mmd2_loss(X_p_enc, X_f_enc, sigma_var)

            # reconstruction loss
            real_L2_loss = torch.mean((X_f - X_f_dec)**2)
            #real_L2_loss = torch.mean((X_p - X_p_dec)**2)
            fake_L2_loss = torch.mean((Y_f - Y_f_dec)**2)
            #fake_L2_loss = torch.mean((Y_f - Y_f_dec)**2) * 0.0

            # update netD
            netD.zero_grad()
            lossD = D_mmd2.mean() - lambda_ae * (
                real_L2_loss + fake_L2_loss) - lambda_real * mmd2_real.mean()
            #lossD = 0.0 * D_mmd2.mean() - lambda_ae * (real_L2_loss + fake_L2_loss) - lambda_real * mmd2_real.mean()
Example 7
for epoch in range(1, args.max_iter + 1):
    trn_loader = Data.get_batches(Data.trn_set,
                                  batch_size=args.batch_size,
                                  shuffle=True)
    for bidx in range(n_batchs):
        netD.train()
        inputs = next(trn_loader)
        X_p, X_f, Y_true = inputs[0], inputs[1], inputs[2]

        netD.zero_grad()
        X_p_enc, X_p_dec = netD(X_p)  # batch_size x seq_len x RNN_hid_dim
        X_f_enc, X_f_dec = netD(X_f)  # batch_size x seq_len x RNN_hid_dim
        batch_size, seq_len, nz = X_p_enc.size()

        # MMD on real data
        MMD2_real = mmd_util.batch_mmd2_loss(X_p_enc, X_f_enc, sigma_var)
        MMD2_real = MMD2_real.mean()

        # MMD on perturbed fake data
        std = torch.std(X_f_enc) / (1. + 0.1 * (epoch - 1))
        noise = torch.cuda.FloatTensor(batch_size, seq_len,
                                       nz).normal_(0, std.data[0])
        noise = Variable(noise)
        X_f_enc_fake = X_f_enc + noise
        MMD2_fake = mmd_util.batch_mmd2_loss(X_f_enc, X_f_enc_fake, sigma_var)
        MMD2_fake = MMD2_fake.mean()

        # reconstruction loss
        L2_loss = torch.mean((X_p_dec - X_p)**2) + torch.mean(
            (X_f_dec - X_f)**2)
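Example 7 is cut off after the reconstruction loss. By analogy with the lossD expression in Example 6, the update might combine these terms roughly as sketched below; the weights lambda_ae and lambda_real, the optimizer object optimizerD, and the sign convention are all assumptions rather than the project's confirmed code:

        # Sketch only, mirroring the structure of lossD in Example 6; lambda_ae,
        # lambda_real and optimizerD are assumed hyper-parameters/objects.
        lossD = MMD2_real - lambda_real * MMD2_fake - lambda_ae * L2_loss
        (-lossD).backward()      # assumed: the discriminator maximizes lossD
        optimizerD.step()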