    batch_xt, batch_yt = [], []
    previous_xt.append(batch_xt)  # empty placeholders keep previous_xt/yt
    previous_yt.append(batch_yt)  # index-aligned with the source batches
    prequential_acc.append(
        nn_score(model_f, model_c, [batch_xs], [batch_ys], [], [],
                 drift_num))
no_drift_count += 1
if len(prequential_acc) > label_lag - 1 and dd.set_input(
        1 - prequential_acc[-label_lag]):
    # first_training_index = np.min([first_training_index, i - label_lag])
    start_time = time.time()
    print('CHANGE DETECTED at ' + str(i))
    drift_list.append(i)
    # warning_index.append(no_drift_count)
    print('retrain using dataset index ' +
          str(keep_last_consecutive(warning_index)))
    # reinitialise the feature extractor and classifier, then retrain on
    # the batches from the last consecutive run of warning indices
    model_f = models.Net_f(task=task, outdim=outdim).cuda()
    model_c = models.Net_c_cway(task=task, outdim=outdim).cuda()
    optimizer_f = torch.optim.Adam(model_f.parameters(), 0.001)
    optimizer_c = torch.optim.Adam(model_c.parameters(), 0.001)
    train_xs, train_ys = [], []
    train_xt, train_yt = [], []
    for j in keep_last_consecutive(warning_index):
        train_xs.append(previous_xs[j])
        train_ys.append(previous_ys[j])
        if len(previous_xt[j]) == 0:  # skip batches with no target-domain data
            continue
        train_xt.append(previous_xt[j])
        train_yt.append(previous_yt[j])
    train_clf(model_f, model_c, train_xs, train_ys, train_xt, train_yt,
              drift_num, optimizer_f, optimizer_c)
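Every example here calls a keep_last_consecutive helper that the snippets never define. A minimal sketch of the behaviour it appears to have, i.e. keeping only the trailing run of consecutive warning indices (the warning zone immediately preceding the detected drift); this implementation is an assumption, not the original helper:

def keep_last_consecutive(indices):
    """Assumed behaviour of the undefined helper used in these examples:
    return the longest suffix of `indices` with consecutive values,
    e.g. [3, 4, 9, 10, 11] -> [9, 10, 11]."""
    if not indices:
        return []
    run = [indices[-1]]
    for idx in reversed(indices[:-1]):
        if idx != run[0] - 1:
            break
        run.insert(0, idx)
    return run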
Example #2
        # backward on the sampled-label loss accumulates the optimizer's
        # curvature statistics; the graph is kept for the second pass
        loss_sample.backward(retain_graph=True)
        # loss_sample.backward()
        kfac_optim.acc_stats = False
        kfac_optim.zero_grad()  # clear the gradient for computing true-fisher.
        loss.backward()
        fisher_score = kfac_optim.step()
    else:
        fisher_score = 0.0
    qFS_list.append(fisher_score)

    prequential_acc.append(
        nn_score(model_f, model_c, [previous_xtogether[-1]],
                 [previous_ytogether[-1]]))
    # two detectors watch two statistics (q1, q2); whichever warns first
    # pins first_training_index, the start of the retraining window
    if not q1_drift:
        if dd.set_input(q1_list[-1]):
            q1_drift = True
            if warning_index:
                first_training_index = np.min(
                    [first_training_index,
                     keep_last_consecutive(warning_index)[0]])
            else:
                first_training_index = np.min([first_training_index, i])
            drift_1.append(i)
        elif dd.is_warning_zone:
            warning_index.append(i)
    if not q2_drift:
        if dd_2.set_input(q2_list[-1]):
            q2_drift = True
            if warning_index_2:
                first_training_index = np.min(
                    [first_training_index,
                     keep_last_consecutive(warning_index_2)[0]])
            else:
                first_training_index = np.min([first_training_index, i])
            drift_2.append(i)
        elif dd_2.is_warning_zone:
            warning_index_2.append(i)
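The detector objects dd and dd_2 expose a two-state protocol: set_input(statistic) returns True once drift is confirmed, and is_warning_zone flags the suspect region just before it. A minimal DDM-style sketch of that interface with illustrative thresholds; the actual detector class behind these snippets is not shown:

import math

class SimpleDDM:
    """DDM-style detector sketch: track the running error rate p and its
    std s; warn at p_min + 2*s_min, signal drift at p_min + 3*s_min."""

    def __init__(self):
        self.n, self.p = 0, 1.0
        self.p_min = self.s_min = float('inf')
        self.is_warning_zone = False

    def set_input(self, error):
        """Feed one error value in [0, 1]; return True once drift is confirmed."""
        self.n += 1
        self.p += (error - self.p) / self.n
        s = math.sqrt(self.p * (1.0 - self.p) / self.n)
        if self.n > 30 and self.p + s < self.p_min + self.s_min:
            self.p_min, self.s_min = self.p, s
        self.is_warning_zone = self.p + s > self.p_min + 2 * self.s_min
        if self.p + s > self.p_min + 3 * self.s_min:
            self.__init__()  # reset internal state after signalling drift
            return True
        return False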
Example #3
    retraining_time = 0
    total_retraining_samples = 0
    total_added_samples = 0

    ret_ind = []

    for i in range(initial_batches + label_lag + 50, n_batch):

        # prequential (test-then-train) accuracy on the newest labelled batch
        prequential_acc.append(clf2.score(batch_X[i], batch_Y[i]))
        # the detector sees a lagged error rate, since labels are assumed
        # to arrive a few batches late (hard-coded to 3 here)
        if dd.set_input(1 - clf2.score(batch_X[i - 3], batch_Y[i - 3])):
            start_time = time.time()
            print('CHANGE DETECTED at ' + str(i))
            drift_list.append(i)
            warning_index.append(i)
            print('retrain using dataset index ' +
                  str(keep_last_consecutive(warning_index)))
            # refit from scratch on the last consecutive run of warning batches
            clf2 = classification_method(n_estimators=20)
            window = keep_last_consecutive(warning_index)
            ret_ind.append(window)
            train_X = np.concatenate([batch_X[j] for j in window])
            train_Y = np.concatenate([batch_Y[j] for j in window])
            clf2.fit(train_X, train_Y)
            warning_index = []
            retraining_time += (time.time() - start_time)
            total_retraining_samples += train_X.shape[0]
        if dd.is_warning_zone:
            warning_index.append(i)
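The loop above is the full detect-warn-retrain pattern: score prequentially, feed the detector the lagged error, and refit on the warning window once drift fires. A self-contained toy run of the same pattern, reusing the SimpleDDM and keep_last_consecutive sketches from earlier; the data, model choice, and change point are all made up for illustration:

import numpy as np
from sklearn.ensemble import RandomForestClassifier

rng = np.random.default_rng(0)

def make_batch(i, size=100):
    # synthetic stream whose labelling rule flips abruptly at batch 30
    X = rng.normal(size=(size, 5))
    w = np.ones(5) if i < 30 else -np.ones(5)
    return X, (X @ w > 0).astype(int)

batches = [make_batch(i) for i in range(60)]
clf = RandomForestClassifier(n_estimators=20).fit(*batches[0])
dd, warning_index = SimpleDDM(), []

for i in range(1, 60):
    X, y = batches[i]
    acc = clf.score(X, y)         # prequential: test before training
    if dd.set_input(1 - acc):     # feed the error rate to the detector
        window = keep_last_consecutive(warning_index) or [i]
        clf = RandomForestClassifier(n_estimators=20).fit(
            np.concatenate([batches[j][0] for j in window]),
            np.concatenate([batches[j][1] for j in window]))
        warning_index = []
    elif dd.is_warning_zone:
        warning_index.append(i)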

### Page-Hinkley (PH) in batch
if args.model == 'ph':
Example #4
        kfac_optim.zero_grad()  # clear the gradient for computing true-fisher.
        loss.backward()
        fisher_score = kfac_optim.step()

        qFS_list.append(fisher_score)

        prequential_acc.append(
            clf2.score(scaler.transform(batch_X[i]), batch_Y[i]))

        # same dual-detector logic as in Example #2
        if not q1_drift:
            if dd.set_input(q1_list[-1]):
                q1_drift = True
                if warning_index:
                    first_training_index = np.min(
                        [first_training_index,
                         keep_last_consecutive(warning_index)[0]])
                else:
                    first_training_index = np.min([first_training_index, i])
                drift_1.append(i)
            elif dd.is_warning_zone:
                warning_index.append(i)
        if not q2_drift:
            if dd_2.set_input(q2_list[-1]):
                q2_drift = True
                if warning_index_2:
                    first_training_index = np.min(
                        [first_training_index,
                         keep_last_consecutive(warning_index_2)[0]])
                else:
                    first_training_index = np.min([first_training_index, i])
                drift_2.append(i)
            elif dd_2.is_warning_zone:
                warning_index_2.append(i)
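Examples #2 and #4 both use the two-pass K-FAC "true Fisher" recipe visible at the top of each snippet: curvature statistics are accumulated from a loss on labels sampled from the model's own predictions, then the gradient is recomputed from the real loss before stepping. A hedged, self-contained sketch of that pattern; the stub optimizer below only mimics the acc_stats flag of common PyTorch K-FAC implementations, and step() returning a Fisher score is specific to this codebase and is not reproduced:

import torch
import torch.nn.functional as F

class StubKFAC(torch.optim.SGD):
    """Stand-in for a K-FAC optimizer: real ones accumulate curvature
    statistics during backward passes made while `acc_stats` is True."""
    def __init__(self, params):
        super().__init__(params, lr=0.01)
        self.acc_stats = True

model = torch.nn.Linear(20, 5)
kfac_optim = StubKFAC(model.parameters())
inputs, targets = torch.randn(32, 20), torch.randint(0, 5, (32,))

outputs = model(inputs)

# pass 1: labels sampled from the model's own predictive distribution;
# backprop this loss so the optimizer can accumulate Fisher statistics
with torch.no_grad():
    sampled = torch.multinomial(torch.softmax(outputs, dim=1), 1).squeeze(1)
kfac_optim.acc_stats = True
F.cross_entropy(outputs, sampled).backward(retain_graph=True)

# pass 2: the true-label loss supplies the gradient that step() applies
kfac_optim.acc_stats = False
kfac_optim.zero_grad()
F.cross_entropy(outputs, targets).backward()
kfac_optim.step()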