Example #1
    clf2.fit(train_X, train_Y)

    dd = DDM()
    warning_index = []
    drift_list = []
    prequential_acc = []
    retraining_time = 0
    total_retraining_samples = 0
    total_added_samples = 0

    ret_ind = []

    for i in range(initial_batches + label_lag + 50, n_batch):

        # Prequential evaluation: score each incoming batch before it can
        # influence any retraining.
        prequential_acc.append(clf2.score(batch_X[i], batch_Y[i]))
        # Feed the detector the error rate of the most recent batch whose
        # labels have already arrived (here a fixed lag of 3 batches).
        if dd.set_input(1 - clf2.score(batch_X[i - 3], batch_Y[i - 3])):
            start_time = time.time()
            print('CHANGE DETECTED at ' + str(i))
            drift_list.append(i)
            warning_index.append(i)
            # Retrain only on the most recent run of consecutive warning
            # batches.
            retrain_idx = keep_last_consecutive(warning_index)
            print('retrain using dataset index ' + str(retrain_idx))
            clf2 = classification_method(n_estimators=20)
            ret_ind.append(retrain_idx)
            train_X = np.concatenate([batch_X[j] for j in retrain_idx])
            train_Y = np.concatenate([batch_Y[j] for j in retrain_idx])
            clf2.fit(train_X, train_Y)
            warning_index = []
            retraining_time += time.time() - start_time
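
The helper keep_last_consecutive is not defined in any of these snippets. A minimal sketch of what it appears to do, assuming it returns the trailing run of consecutive indices, i.e. the most recent uninterrupted warning streak:

    # Hypothetical reconstruction -- not the original implementation.
    def keep_last_consecutive(indices):
        """Return the longest suffix of `indices` whose values are consecutive."""
        if not indices:
            return []
        run = [indices[-1]]
        for idx in reversed(indices[:-1]):
            if idx != run[0] - 1:
                break
            run.insert(0, idx)
        return run

Under this reading, keep_last_consecutive([3, 4, 7, 8, 9]) returns [7, 8, 9], so retraining uses only the batches flagged since the last gap in the warning sequence.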
Example #2

    # (The start of this snippet is garbled in the source: the enclosing `if`
    # condition is lost and the first assignment survives only as a fragment.
    # Both lines below are reconstructions -- the guard is hypothetical, and
    # the slicing statement is rebuilt from the surviving fragment.)
    if len(batch_xt) > 0:  # hypothetical guard
        batch_xt, batch_yt = (batch_xt[:int(sample_count / 8 + 2)],
                              batch_yt[:int(sample_count / 8 + 2)])
        previous_xt.append(batch_xt)
        previous_yt.append(batch_yt)
        prequential_acc.append(
            nn_score(model_f, model_c, [batch_xs], [batch_ys], [batch_xt],
                     [batch_yt], drift_num))
    else:
        # No target data for this batch: record empty placeholders and score
        # on the source batch alone.
        batch_xt, batch_yt = [], []
        previous_xt.append(batch_xt)
        previous_yt.append(batch_yt)
        prequential_acc.append(
            nn_score(model_f, model_c, [batch_xs], [batch_ys], [], [],
                     drift_num))
    no_drift_count += 1
    # Drift test on the lagged prequential error (labels arrive label_lag
    # batches late).
    if len(prequential_acc) > label_lag - 1 and dd.set_input(
            1 - prequential_acc[-label_lag]):
        #first_training_index = np.min([first_training_index, i-label_lag])
        start_time = time.time()
        print('CHANGE DETECTED at ' + str(i))
        drift_list.append(i)
        #warning_index.append(no_drift_count)
        retrain_idx = keep_last_consecutive(warning_index)
        print('retrain using dataset index ' + str(retrain_idx))
        # Reinitialize the feature extractor and classifier, then rebuild the
        # training set from the most recent consecutive warning batches.
        model_f = models.Net_f(task=task, outdim=outdim).cuda()
        model_c = models.Net_c_cway(task=task, outdim=outdim).cuda()
        optimizer_f = torch.optim.Adam(model_f.parameters(), 0.001)
        optimizer_c = torch.optim.Adam(model_c.parameters(), 0.001)
        train_xs, train_ys = [], []
        train_xt, train_yt = [], []
        for j in retrain_idx:
            train_xs.append(previous_xs[j])
            # (remainder of this snippet truncated in the source)
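
The DDM detector driving the loops above is not shown either. A minimal sketch of the interface these examples assume -- set_input() takes an error value such as 1 - accuracy and returns True on drift, while is_warning_zone exposes the warning level -- using the standard two- and three-sigma thresholds of the DDM method of Gama et al.:

    # Hypothetical reconstruction of the detector interface used above.
    import numpy as np

    class DDM:
        def __init__(self, min_inputs=30):
            self.min_inputs = min_inputs
            self._reset()

        def _reset(self):
            self.n = 0
            self.p = 1.0            # running mean of the error signal
            self.s = 0.0            # std of the mean estimate
            self.p_min, self.s_min = np.inf, np.inf
            self.is_warning_zone = False

        def set_input(self, error):
            """Feed one error value; return True when drift is detected."""
            self.n += 1
            self.p += (error - self.p) / self.n
            self.s = np.sqrt(self.p * (1 - self.p) / self.n)
            if self.n < self.min_inputs:
                return False
            if self.p + self.s < self.p_min + self.s_min:
                self.p_min, self.s_min = self.p, self.s
            if self.p + self.s > self.p_min + 3 * self.s_min:  # drift level
                self._reset()
                return True
            self.is_warning_zone = (
                self.p + self.s > self.p_min + 2 * self.s_min)
            return False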
Example #3
        # (This snippet begins mid-block; the `if` matching the `else` below
        # is not shown in the source.)
        # Sample labels from the model's own predictive distribution; the
        # backward pass through this sampled loss lets K-FAC accumulate the
        # curvature statistics for a Monte Carlo estimate of the true Fisher.
        sampled_y = torch.multinomial(
            torch.nn.functional.softmax(outputs.cpu().data, dim=1),
            1).squeeze().cuda()
        loss_sample = criterion_cel(F.softmax(outputs, dim=1), sampled_y)
        loss_sample.backward(retain_graph=True)
        #loss_sample.backward()
        kfac_optim.acc_stats = False
        kfac_optim.zero_grad()  # clear the gradient for computing true-fisher.
        loss.backward()
        fisher_score = kfac_optim.step()
    else:
        fisher_score = 0.0
    qFS_list.append(fisher_score)

    prequential_acc.append(
        nn_score(model_f, model_c, [previous_xtogether[-1]],
                 [previous_ytogether[-1]]))
    # Two independent detectors monitor the two test statistics; on drift,
    # the earliest retraining index is pulled back to the start of the most
    # recent warning streak.
    if not q1_drift:
        if dd.set_input(q1_list[-1]):
            q1_drift = True
            if warning_index:
                first_training_index = np.min(
                    [first_training_index,
                     keep_last_consecutive(warning_index)[0]])
            else:
                first_training_index = np.min([first_training_index, i])
            drift_1.append(i)
        elif dd.is_warning_zone:
            warning_index.append(i)
    if not q2_drift:
        if dd_2.set_input(q2_list[-1]):
            q2_drift = True
            if warning_index_2:
                first_training_index = np.min(
                    [first_training_index,
                     keep_last_consecutive(warning_index_2)[0]])
            else:
                first_training_index = np.min([first_training_index, i])