Example #1
def MIS_transform(batches, data_paras, Qks=None):
    # Reweight every example with multiple importance sampling (MIS).
    # batches[0] is the logged batch; the remaining batches are online batches.
    n = [len(b) for b in batches]
    n[0] = data_paras.cnt_log
    if Qks is None:
        # Only the logging policy Q0 is known; online examples are treated as
        # queried with probability 1, so the weight is (n0 + s) / (n0 * Q0(x) + s).
        s = sum(n[1:])
        return [MData.CExample(e.x, e.y, (n[0] + s) / (n[0] * data_paras.Q0(e.x) + s), e.z)
                for b in batches for e in b]
    else:
        # One query policy Qk per batch; the weight is N / sum_k(n_k * Q_k(x)).
        s = sum(n)
        return [MData.CExample(e.x, e.y, s / sum(nk * Qk(e.x) for nk, Qk in zip(n, Qks)), e.z)
                for b in batches for e in b]
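A minimal usage sketch for the function above, with hypothetical stand-ins for the project's MData.CExample record and the data-parameter object (a namedtuple and a SimpleNamespace here; the real CExample appears to be a mutable class defined elsewhere in the repository). It only illustrates the weight computed when Qks is None.

from collections import namedtuple
from types import SimpleNamespace

# Hypothetical stand-ins, not the project's real classes.
CExample = namedtuple("CExample", ["x", "y", "w", "z"])  # features, label, weight, query flag/index
MData = SimpleNamespace(CExample=CExample)
data_paras = SimpleNamespace(
    cnt_log=100,         # assumed size of the logged batch
    Q0=lambda x: 0.5,    # assumed logging-policy query probability
)

logged = [CExample(x=(0.1,), y=1, w=1.0, z=1) for _ in range(3)]
online = [CExample(x=(0.2,), y=-1, w=1.0, z=1) for _ in range(2)]

# With Qks=None every example gets weight (100 + 2) / (100 * 0.5 + 2) = 102 / 52.
reweighted = MIS_transform([logged, online], data_paras)
print([round(e.w, 3) for e in reweighted])  # [1.962, 1.962, 1.962, 1.962, 1.962]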
Example #2
def MIS_transform(batches, config, env, Qks=None):
    # Same MIS reweighting as in Example #1, but the logged-batch size and the
    # logging policy Q0 are read from the experiment environment `env`.
    n = [len(b) for b in batches]
    n[0] = env.experiment.cnt_log
    if Qks is None:
        s = sum(n[1:])
        return [MData.CExample(e.x, e.y, (n[0] + s) / (n[0] * env.policy.Q0(e.x) + s), e.z)
                for b in batches for e in b]
    else:
        s = sum(n)
        return [MData.CExample(e.x, e.y, s / sum(nk * Qk(e.x) for nk, Qk in zip(n, Qks)), e.z)
                for b in batches for e in b]
Example #3
def batch_train(learning, data, tot_cnt, idx=0):
    # Train with a single pass of per-example opt.gd updates over the queried
    # examples in `data`; returns the learner and its importance-weighted 0-1 loss.
    assert isinstance(learning, CLearning)
    if len(data) == 0:
        return learning, 0
    sum_loss = 0.0
    if idx == 0:
        # First batch: start from the zero weight vector.
        learning.model.w = np.zeros(data[0].x.shape)
    opt_update = opt.gd
    # Keep only queried examples (z > 0) and shuffle them by assigning each a
    # random position in [idx + 1, idx + tot_cnt], then visiting them in that order.
    data = [
        MData.CExample(e.x, e.y, e.w,
                       learning.random.randint(idx + 1, idx + tot_cnt))
        for e in data if e.z > 0
    ]
    data = sorted(data, key=lambda e: e.z)

    for example in data:
        if example.z == 0:
            continue
        else:
            # Advance the step counter by the example's importance weight.
            idx += example.w
        if learning.model.predict(example.x) * example.y <= 0:
            sum_loss += example.w
        opt_update(learning.model, example, idx,
                   learning.parameters.learning_rate)
    # Recompute the importance-weighted 0-1 loss against the final model.
    sum_loss = sum([
        e.w for e in data if e.z > 0 and learning.model.predict(e.x) * e.y <= 0
    ])
    return learning, sum_loss
Example #4
def active_MIS_clip_capped_debias_digest(learning, data_batches, sum_loss, config, env):
    # Process the newest online batch: decide which labels to query, update the
    # clipping threshold, and return the MIS-reweighted training set.
    m = env.experiment.cnt_log    # number of logged examples
    n = env.total_size - m        # number of online examples
    new_batch = []
    # Query policy for this round: qk(x) = 1 iff the logging policy Q0(x) < 2n/m.
    qk = lambda x: 1 if env.policy.Q0(x) < 2 * n / m else 0
    # Weight cap Mk, used below as the fallback clipping threshold.
    Mk = 2 * (m + n) / (m * env.policy.xi0 + n)

    for e in data_batches[-1]:
        e_cp = MData.CExample(e.x, e.y, e.w, e.z)
        if qk(e_cp.x) > 0:
            e_cp.z = 1
            e_cp.w = 1
            # Request the true label only if the variance-based disagreement test
            # fires; otherwise impute the current model's prediction.
            if test_dis_var(e_cp, learning, env.sum_var, env.model.c0, env.total_size, env.clip_th, env.model.learning_rate):
                learning.cnt_labels += 1
            else:
                e_cp.y = learning.model.predict(e_cp.x)
        else:
            e_cp.z = 0
        new_batch.append(e_cp)
    # With too few labels (or a small cap) fall back to Mk; otherwise search for a
    # data-driven clipping threshold.
    if learning.cnt_labels < 10 or Mk < 10:
        env.clip_th = Mk
    else:
        env.clip_th = MLearning.search_clip_threshold(env.qs, (m + n) / m, -n / m, env.model.c0 / (m + n), 0)
    env.Qks.append(qk)
    data_batches[-1] = new_batch
    return MIS_transform(data_batches, config, env, env.Qks), data_batches[-1]
Example #5
def batch_train(learning, data, config, env, idx=0):
    # Variant of batch_train that makes three passes over the batch, tracks the
    # clipped importance-weighted loss and its second moment, and logs diagnostics.
    assert isinstance(learning, MLearning.CLearning)
    if len(data) == 0:
        return 0

    if idx == 0:
        learning.reset(config, env)
        env.idx = 0
    else:
        idx = env.idx
    if config.model.opt_shuffle:
        # Shuffle queried examples by assigning random positions and sorting on them.
        data = [MData.CExample(e.x, e.y, e.w, env.experiment.random.randint(idx, idx + len(data))) for e in data if e.z > 0]
        data = sorted(data, key=lambda e: e.z)

    ws = [np.copy(learning.model.w)]
    env.acc_var_err = []
    # Three passes of update_model over the batch; snapshot the weights after each.
    for i in range(3):
        sum_loss = learning.update_model(data, config, env, env.idx + len(data) * i)
        ws.append(np.copy(learning.model.w))
    # Clipped importance-weighted 0-1 loss and its second moment under the final model.
    sum_loss = sum([e.w for e in data if e.z > 0 and e.w <= env.clip_th and learning.model.predict(e.x) * e.y <= 0])
    sum_var = sum([e.w * e.w for e in data if e.z > 0 and e.w <= env.clip_th and learning.model.predict(e.x) * e.y <= 0])
    env.sum_var = sum_var

    env.idx += len(data) * 3

    # Diagnostics: weight movement per pass, clipping threshold and clipped fraction,
    # and the 10th/50th/90th percentiles of the accumulated variance errors.
    diff_ws_info = ["%.1E" % np.linalg.norm(ws[i] - ws[i + 1]) for i in range(0, len(ws) - 1)]
    clip_info = "()" if env.clip_th > 1 / env.policy.xi0 \
                else " (%.2E, %.2f)" % (1 / env.clip_th, MLearning.calc_clip_percentage(data, env.clip_th))
    var_err_info = "()"
    if len(env.acc_var_err) > 10:
        err = sorted(env.acc_var_err)
        lerr = len(err)
        p1, p2, p3 = lerr // 10, lerr // 2, lerr // 10 * 9
        var_err_info = "((%.2f, %.1E), (%.2f, %.1E), (%.2f, %.1E))" % (err[p1][0], err[p1][1],
                        err[p2][0], err[p2][1], err[p3][0], err[p3][1])
    misc_info = "[(%s), %s, %s]" % (",".join(diff_ws_info), clip_info, var_err_info)
    env.logger.log_misc_info(misc_info)

    return sum_loss
Example #6
def IDBAL(dataset, logger, data_paras, model_paras):
    assert isinstance(dataset, MData.CDataSet)
    assert isinstance(logger, MLogger.CLogger)
    assert isinstance(data_paras, experiments.CDataParameters)
    assert isinstance(model_paras, experiments.CModelParameters)

    model = MModel.CModel()
    model.w = np.zeros(dataset.all_data[0].x.shape)
    learning = CLearning(model, model_paras)
    learning.random = dataset.random

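    # Warm-start: fit the initial model on the first init_log_prop fraction of the logged data.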
    last_tot_cnt = idx = int(data_paras.cnt_log * model_paras.init_log_prop)
    tmp_len = 0
    while (tmp_len < len(dataset.log_data)
           and dataset.log_data[tmp_len].z < last_tot_cnt):
        tmp_len += 1
    learning, sum_loss = batch_train(learning, dataset.log_data[:tmp_len],
                                     last_tot_cnt)
    opt_idx = int(sum([e.w for e in dataset.log_data[:tmp_len] if e.z > 0]))
    alpha = data_paras.cnt_log * (1.0 - model_paras.init_log_prop) / len(dataset.online_data)
    cur_online = 0
    cur_log_cnt = idx
    cur_log_idx = tmp_len
    cur_k = model_paras.batch_sz
    train_data = [
        MData.CExample(e.x, e.y, 1.0 / data_paras.Q0(e.x), e.z)
        for e in dataset.train_data
    ]

    xi = data_paras.xi0
    wmaxk = 1 / xi
    logger.on_start(learning, dataset)
    xis = [xi]
    sum_online_z = 0
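    # Main loop: consume the online stream in batches of size cur_k, pairing each
    # online batch with the logged examples whose index z falls in the same window.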
    while cur_online < len(dataset.online_data):
        cur_log_batch = []
        while (cur_log_idx < len(dataset.log_data) and
               dataset.log_data[cur_log_idx].z <= cur_log_cnt + int(cur_k * alpha)):
            e = dataset.log_data[cur_log_idx]
            cur_log_batch.append(e)
            cur_log_idx += 1
        eta = model_paras.learning_rate

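        # Update the thresholds for the next round: xi_next is the smallest 1.0/e.w
        # among the training examples that pass the disagreement test (1 if none pass),
        # and Qk queries x only where the logging policy satisfies Q0(x) <= xi + 1/alpha.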
        xi_next = min([1.0/e.w for e in train_data[:int((1+alpha)*cur_k)]\
                if test_dis(sum_loss, model_paras.c0*wmaxk, last_tot_cnt, learning.model, e, eta, opt_idx)]+[1])
        wmaxk_next = (alpha + 1) / (alpha * xi_next + 1)
        Qk = lambda x: 1 if data_paras.Q0(x) <= xi + 1 / alpha else 0

        last_log_cnt = cur_log_cnt
        if len(cur_log_batch) != 0:
            cur_log_cnt = cur_log_batch[-1].z
            cur_log_batch = [MData.CExample(e.x, e.y, (1.0+alpha)/(alpha/e.w+Qk(e.x)), e.z) \
                for e in cur_log_batch]

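        # Walk the next cur_k online examples: points outside Qk's support are marked
        # unqueried (z = 0); inside it, the true label is requested only if the
        # disagreement test fires, otherwise the current model's prediction is imputed.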
        cur_online_batch = []
        for tmp in dataset.online_data[cur_online:cur_online + cur_k]:
            cur_data = MData.CExample(
                tmp.x, tmp.y,
                (1.0 + alpha) / (alpha * data_paras.Q0(tmp.x) + Qk(tmp.x)), 1)

            if Qk(cur_data.x) == 0:
                cur_data.z = 0
            else:
                sum_online_z += 1
                if test_dis(sum_loss, model_paras.c0 * wmaxk, last_tot_cnt,
                            learning.model, cur_data, eta, opt_idx):
                    learning.cnt_labels += 1
                else:
                    cur_data.y = 1 if learning.model.predict(cur_data.x) >= 0 else -1
            cur_online_batch.append(cur_data)

        last_tot_cnt = int(len(cur_online_batch) * (1 + alpha))
        learning, sum_loss = batch_train(learning, cur_log_batch + cur_online_batch, \
            last_tot_cnt, opt_idx)
        opt_idx = int(opt_idx + sum([e.w for e in cur_log_batch if e.z > 0]) +
                      sum([e.w for e in cur_online_batch if e.z > 0]))
        idx += last_tot_cnt
        cur_online += len(cur_online_batch)
        cur_k = int(cur_k * model_paras.batch_rate)
        xi = xi_next
        wmaxk = wmaxk_next
        xis.append(xi)
    logger.log_misc_info(
        "[" + ",".join([str((sum_online_z + 1) / (cur_online + 1))] +
                       [str(x) for x in xis]) + "]")
    logger.on_stop(learning, dataset)
    return learning