Code example #1
0
File: run_svm.py  Project: apricotxingya/peer_loss
def run(args):
    """Tune (if needed) and evaluate a C-SVM on the given dataset.

    Args:
        args: dict with at least 'dataset'; optionally 'C1' (class weight)
            and 'verbose'. When 'C1' is absent it is chosen by evaluating
            every candidate in CLASS_WEIGHTS across parallel workers and
            keeping the one with the highest mean score.

    Side effects: configures `logger` and writes mean/max/min/std accuracy
    of the resulting C-SVM runs to it.
    """
    logger.configure(
        f'logs/{args["dataset"]}/svm/{datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")}'
    )
    logger.info(args)

    svm_arg = args.copy()

    # Context manager guarantees the worker pool is shut down
    # (the original created the pool and never closed/joined it).
    with mp.Pool(mp.cpu_count()) as pool:
        if 'C1' not in svm_arg:  # membership test needs no .keys()
            # Mean score per candidate class weight, averaged over workers.
            c1_scores = np.mean(pool.map(find_best_c1, make_arg_list(svm_arg)), 0)
            if svm_arg.get('verbose'):
                for weight, score in zip(CLASS_WEIGHTS, c1_scores):
                    logger.record_tabular(f'[C-SVM] C1 = {weight}', score)
                logger.dump_tabular()
            best_c1 = CLASS_WEIGHTS[c1_scores.argmax()]
            logger.record_tabular('[C-SVM] best C1', best_c1)
            svm_arg['C1'] = best_c1

        results_svm = pool.map(run_c_svm, make_arg_list(svm_arg))

    logger.record_tabular('[C-SVM] accuracy mean', np.mean(results_svm))
    logger.record_tabular('[C-SVM] accuracy max', np.max(results_svm))
    logger.record_tabular('[C-SVM] accuracy min', np.min(results_svm))
    logger.record_tabular('[C-SVM] accuracy std', np.std(results_svm))
    logger.dump_tabular()
Code example #2
0
def run(args):
    """Tune (if needed) and evaluate a PAM classifier on the given dataset.

    Args:
        args: dict with at least 'dataset'; optionally 'margin' and
            'verbose'. When 'margin' is absent it is chosen by evaluating
            every candidate in MARGINS across parallel workers and keeping
            the one with the highest mean score.

    Side effects: configures `logger` and writes mean/max/min/std accuracy
    of the resulting PAM runs to it.
    """
    logger.configure(
        f'logs/{args["dataset"]}/pam/{datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")}'
    )
    logger.info(args)

    pam_arg = args.copy()

    # Context manager guarantees the worker pool is shut down
    # (the original created the pool and never closed/joined it).
    with mp.Pool(mp.cpu_count()) as pool:
        if 'margin' not in pam_arg:  # membership test needs no .keys()
            # Mean score per candidate margin, averaged over workers.
            margin_scores = np.mean(
                pool.map(find_best_margin, make_arg_list(pam_arg)), 0)
            if pam_arg.get('verbose'):
                for margin, score in zip(MARGINS, margin_scores):
                    logger.record_tabular(f'[PAM] margin = {margin}', score)
                logger.dump_tabular()
            best_margin = MARGINS[margin_scores.argmax()]
            logger.record_tabular('[PAM] best margin', best_margin)
            pam_arg['margin'] = best_margin

        results_pam = pool.map(run_pam, make_arg_list(pam_arg))

    logger.record_tabular('[PAM] accuracy mean', np.mean(results_pam))
    logger.record_tabular('[PAM] accuracy max', np.max(results_pam))
    logger.record_tabular('[PAM] accuracy min', np.min(results_pam))
    logger.record_tabular('[PAM] accuracy std', np.std(results_pam))
    logger.dump_tabular()
Code example #3
0
File: run_nn.py  Project: apricotxingya/peer_loss
def find_best_params(kargs):
    """Grid-search batchsize x lr x hidsize for the peer-loss NN.

    Args:
        kargs: dict whose 'batchsize', 'lr' and 'hidsize' entries are lists
            of candidate values ('batchsize_peer' is also a list).

    Returns:
        dict with the chosen scalar value for each hyper-parameter. When an
        actual search ran, it also contains 'acc', the best mean
        final-epoch validation accuracy.
    """
    # Trivial grid: nothing to search, so return before spawning workers.
    # (The original created -- and leaked -- a pool on this early-return path.)
    if (len(kargs['batchsize']) == 1 and len(kargs['lr']) == 1
            and len(kargs['hidsize']) == 1):
        return {
            'batchsize': kargs['batchsize'][0],
            'batchsize_peer': kargs['batchsize_peer'][0],
            'hidsize': kargs['hidsize'][0],
            'lr': kargs['lr'][0],
        }

    args = kargs.copy()
    args['alpha'] = 1.0  # peer-loss weight is held fixed during this search
    results = np.empty(
        (len(kargs['batchsize']), len(kargs['lr']), len(kargs['hidsize'])))

    pool = mp.Pool(mp.cpu_count())
    try:
        for k, hidsize in enumerate(kargs['hidsize']):
            for i, batchsize in enumerate(kargs['batchsize']):
                for j, lr in enumerate(kargs['lr']):
                    args.update({
                        'batchsize': batchsize,
                        'hidsize': hidsize,
                        'lr': lr,
                    })
                    accs = [
                        res['val_acc']
                        for res in pool.map(run_nn_peer, make_arg_list(args))
                    ]
                    # Score = final-epoch validation accuracy, averaged
                    # over the parallel runs.
                    results[i, j, k] = np.mean(accs, axis=0)[-1]
                    if args.get('verbose'):
                        logger.info(f'acc:{results[i, j, k]:4.3}\t'
                                    f'hidsize:{str(hidsize):8}\t'
                                    f'batchsize:{batchsize:2}\t'
                                    f'lr:{lr:6}\t')
    finally:
        # Exception-safe shutdown (the original skipped cleanup on error).
        pool.close()
        pool.join()

    # argmax already works on the flattened array; no reshape needed.
    best_i, best_j, best_k = np.unravel_index(results.argmax(), results.shape)
    return {
        'batchsize': kargs['batchsize'][best_i],
        'hidsize': kargs['hidsize'][best_k],
        'lr': kargs['lr'][best_j],
        'acc': results.max(),
    }
Code example #4
0
File: run_nn.py  Project: apricotxingya/peer_loss
def find_best_alpha_val(kargs):
    """Select the peer-loss weight alpha by validation accuracy.

    Args:
        kargs: dict whose 'alpha' entry is a list of candidate values.

    Returns:
        {'alpha': best_candidate}. When only one candidate is supplied it
        is returned immediately without any training.
    """
    if len(kargs['alpha']) == 1:
        return {'alpha': kargs['alpha'][0]}

    args = kargs.copy()
    scores = []
    pool = mp.Pool(mp.cpu_count())
    try:
        for alpha in kargs['alpha']:
            args['alpha'] = alpha
            accs = [
                res['val_acc']
                for res in pool.map(run_nn_peer_val, make_arg_list(args))
            ]
            # Score = final-epoch validation accuracy, averaged over runs.
            score = np.mean(accs, axis=0)[-1]
            if args.get('verbose'):
                logger.record_tabular(f'[PEER] alpha = {alpha}', score)
            scores.append(score)
    finally:
        # Exception-safe shutdown (the original skipped cleanup on error).
        pool.close()
        pool.join()
    # Only dump when something was recorded; the original dumped an empty
    # table on the non-verbose path.
    if args.get('verbose'):
        logger.dump_tabular()
    return {'alpha': kargs['alpha'][int(np.argmax(scores))]}
Code example #5
0
File: run_nn.py  Project: apricotxingya/peer_loss
def run(args):
    """Train and compare NN loss variants on the given dataset.

    Tunes hyper-parameters, then trains five variants (cross-entropy/base
    loss, peer, surrogate, symmetric, DMI) in parallel, plots their
    test/train accuracy and loss curves, and logs final test accuracies.

    Args:
        args: dict with at least 'dataset' and 'loss'; hyper-parameter
            candidate lists are consumed by find_best_params /
            find_best_alpha_val.
    """
    prefix = datetime.datetime.now().strftime("%y-%m-%d-%H-%M-%S")
    logger.configure(f'logs/{args["dataset"]}/nn/{prefix}')
    logger.info(args)

    nn_arg = args.copy()
    nn_arg.update(find_best_params(nn_arg))
    nn_arg.update(find_best_alpha_val(nn_arg))
    logger.record_tabular('[PEER] batchsize', nn_arg['batchsize'])
    logger.record_tabular('[PEER] learning rate', nn_arg['lr'])
    logger.record_tabular('[PEER] hidsize', nn_arg['hidsize'])
    logger.record_tabular('[PEER] alpha', nn_arg['alpha'])
    logger.dump_tabular()

    nn_arg['seed'] = 1
    # NOTE(review): this single serial call looks like a warm-up/debug
    # leftover; kept for behavioural parity -- confirm it is still needed.
    run_nn_dmi(nn_arg)

    # Pool created only when needed (the original opened it before the
    # hyper-parameter search) and shut down even on error.
    pool = mp.Pool(mp.cpu_count())
    try:
        results_dmi = pool.map(run_nn_dmi, make_arg_list(nn_arg))
        results_surr = pool.map(run_nn_surr, make_arg_list(nn_arg))
        results_nn = pool.map(run_nn, make_arg_list(nn_arg))
        results_peer = pool.map(run_nn_peer, make_arg_list(nn_arg))
        results_symm = pool.map(run_nn_symm, make_arg_list(nn_arg))
    finally:
        pool.close()
        pool.join()

    # Single shared label list: the original kept three copies, one of
    # which misspelled 'symmetric' ('symmtric loss').
    labels = [
        'cross entropy loss', 'peer loss', 'surrogate loss', 'symmetric loss',
        'dmi loss'
    ]
    plot_path = f'logs/{args["dataset"]}/nn/{prefix}'
    all_results = (results_nn, results_peer, results_surr, results_symm,
                   results_dmi)

    def _curves(key):
        # Per-variant list of per-run curves for metric `key`, label order.
        return [[res[key] for res in runs] for runs in all_results]

    test_accs = _curves('val_acc')
    plot(test_accs, labels, title='Accuracy During Testing', path=plot_path)
    plot(_curves('train_acc'), labels, title='Accuracy During Training',
         path=plot_path)
    plot(_curves('loss'), labels, title='Loss', path=plot_path)

    (test_acc_bce, test_acc_peer, test_acc_surr, test_acc_symm,
     test_acc_dmi) = test_accs
    logger.record_tabular('[NN] with peer loss', np.mean(test_acc_peer, 0)[-1])
    logger.record_tabular('[NN] with surrogate loss',
                          np.mean(test_acc_surr, 0)[-1])
    logger.record_tabular('[NN] with symmetric loss',
                          np.mean(test_acc_symm, 0)[-1])
    logger.record_tabular('[NN] with dmi loss', np.mean(test_acc_dmi, 0)[-1])
    logger.record_tabular(f'[NN] with {args["loss"]} loss',
                          np.mean(test_acc_bce, 0)[-1])
    logger.dump_tabular()