Example 1
def evaluate(tau4vec, pretrained_higgsId, dataloader, conf, device):
    with torch.no_grad():
        logger.info('start eval mode')
        dataloader.dataset.test()
        test_dataset = dataloader.dataset
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=100,
                                     shuffle=False)
        result = make_output_dict()

        pretrain_conf = conf.sub_task_params.tau4vec.pretrain
        loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
        input_key = pretrain_conf.data.input_key
        target_key = pretrain_conf.data.target_key

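        # Stage 1: score each pretrained tau4vec regressor on its own
        # (first-stage loss and GP-band coverage ratio).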
        for i, model_name in enumerate(FIRST_MODEL_NAME):
            model = tau4vec[i]
            model.to(device)
            tauvec_ins = Tauvec_BaseTask()
            tauvec_ins.initialize(model=model,
                                  dataloaders=test_dataloader,
                                  input_key=input_key,
                                  target_key=target_key,
                                  criterion=loss_func,
                                  device=device)
            (result['LOSS_1ST'][model_name],
             result['RATIO'][model_name]) = tauvec_ins.predict_model()

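        # Stage 2: score every (tau4vec, higgsId) pairing on the same test set.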
        from models import MyMetrics
        for i, j in product(range(3), range(3)):
            pretrain_conf = conf.sub_task_params.higgsId.pretrain
            key = f'{FIRST_MODEL_NAME[i]}:{SECOND_MODEL_NAME[j]}'
            model = pretrained_higgsId[key]
            model.to(device)
            preprocess = tau4vec[i]
            loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
            metrics = set_module([nn, MyMetrics], pretrain_conf, 'metrics')
            activation = pretrain_conf.activation.name
            input_key = pretrain_conf.data.input_key
            target_key = pretrain_conf.data.target_key
            higgsid_ins = HiggsId_BaseTask()
            higgsid_ins.initialize(model=model,
                                   dataloaders=test_dataloader,
                                   input_key=input_key,
                                   target_key=target_key,
                                   criterion=loss_func,
                                   preprocess=preprocess,
                                   device=device,
                                   metrics=metrics,
                                   activation=activation)
            (result['LOSS_2ND'][key],
             result['AUC'][key]) = higgsid_ins.predict_model()

    logger.info(result)
    return result
Example 2
def evaluate(model, conf, dataloader, metrics, result, choice=None):
    with torch.no_grad():
        logger.info('start eval mode')
        model.eval()
        dataloader.dataset.test()
        test_dataset = dataloader.dataset
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=100,
                                     shuffle=False)
        A = range(len(conf.sub_task_params.tau4vec.tasks))
        B = range(len(conf.sub_task_params.higgsId.tasks))
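        # Map each (tau4vec index, higgsId index) tuple to its 'first_second' name.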
        num_name_conb = {
            num: f'{f}_{s}'
            for num, (f, s) in zip(
                product(A, B), product(FIRST_MODEL_NAME, SECOND_MODEL_NAME))
        }
        outputs_data = []
        targets_data = []
        temp_outputs_data = []
        temp_targets_data = []
        for data in test_dataloader:
            inputs = add_device(data['inputs'], DEVICE)
            targets = add_device(data['targets'], DEVICE)
            outputs, now_choice = model(inputs, choice)
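            # outputs[0]/targets[0]: first-stage 4-vector regression;
            # outputs[1]/targets[1]: second-stage classification score.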
            outputs_data.extend(tensor_to_array(outputs[1]))
            targets_data.extend(tensor_to_array(targets[1]))
            temp_outputs_data.extend(tensor_to_array(outputs[0]))
            temp_targets_data.extend(tensor_to_array(targets[0]))
        targets_data = np.array(targets_data)
        outputs_data = np.array(outputs_data)
        auc_score = metrics(targets_data, outputs_data)
        result['AUC'][num_name_conb[choice]].append(auc_score)
        temp_outputs_data = np.array(temp_outputs_data)
        temp_targets_data = np.array(temp_targets_data)
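        # GP_upper/GP_lower are assumed to hold a Gaussian-process +/-2 sigma
        # band for the regression targets, one row per event.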
        upper = np.loadtxt('./logs/GP_upper.csv', delimiter=',')
        lower = np.loadtxt('./logs/GP_lower.csv', delimiter=',')

        c_1 = set_module([torch.nn, MyLoss], conf.SPOS_NAS, 'loss_first')
        c_2 = set_module([torch.nn, MyLoss], conf.SPOS_NAS, 'loss_second')
        loss_1st = c_1(torch.tensor(temp_outputs_data),
                       torch.tensor(temp_targets_data))
        loss_2nd = c_2(torch.tensor(outputs_data), torch.tensor(targets_data))

        from models.sub_task import set_phi_within_valid_range

        def reshape3vec(data):
            return data.reshape(-1, 3)

        temp_outputs_data = set_phi_within_valid_range(
            reshape3vec(temp_outputs_data))
        upper = set_phi_within_valid_range(reshape3vec(upper))
        lower = set_phi_within_valid_range(reshape3vec(lower))
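        # Fraction of events whose full 3-vector lies inside the GP band.
        # The three clauses handle phi wrap-around: after
        # set_phi_within_valid_range the lower bound can exceed the upper
        # bound, so the band is either [lower, upper] or the wrapped interval.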
        ratio = np.sum(
            np.where(((lower < upper)
                      & (temp_outputs_data < upper)
                      & (lower < temp_outputs_data))
                     | ((upper < lower)
                        & (upper < temp_outputs_data)
                        & (lower < temp_outputs_data))
                     | ((upper < lower)
                        & (temp_outputs_data < upper)
                        & (temp_outputs_data < lower)), True,
                     False).all(axis=1)) / (len(temp_outputs_data))
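        # Same band test restricted to the pT component (column 0).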
        only_pt_ratio = np.sum(
            np.where(((lower[:, 0] < upper[:, 0])
                      & (temp_outputs_data[:, 0] < upper[:, 0])
                      & (lower[:, 0] < temp_outputs_data[:, 0]))
                     | ((upper[:, 0] < lower[:, 0])
                        & (upper[:, 0] < temp_outputs_data[:, 0])
                        & (lower[:, 0] < temp_outputs_data[:, 0]))
                     | ((upper[:, 0] < lower[:, 0])
                        & (temp_outputs_data[:, 0] < upper[:, 0])
                        & (temp_outputs_data[:, 0] < lower[:, 0])), True,
                     False)) / (len(temp_outputs_data))

        result['RATIO'][num_name_conb[choice]].append(ratio)
        result['ONLY_PT_RATIO'][num_name_conb[choice]].append(only_pt_ratio)
        result['LOSS_1ST'][num_name_conb[choice]].append(loss_1st.item())
        result['LOSS_2ND'][num_name_conb[choice]].append(loss_2nd.item())
        logger.info(f'[Choice:{now_choice} / auc:{auc_score:.6f}] / ' +
                    f'first_loss: {loss_1st:.6f} / ' +
                    f'ratio: {ratio:.6f} / ' +
                    f'only_pt_ratio: {only_pt_ratio:.6f}')

    logger.info(result)
    return result
Example 3
def main(conf: str, seed: int, gpu_index: int, data_path: str):
    global DEVICE
    conf = load_config(conf)
    if seed is not None:
        conf.seed = seed
    if gpu_index is not None and DEVICE == torch.device('cuda'):
        DEVICE = torch.device(f'cuda:{gpu_index}')
    if data_path is not None:
        conf['dataset']['params']['data_path'] = data_path
    logger.info(DEVICE)
    logger.info(conf)

    set_seed(conf.seed)
    from models import sub_task
    tau4vec = set_task(conf.sub_task_params, 'tau4vec', sub_task)
    logger.info('set_task: tau4vec')
    set_seed(conf.seed)
    higgsId = set_task(conf.sub_task_params, 'higgsId', sub_task)
    logger.info('set_task: higgsId')
    from models import MyDataset
    from models import MyMetrics
    set_seed(conf.seed)
    dataset = set_module([MyDataset], conf, 'dataset')
    set_seed(conf.seed)
    dataloader = DataLoader(dataset, batch_size=100, shuffle=True)
    logger.info('set dataloader')
    # #########################################################################
    # pre-train ###############################################################
    # #########################################################################
    logger.info('----- pretrain[0] start -----')
    pretrain_conf = conf.sub_task_params.tau4vec.pretrain
    for i, sub_model in enumerate(tau4vec):
        logger.info(f'pretrain: [0][{i}]')
        set_seed(conf.seed)
        optimizer = set_module([optim],
                               pretrain_conf,
                               'optimizer',
                               params=sub_model.parameters())
        loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
        metrics = set_module([MyMetrics], pretrain_conf, 'metrics')
        activation = set_module([nn], pretrain_conf, 'activation')
        input_key = pretrain_conf.data.input_key
        target_key = pretrain_conf.data.target_key
        patience = pretrain_conf.patience
        tau4vec[i] = sub_task.pre_train(epochs=pretrain_conf.epochs,
                                        model=sub_model,
                                        dataloader=dataloader,
                                        optimizer=optimizer,
                                        loss_func=loss_func,
                                        input_key=input_key,
                                        target_key=target_key,
                                        device=DEVICE,
                                        patience=patience,
                                        metrics=metrics,
                                        activation=activation)
    logger.info('----- pretrain[0] end -----')
    logger.info('----- pretrain[1] start -----')
    pretrain_conf = conf.sub_task_params.higgsId.pretrain
    for i, sub_model in enumerate(higgsId):
        logger.info(f'pretrain: [1][{i}]')
        set_seed(conf.seed)
        optimizer = set_module([optim],
                               pretrain_conf,
                               'optimizer',
                               params=sub_model.parameters())
        loss_func = set_module([nn], pretrain_conf, 'loss_func')
        metrics = set_module([MyMetrics], pretrain_conf, 'metrics')
        activation = set_module([nn], pretrain_conf, 'activation')
        input_key = pretrain_conf.data.input_key
        target_key = pretrain_conf.data.target_key
        patience = pretrain_conf.patience
        higgsId[i] = sub_task.pre_train(epochs=pretrain_conf.epochs,
                                        model=sub_model,
                                        dataloader=dataloader,
                                        optimizer=optimizer,
                                        loss_func=loss_func,
                                        input_key=input_key,
                                        target_key=target_key,
                                        device=DEVICE,
                                        patience=patience,
                                        metrics=metrics,
                                        activation=activation)
    logger.info('----- pretrain[1] end -----')

    # #########################################################################
    # #########################################################################
    logger.info('copy the pretrain models')
    pre_trained_tau4vec = set_task(conf.sub_task_params, 'tau4vec', sub_task)
    pre_trained_higgsId = set_task(conf.sub_task_params, 'higgsId', sub_task)
    pre_trained_model = [pre_trained_tau4vec, pre_trained_higgsId]
    task = [tau4vec, higgsId]
    for num_task, sub in enumerate(task):
        for num_model in range(len(sub)):
            pre_trained_model[num_task][num_model].load_state_dict(
                deepcopy(task[num_task][num_model].state_dict()))
    # #########################################################################
    # #########################################################################

    logger.info('----- SPOS-NAS start -----')
    sposnas_conf = conf.SPOS_NAS

    def make_output_dict():
        return {
            'X': [],
            'AUC': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'LOSS_1ST': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'LOSS_2ND': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'RATIO': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'ONLY_PT_RATIO': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
        }

    # evaluate only pre-train model
    loss_func = [
        set_module([nn, MyLoss], sposnas_conf, 'loss_first'),
        set_module([nn, MyLoss], sposnas_conf, 'loss_second')
    ]
    loss_weight = [0.5, 0.5]
    metrics = get_module([MyMetrics], 'Calc_Auc')()
    from models.SPOS_NAS import SPOS
    model = SPOS(task=task, loss_func=loss_func, loss_weight=loss_weight)
    model.to(DEVICE)
    logger.info('evaluate only pre-train model')
    dummy = make_output_dict()
    for now_choice in product(range(3), range(3)):
        pre_train_result = evaluate(model, conf, dataloader, metrics, dummy,
                                    now_choice)

    output_dict = make_output_dict()
    X_list = [0.0, 0.1, 0.5]
    for X in (np.array(X_list)).round(10):
        output_dict['X'].append(X)
        logger.info(f'loss_ratio: {X:.6f} (loss_1*X + loss_2*(1-X)) start')
        set_seed(conf.seed)

        def initialize_pretrain_weight():
            logger.info('load pretrain models...')
            for num_task, sub in enumerate(task):
                for num_model in range(len(sub)):
                    task[num_task][num_model].load_state_dict(
                        deepcopy(pre_trained_model[num_task]
                                 [num_model].state_dict()))
            logger.info('load pretrain models done')

        logger.info('set model parameters...')
        loss_func = [
            set_module([nn, MyLoss], sposnas_conf, 'loss_first'),
            set_module([nn, MyLoss], sposnas_conf, 'loss_second')
        ]
        loss_weight = [X, 1. - X]
        metrics = get_module([MyMetrics], 'Calc_Auc')()

        for now_choice in product(range(3), range(3)):
            initialize_pretrain_weight()
            model = SPOS(task=task,
                         loss_func=loss_func,
                         loss_weight=loss_weight)
            model.to(DEVICE)
            optimizer = set_module([optim],
                                   sposnas_conf,
                                   'optimizer',
                                   params=model.parameters())
            scheduler = set_module([optim.lr_scheduler],
                                   sposnas_conf,
                                   'scheduler',
                                   optimizer=optimizer)
            logger.info('set model parameters done')
            logger.info('fit model...')
            model.fit(epochs=sposnas_conf.epochs,
                      dataloader=dataloader,
                      device=DEVICE,
                      optimizer=optimizer,
                      scheduler=scheduler,
                      patience=sposnas_conf.patience,
                      choice=now_choice)
            logger.info('fit model done')
            logger.info('eval model...')
            output_dict = evaluate(model, conf, dataloader, metrics,
                                   output_dict, now_choice)
            logger.info('eval model done')

    logger.info(f'seed: {conf.seed}/ pretrain result: {pre_train_result}')
    logger.info(f'seed: {conf.seed}/ final result: {output_dict}')

    logger.info('all train and eval step are done')

    logger.info('plot results...')
    logger.info('plot auc...')
    import matplotlib.pyplot as plt
    plt.style.use('seaborn-darkgrid')
    import pandas as pd
    df = pd.DataFrame(output_dict['AUC'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.xlabel('X')
    plt.ylabel('AUC')
    plt.savefig(f'grid_auc_{conf.seed}.png')
    plt.close()

    logger.info('plot loss_2ND...')
    df = pd.DataFrame(output_dict['LOSS_2ND'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.xlabel('X')
    plt.ylabel('LOSS_2ND')
    plt.savefig(f'grid_loss_2nd_{conf.seed}.png')
    plt.close()

    logger.info('plot loss_1ST...')
    df = pd.DataFrame(output_dict['LOSS_1ST'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.xlabel('X')
    plt.ylabel('LOSS_1ST')
    plt.savefig(f'grid_loss_1st_{conf.seed}.png')
    plt.close()

    logger.info('plot ratios...')
    df = pd.DataFrame(output_dict['ONLY_PT_RATIO'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.ylabel('ratio')
    plt.savefig(f'grid_only_pt_ratio_{conf.seed}.png')
    plt.close()
    df = pd.DataFrame(output_dict['RATIO'], index=output_dict['X'])
    df = df.rename(
        columns={
            f'{f}_{s}': f'{f}:{s}'
            for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
        })
    df.plot()
    plt.ylabel('ratio')
    plt.savefig(f'grid_ratio_{conf.seed}.png')
    plt.close()
    logger.info('plot results done')
Example 4
def main(conf: str, seed: int, gpu_index: int, data_path: str):
    global device
    conf = load_config(conf)
    if seed is not None:
        conf.seed = seed
    if gpu_index is not None and device == torch.device('cuda'):
        device = torch.device(f'cuda:{gpu_index}')
    if data_path is not None:
        conf['dataset']['params']['data_path'] = data_path
    logger.info(device)
    logger.info(conf)

    results = {}
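    # Repeat the full pretrain/evaluate cycle for 10 consecutive seeds.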
    for add_seed in range(10):
        conf.seed += 1
        set_seed(conf.seed)
        from models import sub_task
        tau4vec = set_task(conf.sub_task_params, 'tau4vec', sub_task)
        logger.info('set_task: tau4vec')
        set_seed(conf.seed)
        higgsId = set_task(conf.sub_task_params, 'higgsId', sub_task)
        logger.info('set_task: higgsId')
        # #####################################################################
        # #####################################################################
        logger.info('copy the pretrain models')
        # Fresh instances are needed here: [tau4vec, higgsId] alone would only
        # alias the task lists, and the initial weights would be overwritten
        # during pretraining.
        from copy import deepcopy
        pre_model = [set_task(conf.sub_task_params, 'tau4vec', sub_task),
                     set_task(conf.sub_task_params, 'higgsId', sub_task)]
        task = [tau4vec, higgsId]
        for num_task, sub in enumerate(task):
            for num_model in range(len(sub)):
                pre_model[num_task][num_model].load_state_dict(
                    deepcopy(task[num_task][num_model].state_dict()))
        # #####################################################################
        # #####################################################################
        from models import MyDataset
        from models import MyMetrics
        set_seed(conf.seed)
        dataset = set_module([MyDataset], conf, 'dataset')
        set_seed(conf.seed)
        dataloader = DataLoader(dataset, batch_size=100, shuffle=True)
        logger.info('set dataloader')
        # #####################################################################
        # pre-train ###########################################################
        # #####################################################################
        logger.info('----- pretrain[0] start -----')
        pretrain_conf = conf.sub_task_params.tau4vec.pretrain
        for i, sub_model in enumerate(tau4vec):
            logger.info(f'pretrain: [0][{i}]')
            set_seed(conf.seed)
            loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
            input_key = pretrain_conf.data.input_key
            target_key = pretrain_conf.data.target_key
            optimizer = pretrain_conf.optimizer.name
            epochs = pretrain_conf.epochs
            patience = pretrain_conf.patience
            lr = pretrain_conf.optimizer.params.lr
            tauvec_base = Tauvec_BaseTask()
            model = sub_model.to(device)
            tauvec_base.initialize(model=model,
                                   dataloaders=dataloader,
                                   input_key=input_key,
                                   target_key=target_key,
                                   criterion=loss_func,
                                   device=device,
                                   optimizer=optimizer,
                                   hp_epochs=epochs,
                                   lr=lr,
                                   patience=patience)
            tauvec_base.execute()
            tau4vec[i] = tauvec_base.get_model()
        logger.info('----- pretrain[0] end -----')

        logger.info('----- pretrain[1] start -----')
        pretrain_conf = conf.sub_task_params.higgsId.pretrain
        pretrained_higgsId = {}
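        # Pretrain one higgsId classifier (j) per tau4vec preprocessor (i),
        # restoring the initial weights before each of the 3x3 combinations.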
        for i, j in product(range(3), range(3)):
            logger.info(f'pretrain: [{i}][{j}]')
            set_seed(conf.seed)
            model = higgsId[j]
            model.load_state_dict(pre_model[1][j].state_dict())
            model.to(device)
            loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
            metrics = set_module([nn, MyMetrics], pretrain_conf, 'metrics')
            activation = pretrain_conf.activation.name
            input_key = pretrain_conf.data.input_key
            target_key = pretrain_conf.data.target_key
            optimizer = pretrain_conf.optimizer.name
            epochs = pretrain_conf.epochs
            patience = pretrain_conf.patience
            lr = pretrain_conf.optimizer.params.lr
            higgsid_base = HiggsId_BaseTask()
            higgsid_base.initialize(model=model,
                                    dataloaders=dataloader,
                                    input_key=input_key,
                                    target_key=target_key,
                                    criterion=loss_func,
                                    preprocess=tau4vec[i],
                                    device=device,
                                    optimizer=optimizer,
                                    metrics=metrics,
                                    activation=activation,
                                    hp_epochs=epochs,
                                    lr=lr,
                                    patience=patience)
            higgsid_base.execute()
            pretrained_higgsId[
                f'{FIRST_MODEL_NAME[i]}:{SECOND_MODEL_NAME[j]}'
            ] = higgsid_base.get_model()
        logger.info('----- pretrain[1] end -----')
        results[conf.seed] = evaluate(tau4vec, pretrained_higgsId, dataloader,
                                      conf, device)
        logger.info(results)

    logger.info(results)
    logger.info('all train and eval step are done')
Example 5
def main(conf: str, seed: int, gpu_index: int, data_path: str, event: int):
    global DEVICE, FIRST_MODEL_NAME, SECOND_MODEL_NAME, MODELNAME_CHOICE_INDEX
    conf = load_config(conf)
    if seed is not None:
        conf.seed = seed
    if gpu_index is not None and DEVICE == torch.device('cuda'):
        DEVICE = torch.device(f'cuda:{gpu_index}')
    if data_path is not None:
        conf['dataset']['params']['data_path'] = data_path
    if event is not None:
        conf['dataset']['params']['max_events'] = event
    logger.info(DEVICE)
    logger.info(conf)

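    # Build display names from the task configs, e.g. 'Tau4vec_MLPTask' ->
    # 'MLP-0' (the '[:-4]' is assumed to strip a trailing 'Task' suffix).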
    FIRST_MODEL_NAME = [
        i['name'].split('_')[-1][:-4] + f'-{num}'
        for num, i in enumerate(conf.sub_task_params.tau4vec.tasks)
    ]
    SECOND_MODEL_NAME = [
        i['name'].split('_')[-1][:-4] + f'-{num}'
        for num, i in enumerate(conf.sub_task_params.higgsId.tasks)
    ]
    MODELNAME_CHOICE_INDEX = {
        f'{n1}_{n2}': v
        for (n1, n2), v in zip(
            product(FIRST_MODEL_NAME, SECOND_MODEL_NAME),
            product(range(len(FIRST_MODEL_NAME)), range(len(
                SECOND_MODEL_NAME))))
    }

    set_seed(conf.seed)
    from models import sub_task
    tau4vec = set_task(conf.sub_task_params, 'tau4vec', sub_task)
    logger.info('set_task: tau4vec')
    set_seed(conf.seed)
    higgsId = set_task(conf.sub_task_params, 'higgsId', sub_task)
    logger.info('set_task: higgsId')
    from models import MyDataset
    from models import MyMetrics
    set_seed(conf.seed)
    dataset = set_module([MyDataset], conf, 'dataset')
    set_seed(conf.seed)
    dataloader = DataLoader(dataset, batch_size=100, shuffle=True)
    logger.info('set dataloader')
    # #########################################################################
    # pre-train ###############################################################
    # #########################################################################
    logger.info('----- pretrain[0] start -----')
    pretrain_conf = conf.sub_task_params.tau4vec.pretrain
    for i, sub_model in enumerate(tau4vec):
        logger.info(f'pretrain: [0][{i}]')
        set_seed(conf.seed)
        optimizer = set_module([optim],
                               pretrain_conf,
                               'optimizer',
                               params=sub_model.parameters())
        loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
        metrics = set_module([MyMetrics], pretrain_conf, 'metrics')
        activation = set_module([nn], pretrain_conf, 'activation')
        input_key = pretrain_conf.data.input_key
        target_key = pretrain_conf.data.target_key
        patience = pretrain_conf.patience
        tau4vec[i] = sub_task.pre_train(epochs=pretrain_conf.epochs,
                                        model=sub_model,
                                        dataloader=dataloader,
                                        optimizer=optimizer,
                                        loss_func=loss_func,
                                        input_key=input_key,
                                        target_key=target_key,
                                        device=DEVICE,
                                        patience=patience,
                                        metrics=metrics,
                                        activation=activation)
    logger.info('----- pretrain[0] end -----')
    logger.info('----- pretrain[1] start -----')
    pretrain_conf = conf.sub_task_params.higgsId.pretrain
    for i, sub_model in enumerate(higgsId):
        logger.info(f'pretrain: [1][{i}]')
        set_seed(conf.seed)
        optimizer = set_module([optim],
                               pretrain_conf,
                               'optimizer',
                               params=sub_model.parameters())
        loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
        metrics = set_module([MyMetrics], pretrain_conf, 'metrics')
        activation = set_module([nn], pretrain_conf, 'activation')
        input_key = pretrain_conf.data.input_key
        target_key = pretrain_conf.data.target_key
        patience = pretrain_conf.patience
        higgsId[i] = sub_task.pre_train(epochs=pretrain_conf.epochs,
                                        model=sub_model,
                                        dataloader=dataloader,
                                        optimizer=optimizer,
                                        loss_func=loss_func,
                                        input_key=input_key,
                                        target_key=target_key,
                                        device=DEVICE,
                                        patience=patience,
                                        metrics=metrics,
                                        activation=activation)
    logger.info('----- pretrain[1] end -----')

    # #########################################################################
    # #########################################################################
    logger.info('copy the pretrain models')
    pre_trained_tau4vec = set_task(conf.sub_task_params, 'tau4vec', sub_task)
    pre_trained_higgsId = set_task(conf.sub_task_params, 'higgsId', sub_task)
    pre_trained_model = [pre_trained_tau4vec, pre_trained_higgsId]
    task = [tau4vec, higgsId]
    for num_task, sub in enumerate(task):
        for num_model in range(len(sub)):
            pre_trained_model[num_task][num_model].load_state_dict(
                deepcopy(task[num_task][num_model].state_dict()))
    # #########################################################################
    # #########################################################################

    logger.info('----- SPOS-NAS start -----')
    sposnas_conf = conf.SPOS_NAS

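    # Unlike the grid-scan variant above, the first-stage metrics (LOSS_1ST,
    # RATIO, ONLY_PT_RATIO) are keyed by the tau4vec model name alone.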
    def make_output_dict():
        return {
            'X': [],
            'AUC': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'LOSS_1ST': {f: []
                         for f in FIRST_MODEL_NAME},
            'LOSS_2ND': {
                f'{f}_{s}': []
                for f, s in product(FIRST_MODEL_NAME, SECOND_MODEL_NAME)
            },
            'RATIO': {f: []
                      for f in FIRST_MODEL_NAME},
            'ONLY_PT_RATIO': {f: []
                              for f in FIRST_MODEL_NAME},
        }

    # evaluate only pre-train model
    loss_func = [
        set_module([nn, MyLoss], sposnas_conf, 'loss_first'),
        set_module([nn, MyLoss], sposnas_conf, 'loss_second')
    ]
    loss_weight = [0.5, 0.5]
    metrics = get_module([MyMetrics], 'Calc_Auc')()
    from models.SPOS_NAS import SPOS
    model = SPOS(task=task, loss_func=loss_func, loss_weight=loss_weight)
    model.to(DEVICE)
    logger.info('evaluate only pre-train model')
    dummy = make_output_dict()
    evaluate(model, conf, dataloader, metrics, dummy)

    output_dict = make_output_dict()
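    # Loss-weight grid after the *0.1 scaling below:
    # 0.0, 0.001, 0.01, 0.1, 0.2, ..., 0.9, 0.99, 0.999, 1.0
    # (dense near both endpoints).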
    X_list = list(range(11))
    X_list[1:1] = [0.01, 0.1]
    X_list[-1:-1] = [9.9, 9.99]
    for X in (np.array(X_list) * 0.1).round(10):
        output_dict['X'].append(X)
        logger.info(f'loss_ratio: {X:.6f} (loss_1*X + loss_2*(1-X)) start')
        set_seed(conf.seed)
        logger.info('load pretrain models...')
        for num_task, sub in enumerate(task):
            for num_model in range(len(sub)):
                task[num_task][num_model].load_state_dict(
                    deepcopy(
                        pre_trained_model[num_task][num_model].state_dict()))
        logger.info('load pretrain models done')
        logger.info('set model parameters...')
        loss_func = [
            set_module([nn, MyLoss], sposnas_conf, 'loss_first'),
            set_module([nn, MyLoss], sposnas_conf, 'loss_second')
        ]
        loss_weight = [X, 1. - X]
        metrics = get_module([MyMetrics], 'Calc_Auc')()

        model = SPOS(task=task,
                     loss_func=loss_func,
                     loss_weight=loss_weight,
                     save_dir='SPOS')
        model.to(DEVICE)
        optimizer = set_module([optim],
                               sposnas_conf,
                               'optimizer',
                               params=model.parameters())
        scheduler = set_module([optim.lr_scheduler],
                               sposnas_conf,
                               'scheduler',
                               optimizer=optimizer)
        logger.info('set model parameters done')
        logger.info('fit model...')
        model.fit(epochs=sposnas_conf.epochs,
                  dataloader=dataloader,
                  device=DEVICE,
                  optimizer=optimizer,
                  scheduler=scheduler,
                  patience=sposnas_conf.patience)
        logger.info('fit model done')
        logger.info('eval model...')
        output_dict = evaluate(model, conf, dataloader, metrics, output_dict)
        logger.info('eval model done')

        set_seed(conf.seed)
        logger.info('re-train start')
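        # Pick the architecture with the best AUC under this X setting and
        # retrain that single path (fixed choice) from the supernet weights.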
        selected_model, _ = max(
            {k: v[-1]
             for k, v in output_dict['AUC'].items()}.items(),
            key=lambda x: x[1])
        logger.info(f'selected_model: {selected_model}')
        selected_choice = MODELNAME_CHOICE_INDEX[selected_model]
        model.fit(epochs=sposnas_conf.epochs,
                  dataloader=dataloader,
                  device=DEVICE,
                  optimizer=optimizer,
                  scheduler=scheduler,
                  patience=sposnas_conf.patience,
                  choice=selected_choice)
        logger.info('re-train done')
        dummy = make_output_dict()
        dummy = evaluate(model, conf, dataloader, metrics, dummy)

        def result_parser(res, selected_model, seed, X):
            AUC = res['AUC'][selected_model][0]
            LOSS_1ST = res['LOSS_1ST'][selected_model.split('_')[0]][0]
            LOSS_2ND = res['LOSS_2ND'][selected_model][0]
            RATIO = res['RATIO'][selected_model.split('_')[0]][0]
            ONLY_PT_RATIO = res['ONLY_PT_RATIO'][selected_model.split('_')
                                                 [0]][0]
            target_result = dict(seed=seed,
                                 X=X,
                                 AUC=AUC,
                                 LOSS_1ST=LOSS_1ST,
                                 LOSS_2ND=LOSS_2ND,
                                 RATIO=RATIO,
                                 ONLY_PT_RATIO=ONLY_PT_RATIO)
            logger.info(f're-train results: {target_result}')

        result_parser(dummy, selected_model, conf.seed, X)

    logger.info('all train and eval step are done')
    logger.info('plot results done')
Example 6
def evaluate(model, conf, dataloader, metrics, result, is_gp_3dim):
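    # The GP band files are assumed to match only the full dataset
    # (max_events == 50000); for smaller runs the coverage check is skipped
    # and the ratios are reported as -1.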
    is_gp_check = False
    if conf['dataset']['params']['max_events'] == 50000:
        is_gp_check = True
    with torch.no_grad():
        logger.info('start eval mode')
        model.eval()
        dataloader.dataset.test()
        test_dataset = dataloader.dataset
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=100,
                                     shuffle=False)
        A = range(len(FIRST_MODEL_NAME))
        B = range(len(SECOND_MODEL_NAME))
        count = 0
        num_name_conb = {
            num: f'{f}_{s}' for num, (f, s) in enumerate(
                product(
                    FIRST_MODEL_NAME, SECOND_MODEL_NAME
                )
            )
        }
        num_name_1st = {
            num: f for num, (f, s) in enumerate(
                product(
                    FIRST_MODEL_NAME, SECOND_MODEL_NAME
                )
            )
        }
        for choice in product(A, B):
            outputs_data = []
            targets_data = []
            temp_outputs_data = []
            temp_targets_data = []
            for data in test_dataloader:
                inputs = add_device(data['inputs'], DEVICE)
                targets = add_device(data['targets'], DEVICE)
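                # The supernet returns (outputs, active choice); with an
                # explicit choice the returned path is assumed to echo the
                # requested one, so the reassignment is harmless.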
                outputs, choice = model(inputs, choice)
                outputs_data.extend(tensor_to_array(outputs[1]))
                targets_data.extend(tensor_to_array(targets[1]))
                temp_outputs_data.extend(tensor_to_array(outputs[0]))
                temp_targets_data.extend(tensor_to_array(targets[0]))
            targets_data = np.array(targets_data)
            outputs_data = np.array(outputs_data)
            auc_score = metrics(targets_data, outputs_data)
            result['AUC'][num_name_conb[count]].append(auc_score)
            temp_outputs_data = np.array(temp_outputs_data)
            temp_targets_data = np.array(temp_targets_data)
            upper = np.loadtxt('./logs/GP_upper.csv', delimiter=',')
            lower = np.loadtxt('./logs/GP_lower.csv', delimiter=',')

            c_1 = set_module([torch.nn, MyLoss], conf.SPOS_NAS, 'loss_first')
            c_2 = set_module([torch.nn, MyLoss], conf.SPOS_NAS, 'loss_second')
            loss_1st = c_1(torch.tensor(temp_outputs_data),
                           torch.tensor(temp_targets_data))
            loss_2nd = c_2(torch.tensor(outputs_data),
                           torch.tensor(targets_data))

            if is_gp_check:
                from models.sub_task import set_phi_within_valid_range

                def reshape3vec(data):
                    return data.reshape(-1, 3)

                temp_outputs_data = set_phi_within_valid_range(
                    reshape3vec(temp_outputs_data)
                )
                upper = set_phi_within_valid_range(
                    reshape3vec(upper)
                )
                lower = set_phi_within_valid_range(
                    reshape3vec(lower)
                )
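                # With is_gp_3dim=False the two taus are treated jointly as a
                # 6-vector, so band membership must hold in all six components.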
                if not is_gp_3dim:
                    temp_outputs_data = temp_outputs_data.reshape(-1, 6)
                    upper = upper.reshape(-1, 6)
                    lower = lower.reshape(-1, 6)

                query = (
                    ((lower < upper)
                     & (temp_outputs_data < upper)
                     & (lower < temp_outputs_data))
                    | ((upper < lower)
                       & (upper < temp_outputs_data)
                       & (lower < temp_outputs_data))
                    | ((upper < lower)
                       & (temp_outputs_data < upper)
                       & (temp_outputs_data < lower))
                )
                ratio = np.sum(
                    np.where(query, True, False).all(axis=1)
                )/(len(temp_outputs_data))
                result['RATIO'][num_name_1st[count]] = [ratio]

                query = (
                    ((lower[:, 0] < upper[:, 0])
                     & (temp_outputs_data[:, 0] < upper[:, 0])
                     & (lower[:, 0] < temp_outputs_data[:, 0]))
                    | ((upper[:, 0] < lower[:, 0])
                       & (upper[:, 0] < temp_outputs_data[:, 0])
                       & (lower[:, 0] < temp_outputs_data[:, 0]))
                    | ((upper[:, 0] < lower[:, 0])
                       & (temp_outputs_data[:, 0] < upper[:, 0])
                       & (temp_outputs_data[:, 0] < lower[:, 0]))
                )
                if not is_gp_3dim:
                    query = (
                        ((lower[:, [0, 3]] < upper[:, [0, 3]])
                         & (temp_outputs_data[:, [0, 3]] < upper[:, [0, 3]])
                         & (lower[:, [0, 3]] < temp_outputs_data[:, [0, 3]]))
                        | ((upper[:, [0, 3]] < lower[:, [0, 3]])
                           & (upper[:, [0, 3]] < temp_outputs_data[:, [0, 3]])
                           & (lower[:, [0, 3]] < temp_outputs_data[:, [0, 3]]))
                        | ((upper[:, [0, 3]] < lower[:, [0, 3]])
                           & (temp_outputs_data[:, [0, 3]] < upper[:, [0, 3]])
                           & (temp_outputs_data[:, [0, 3]] < lower[:, [0, 3]]))
                    )
                only_pt_ratio = np.sum(
                    np.where(query, True, False)
                )/(len(temp_outputs_data))
                result['ONLY_PT_RATIO'][num_name_1st[count]] = [only_pt_ratio]
            else:
                ratio = -1.0
                only_pt_ratio = -1.0
                result['RATIO'][num_name_1st[count]] = [ratio]
                result['ONLY_PT_RATIO'][num_name_1st[count]] = [only_pt_ratio]

            result['LOSS_1ST'][num_name_1st[count]] = [loss_1st.item()]

            result['LOSS_2ND'][num_name_conb[count]].append(loss_2nd.item())
            logger.info(f'[Choice:{choice} / auc:{auc_score:.6f}] / ' +
                        f'first_loss: {loss_1st:.6f} / ' +
                        f'ratio: {ratio:.6f} / ' +
                        f'only_pt_ratio: {only_pt_ratio:.6f}')
            count += 1

    logger.info(result)
    return result
Example 7
def main(
        conf: str,
        seed: int,
        gpu_index: int,
        data_path: str,
        event: int,
        weight: float,
        n_times_model: int,
        prefix: str,
        is_gp_3dim: bool
):
    global DEVICE, FIRST_MODEL_NAME, SECOND_MODEL_NAME, MODELNAME_CHOICE_INDEX
    start = time.time()
    conf = load_config(conf)
    if seed is not None:
        conf.seed = seed
    if gpu_index is not None and DEVICE == torch.device('cuda'):
        # WARNING: Enable gp_re_index dict in gpu02 only
        gpu_re_index = {0: 0, 1: 1, 2: 4, 3: 5, 4: 2, 5: 3, 6: 6, 7: 7}
        gpu_index = gpu_re_index[gpu_index]
        DEVICE = torch.device(f'cuda:{gpu_index}')
    if data_path is not None:
        conf['dataset']['params']['data_path'] = data_path
    if event is not None:
        conf['dataset']['params']['max_events'] = event
    conf['is_gp_3dim'] = is_gp_3dim
    logger.info(DEVICE)
    logger.info(conf)

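    # Duplicate the candidate sub-model lists n_times_model times to enlarge
    # the supernet search space (names stay unique via the '-{num}' suffix).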
    model_confs_tau4vec = conf.sub_task_params.tau4vec
    model_confs_tau4vec['tasks'] = model_confs_tau4vec['tasks'] * n_times_model
    model_confs_higgsId = conf.sub_task_params.higgsId
    model_confs_higgsId['tasks'] = model_confs_higgsId['tasks'] * n_times_model
    sub_models_conf = {
        'tau4vec': model_confs_tau4vec,
        'higgsId': model_confs_higgsId
    }
    FIRST_MODEL_NAME = [
        i['name'].split('_')[-1][:-4] + f'-{num}'
        for num, i in enumerate(model_confs_tau4vec['tasks'])
    ]
    SECOND_MODEL_NAME = [
        i['name'].split('_')[-1][:-4] + f'-{num}'
        for num, i in enumerate(model_confs_higgsId['tasks'])
    ]
    MODELNAME_CHOICE_INDEX = {
        f'{n1}_{n2}': v
        for (n1, n2), v in zip(
                product(FIRST_MODEL_NAME,
                        SECOND_MODEL_NAME),
                product(range(len(FIRST_MODEL_NAME)),
                        range(len(SECOND_MODEL_NAME)))
        )
    }

    set_seed(conf.seed)
    from models import sub_task
    tau4vec = set_task(sub_models_conf, 'tau4vec', sub_task)
    logger.info('set_task: tau4vec')
    set_seed(conf.seed)
    higgsId = set_task(sub_models_conf, 'higgsId', sub_task)
    logger.info('set_task: higgsId')
    from models import MyDataset
    from models import MyMetrics
    set_seed(conf.seed)
    dataset = set_module([MyDataset], conf, 'dataset')
    set_seed(conf.seed)
    dataloader = DataLoader(dataset,
                            batch_size=100,
                            shuffle=True)
    logger.info('set dataloader')
    # #########################################################################
    # pre-train ###############################################################
    # #########################################################################
    logger.info('----- pretrain[0] start -----')
    pretrain_conf = model_confs_tau4vec['pretrain']
    for i, sub_model in enumerate(tau4vec):
        logger.info(f'pretrain: [0][{i}]')
        set_seed(conf.seed)
        optimizer = set_module([optim],
                               pretrain_conf,
                               'optimizer',
                               params=sub_model.parameters())
        loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
        metrics = set_module([MyMetrics], pretrain_conf, 'metrics')
        activation = set_module([nn], pretrain_conf, 'activation')
        input_key = pretrain_conf['data']['input_key']
        target_key = pretrain_conf['data']['target_key']
        patience = pretrain_conf['patience']
        tau4vec[i] = sub_task.pre_train(epochs=pretrain_conf['epochs'],
                                        model=sub_model,
                                        dataloader=dataloader,
                                        optimizer=optimizer,
                                        loss_func=loss_func,
                                        input_key=input_key,
                                        target_key=target_key,
                                        device=DEVICE,
                                        patience=patience,
                                        metrics=metrics,
                                        activation=activation)
    logger.info('----- pretrain[0] end -----')
    logger.info('----- pretrain[1] start -----')
    pretrain_conf = conf.sub_task_params.higgsId.pretrain
    for i, sub_model in enumerate(higgsId):
        logger.info(f'pretrain: [1][{i}]')
        set_seed(conf.seed)
        optimizer = set_module([optim],
                               pretrain_conf,
                               'optimizer',
                               params=sub_model.parameters())
        loss_func = set_module([nn, MyLoss], pretrain_conf, 'loss_func')
        metrics = set_module([MyMetrics], pretrain_conf, 'metrics')
        activation = set_module([nn], pretrain_conf, 'activation')
        input_key = pretrain_conf['data']['input_key']
        target_key = pretrain_conf['data']['target_key']
        patience = pretrain_conf['patience']
        higgsId[i] = sub_task.pre_train(epochs=pretrain_conf['epochs'],
                                        model=sub_model,
                                        dataloader=dataloader,
                                        optimizer=optimizer,
                                        loss_func=loss_func,
                                        input_key=input_key,
                                        target_key=target_key,
                                        device=DEVICE,
                                        patience=patience,
                                        metrics=metrics,
                                        activation=activation)
    logger.info('----- pretrain[1] end -----')

    # #########################################################################
    # #########################################################################
    logger.info('copy the pretrain models')
    pre_trained_tau4vec = set_task(sub_models_conf, 'tau4vec', sub_task)
    pre_trained_higgsId = set_task(sub_models_conf, 'higgsId', sub_task)
    pre_trained_model = [pre_trained_tau4vec, pre_trained_higgsId]
    task = [tau4vec, higgsId]
    for num_task, sub in enumerate(task):
        for num_model in range(len(sub)):
            pre_trained_model[num_task][num_model].load_state_dict(
                deepcopy(task[num_task][num_model].state_dict())
            )
    # #########################################################################
    # #########################################################################

    logger.info('----- SPOS-NAS start -----')
    sposnas_conf = conf.SPOS_NAS

    def make_output_dict():
        return {
            'X': [],
            'AUC': {
                f'{f}_{s}': [] for f, s in product(
                    FIRST_MODEL_NAME, SECOND_MODEL_NAME
                )
            },
            'LOSS_1ST': {
                f: [] for f in FIRST_MODEL_NAME
            },
            'LOSS_2ND': {
                f'{f}_{s}': [] for f, s in product(
                    FIRST_MODEL_NAME, SECOND_MODEL_NAME
                )
            },
            'RATIO': {
                f: [] for f in FIRST_MODEL_NAME
            },
            'ONLY_PT_RATIO': {
                f: [] for f in FIRST_MODEL_NAME
            },
        }

    # SPOS-NAS
    loss_func = [set_module([nn, MyLoss], sposnas_conf, 'loss_first'),
                 set_module([nn, MyLoss], sposnas_conf, 'loss_second')]
    loss_weight = [weight, 1. - weight]
    metrics = get_module([MyMetrics], 'Calc_Auc')()

    model = SPOS(task=task, loss_func=loss_func,
                 loss_weight=loss_weight)
    model.to(DEVICE)

    output_dict = make_output_dict()
    output_dict['X'].append(weight)
    logger.info(f'loss_ratio: {weight:.6f} (loss_1*X + loss_2*(1-X)) start')
    set_seed(conf.seed)
    logger.info('load pretrain models...')
    for num_task, sub in enumerate(task):
        for num_model in range(len(sub)):
            task[num_task][num_model].load_state_dict(
                deepcopy(pre_trained_model[num_task][num_model].state_dict())
            )
    logger.info('load pretrain models done')
    logger.info('set model parameters...')

    optimizer = set_module([optim],
                           sposnas_conf,
                           'optimizer',
                           params=model.parameters())
    scheduler = set_module([optim.lr_scheduler],
                           sposnas_conf,
                           'scheduler',
                           optimizer=optimizer)
    logger.info('set model parameters done')
    logger.info('fit model...')
    model.fit(epochs=sposnas_conf.epochs,
              dataloader=dataloader,
              device=DEVICE,
              optimizer=optimizer,
              scheduler=scheduler,
              patience=sposnas_conf.patience)
    logger.info('fit model done')
    logger.info('eval model...')
    output_dict = evaluate(model, conf, dataloader, metrics,
                           output_dict, is_gp_3dim)
    logger.info('eval model done')

    set_seed(conf.seed)
    logger.info('re-train start')
    selected_model, _ = max(
        {
            k: v[-1] for k, v in output_dict['AUC'].items()
        }.items(), key=lambda x: x[1]
    )
    logger.info(f'selected_model: {selected_model}')
    selected_choice = MODELNAME_CHOICE_INDEX[selected_model]
    model.fit(epochs=sposnas_conf.epochs,
              dataloader=dataloader,
              device=DEVICE,
              optimizer=optimizer,
              scheduler=scheduler,
              patience=sposnas_conf.patience,
              choice=selected_choice)
    logger.info('re-train done')

    elapsed_time = time.time() - start
    events = conf.dataset.params.max_events * 2
    if prefix:
        output_file = (f'result.SPOS_NAS-{prefix}_' +
                       f's{seed}_w{weight}_e{events}_' +
                       f'n{n_times_model*3}.json')
    else:
        output_file = (f'result.SPOS_NAS-s{seed}_w{weight}_e{events}_' +
                       f'n{n_times_model*3}.json')

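    # Write a placeholder record (-1 metrics) first; it is overwritten below
    # once the final evaluation has filled in the real numbers.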
    with open(os.path.join('logs', output_file), 'w') as fo:
        json.dump(
            [{
                'agent': 'SPOS-NAS',
                'tasks': {
                    'tau4vec': {
                        'weight': weight,
                        'loss_test': -1,
                        'mse_test': -1,
                        'ratio_2sigma_GP_test': -1,
                        'models': FIRST_MODEL_NAME,
                        'model_selected': selected_model.split('_')[0]
                    },
                    'higgsId': {
                        'weight': 1. - weight,
                        'loss_test': -1,
                        'auc_test': -1,
                        'models': SECOND_MODEL_NAME,
                        'model_selected': selected_model.split('_')[1]
                    }
                },
                'loss_test': -1,
                'nevents': conf.dataset.params.max_events * 2,
                'seed': conf.seed,
                'walltime': elapsed_time
            }],
            fo,
            indent=2
        )

    dummy = make_output_dict()
    dummy = evaluate(model, conf, dataloader, metrics, dummy, is_gp_3dim)

    def result_parser(res, selected_model, seed, walltime):
        AUC = res['AUC'][selected_model][0]
        LOSS_1ST = res['LOSS_1ST'][selected_model.split('_')[0]][0]
        LOSS_2ND = res['LOSS_2ND'][selected_model][0]
        RATIO = res['RATIO'][selected_model.split('_')[0]][0]
        ONLY_PT_RATIO = res[
            'ONLY_PT_RATIO'
        ][selected_model.split('_')[0]][0]
        target_result = dict(
            seed=seed,
            AUC=AUC,
            LOSS_1ST=LOSS_1ST,
            LOSS_2ND=LOSS_2ND,
            RATIO=RATIO,
            ONLY_PT_RATIO=ONLY_PT_RATIO
        )
        logger.info(f're-train results: {target_result}')
        return {
            'agent': 'SPOS-NAS',
            'tasks': {
                'tau4vec': {
                    'weight': weight,
                    'loss_test': target_result['LOSS_1ST'],
                    'mse_test': target_result['LOSS_1ST'] * 10000,
                    'ratio_2sigma_GP_test': target_result['RATIO'],
                    'models': FIRST_MODEL_NAME,
                    'model_selected': selected_model.split('_')[0]
                },
                'higgsId': {
                    'weight': 1. - weight,
                    'loss_test': target_result['LOSS_2ND'],
                    'auc_test': target_result['AUC'],
                    'models': SECOND_MODEL_NAME,
                    'model_selected': selected_model.split('_')[1]
                }
            },
            'loss_test': (weight * target_result['LOSS_1ST']
                          + (1. - weight) * target_result['LOSS_2ND']),
            'nevents': conf.dataset.params.max_events * 2,
            'seed': seed,
            'walltime': walltime
        }

    with open(os.path.join('logs', output_file), 'w') as fo:
        json.dump(
            [result_parser(dummy, selected_model, conf.seed, elapsed_time)],
            fo,
            indent=2
        )

    logger.info('all train and eval step are done')