Code example #1
File: main.py  Project: wqwqqe/KRED
import argparse

# load_data and train_test are project-local helpers in the KRED repository


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--rootpath', type=str, default='/mnt/KRED_publish/data/', help='root path of data')

    ## tuning parameters
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='learning rate')
    parser.add_argument('--epoch', type=int, default=100, help='epoch num')
    parser.add_argument('--batch_size', type=int, default=16, help='batch size')
    parser.add_argument('--l2_regular', type=float, default=0.00001, help='l2 regularization weight')

    ## task-specific parameters
    parser.add_argument('--training_type', type=str, default="single_task", help='single_task training or multi-task training')
    parser.add_argument('--task', type=str, default="user2item", help='task types: user2item, item2item, vert_classify, pop_predict, local_news')

    parser.add_argument('--news_entity_num', type=int, default=20, help='fix the entity num per news item to news_entity_num')
    parser.add_argument('--entity_neighbor_num', type=int, default=20, help='neighbor num for an entity')
    parser.add_argument('--user_his_num', type=int, default=20, help='user history num')
    parser.add_argument('--negative_num', type=int, default=6, help='1 positive and negative_num-1 negatives per training sample')
    parser.add_argument('--smooth_lamda', type=float, default=10, help='smoothing lambda in the softmax of the loss function')

    parser.add_argument('--embedding_dim', type=int, default=90, help='embedding dim for entity_embedding dv uv')
    parser.add_argument('--layer_dim', type=int, default=128, help='layer dim')

    parser.add_argument('--logdir', type=str, default="EXP_num", help='directory for saving prediction results')

    args = parser.parse_args()

    data = load_data(args)

    train_test(args, data)
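
The hyperparameter flags above are plain value options, so argparse parses them with type= converters rather than boolean actions. A minimal, self-contained sketch of that pattern (the flag values below are illustrative, not KRED defaults):

import argparse

# minimal sketch: parse an explicit argv list instead of the real command line
parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=0.0001)
parser.add_argument('--batch_size', type=int, default=16)

args = parser.parse_args(['--learning_rate', '0.0005', '--batch_size', '32'])
print(args.learning_rate, args.batch_size)  # -> 0.0005 32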
Code example #2
import sys

import preprocess  # project-local module
import train       # project-local module


def main():

    # load the input file into a dataframe
    test = preprocess.load_input(sys.argv[1])

    # load the training pcap files into a dataframe
    dataset = preprocess.load_data()

    # train a Naive Bayes model and make a prediction
    train.train_test(dataset, test)
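
Per the comments, train.train_test fits a Naive Bayes model on the training dataframe and predicts on the test dataframe. A hedged sketch of what such a helper could look like with scikit-learn's GaussianNB, assuming both frames hold numeric features and the training frame has a 'label' column (these names are assumptions, not the project's actual API):

from sklearn.naive_bayes import GaussianNB

def train_test(dataset, test):
    # hypothetical: fit Gaussian Naive Bayes on the labeled training frame
    # and return predictions for the unlabeled test frame
    features = [c for c in dataset.columns if c != 'label']
    clf = GaussianNB()
    clf.fit(dataset[features], dataset['label'])
    return clf.predict(test[features])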
Code example #3
import os
import pickle

import torch
import nsml

# get_args, infer, DATASET_PATH and the model_all / train / utils modules
# come from the surrounding project


def main():
    args = get_args()
    if args.use_dropout == 0:
        args.use_dropout = False

    for x in vars(args).items():
        print(x)

    if args.lr_sch == 5 and torch.__version__ != '0.4.0':
        print("for cosine annealing, change to torch==0.4.0 in setup.py")
        raise AssertionError()
    elif args.lr_sch != 5 and torch.__version__ == '0.4.0':
        print("warning: this is torch version {}! nsml report will not be recorded".format(torch.__version__))

    model, optimizer, scheduler = model_all.get_model(args)

    if args.use_gpu:
        if torch.cuda.device_count() > 1:
            print("[gpu] Let's use", torch.cuda.device_count(), "GPUs!")
            # split each batch across all visible GPUs along dim 0
            model = torch.nn.DataParallel(model)
        elif torch.cuda.device_count() == 1:
            print("[gpu] Let's use", torch.cuda.device_count(), "GPU!")
        else:
            print("[gpu] no available gpus")
        if torch.cuda.is_available():
            model = model.cuda()
    

    nsml.bind(infer=infer, model=model, optimizer=optimizer)

    if args.pause:
        nsml.paused(scope=locals())

    nsml.save()
    if args.mode == 'train':
        dataloaders, dataset_sizes = utils.data_loader(args, train=True, batch_size=args.batch_size)
        model = train.train_test(model, optimizer, scheduler, dataloaders, dataset_sizes, args)
    
    utils.save_model(model, 'model_state')
    with open('args.pickle', 'wb') as farg:
        pickle.dump(args, farg)

    loader = utils.data_loader(args, train=False, batch_size=1)
    predict, acc = utils.get_forward_result(model, loader, args)
    predict = torch.cat(predict, 0)
    nsml.bind(save=lambda x: utils.save_csv(x,
                                            data_csv_fname=os.path.join(DATASET_PATH, 'train', 'test') + '/test_data',
                                            results=predict,
                                            test_loader=loader))
    nsml.save('result')
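
utils.get_forward_result above returns a list of per-batch predictions (later joined with torch.cat) plus an accuracy. A minimal sketch of that kind of forward loop, under the assumption that the loader yields (inputs, labels) pairs (hypothetical, not the project's code):

import torch

def get_forward_result(model, loader):
    # hypothetical sketch: run the model over a DataLoader, collecting
    # per-batch predictions to mirror the torch.cat(predict, 0) call above
    model.eval()
    predict = []
    correct, total = 0, 0
    with torch.no_grad():
        for inputs, labels in loader:
            outputs = model(inputs)
            batch_pred = outputs.argmax(dim=1)
            predict.append(batch_pred)
            correct += (batch_pred == labels).sum().item()
            total += labels.size(0)
    return predict, correct / max(total, 1)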
Code example #4
File: main.py  Project: GShyx/Grad_Path_MTL
import argparse

import torch
import torch.nn as nn
import torch.optim as optim

# load_fashionMNIST, load_CIFAR, load_CELEBA, create_ori_mask, vgg11, vgg16,
# pre_train, save_model and the train/test helpers are project-local


def main():
    # Training settings
    parser = argparse.ArgumentParser(
        description='Graduation Project - SZ170110132 MaHaixuan')

    parser.add_argument(
        '--datasets',
        type=str,
        default='fashionMNIST',
        help=
        'datasets: fashionMNIST, CIFAR, CELEBA10 or CELEBA40 (default: fashionMNIST)'
    )
    parser.add_argument('--vgg',
                        type=str,
                        default='11',
                        help='vgg version: 11 or 16 (default: 11)')
    parser.add_argument(
        '--batch-size',
        type=int,
        default=64,
        help='input batch size for training and testing (default: 64)')
    parser.add_argument('--share-unit-rate',
                        type=float,
                        default=0.8,
                        help='input share unit rate a (default: 0.8)')
    parser.add_argument('--epoch',
                        type=int,
                        default=5,
                        help='number of epochs to train (default: 5)')
    parser.add_argument('--seed',
                        type=int,
                        default=1,
                        help='random seed (default: 1)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.01,
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        help='SGD momentum (default: 0.5)')
    parser.add_argument(
        '-r',
        action='store_true',
        default=False,
        help='use a random mask and skip the pretrain (default: False)')
    parser.add_argument(
        '-t',
        action='store_true',
        default=False,
        help='do not train on all data, to test the code quickly (default: False)')
    parser.add_argument('-s',
                        action='store_true',
                        default=False,
                        help='train single task (default: False)')
    parser.add_argument('-i',
                        action='store_true',
                        default=False,
                        help='test in train (default: False)')
    parser.add_argument('-m',
                        action='store_false',
                        default=True,
                        help='pass -m to disable saving the model (default: True)')
    parser.add_argument(
        '--which-single-task',
        type=int,
        default=0,
        help='if the noshare mask (-s) is used, which task to train (default: 0)')

    args = parser.parse_args()

    torch.manual_seed(args.seed)
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    batch_size = args.batch_size
    a = args.share_unit_rate
    b = a

    use_random_mask = args.r
    use_noshare_mask = args.s
    test_in_train = args.i
    single_task = args.which_single_task

    total_itts = 1
    train_all_data = not args.t
    epoch = args.epoch if train_all_data else 1
    datasets_choice = args.datasets
    vgg_choice = args.vgg
    save_model_choice = args.m

    if datasets_choice == 'fashionMNIST':
        train_loader, test_loader, task_count, channels, datasets_name = load_fashionMNIST(
            batch_size)
    elif datasets_choice == 'CIFAR':
        train_loader, test_loader, task_count, channels, datasets_name = load_CIFAR(
            batch_size)
    elif datasets_choice == 'CELEBA10':
        train_loader, test_loader, task_count, channels, datasets_name = load_CELEBA(
            batch_size)
    elif datasets_choice == 'CELEBA40':
        train_loader, test_loader, task_count, channels, datasets_name = load_CELEBA(
            batch_size, task_count=40)
    else:
        print('unknown dataset:', datasets_choice)
        exit(1)

    if vgg_choice == '11':
        conv_layer_list = [64, 128, 256, 256, 512, 512, 512, 512]
    elif vgg_choice == '16':
        conv_layer_list = [
            64, 64, 128, 128, 256, 256, 256, 512, 512, 512, 512, 512, 512
        ]
    else:
        print('unknown vgg version:', vgg_choice)
        exit(1)

    ones_unit_mapping_list = create_ori_mask(conv_layer_list,
                                             task_count,
                                             mask_type='ones_tensor')
    zeros_unit_mapping_list = create_ori_mask(conv_layer_list,
                                              task_count,
                                              mask_type='zeros_numpy')

    if use_random_mask:
        random_unit_mapping_list = create_ori_mask(conv_layer_list,
                                                   task_count,
                                                   mask_type='random_tensor',
                                                   a=a,
                                                   b=b)
        unit_mapping_list = random_unit_mapping_list
    elif use_noshare_mask:
        noshare_unit_mapping_list = create_ori_mask(conv_layer_list,
                                                    task_count,
                                                    mask_type='noshare_tensor',
                                                    a=a,
                                                    b=b,
                                                    single_task=single_task)
        unit_mapping_list = noshare_unit_mapping_list
    else:
        if vgg_choice == '11':
            pre_m = vgg11(task_count=task_count,
                          unit_mapping_list=ones_unit_mapping_list,
                          channels=channels)
        elif vgg_choice == '16':
            pre_m = vgg16(task_count=task_count,
                          unit_mapping_list=ones_unit_mapping_list,
                          channels=channels)
        else:
            print('unknown vgg version:', vgg_choice)
            exit(1)

        pre_model = pre_m.to(device)

        pre_optimizer = optim.SGD(pre_model.parameters(),
                                  lr=args.lr,
                                  momentum=args.momentum)
        pre_scheduler = optim.lr_scheduler.MultiStepLR(pre_optimizer,
                                                       milestones=[12, 24],
                                                       gamma=0.1)
        pre_criterion = nn.CrossEntropyLoss()
        unit_mapping_list = pre_train(args,
                                      pre_model,
                                      task_count,
                                      device,
                                      train_loader,
                                      pre_optimizer,
                                      pre_criterion,
                                      total_itts,
                                      zeros_unit_mapping_list,
                                      a,
                                      b,
                                      train_all_data=train_all_data)
        if save_model_choice:
            save_model('./pretrain_model', pre_model, datasets_name,
                       vgg_choice, epoch, use_random_mask, use_noshare_mask,
                       single_task, a, b, train_all_data)

    if vgg_choice == '11':
        m = vgg11(task_count=task_count,
                  unit_mapping_list=unit_mapping_list,
                  channels=channels)
    elif vgg_choice == '16':
        m = vgg16(task_count=task_count,
                  unit_mapping_list=unit_mapping_list,
                  channels=channels)
    else:
        print('unknown vgg version:', vgg_choice)
        exit(1)
    model = m.to(device)
    optimizer = optim.SGD(model.parameters(),
                          lr=args.lr,
                          momentum=args.momentum)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=[12, 24],
                                               gamma=0.1)
    criterion = nn.CrossEntropyLoss()

    if test_in_train and use_noshare_mask:
        train_test_single_task(args,
                               model,
                               task_count,
                               device,
                               train_loader,
                               test_loader,
                               optimizer,
                               criterion,
                               epoch,
                               total_itts,
                               single_task,
                               train_all_data=train_all_data)
    elif test_in_train:
        train_test(args,
                   model,
                   task_count,
                   device,
                   train_loader,
                   test_loader,
                   optimizer,
                   criterion,
                   epoch,
                   total_itts,
                   train_all_data=train_all_data)
    elif use_noshare_mask:
        train_single_task(args,
                          model,
                          task_count,
                          device,
                          train_loader,
                          optimizer,
                          criterion,
                          epoch,
                          total_itts,
                          single_task,
                          train_all_data=train_all_data)
        test_single_task(args,
                         model,
                         task_count,
                         device,
                         test_loader,
                         optimizer,
                         criterion,
                         epoch,
                         total_itts,
                         single_task,
                         train_all_data=train_all_data)
    else:
        train(args,
              model,
              task_count,
              device,
              train_loader,
              optimizer,
              criterion,
              epoch,
              total_itts,
              train_all_data=train_all_data)
        test(args,
             model,
             task_count,
             device,
             test_loader,
             optimizer,
             criterion,
             epoch,
             total_itts,
             train_all_data=train_all_data)

    if save_model_choice:
        save_model('./train_model', model, datasets_name, vgg_choice, epoch,
                   use_random_mask, use_noshare_mask, single_task, a, b,
                   train_all_data)

    print(
        'datasets:{}\nvgg:{}\nepoch:{}\nuse random mask:{}\nuse noshare mask:{}\nsingle task:{}\na:{}\nb:{}'
        .format(datasets_name, vgg_choice, epoch, use_random_mask,
                use_noshare_mask, single_task, a, b))
Code example #5
parser.add_argument('--batch_size',
                    type=int,
                    default=500,
                    help='batch size')
parser.add_argument('--l2_regular',
                    type=float,
                    default=0.00001,
                    help='l2 regularization weight')

parser.add_argument('--news_entity_num',
                    type=int,
                    default=20,
                    help='fix the entity num per news item to news_entity_num')
parser.add_argument('--negative_num',
                    type=int,
                    default=6,
                    help='negative sampling number')
parser.add_argument('--embedding_dim',
                    type=int,
                    default=90,
                    help='embedding dim for entity_embedding dv uv')
parser.add_argument('--layer_dim',
                    type=int,
                    default=128,
                    help='layer dim')

args = parser.parse_args()

data = load_data(args)

train_test(args, data)
Code example #6
File: main.py  Project: XStargate/MoA_prediction
import os

import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold

# Config, seed_everything, rankGauss, _pca, _pca_select, fe_cluster_all,
# process and train_test are project-local


def main():

    seed_everything(seed_value=42)
    cfg = Config()

    data_dir = '../../data'
    save_path = './'
    load_path = './'
    runty = 'traineval'
    assert runty in ('traineval', 'eval'), \
        "Run type is wrong. Should be 'traineval' or 'eval'"

    train_features = pd.read_csv(os.path.join(data_dir, 'train_features.csv'))
    train_targets_scored = pd.read_csv(
        os.path.join(data_dir, 'train_targets_scored.csv'))
    train_targets_nonscored = pd.read_csv(
        os.path.join(data_dir, 'train_targets_nonscored.csv'))
    test_features = pd.read_csv(os.path.join(data_dir, 'test_features.csv'))
    submission = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))

    train_features2 = train_features.copy()
    test_features2 = test_features.copy()

    if (runty == 'traineval'):
        test_features_private = test_features.copy()
    elif (runty == 'eval'):
        test_features_private = pd.read_csv(
            os.path.join(data_dir, 'test_features_private_fake.csv'))

    test_features_private2 = test_features_private.copy()

    train_features, test_features, test_features_private =  \
        rankGauss(train_features=train_features, test_features=test_features,
                  test_features_p=test_features_private, runty=runty)

    train_features, test_features, test_features_private, train_pca, test_pca, test_pca_p =    \
        _pca(train_features=train_features, test_features=test_features,
             runty=runty, test_features_private=test_features_private,
             ncomp_g=cfg.ncomp_g, ncomp_c=cfg.ncomp_c)

    train_features, test_features, test_features_private =   \
        _pca_select(train_features, test_features, test_features_private)

    train_features, test_features, test_features_private =   \
        fe_cluster_all(train_features=train_features, test_features=test_features,
                       test_features_private=test_features_private,
                       train_features2=train_features2, test_features2=test_features2,
                       test_features_private2=test_features_private2,
                       train_pca=train_pca, test_pca=test_pca, test_pca_p=test_pca_p)

    if (runty == 'traineval'):
        train, test, target = process(train_features, test_features,
                                      train_targets_scored)
    elif (runty == 'eval'):
        train, test, target = process(train_features, test_features_private,
                                      train_targets_scored)

    folds = train.copy()

    target_cols = target.drop('sig_id', axis=1).columns.values.tolist()

    oof = np.zeros((len(train), len(target_cols)))
    predictions = np.zeros((len(test), len(target_cols)))

    for seed in cfg.seeds:
        mskf = MultilabelStratifiedKFold(n_splits=cfg.nfolds,
                                         shuffle=True,
                                         random_state=seed)
        for fold, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)):
            folds.loc[v_idx, 'kfold'] = int(fold)
        folds['kfold'] = folds['kfold'].astype(int)

        trte = train_test(folds,
                          test,
                          target,
                          save_path,
                          load_path,
                          runty=runty)

        if (runty == 'train'):
            oof_ = trte.run_k_fold(seed)
            oof += oof_ / len(cfg.seeds)
        elif (runty == 'eval'):
            predictions_ = trte.run_k_fold(seed)
            predictions += predictions_ / len(cfg.seeds)
        elif (runty == 'traineval'):
            oof_, predictions_ = trte.run_k_fold(seed)
            oof += oof_ / len(cfg.seeds)
            predictions += predictions_ / len(cfg.seeds)

    if (runty == 'train'):
        train[target_cols] = oof
        valid_results = train_targets_scored.drop(columns=target_cols).merge(
            train[['sig_id'] + target_cols], on='sig_id', how='left').fillna(0)

        y_true = train_targets_scored[target_cols].values
        y_pred = valid_results[target_cols].values

        score = 0
        for i in range(len(target_cols)):
            score_ = log_loss(y_true[:, i], y_pred[:, i])
            score += score_ / (target.shape[1] - 1)

        print("CV log_loss: ", score)

    elif (runty == 'eval'):
        test[target_cols] = predictions

        sub = submission.drop(columns=target_cols).merge(test[['sig_id'] +
                                                              target_cols],
                                                         on='sig_id',
                                                         how='left').fillna(0)

        sub.to_csv('submission.csv', index=False)

    elif (runty == 'traineval'):
        train[target_cols] = oof
        valid_results = train_targets_scored.drop(columns=target_cols).merge(
            train[['sig_id'] + target_cols], on='sig_id', how='left').fillna(0)

        y_true = train_targets_scored[target_cols].values
        y_pred = valid_results[target_cols].values

        score = 0
        for i in range(len(target_cols)):
            score_ = log_loss(y_true[:, i], y_pred[:, i])
            score += score_ / (target.shape[1] - 1)

        print("CV log_loss: ", score)

        test[target_cols] = predictions

        sub = submission.drop(columns=target_cols).merge(test[['sig_id'] +
                                                              target_cols],
                                                         on='sig_id',
                                                         how='left').fillna(0)

        sub.to_csv('submission.csv', index=False)
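
The CV score above sums a per-column binary log loss and divides by target.shape[1] - 1; the minus one is there because target still carries the sig_id column, so the normalizer equals the number of target columns. A compact equivalent, assuming y_true and y_pred are aligned 2-D arrays:

import numpy as np
from sklearn.metrics import log_loss

def mean_column_log_loss(y_true, y_pred):
    # average binary log loss over target columns, matching the loop above;
    # labels=[0, 1] guards against columns that contain a single class
    n_cols = y_true.shape[1]
    return sum(log_loss(y_true[:, i], y_pred[:, i], labels=[0, 1])
               for i in range(n_cols)) / n_cols

# tiny usage example with random data
rng = np.random.default_rng(0)
y_true = rng.integers(0, 2, size=(8, 3))
y_pred = rng.random(size=(8, 3))
print(mean_column_log_loss(y_true, y_pred))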
Code example #7
    num_layers = 2

    data, [max_price, min_price] = feature_add(train_data)
    dataset = stockDataset(data, seq_len=seq_len, label_idx=0)

    in_features = len(data[0])

    model = Net(in_features=in_features, hidden_dim=hidden_dim, n_classes=1, num_layers=num_layers, drop_prob=drop_prob).to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=step_size, gamma=0.1)
    criterion = nn.MSELoss(reduction='mean')

    dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True, num_workers=0)

    model = train_test(model=model, optimizer=optimizer, criterion=criterion, scheduler=scheduler,
                       train_dataloader=dataloader, num_epochs=num_epochs, device=device)

    test_data = pd.read_csv(f'data/{args.testing}', header=None, names=['open', 'high', 'low', 'close'])
    test_input = ((train_data.iloc[-seq_len:].to_numpy() - min_price) / (max_price - min_price)).tolist()

    # initial position
    model.eval()
    with torch.no_grad():
        pred = model(torch.tensor(test_input, dtype=torch.float).to(device).unsqueeze(dim=0))

    if ((pred.item() - test_input[-5][0]) / test_input[-5][0]) >= 0.014:
        hold_position = 0
    elif ((pred.item() - test_input[-5][0]) / test_input[-5][0]) <= -0.0115:
        hold_position = 1
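
stockDataset above turns the feature matrix into sliding windows of length seq_len, with column label_idx of the next step as the regression target. A minimal sketch of such a dataset (the class name and shapes are assumptions, not the project's code):

import torch
from torch.utils.data import Dataset

class WindowDataset(Dataset):
    # hypothetical: yields (window, next-step label) pairs from a 2-D feature list
    def __init__(self, data, seq_len, label_idx=0):
        self.data = torch.tensor(data, dtype=torch.float)
        self.seq_len = seq_len
        self.label_idx = label_idx

    def __len__(self):
        return len(self.data) - self.seq_len

    def __getitem__(self, i):
        window = self.data[i:i + self.seq_len]
        label = self.data[i + self.seq_len, self.label_idx]
        return window, label

# usage sketch: ds = WindowDataset(data, seq_len=30); loader = DataLoader(ds, batch_size=64)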
Code example #8
import os

import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold

# Config, seed_everything, rankGauss, process and train_test are project-local


def main():

    seed_everything(seed_value=42)
    cfg = Config()

    data_dir = '/kaggle/input/lish-moa'
    save_path = './'
    load_path = '../input/model-pytorch-rankgauss'
    runty = 'eval'

    train_features = pd.read_csv(os.path.join(data_dir, 'train_features.csv'))
    train_targets_scored = pd.read_csv(
        os.path.join(data_dir, 'train_targets_scored.csv'))
    train_targets_nonscored = pd.read_csv(
        os.path.join(data_dir, 'train_targets_nonscored.csv'))
    test_features = pd.read_csv(os.path.join(data_dir, 'test_features.csv'))
    submission = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))

    train_features, test_features = rankGauss(train_features=train_features,
                                              test_features=test_features)

    train, test, targets_scored =   \
        process(train_features=train_features, test_features=test_features,
                train_targets_scored=train_targets_scored,
                train_targets_nonscored=train_targets_nonscored,
                runty=runty, save_path=save_path, load_path=load_path)

    target = targets_scored
    folds = train.copy()

    target_cols = target.drop('sig_id', axis=1).columns.values.tolist()

    oof = np.zeros((len(train), len(target_cols)))
    predictions = np.zeros((len(test), len(target_cols)))

    for seed in cfg.seeds:
        mskf = MultilabelStratifiedKFold(n_splits=cfg.nfolds,
                                         shuffle=True,
                                         random_state=seed)
        for fold, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)):
            folds.loc[v_idx, 'kfold'] = int(fold)
        folds['kfold'] = folds['kfold'].astype(int)

        trte = train_test(folds,
                          test,
                          target,
                          save_path,
                          load_path,
                          runty=runty)

        if (runty == 'train'):
            oof_ = trte.run_k_fold(seed)
            oof += oof_ / len(cfg.seeds)
        elif (runty == 'eval'):
            predictions_ = trte.run_k_fold(seed)
            predictions += predictions_ / len(cfg.seeds)
        elif (runty == 'traineval'):
            oof_, predictions_ = trte.run_k_fold(seed)
            oof += oof_ / len(cfg.seeds)
            predictions += predictions_ / len(cfg.seeds)

    if (runty == 'train'):
        train[target_cols] = oof
        valid_results = train_targets_scored.drop(columns=target_cols).merge(
            train[['sig_id'] + target_cols], on='sig_id', how='left').fillna(0)

        y_true = train_targets_scored[target_cols].values
        y_pred = valid_results[target_cols].values

        score = 0
        for i in range(len(target_cols)):
            score_ = log_loss(y_true[:, i], y_pred[:, i])
            score += score_ / (target.shape[1] - 1)

        print("CV log_loss: ", score)

    elif (runty == 'eval'):
        test[target_cols] = predictions

        sub = submission.drop(columns=target_cols).merge(test[['sig_id'] +
                                                              target_cols],
                                                         on='sig_id',
                                                         how='left').fillna(0)

        sub.to_csv('submission_rankgauss.csv', index=False)

    elif (runty == 'traineval'):
        train[target_cols] = oof
        valid_results = train_targets_scored.drop(columns=target_cols).merge(
            train[['sig_id'] + target_cols], on='sig_id', how='left').fillna(0)

        y_true = train_targets_scored[target_cols].values
        y_pred = valid_results[target_cols].values

        score = 0
        for i in range(len(target_cols)):
            score_ = log_loss(y_true[:, i], y_pred[:, i])
            score += score_ / (target.shape[1] - 1)

        print("CV log_loss: ", score)

        test[target_cols] = predictions

        sub = submission.drop(columns=target_cols).merge(test[['sig_id'] +
                                                              target_cols],
                                                         on='sig_id',
                                                         how='left').fillna(0)

        sub.to_csv('submission.csv', index=False)
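
rankGauss in these examples maps each feature's empirical distribution onto a Gaussian. A common approximation, assuming plain numeric feature columns as in the MoA data, is scikit-learn's QuantileTransformer with a normal output distribution (a sketch, not the project's exact implementation):

from sklearn.preprocessing import QuantileTransformer

def rank_gauss(train_df, test_df, cols):
    # fit a rank-based Gaussian transform on train, then apply it to both frames
    qt = QuantileTransformer(n_quantiles=100, output_distribution='normal', random_state=0)
    train_df = train_df.copy()
    test_df = test_df.copy()
    train_df[cols] = qt.fit_transform(train_df[cols])
    test_df[cols] = qt.transform(test_df[cols])
    return train_df, test_df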