Example no. 1
def main(cfg):
    logger.info(cfg.pretty())
    logger.info(os.getcwd())
    logger.info(hydra.utils.get_original_cwd())
    utils.seed_everything()

    if cfg.debug:
        logger.info("running debug mode")
        EPOCH = 1
    else:
        EPOCH = cfg.epoch

    df = pd.read_csv(utils.DATA_DIR / cfg.train_csv)
    # remove row because XC195038.mp3 cannot be loaded
    df = df.drop(df[df.filename == "XC195038.mp3"].index)
    df = df.drop(
        df[(df.filename == "XC575512.mp3") & (df.ebird_code == "swathr")].index
    )
    df = df.drop(
        df[(df.filename == "XC433319.mp3") & (df.ebird_code == "aldfly")].index
    )
    df = df.drop(
        df[(df.filename == "XC471618.mp3") & (df.ebird_code == "redcro")].index
    )
    train_audio_dir = utils.DATA_DIR / cfg.train_audio_dir
    print(df.shape)

    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

    df["fold"] = -1
    for fold_id, (train_index, val_index) in enumerate(skf.split(df, df["ebird_code"])):
        df.iloc[val_index, -1] = fold_id

    # check the class proportion across folds
    fold_proportion = pd.pivot_table(
        df, index="ebird_code", columns="fold", values="xc_id", aggfunc=len
    )
    print(fold_proportion.shape)

    use_fold = 0
    if cfg.gpu:
        device = torch.device("cuda:0")
    else:
        device = torch.device("cpu")

    warnings.simplefilter("ignore")

    # loaders
    logging.info(f"fold: {use_fold}")
    loaders = {
        "train": data.DataLoader(
            # PANNsDataset(train_file_list, None),
            pann_utils.PANNsDataset(
                df=df.query("fold != @use_fold").reset_index(), datadir=train_audio_dir,
            ),
            shuffle=True,
            drop_last=True,
            **cfg.dataloader,
        ),
        "valid": data.DataLoader(
            # PANNsDataset(val_file_list, None),
            pann_utils.PANNsDataset(
                df=df.query("fold == @use_fold").reset_index(), datadir=train_audio_dir,
            ),
            shuffle=False,
            drop_last=False,
            **cfg.dataloader,
        ),
    }

    # model
    model_config = cfg.model
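    # 527 = the AudioSet class count the PANN backbones were pretrained with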
    model_config["classes_num"] = 527
    model = pann_utils.get_model(model_config)

    if cfg.multi and cfg.gpu:
        logger.info("Using pararell gpu")
        model = nn.DataParallel(model)

    model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=0.001)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
    criterion = pann_utils.PANNsLoss().to(device)
    callbacks = [
        pann_utils.F1Callback(input_key="targets", output_key="logits", prefix="f1"),
        pann_utils.mAPCallback(input_key="targets", output_key="logits", prefix="mAP"),
        CheckpointCallback(save_n_best=0),
    ]

    runner = SupervisedRunner(
        device=device, input_key="waveform", input_target_key="targets"
    )
    runner.train(
        model=model,
        criterion=criterion,
        loaders=loaders,
        optimizer=optimizer,
        scheduler=scheduler,
        num_epochs=EPOCH,
        verbose=True,
        logdir=f"fold0",
        callbacks=callbacks,
        main_metric="epoch_f1",
        minimize_metric=False,
    )

    logging.info("train all...")
    loaders = {
        "train": data.DataLoader(
            # PANNsDataset(train_file_list, None),
            pann_utils.PANNsDataset(df=df.reset_index(), datadir=train_audio_dir,),
            shuffle=True,
            drop_last=True,
            **cfg.dataloader,
        ),
    }

    # model
    model_config = cfg.model
    model_config["classes_num"] = 527
    model = pann_utils.get_model(model_config)

    if cfg.multi and cfg.gpu:
        logger.info("Using pararell gpu")
        model = nn.DataParallel(model)

    model.to(device)

    optimizer = optim.Adam(model.parameters(), lr=0.001)
    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=10)
    criterion = pann_utils.PANNsLoss().to(device)
    callbacks = [
        pann_utils.F1Callback(input_key="targets", output_key="logits", prefix="f1"),
        pann_utils.mAPCallback(input_key="targets", output_key="logits", prefix="mAP"),
        CheckpointCallback(save_n_best=0),
    ]

    runner = SupervisedRunner(
        device=device, input_key="waveform", input_target_key="targets"
    )
    runner.train(
        model=model,
        criterion=criterion,
        loaders=loaders,
        optimizer=optimizer,
        scheduler=scheduler,
        num_epochs=EPOCH,
        verbose=True,
        logdir=f"all",
        callbacks=callbacks,
        main_metric="epoch_f1",
        minimize_metric=False,
    )

    logger.info(os.getcwd())
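
None of these examples defines seed_everything itself; for reference, a minimal sketch of a typical implementation is shown below (assuming PyTorch and NumPy; the exact helper in each repository may differ):

import os
import random

import numpy as np
import torch


def seed_everything(seed: int = 42) -> None:
    """Seed Python, NumPy and PyTorch RNGs so runs are (mostly) reproducible."""
    random.seed(seed)
    os.environ["PYTHONHASHSEED"] = str(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op when CUDA is unavailable
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
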
Example no. 2
import albumentations as A
import cv2
import torch

from albumentations.pytorch import ToTensorV2
from utils import seed_everything

DATASET = 'PASCAL_VOC'
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
seed_everything()  # If you want deterministic behavior
NUM_WORKERS = 4
BATCH_SIZE = 32
IMAGE_SIZE = 416
NUM_CLASSES = 20
LEARNING_RATE = 1e-5
WEIGHT_DECAY = 0  #1e-4
NUM_EPOCHS = 100
CONF_THRESHOLD = 0.6
MAP_IOU_THRESH = 0.5
NMS_IOU_THRESH = 0.45
S = [IMAGE_SIZE // 32, IMAGE_SIZE // 16, IMAGE_SIZE // 8]
PIN_MEMORY = True
LOAD_MODEL = False
SAVE_MODEL = False
CHECKPOINT_FILE = "checkpoint.pth.tar"
IMG_DIR = DATASET + "/images/"
LABEL_DIR = DATASET + "/labels/"

ANCHORS = [
    [(0.28, 0.22), (0.38, 0.48), (0.9, 0.78)],
    [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],
Example no. 3
import os
import gym
import numpy as np
import time
from waste_env import WasteEnv
import pickle
from utils import get_mapping, item_conf, seed_everything
import argparse
seed_everything(42)  #We love deterministic algorithms


def load_Q(fname):
    with open(fname, 'rb') as handle:
        return pickle.load(handle)


def greedy(Q, s):
    return np.argmax(Q[s])
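

def epsilon_greedy(Q, s, epsilon, n_actions):
    # Not part of the original script: a sketch of the epsilon-greedy counterpart
    # to greedy() above, assuming epsilon > 0 was used during training.
    if np.random.random() < epsilon:
        return np.random.randint(n_actions)
    return np.argmax(Q[s])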


def test_agent(Q, env, n_tests, delay=1):
    MAPPING = get_mapping()
    for test in range(n_tests):
        print(f"\n>>>>>>>>>>>> [START] Test #{test}\n")
        s = env.reset()
        done = False
        epsilon = 0
        total_profit = 0
        total_reward = 0
        while True:
            time.sleep(delay / 5)
Example no. 4
            [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
            'weight_decay':
            0.0
        },
    ]
    optimizer = AdamW(optimizer_parameters, lr=3e-5)
    scheduler = get_linear_schedule_with_warmup(
        optimizer, num_warmup_steps=0, num_training_steps=num_train_steps)

    es = utils.EarlyStopping(patience=2, mode="max")
    print(f"Training is Starting for fold={fold}")

    for epoch in range(config.EPOCHS):
        engine.train_fn(train_data_loader,
                        model,
                        optimizer,
                        device,
                        scheduler=scheduler)
        jaccard = engine.eval_fn(valid_data_loader, model, device)
        #print(f"Jaccard Score = {jaccard}")
        es(jaccard, model, model_path=f"model_{fold}.bin")
        if es.early_stop:
            print("Early stopping")
            break


if __name__ == '__main__':
    seed_everything(config.SEED)
    utils.add_folds()
    for i in range(5):
        run(i)
Example no. 5
parser = argparse.ArgumentParser()

parser.add_argument("--model_name", type=str, default="b1")
parser.add_argument("--num_folds", type=int, default=5)
parser.add_argument("--num_epochs", type=int, default=3)
parser.add_argument("--batch_size", type=int, default=8)
parser.add_argument("--lr", type=float, default=1e-3)

parser.add_argument("--meta_train", type=str, default="../data/train.csv")
parser.add_argument("--data", type=str, default="../data/train")
parser.add_argument("--external_data", type=str, default="../data/img/")

parser.add_argument("--seed", type=int, default=1512)

args = parser.parse_args()
seed_everything(args.seed)

logging.basicConfig(
    filename=f"../logs/efficient_net_{args.model_name}.txt",
    filemode='w',
    format='%(asctime)s - %(levelname)s - %(name)s -   %(message)s',
    datefmt='%m/%d/%Y %H:%M:%S',
    level=logging.INFO)
logger = logging.getLogger(__name__)

console = logging.StreamHandler()
console.setLevel(logging.INFO)
logging.getLogger().addHandler(console)

logging.info("Experiment config")
logging.info(args.__dict__)
Example no. 6
import pickle

import sentencepiece as spm
import torch
import torch.optim as optim
from torch.utils.data import DataLoader

from config import Config
from nn import Seq2SeqModel, LabelSmoothing, get_optimizer
from utils import DialogDataset, train, seed_everything

if __name__ == '__main__':

    seed_everything(Config.seed)

    start_epoch = 0

    model = Seq2SeqModel(bert_model_dir=Config.bert_path).cuda()
    sp = spm.SentencePieceProcessor()
    sp.Load(Config.sp_path)

    criterion = LabelSmoothing(len(sp), pad_id=Config.pad_id, smoothing=Config.smoothing)
    _opt = optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9)
    optimizer = get_optimizer(_opt, factor=Config.factor, warmup=Config.warmup)

    if Config.load:
        save_obj = torch.load(f'{Config.output_dir}/{Config.fn}.pth')
        model.load(save_obj['model'])
        # optimizer.load(save_obj['opt'], save_obj['param'])
        # start_epoch = save_obj['epoch']
Example no. 7
import wandb

from sklearn.model_selection import StratifiedKFold, TimeSeriesSplit
from sklearn.metrics import roc_auc_score

import lightgbm as lgb
import xgboost as xgb

from catboost import CatBoostClassifier, Pool

from utils import seed_everything, print_score, plot_feature_importances, plot_roc_curve
from features import generate_label, feature_engineering

TOTAL_THRES = 300  # purchase-amount threshold
SEED = 42  # random seed
seed_everything(SEED)  # fix the seed
FEEATURE_FILE_NAME = "select"
"""
    머신러닝 모델 없이 입력인자으로 받는 year_month의 이전 달 총 구매액을 구매 확률로 예측하는 베이스라인 모델
"""


def lgbm_params():
    model_params = {
        'learning_rate': 0.024,
        "objective": "binary",  # 이진 분류
        "boosting_type": "gbdt",  # dart 오래걸림 'rf', 'gbdt'
        "metric": "auc",  # 평가 지표 설정
        'num_leaves': 8,
        'max_bin': 198,
        'min_data_in_leaf': 28,
Example no. 8
    model.eval()

    valid_dataset = dataset.TweetDataset(
        tweets=df_valid.text.values,
        sentiments=df_valid.sentiment.values,
        selected_texts=df_valid.selected_text.values)

    valid_data_loader = torch.utils.data.DataLoader(
        valid_dataset,
        batch_size=config.VALID_BATCH_SIZE,
        num_workers=4,
        shuffle=False)

    jaccard = eval_fn(valid_data_loader, model, device)

    return jaccard


if __name__ == '__main__':
    utils.seed_everything(config.SEED)

    fold_scores = []
    for i in range(config.N_FOLDS):
        fold_score = run(i)
        fold_scores.append(fold_score)

    for i in range(config.N_FOLDS):
        print(f'Fold={i}, Jaccard = {fold_scores[i]}')
    print(f'Mean = {np.mean(fold_scores)}')
    print(f'Std = {np.std(fold_scores)}')
Example no. 9
import numpy as np
import torch
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
from utils import seed_everything
from dataset import ImageDataset, get_transforms
from trainer import CycleGAN
from visualize import unnorm
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
''' Seeding all random parameters for reproducibility '''
seed_everything(42)
'''Default configuration for training CycleGAN'''
size = 256
batch_size = 1
mean = [0.5, 0.5, 0.5]
std = [0.5, 0.5, 0.5]
'''Reading data and creating Dataloader'''
monet_dir = 'monet_jpg/'
photo_dir = 'photo_jpg/'
transform = get_transforms(size, mean, std)
img_dataset = ImageDataset(monet_dir=monet_dir,
                           photo_dir=photo_dir,
                           transform=transform)
img_dl = DataLoader(img_dataset, batch_size=batch_size, pin_memory=True)
'''Fixed set of monet and photos for visualizing them throughout the training process'''
idx = np.random.randint(0, len(img_dataset), 5)
fixed_photo = torch.cat([img_dataset[i][0].unsqueeze(0) for i in idx], 0)
fixed_monet = torch.cat([img_dataset[i][1].unsqueeze(0) for i in idx], 0)
''' Creating an instance of the trainer class '''
gan = CycleGAN(3, 3, 100, device, (fixed_photo, fixed_monet), decay_epoch=50)
gan.train(img_dl)
Example no. 10
def main():
    t = Timer()
    seed_everything(cfg.common.seed)

    logger_path.mkdir(exist_ok=True)
    logging.basicConfig(filename=logger_path / 'train.log',
                        level=logging.DEBUG)

    dh.save(logger_path / 'config.yml', cfg)

    with t.timer('load data'):
        if cfg.common.debug:
            train_df = pd.read_csv(const.INPUT_DATA_DIR / 'train.csv',
                                   dtype=const.DTYPE,
                                   nrows=5_000_000)
        else:
            train_df = pd.read_csv(const.INPUT_DATA_DIR / 'train.csv',
                                   dtype=const.DTYPE)

    with t.timer('preprocess'):
        questions_df = pd.read_csv(const.INPUT_DATA_DIR / 'questions.csv')
        q2p = dict(questions_df[['question_id', 'part']].values)
        train_df['part'] = train_df['content_id'].map(q2p)

        train_df['prior_question_had_explanation'] = train_df[
            'prior_question_had_explanation'].astype(float)

        te_content_df = pd.read_feather(
            '../features/te_content_id_by_answered_correctly_train.feather')
        avg_u_target_df = pd.read_feather(
            '../features/answered_correctly_avg_u_train.feather')

        if cfg.common.debug:
            te_content_df = te_content_df.iloc[:5_000_000]
            avg_u_target_df = avg_u_target_df.iloc[:5_000_000]

        train_df['te_content_id_by_answered_correctly'] = te_content_df[
            'te_content_id_by_answered_correctly']
        train_df['answered_correctly_avg_u'] = avg_u_target_df[
            'answered_correctly_avg_u']

    with t.timer('make folds'):
        valid_idx = np.load('../data/processed/cv1_valid_v2.npy')
        if cfg.common.debug:
            valid_idx = valid_idx[np.where(valid_idx < len(train_df))]

        fold_df = pd.DataFrame(index=range(len(train_df)))
        fold_df['fold_0'] = 0
        fold_df.loc[valid_idx, 'fold_0'] += 1

    with t.timer('drop index'):
        if cfg.common.drop:
            drop_idx = factory.get_drop_idx(cfg.common.drop)
            if cfg.common.debug:
                drop_idx = drop_idx[np.where(drop_idx < len(train_df))]
            train_df = train_df.drop(drop_idx, axis=0).reset_index(drop=True)
            fold_df = fold_df.drop(drop_idx, axis=0).reset_index(drop=True)

        train_df['step'] = train_df.groupby(
            'user_id').cumcount() // cfg.data.train.step_size
        train_df['user_step_id'] = train_df['user_id'].astype(
            str) + '__' + train_df['step'].astype(str)

    with t.timer('train model'):
        trainer = NNTrainer(run_name, fold_df, cfg)
        cv = trainer.train(train_df, target_df=train_df[const.TARGET_COLS[0]])
        trainer.save()

        run_name_cv = f'{run_name}_{cv:.4f}'
        logger_path.rename(f'../logs/{run_name_cv}')
        logging.disable(logging.FATAL)

    with t.timer('kaggle api'):
        kaggle = Kaggle(cfg, run_name_cv)
        if cfg.common.kaggle.data:
            kaggle.create_dataset()
        if cfg.common.kaggle.notebook:
            kaggle.push_notebook()

    with t.timer('notify'):
        process_minutes = t.get_processing_time()
        notificator = Notificator(run_name=run_name_cv,
                                  model_name=cfg.model.backbone,
                                  cv=round(cv, 4),
                                  process_time=round(process_minutes, 2),
                                  comment=comment,
                                  params=notify_params)
        notificator.send_line()
        notificator.send_notion()
        # notificator.send_slack()

    with t.timer('git'):
        git = Git(run_name=run_name_cv)
        git.push()
        git.save_hash()
Example no. 11
def main():

    ########################################################################
    ######################## training parameters ###########################
    ########################################################################

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        type=str,
                        default='ImageNet',
                        metavar='N',
                        help='dataset to run experiments on')
    parser.add_argument(
        '--batch_size',
        type=int,
        default=256,
        metavar='N',
        help=
        'input batch size for training (default: 256; note that batch_size 64 gives worse performance for imagenet, so don\'t change this. )'
    )
    parser.add_argument('--exp',
                        type=str,
                        default='default',
                        metavar='N',
                        help='name of experiment')
    parser.add_argument('--epochs',
                        type=int,
                        default=30,
                        metavar='N',
                        help='number of epochs to train (default: 30)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.2,
                        metavar='LR',
                        help='learning rate (default: 0.2)')
    parser.add_argument('--weight_decay',
                        type=float,
                        default=5 * 1e-4,
                        help='weight_decay (default: 5e-4)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--step_size',
                        type=float,
                        default=10,
                        metavar='M',
                        help='StepLR scheduler step size (default: 10)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.1,
                        metavar='M',
                        help='StepLR scheduler decay factor (default: 0.1)')
    parser.add_argument('--load_model',
                        type=str,
                        default=None,
                        help='model to initialise from')
    parser.add_argument('--caffe',
                        action='store_true',
                        default=False,
                        help='caffe pretrained model')
    parser.add_argument('--test',
                        action='store_true',
                        default=False,
                        help='run in test mode')
    parser.add_argument(
        '--ensemble_inference',
        action='store_true',
        default=True,
        help='run in ensemble inference mode'
    )  # testing always runs in ensemble inference mode anyway
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=5021,
                        metavar='S',
                        help='random seed (default: 5021)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument(
        '--stopping_criterion',
        type=int,
        default=15,
        metavar='N',
    )
    parser.add_argument(
        '--low_threshold',
        type=int,
        default=0,
        metavar='N',
    )
    parser.add_argument(
        '--high_threshold',
        type=int,
        default=100000,
        metavar='N',
    )
    parser.add_argument(
        '--open_ratio',
        type=int,
        default=1,
        help='ratio of closed_set to open_set data',
    )
    parser.add_argument(
        '--picker',
        type=str,
        default='generalist',
        help='dataloader or model picker - experts | generalist : experts uses '
        'manyshot, medianshot, fewshot partitioning; generalist uses the '
        'generalist model',
    )
    parser.add_argument(
        '--num_learnable',
        type=int,
        default=-1,
        help=
        'number of learnable layers : -1 ( all ) | 1 ( only classifier ) | 2 ( classifier and last fc ) | 3 - 6 ( classifier, fc + $ind - 2$ resnet super-blocks ) '
    )
    parser.add_argument('--scheduler',
                        type=str,
                        default='stepLR',
                        help=' stepLR | cosine lr scheduler')
    parser.add_argument('--max_epochs',
                        type=int,
                        default=None,
                        help='max number of epochs, for cosine lr scheduler')

    args = parser.parse_args()

    print("\n==================Options=================")
    pprint(vars(args), indent=4)
    print("==========================================\n")

    use_cuda = not args.no_cuda and torch.cuda.is_available()

    # make everything deterministic
    if (args.seed is not None):
        print('Seeding everything with seed {}.'.format(args.seed))
        seed_everything(args.seed)
    else:
        print('Note : Seed is random.')

    device = torch.device("cuda" if use_cuda else "cpu")

    exp_dir = os.path.join('checkpoint', args.exp)
    if not os.path.isdir(exp_dir):
        os.makedirs(exp_dir)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # batch size settings : note that these are important for memory and performance reasons
    if (args.dataset.lower() == 'imagenet' and args.test):
        args.batch_size = 64
    elif (args.dataset.lower() == 'imagenet' and not (args.test)):
        args.batch_size = 256
    elif (args.dataset.lower() == 'places' and not (args.test)):
        args.batch_size = 32
    elif (args.dataset.lower() == 'places' and args.test):
        args.batch_size = 8

    ########################################################################
    ######################## load data and pre-trained models ##############
    ########################################################################

    print('Loading train loader.')
    train_loader = torch.utils.data.DataLoader(Threshold_Dataset(
        root=data_root[args.dataset],
        orig_txt='./data/{}_LT/{}_LT_train.txt'.format(args.dataset,
                                                       args.dataset),
        txt='./data/{}_LT/{}_LT_train.txt'.format(args.dataset, args.dataset),
        low_threshold=args.low_threshold,
        high_threshold=args.high_threshold,
        open_ratio=args.open_ratio,
        transform=data_transforms['train'],
        picker=args.picker),
                                               batch_size=args.batch_size,
                                               shuffle=True,
                                               **kwargs)
    print('Loading val loader.')
    val_loader = torch.utils.data.DataLoader(Threshold_Dataset(
        root=data_root[args.dataset],
        orig_txt='./data/{}_LT/{}_LT_train.txt'.format(args.dataset,
                                                       args.dataset),
        txt='./data/{}_LT/{}_LT_val.txt'.format(args.dataset, args.dataset),
        low_threshold=args.low_threshold,
        high_threshold=args.high_threshold,
        open_ratio=1,
        transform=data_transforms['val'],
        picker=args.picker),
                                             batch_size=args.batch_size,
                                             shuffle=True,
                                             **kwargs)

    num_classes = train_loader.dataset.num_classes + 1 - int(
        args.picker == 'generalist'
    )  # add 1 for the open/dustbin class if not generalist model
    if (args.dataset.lower() == 'imagenet'):
        feature_extractor = create_model_resnet10().to(
            device)  # use this for imagenet
        args.lr = 1e-1
    else:
        feature_extractor = create_model_resnet152(caffe=True).to(
            device
        )  # use this for places. pass caffe=true to load pretrained imagenet model
        args.lr = 1e-2

    print('Learning rate : {:.4f}'.format(args.lr))
    classifier = DotProduct_Classifier(num_classes=num_classes,
                                       feat_dim=512).to(device)
    optimizer = torch.optim.SGD(chain(feature_extractor.parameters(),
                                      classifier.parameters()),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    if (args.scheduler == 'stepLR'):
        scheduler = optim.lr_scheduler.StepLR(optimizer,
                                              step_size=args.step_size,
                                              gamma=args.gamma)
        print('Using StepLR scheduler with params, stepsize : {}, gamma : {}'.
              format(args.step_size, args.gamma))
    elif (args.scheduler == 'cosine'):
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                         T_max=args.max_epochs)
        print(
            'Using CosineAnnealingLR scheduler with params, T_max : {}'.format(
                args.max_epochs))
    else:
        raise Exception('Invalid scheduler argument.')

    # load pretrained model
    if (args.load_model is not None):

        if (not args.caffe):

            pretrained_model = torch.load(args.load_model)

            weights_feat = pretrained_model['state_dict_best']['feat_model']
            weights_feat = {
                k: weights_feat['module.' + k] if 'module.' +
                k in weights_feat else weights_feat[k]
                for k in feature_extractor.state_dict()
            }
            feature_extractor.load_state_dict(
                weights_feat)  # loading feature extractor weights

            weights_class = pretrained_model['state_dict_best']['classifier']
            weights_class = {
                k: weights_class['module.' + k] if 'module.' +
                k in weights_class else weights_class[k]
                for k in classifier.state_dict()
            }

            if (classifier.state_dict()['fc.weight'].shape ==
                    weights_class['fc.weight'].shape):
                classifier.load_state_dict(
                    weights_class
                )  # loading classifier weights if classifiers match
            else:
                print(
                    'Classifiers of pretrained model and current model are different with dimensions : ',
                    classifier.state_dict()['fc.weight'].shape,
                    weights_class['fc.weight'].shape)

            print(
                'Loaded pretrained model on entire dataset from epoch : {:d} with acc : {:.4f}'
                .format(pretrained_model['best_epoch'],
                        pretrained_model['best_acc']))
        else:

            weights_feat = torch.load(args.load_model)
            weights_feat = {
                k: weights_feat[k]
                if k in weights_feat else feature_extractor.state_dict()[k]
                for k in feature_extractor.state_dict()
            }
            feature_extractor.load_state_dict(
                weights_feat)  # loading feature extractor weights
            print('Loaded imagenet pretrained model from Caffe.')

    ########################################################################
    ######################## set learnable layers ##########################
    ########################################################################

    if (args.num_learnable == -1):
        print('Learning feature extractor and classifier.')

    elif (args.num_learnable >= 1 and args.num_learnable <= 6):

        if (args.num_learnable == 1):

            set_weights('feature_extractor', feature_extractor, False)
            set_weights('classifier', classifier, True)

        elif (args.num_learnable == 2):

            print('Setting feature extractor weights.')
            for ind, (name,
                      layer) in enumerate(feature_extractor.named_children()):
                if (ind == 9):
                    set_weights(name, layer, True)
                else:
                    set_weights(name, layer, False)
            set_weights('classifier', classifier, True)

        else:

            print('Setting feature extractor weights.')
            for ind, (name,
                      layer) in enumerate(feature_extractor.named_children()):
                if (ind >= 10 - args.num_learnable):
                    set_weights(name, layer, True)
                else:
                    set_weights(name, layer, False)
            set_weights('classifier', classifier, True)

    else:
        raise Exception('Invalid num_learnable layers : {}'.format(
            args.num_learnable))

    ########################################################################
    ######################## training with early stopping ##################
    ########################################################################
    if (not args.test):

        results = vars(args)
        results['train_losses'] = []
        results['train_accuracies'] = []
        results['test_losses'] = []
        results['test_accuracies'] = []
        best_acc, best_epoch = -0.1, 0

        epoch = 1
        while (True):

            sys.stdout.flush()
            train_loss, train_err = train(args, feature_extractor, classifier,
                                          device, train_loader, optimizer,
                                          scheduler, epoch)
            test_loss, test_err = test(args, feature_extractor, classifier,
                                       device, val_loader)

            results['train_losses'].append(train_loss)
            results['test_losses'].append(test_loss)
            results['train_accuracies'].append(train_err)
            results['test_accuracies'].append(test_err)

            if (test_err > best_acc):
                best_acc = test_err
                best_epoch = epoch
                results['best_acc'], results[
                    'best_epoch'] = best_acc, best_epoch

                # save best model
                best_model_weights = {}
                best_model_weights['feat_model'] = copy.deepcopy(
                    feature_extractor.state_dict())
                best_model_weights['classifier'] = copy.deepcopy(
                    classifier.state_dict())
                model_states = {
                    'epoch': epoch,
                    'best_epoch': best_epoch,
                    'state_dict_best': best_model_weights,
                    'best_acc': best_acc,
                }
                torch.save(model_states, os.path.join(exp_dir,
                                                      "best_model.pt"))

            elif (epoch > best_epoch + args.stopping_criterion):
                print('Best model obtained. Error : ', best_acc)
                plot_curves(results, exp_dir)
                break

            elif (args.scheduler == 'cosine' and epoch == args.max_epochs):
                print('Best model obtained. Error : ', best_acc)
                plot_curves(results, exp_dir)
                break

            savepath = os.path.join(exp_dir, 'results.pickle')
            with open(savepath, 'wb') as f:
                pickle.dump(results, f)
            plot_curves(results, exp_dir)
            epoch = epoch + 1
Example no. 12
def main(base_name, SEQ_SIZE=16, BATCH_SIZE=256, HIDDEN_SIZE=1000, N_EPOCHS=30, \
         N_LAYERS=1, threshold=0.5, seq_threshold=0.5, use_gpu=False):
    """
    Function to read c3d FC7 features and train an RNN on them, evaluate on the 
    validation videos, write the predictions in a JSON, save the trained model and
    losses to pickle. 
    
    Parameters:
    ------
    
    base_name: path to the wts, losses, predictions and log files
    SEQ_SIZE: No. of frames sent to the RNN at a time
    BATCH_SIZE: Depends on GPU memory
    HIDDEN_SIZE: Size of hidden layer in the RNN
    N_EPOCHS: Training iterations (no. of times the training set is seen)
    N_LAYERS: No. of hidden layers in the RNN
    threshold and seq_threshold: threshold values during prediction
    use_gpu: True if training to be done on GPU, False for CPU
    
    """

    if not os.path.exists(base_name):
        os.makedirs(base_name)
    
    seed = 1234
    utils.seed_everything(seed)
    
    print(60*"#")
    
    # Form dataloaders 
    train_lst_main_ext = get_main_dataset_files(MAIN_DATASET)   #with extensions
    train_lst_main = [t.rsplit('.', 1)[0] for t in train_lst_main_ext]   # remove the extension
    val_lst_main_ext = get_main_dataset_files(VAL_DATASET)
    val_lst_main = [t.rsplit('.', 1)[0] for t in val_lst_main_ext]
    
    # Divide the samples files into training set, validation and test sets
    train_lst, val_lst, test_lst = utils.split_dataset_files(DATASET)
    print("SEQ_SIZE : {}".format(SEQ_SIZE))
    
    # form the names of the list of label files, should be at destination 
    train_lab = [f+".json" for f in train_lst]
    val_lab = [f+".json" for f in val_lst]
    test_lab = [f+".json" for f in test_lst]
    train_lab_main = [f+".json" for f in train_lst_main]
    val_lab_main = [f+".json" for f in val_lst_main]
    
    # get complete path lists of label files
    tr_labs = [os.path.join(LABELS, f) for f in train_lab]
    val_labs = [os.path.join(LABELS, f) for f in test_lab]
    tr_labs_main = [os.path.join(MAIN_LABELS, f) for f in train_lab_main]
    val_labs_main = [os.path.join(VAL_LABELS, f) for f in val_lab_main]
    #####################################################################
    
    sizes = [utils.getNFrames(os.path.join(DATASET, f+".avi")) for f in train_lst]
    val_sizes = [utils.getNFrames(os.path.join(DATASET, f+".avi")) for f in test_lst]
    sizes_main = [utils.getNFrames(os.path.join(MAIN_DATASET, f)) for f in train_lst_main_ext]
    val_sizes_main = [utils.getNFrames(os.path.join(VAL_DATASET, f)) for f in val_lst_main_ext]
    
    ###########################################################################
    # Merge the training highlights and main dataset variables
    train_lab.extend(train_lab_main)
    tr_labs.extend(tr_labs_main)
    sizes.extend(sizes_main)
    
    print("No. of training videos : {}".format(len(train_lst)))
    
    print("Size : {}".format(sizes))
    hlDataset = VideoDataset(tr_labs, sizes, seq_size=SEQ_SIZE, is_train_set = True)
    print(hlDataset.__len__())
    
    #####################################################################
    
    # Create a DataLoader object and sample batches of examples. 
    # These batch samples are used to extract the features from videos in parallel
    train_loader = DataLoader(dataset=hlDataset, batch_size=BATCH_SIZE, shuffle=True)
    datasets_loader = {'train': train_loader}       # can have a test loader also
    
    # read into dictionary {vidname: np array, ...}
    print("Loading features from disk...")
    features = utils.readAllPartitionFeatures(HOGFeatsPath, train_lst)
    #HOGfeatures = utils.readAllHOGfeatures(HOGfeaturesPath, train_lst)
    mainFeatures = utils.readAllPartitionFeatures(HOGMainFeatsPath, train_lst_main)
    features.update(mainFeatures)     # Merge dicts
    print(len(train_loader.dataset))
    
    ########
    
    #HOG feature output size
    INP_VEC_SIZE = features[list(features.keys())[0]].shape[-1] 
    print("INP_VEC_SIZE = ", INP_VEC_SIZE)
    
    # Creating the RNN and training
    classifier = RNNClassifier(INP_VEC_SIZE, HIDDEN_SIZE, 1, N_LAYERS, \
                               bidirectional=False, use_gpu=use_gpu)
#    classifier = LSTMModel(INP_VEC_SIZE, HIDDEN_SIZE, 1, N_LAYERS, \
#                           use_gpu=use_gpu)
    if use_gpu:
#        if torch.cuda.device_count() > 1:
#            print("Let's use", torch.cuda.device_count(), "GPUs!")
#            # Parallely run on multiple GPUs using DataParallel
#            classifier = nn.DataParallel(classifier)
        classifier.cuda(0)

    optimizer = torch.optim.Adam(classifier.parameters(), lr=0.001)
    #criterion = nn.CrossEntropyLoss()
    
    criterion = nn.BCELoss()

    step_lr_scheduler = StepLR(optimizer, step_size=10, gamma=0.1)
    start = time.time()
    
    print("Training for %d epochs..." % N_EPOCHS)
    # Training the model on the features for N_EPOCHS 
    train(features, classifier, datasets_loader, optimizer, step_lr_scheduler, \
          criterion, SEQ_SIZE, N_EPOCHS, use_gpu, base_name)
    mod_file = os.path.join(base_name, \
                "GRU_HOG_ep"+str(N_EPOCHS)+"_seq"+str(SEQ_SIZE)+"_Adam.pt")
    classifier.load_state_dict(torch.load(mod_file))    
    end = time.time()
    print("Time for training : {}".format(end-start))
    #####################################################################
    
    # Test a video or calculate the accuracy using the learned model
    print("Prediction video meta info.")
    print("Size : {}".format(val_sizes))
    hlvalDataset = VideoDataset(val_labs_main, val_sizes_main, seq_size=SEQ_SIZE, \
                                is_train_set = False)
    print(hlvalDataset.__len__())
    
    # Create a DataLoader object and sample batches of examples. 
    # These batch samples are used to extract the features from videos in parallel
    val_loader = DataLoader(dataset=hlvalDataset, batch_size=BATCH_SIZE, shuffle=False)
    #print(len(val_loader.dataset))

    classifier.eval()
    val_keys, predictions = predict(HOGValFeatsPath, val_lst_main, classifier, val_loader, \
                                    use_gpu)
    
    with open(os.path.join(base_name, "predictions_seq"+str(SEQ_SIZE)+".pkl"), "wb") as fp:
        pickle.dump(predictions, fp)
    
    with open(os.path.join(base_name, "val_keys_seq"+str(SEQ_SIZE)+".pkl"), "wb") as fp:
        pickle.dump(val_keys, fp)

    #####################################################################

    # [4949, 4369, 4455, 4317, 4452]
    #predictions = [p.cpu() for p in predictions]  # convert to CPU tensor values
    localization_dict = getScoredLocalizations(val_keys, predictions, BATCH_SIZE, \
                                         threshold, seq_threshold)

#    print localization_dict

    # Apply filtering    
    i = 60  # optimum
    filtered_shots = utils.filter_action_segments(localization_dict, epsilon=i)
    #i = 7  # 
    #filtered_shots = filter_non_action_segments(filtered_shots, epsilon=i)
    filt_shots_filename = os.path.join(base_name, "predicted_localizations_HLMainTest_th0_5_filt"\
            +str(i)+"_ep"+str(N_EPOCHS)+"_seq"+str(SEQ_SIZE)+".json")
    with open(filt_shots_filename, 'w') as fp:
        json.dump(filtered_shots, fp)
    print("Prediction file {} !!".format(filt_shots_filename))
    
    tiou =  calculate_tIoU(VAL_LABELS, filtered_shots)
    #####################################################################
    # count no. of parameters in the model
    print("#Parameters : {} ".format(utils.count_parameters(classifier)))
    print("TIoU : {}".format(tiou))
    print(60*'#')
    return tiou
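
A hypothetical driver for the function above (not in the original script); the base_name value and flags are illustrative, and the module-level constants (DATASET, MAIN_DATASET, LABELS, feature paths, and so on) must already point at valid data:

if __name__ == '__main__':
    # Illustrative values only; SEQ_SIZE and BATCH_SIZE depend on GPU memory.
    tiou = main("logs/gru_hog_seq16", SEQ_SIZE=16, BATCH_SIZE=256,
                HIDDEN_SIZE=1000, N_EPOCHS=30, N_LAYERS=1,
                threshold=0.5, seq_threshold=0.5, use_gpu=True)
    print("Final TIoU: {}".format(tiou))
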
Example no. 13
def make_lgb_oof_prediction(train,
                            y,
                            test,
                            features,
                            categorical_features='auto',
                            model_params=None,
                            folds=10):
    # fix the random seed
    seed_everything(seed)

    x_train = train[features]
    x_test = test[features]

    test_preds = np.zeros(x_test.shape[0])

    y_oof = np.zeros(x_train.shape[0])

    score = 0

    fi = pd.DataFrame()
    fi['feature'] = features

    skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=seed)

    for fold, (tr_idx, val_idx) in enumerate(skf.split(x_train, y)):
        # split the training data into train / validation parts using the fold indices
        x_tr, x_val = x_train.loc[tr_idx, features], x_train.loc[val_idx,
                                                                 features]
        y_tr, y_val = y[tr_idx], y[val_idx]

        print(
            f'fold: {fold+1}, x_tr.shape: {x_tr.shape}, x_val.shape: {x_val.shape}'
        )

        # declare LightGBM datasets
        dtrain = lgb.Dataset(x_tr, label=y_tr)
        dvalid = lgb.Dataset(x_val, label=y_val)

        # train the LightGBM model
        clf = lgb.train(
            model_params,
            dtrain,
            valid_sets=[dtrain, dvalid],  # so validation performance is tracked
            categorical_feature=categorical_features,
            verbose_eval=200)

        # predict on the validation data
        val_preds = clf.predict(x_val)

        # store predictions at the validation indices
        y_oof[val_idx] = val_preds

        # per-fold validation score
        print(f"Fold {fold + 1} | AUC: {roc_auc_score(y_val, val_preds)}")
        print('-' * 80)

        # accumulate this fold's validation score into the mean over folds
        score += roc_auc_score(y_val, val_preds) / folds

        # predict on the test data and average over folds
        test_preds += clf.predict(x_test) / folds

        # store per-fold feature importances
        fi[f'fold_{fold+1}'] = clf.feature_importance()

        del x_tr, x_val, y_tr, y_val
        gc.collect()

    print(f"\nMean AUC = {score}")  # 폴드별 Validation 스코어 출력
    print(f"OOF AUC = {roc_auc_score(y, y_oof)}"
          )  # Out Of Fold Validation 스코어 출력

    # compute and store the mean feature importance across folds
    fi_cols = [col for col in fi.columns if 'fold_' in col]
    fi['importance'] = fi[fi_cols].mean(axis=1)

    return y_oof, test_preds, fi
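
A hedged usage sketch for the helper above, on synthetic data (column names and parameters are illustrative; it also assumes the module defines seed and imports seed_everything, lgb, gc, roc_auc_score, plus a LightGBM version that still accepts verbose_eval):

seed = 42
rng = np.random.default_rng(seed)
features = ['f0', 'f1', 'f2']
train = pd.DataFrame(rng.normal(size=(1000, 3)), columns=features)
test = pd.DataFrame(rng.normal(size=(200, 3)), columns=features)
y = pd.Series((train['f0'] + rng.normal(scale=0.5, size=1000) > 0).astype(int))

params = {'objective': 'binary', 'metric': 'auc', 'verbosity': -1}
y_oof, test_preds, fi = make_lgb_oof_prediction(
    train, y, test, features, model_params=params, folds=5)
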
Example no. 14
def main():

    # fix seed for train reproduction
    seed_everything(args.SEED)

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    print("device", device)

    # TODO dataset loading

    # TODO sampling dataset for debugging
    if args.DEBUG:
        total_num = 100
        # image_path = image_path[:total_num]
        # labels = labels[:total_num]

    skf = StratifiedKFold(n_splits=args.n_folds,
                          shuffle=True,
                          random_state=args.SEED)
    for fold_num, (trn_idx,
                   val_idx) in enumerate(skf.split(image_path, labels)):

        print(f"fold {fold_num} training starts...")
        trn_img_paths = np.array(image_path)[trn_idx]
        trn_labels = np.array(labels)[trn_idx]
        val_img_paths = np.array(image_path)[val_idx]
        val_labels = np.array(labels)[val_idx]

        default_transforms = transforms.Compose(
            [transforms.Resize(args.input_size)])
        train_transforms = get_transform(target_size=(args.input_size,
                                                      args.input_size),
                                         transform_list=args.train_augments,
                                         augment_ratio=args.augment_ratio)

        valid_transforms = get_transform(target_size=(args.input_size,
                                                      args.input_size),
                                         transform_list=args.valid_augments,
                                         augment_ratio=args.augment_ratio,
                                         is_train=False)

        train_dataset = PathDataset(trn_img_paths, trn_labels,
                                    default_transforms, train_transforms)
        valid_dataset = PathDataset(val_img_paths, val_labels,
                                    default_transforms, valid_transforms)
        train_loader = DataLoader(dataset=train_dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=True,
                                  pin_memory=True)
        valid_loader = DataLoader(dataset=valid_dataset,
                                  batch_size=args.batch_size,
                                  num_workers=args.num_workers,
                                  shuffle=False,
                                  pin_memory=True)

        # define model
        model = build_model(args, device)

        # optimizer definition
        optimizer = build_optimizer(args, model)
        scheduler = build_scheduler(args, optimizer, len(train_loader))
        criterion = nn.BCELoss()

        trn_cfg = {
            'train_loader': train_loader,
            'valid_loader': valid_loader,
            'model': model,
            'criterion': criterion,
            'optimizer': optimizer,
            'scheduler': scheduler,
            'device': device,
        }

        train(args, trn_cfg)

        del model, train_loader, valid_loader, train_dataset, valid_dataset
        gc.collect()
Example no. 15
    arg('--num_classes', type=int, default=4,
        help='num tags to predict')  # Fixed
    arg('--model', type=str, default='Resnet50')
    arg('--input_size', type=int, default=296)
    arg('--test_augments', default='resize, horizontal_flip', type=str)
    arg('--augment_ratio',
        default=0.5,
        type=float,
        help='probability of implementing transforms')
    arg('--device', type=int, default=0)
    arg('--hidden_size', type=int, default=128)
    args = parser.parse_args()

    device = args.device
    use_gpu = cuda.is_available()

    SEED = 2019
    seed_everything(SEED)

    global model
    model = models.densenet201(pretrained=False)
    model.classifier = nn.Linear(1920, args.num_classes)
    bind_model(model)
    if args.mode == 'train':
        nsml.save('last')

    if use_gpu:
        model = model.to(device)

    if args.pause:
        nsml.paused(scope=locals())
Example no. 16
    shift_day = LAG_DAY[0]
    roll_wind = LAG_DAY[1]
    lag_df = base_test[['id','d',config.TARGET]]
    col_name = 'rolling_mean_tmp_'+str(shift_day)+'_'+str(roll_wind)
    lag_df[col_name] = lag_df.groupby(['id'])[config.TARGET].transform(lambda x: x.shift(shift_day).rolling(roll_wind).mean())
    return lag_df[[col_name]]
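
The shift-then-rolling-mean pattern used above, demonstrated on a tiny synthetic frame (the column names here are illustrative, not from the original project):

import pandas as pd

demo = pd.DataFrame({'id': ['A'] * 6, 'sales': [3, 5, 4, 6, 8, 7]})
# shift by 1 step, then take a 3-step rolling mean, computed per id group
demo['rolling_mean_tmp_1_3'] = demo.groupby('id')['sales'].transform(
    lambda x: x.shift(1).rolling(3).mean())
print(demo)
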


########################### Model params
#################################################################################
import lightgbm as lgb
lgb_params = config.MODEL_CONFIG['LightGBM']

########################### Vars
#################################################################################
seed_everything(config.SEED)            # to be as deterministic 
lgb_params['seed'] = config.SEED        # as possible


remove_features = ['id','state_id','store_id',
                   'date','wm_yr_wk','d',config.TARGET]

mean_features = ['enc_state_id_mean', 'enc_state_id_std', 'enc_store_id_mean',
                'enc_store_id_std', 'enc_cat_id_mean', 'enc_cat_id_std',
                'enc_dept_id_mean', 'enc_dept_id_std', 'enc_state_id_cat_id_mean',
                'enc_state_id_cat_id_std', 'enc_state_id_dept_id_mean',
                'enc_state_id_dept_id_std', 'enc_store_id_cat_id_mean',
                'enc_store_id_cat_id_std', 'enc_store_id_dept_id_mean',
                'enc_store_id_dept_id_std', 'enc_item_id_mean', 'enc_item_id_std',
                'enc_item_id_state_id_mean', 'enc_item_id_state_id_std',
                'enc_item_id_store_id_mean', 'enc_item_id_store_id_std']
Example no. 17
def main(argv=None):
    tstart = time.time()
    # Arguments
    parser = argparse.ArgumentParser(
        description=
        'FACIL - Framework for Analysis of Class Incremental Learning')

    # miscellaneous args
    parser.add_argument('--gpu',
                        type=int,
                        default=0,
                        help='GPU (default=%(default)s)')
    parser.add_argument('--results-path',
                        type=str,
                        default='../results',
                        help='Results path (default=%(default)s)')
    parser.add_argument('--exp-name',
                        default=None,
                        type=str,
                        help='Experiment name (default=%(default)s)')
    parser.add_argument('--seed',
                        type=int,
                        default=0,
                        help='Random seed (default=%(default)s)')
    parser.add_argument(
        '--log',
        default=['disk'],
        type=str,
        choices=['disk', 'tensorboard'],
        help='Loggers used (disk, tensorboard) (default=%(default)s)',
        nargs='*',
        metavar="LOGGER")
    parser.add_argument('--save-models',
                        action='store_true',
                        help='Save trained models (default=%(default)s)')
    parser.add_argument('--last-layer-analysis',
                        action='store_true',
                        help='Plot last layer analysis (default=%(default)s)')
    parser.add_argument(
        '--no-cudnn-deterministic',
        action='store_true',
        help='Disable CUDNN deterministic (default=%(default)s)')
    # dataset args
    parser.add_argument('--datasets',
                        default=['cifar100'],
                        type=str,
                        choices=list(dataset_config.keys()),
                        help='Dataset or datasets used (default=%(default)s)',
                        nargs='+',
                        metavar="DATASET")
    parser.add_argument(
        '--num-workers',
        default=4,
        type=int,
        required=False,
        help=
        'Number of subprocesses to use for dataloader (default=%(default)s)')
    parser.add_argument(
        '--pin-memory',
        default=False,
        type=bool,
        required=False,
        help=
        'Copy Tensors into CUDA pinned memory before returning them (default=%(default)s)'
    )
    parser.add_argument(
        '--batch-size',
        default=64,
        type=int,
        required=False,
        help='Number of samples per batch to load (default=%(default)s)')
    parser.add_argument(
        '--num-tasks',
        default=4,
        type=int,
        required=False,
        help='Number of tasks per dataset (default=%(default)s)')
    parser.add_argument(
        '--nc-first-task',
        default=None,
        type=int,
        required=False,
        help='Number of classes of the first task (default=%(default)s)')
    parser.add_argument(
        '--use-valid-only',
        action='store_true',
        help='Use validation split instead of test (default=%(default)s)')
    parser.add_argument(
        '--stop-at-task',
        default=0,
        type=int,
        required=False,
        help='Stop training after specified task (default=%(default)s)')
    # model args
    parser.add_argument('--network',
                        default='resnet32',
                        type=str,
                        choices=allmodels,
                        help='Network architecture used (default=%(default)s)',
                        metavar="NETWORK")
    parser.add_argument(
        '--keep-existing-head',
        action='store_true',
        help='Disable removing classifier last layer (default=%(default)s)')
    parser.add_argument('--pretrained',
                        action='store_true',
                        help='Use pretrained backbone (default=%(default)s)')
    # training args
    parser.add_argument('--approach',
                        default='finetuning',
                        type=str,
                        choices=approach.__all__,
                        help='Learning approach used (default=%(default)s)',
                        metavar="APPROACH")
    parser.add_argument(
        '--nepochs',
        default=200,
        type=int,
        required=False,
        help='Number of epochs per training session (default=%(default)s)')
    parser.add_argument('--lr',
                        default=0.1,
                        type=float,
                        required=False,
                        help='Starting learning rate (default=%(default)s)')
    parser.add_argument('--lr-min',
                        default=1e-4,
                        type=float,
                        required=False,
                        help='Minimum learning rate (default=%(default)s)')
    parser.add_argument(
        '--lr-factor',
        default=3,
        type=float,
        required=False,
        help='Learning rate decreasing factor (default=%(default)s)')
    parser.add_argument(
        '--lr-patience',
        default=5,
        type=int,
        required=False,
        help=
        'Maximum patience to wait before decreasing learning rate (default=%(default)s)'
    )
    parser.add_argument('--clipping',
                        default=10000,
                        type=float,
                        required=False,
                        help='Clip gradient norm (default=%(default)s)')
    parser.add_argument('--momentum',
                        default=0.0,
                        type=float,
                        required=False,
                        help='Momentum factor (default=%(default)s)')
    parser.add_argument('--weight-decay',
                        default=0.0,
                        type=float,
                        required=False,
                        help='Weight decay (L2 penalty) (default=%(default)s)')
    parser.add_argument('--warmup-nepochs',
                        default=0,
                        type=int,
                        required=False,
                        help='Number of warm-up epochs (default=%(default)s)')
    parser.add_argument(
        '--warmup-lr-factor',
        default=1.0,
        type=float,
        required=False,
        help='Warm-up learning rate factor (default=%(default)s)')
    parser.add_argument(
        '--multi-softmax',
        action='store_true',
        help='Apply separate softmax for each task (default=%(default)s)')
    parser.add_argument(
        '--fix-bn',
        action='store_true',
        help='Fix batch normalization after first task (default=%(default)s)')
    parser.add_argument(
        '--eval-on-train',
        action='store_true',
        help='Show train loss and accuracy (default=%(default)s)')
    # gridsearch args
    parser.add_argument(
        '--gridsearch-tasks',
        default=-1,
        type=int,
        help=
        'Number of tasks to apply GridSearch (-1: all tasks) (default=%(default)s)'
    )

    # Args -- Incremental Learning Framework
    args, extra_args = parser.parse_known_args(argv)
    args.results_path = os.path.expanduser(args.results_path)
    base_kwargs = dict(nepochs=args.nepochs,
                       lr=args.lr,
                       lr_min=args.lr_min,
                       lr_factor=args.lr_factor,
                       lr_patience=args.lr_patience,
                       clipgrad=args.clipping,
                       momentum=args.momentum,
                       wd=args.weight_decay,
                       multi_softmax=args.multi_softmax,
                       wu_nepochs=args.warmup_nepochs,
                       wu_lr_factor=args.warmup_lr_factor,
                       fix_bn=args.fix_bn,
                       eval_on_train=args.eval_on_train)

    if args.no_cudnn_deterministic:
        print('WARNING: CUDNN Deterministic will be disabled.')
        utils.cudnn_deterministic = False

    utils.seed_everything(seed=args.seed)
    print('=' * 108)
    print('Arguments =')
    for arg in np.sort(list(vars(args).keys())):
        print('\t' + arg + ':', getattr(args, arg))
    print('=' * 108)

    # Args -- CUDA
    if torch.cuda.is_available():
        torch.cuda.set_device(args.gpu)
        device = 'cuda'
    else:
        print('WARNING: [CUDA unavailable] Using CPU instead!')
        device = 'cpu'
    # Multiple gpus
    # if torch.cuda.device_count() > 1:
    #     self.C = torch.nn.DataParallel(C)
    #     self.C.to(self.device)
    ####################################################################################################################

    # Args -- Network
    from networks.network import LLL_Net
    if args.network in tvmodels:  # torchvision models
        tvnet = getattr(importlib.import_module(name='torchvision.models'),
                        args.network)
        if args.network == 'googlenet':
            init_model = tvnet(pretrained=args.pretrained, aux_logits=False)
        else:
            init_model = tvnet(pretrained=args.pretrained)
        set_tvmodel_head_var(init_model)
    else:  # other models declared in networks package's init
        net = getattr(importlib.import_module(name='networks'), args.network)
        # WARNING: fixed to pretrained False for other model (non-torchvision)
        init_model = net(pretrained=False)

    # Args -- Continual Learning Approach
    from approach.incremental_learning import Inc_Learning_Appr
    Appr = getattr(importlib.import_module(name='approach.' + args.approach),
                   'Appr')
    assert issubclass(Appr, Inc_Learning_Appr)
    appr_args, extra_args = Appr.extra_parser(extra_args)
    print('Approach arguments =')
    for arg in np.sort(list(vars(appr_args).keys())):
        print('\t' + arg + ':', getattr(appr_args, arg))
    print('=' * 108)

    # Args -- Exemplars Management
    from datasets.exemplars_dataset import ExemplarsDataset
    Appr_ExemplarsDataset = Appr.exemplars_dataset_class()
    if Appr_ExemplarsDataset:
        assert issubclass(Appr_ExemplarsDataset, ExemplarsDataset)
        appr_exemplars_dataset_args, extra_args = Appr_ExemplarsDataset.extra_parser(
            extra_args)
        print('Exemplars dataset arguments =')
        for arg in np.sort(list(vars(appr_exemplars_dataset_args).keys())):
            print('\t' + arg + ':', getattr(appr_exemplars_dataset_args, arg))
        print('=' * 108)
    else:
        appr_exemplars_dataset_args = argparse.Namespace()

    # Args -- GridSearch
    if args.gridsearch_tasks > 0:
        from gridsearch import GridSearch
        gs_args, extra_args = GridSearch.extra_parser(extra_args)
        Appr_finetuning = getattr(
            importlib.import_module(name='approach.finetuning'), 'Appr')
        assert issubclass(Appr_finetuning, Inc_Learning_Appr)
        GridSearch_ExemplarsDataset = Appr.exemplars_dataset_class()
        print('GridSearch arguments =')
        for arg in np.sort(list(vars(gs_args).keys())):
            print('\t' + arg + ':', getattr(gs_args, arg))
        print('=' * 108)

    assert len(extra_args) == 0, "Unused args: {}".format(' '.join(extra_args))
    ####################################################################################################################

    # Log all arguments
    full_exp_name = reduce(
        (lambda x, y: x[0] + y[0]),
        args.datasets) if len(args.datasets) > 1 else args.datasets[0]
    full_exp_name += '_' + args.approach
    if args.exp_name is not None:
        full_exp_name += '_' + args.exp_name
    logger = MultiLogger(args.results_path,
                         full_exp_name,
                         loggers=args.log,
                         save_models=args.save_models)
    logger.log_args(
        argparse.Namespace(**args.__dict__, **appr_args.__dict__,
                           **appr_exemplars_dataset_args.__dict__))

    # Loaders
    utils.seed_everything(seed=args.seed)
    trn_loader, val_loader, tst_loader, taskcla = get_loaders(
        args.datasets,
        args.num_tasks,
        args.nc_first_task,
        args.batch_size,
        num_workers=args.num_workers,
        pin_memory=args.pin_memory)
    # Apply arguments for loaders
    if args.use_valid_only:
        tst_loader = val_loader
    max_task = len(taskcla) if args.stop_at_task == 0 else args.stop_at_task

    # Network and Approach instances
    utils.seed_everything(seed=args.seed)
    net = LLL_Net(init_model, remove_existing_head=not args.keep_existing_head)
    utils.seed_everything(seed=args.seed)
    # taking transformations and class indices from first train dataset
    first_train_ds = trn_loader[0].dataset
    transform, class_indices = first_train_ds.transform, first_train_ds.class_indices
    appr_kwargs = {**base_kwargs, **dict(logger=logger, **appr_args.__dict__)}
    if Appr_ExemplarsDataset:
        appr_kwargs['exemplars_dataset'] = Appr_ExemplarsDataset(
            transform, class_indices, **appr_exemplars_dataset_args.__dict__)
    utils.seed_everything(seed=args.seed)
    appr = Appr(net, device, **appr_kwargs)

    # GridSearch
    if args.gridsearch_tasks > 0:
        ft_kwargs = {
            **base_kwargs,
            **dict(logger=logger,
                   exemplars_dataset=GridSearch_ExemplarsDataset(
                       transform, class_indices))
        }
        appr_ft = Appr_finetuning(net, device, **ft_kwargs)
        gridsearch = GridSearch(appr_ft, args.seed, gs_args.gridsearch_config,
                                gs_args.gridsearch_acc_drop_thr,
                                gs_args.gridsearch_hparam_decay,
                                gs_args.gridsearch_max_num_searches)

    # Loop tasks
    print(taskcla)
    acc_taw = np.zeros((max_task, max_task))
    acc_tag = np.zeros((max_task, max_task))
    forg_taw = np.zeros((max_task, max_task))
    forg_tag = np.zeros((max_task, max_task))
    for t, (_, ncla) in enumerate(taskcla):
        # Early stop tasks if flag
        if t >= max_task:
            continue

        print('*' * 108)
        print('Task {:2d}'.format(t))
        print('*' * 108)

        # Add head for current task
        net.add_head(taskcla[t][1])
        net.to(device)

        # GridSearch
        if t < args.gridsearch_tasks:

            # Search for best finetuning learning rate -- Maximal Plasticity Search
            print('LR GridSearch')
            best_ft_acc, best_ft_lr = gridsearch.search_lr(
                appr.model, t, trn_loader[t], val_loader[t])
            # Apply to approach
            appr.lr = best_ft_lr
            gen_params = gridsearch.gs_config.get_params('general')
            for k, v in gen_params.items():
                if not isinstance(v, list):
                    setattr(appr, k, v)

            # Search for best forgetting/intransigence tradeoff -- Stability Decay
            print('Trade-off GridSearch')
            best_tradeoff, tradeoff_name = gridsearch.search_tradeoff(
                args.approach, appr, t, trn_loader[t], val_loader[t],
                best_ft_acc)
            # Apply to approach
            if tradeoff_name is not None:
                setattr(appr, tradeoff_name, best_tradeoff)

            print('-' * 108)

        # Train
        appr.train(t, trn_loader[t], val_loader[t])
        print('-' * 108)

        # Test
        for u in range(t + 1):
            test_loss, acc_taw[t, u], acc_tag[t,
                                              u] = appr.eval(u, tst_loader[u])
            if u < t:
                forg_taw[t, u] = acc_taw[:t, u].max(0) - acc_taw[t, u]
                forg_tag[t, u] = acc_tag[:t, u].max(0) - acc_tag[t, u]
            print(
                '>>> Test on task {:2d} : loss={:.3f} | TAw acc={:5.1f}%, forg={:5.1f}%'
                '| TAg acc={:5.1f}%, forg={:5.1f}% <<<'.format(
                    u, test_loss, 100 * acc_taw[t, u], 100 * forg_taw[t, u],
                    100 * acc_tag[t, u], 100 * forg_tag[t, u]))
            logger.log_scalar(task=t,
                              iter=u,
                              name='loss',
                              group='test',
                              value=test_loss)
            logger.log_scalar(task=t,
                              iter=u,
                              name='acc_taw',
                              group='test',
                              value=100 * acc_taw[t, u])
            logger.log_scalar(task=t,
                              iter=u,
                              name='acc_tag',
                              group='test',
                              value=100 * acc_tag[t, u])
            logger.log_scalar(task=t,
                              iter=u,
                              name='forg_taw',
                              group='test',
                              value=100 * forg_taw[t, u])
            logger.log_scalar(task=t,
                              iter=u,
                              name='forg_tag',
                              group='test',
                              value=100 * forg_tag[t, u])

        # Save
        print('Save at ' + os.path.join(args.results_path, full_exp_name))
        logger.log_result(acc_taw, name="acc_taw", step=t)
        logger.log_result(acc_tag, name="acc_tag", step=t)
        logger.log_result(forg_taw, name="forg_taw", step=t)
        logger.log_result(forg_tag, name="forg_tag", step=t)
        logger.save_model(net.state_dict(), task=t)
        logger.log_result(acc_taw.sum(1) /
                          np.tril(np.ones(acc_taw.shape[0])).sum(1),
                          name="avg_accs_taw",
                          step=t)
        logger.log_result(acc_tag.sum(1) /
                          np.tril(np.ones(acc_tag.shape[0])).sum(1),
                          name="avg_accs_tag",
                          step=t)
        aux = np.tril(
            np.repeat([[tdata[1] for tdata in taskcla[:max_task]]],
                      max_task,
                      axis=0))
        logger.log_result((acc_taw * aux).sum(1) / aux.sum(1),
                          name="wavg_accs_taw",
                          step=t)
        logger.log_result((acc_tag * aux).sum(1) / aux.sum(1),
                          name="wavg_accs_tag",
                          step=t)

        # Last layer analysis
        if args.last_layer_analysis:
            weights, biases = last_layer_analysis(net.heads,
                                                  t,
                                                  taskcla,
                                                  y_lim=True)
            logger.log_figure(name='weights', iter=t, figure=weights)
            logger.log_figure(name='bias', iter=t, figure=biases)

            # Output sorted weights and biases
            weights, biases = last_layer_analysis(net.heads,
                                                  t,
                                                  taskcla,
                                                  y_lim=True,
                                                  sort_weights=True)
            logger.log_figure(name='weights', iter=t, figure=weights)
            logger.log_figure(name='bias', iter=t, figure=biases)
    # Print Summary
    utils.print_summary(acc_taw, acc_tag, forg_taw, forg_tag)
    print('[Elapsed time = {:.1f} h]'.format(
        (time.time() - tstart) / (60 * 60)))
    print('Done!')

    return acc_taw, acc_tag, forg_taw, forg_tag, logger.exp_path
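The forgetting matrices filled in the test loop above measure, for each past task u, how much accuracy was lost since its best point: the maximum of column u over all previous rows minus the current accuracy. A minimal sketch of that bookkeeping (separate from the snippet above, with a made-up 3-task accuracy matrix):

import numpy as np

# Hypothetical task-aware accuracy matrix: acc[t, u] = accuracy on task u after learning task t.
acc = np.array([[0.90, 0.00, 0.00],
                [0.80, 0.85, 0.00],
                [0.70, 0.75, 0.88]])

forg = np.zeros_like(acc)
for t in range(acc.shape[0]):
    for u in range(t):
        # forgetting = best accuracy ever reached on task u minus the accuracy after task t
        forg[t, u] = acc[:t, u].max(0) - acc[t, u]

print(forg[2, 0])  # 0.90 - 0.70 = 0.20: task 0 lost 20 points of accuracy by the end of task 2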
Esempio n. 18
0
def main():
    argparser = argparse.ArgumentParser()
    argparser.add_argument("mesh_filename", type=str, help="Point cloud to reconstruct")
    argparser.add_argument("radius", type=float, help="Patch radius (The parameter, r, in the paper)")
    argparser.add_argument("padding", type=float, help="Padding factor for patches (The parameter, c, in the paper)")
    argparser.add_argument("min_pts_per_patch", type=int,
                           help="Minimum number of allowed points inside a patch used to not fit to "
                                "patches with too little data")
    argparser.add_argument("--output", "-o", type=str, default="out",
                           help="Name for the output files: e.g. if you pass in --output out, the program will save "
                                "a dense upsampled point-cloud named out.ply, and a file containing reconstruction "
                                "metadata and model weights named out.pt. Default: out -- "
                                "Note: the number of points per patch in the upsampled point cloud is 64 by default "
                                "and can be set by specifying --upsamples-per-patch.")
    argparser.add_argument("--upsamples-per-patch", "-nup", type=int, default=8,
                           help="*Square root* of the number of upsamples per patch to generate in the output. i.e. if "
                                "you pass in --upsamples-per-patch 8, there will be 64 upsamples per patch.")
    argparser.add_argument("--angle-threshold", "-a", type=float, default=95.0,
                           help="Threshold (in degrees) used to discard points in "
                                "a patch whose normal is facing the wrong way.")
    argparser.add_argument("--local-epochs", "-nl", type=int, default=128,
                           help="Number of fitting iterations done for each chart to its points")
    argparser.add_argument("--global-epochs", "-ng", type=int, default=128,
                           help="Number of fitting iterations done to make each chart agree "
                                "with its neighboring charts")
    argparser.add_argument("--learning-rate", "-lr", type=float, default=1e-3,
                           help="Step size for gradient descent.")
    argparser.add_argument("--devices", "-d", type=str, default=["cuda"], nargs="+",
                           help="A list of devices on which to partition the models for each patch. For large inputs, "
                                "reconstruction can be memory and compute intensive. Passing in multiple devices will "
                                "split the load across these. E.g. --devices cuda:0 cuda:1 cuda:2")
    argparser.add_argument("--plot", action="store_true",
                           help="Plot the following intermediate states:. (1) patch neighborhoods, "
                                "(2) Intermediate reconstruction before global consistency step, "
                                "(3) Reconstruction after global consistency step. "
                                "This flag is useful for debugging but does not scale well to large inputs.")
    argparser.add_argument("--interpolate", action="store_true",
                           help="If set, then force all patches to agree with the input at overlapping points "
                                "(i.e. the reconstruction will try to interpolate the input point cloud). "
                                "Otherwise, we fit all patches to the average of overlapping patches at each point.")
    argparser.add_argument("--max-sinkhorn-iters", "-si", type=int, default=32,
                           help="Maximum number of Sinkhorn iterations")
    argparser.add_argument("--sinkhorn-epsilon", "-sl", type=float, default=1e-3,
                           help="The reciprocal (1/lambda) of the Sinkhorn regularization parameter.")
    argparser.add_argument("--seed", "-s", type=int, default=-1,
                           help="Random seed to use when initializing network weights. "
                                "If the seed not positive, a seed is selected at random.")
    argparser.add_argument("--exact-emd", "-e", action="store_true",
                           help="Use exact optimal transport distance instead of sinkhorn. "
                                "This will be slow and should not make a difference in the output")
    argparser.add_argument("--use-best", action="store_true",
                           help="Use the model with the lowest loss as the final result.")
    argparser.add_argument("--normal-neighborhood-size", "-ns", type=int, default=64,
                           help="Neighborhood size used to estimate the normals in the final dense point cloud. "
                                "Default: 64")
    argparser.add_argument("--save-pre-cc", action="store_true",
                           help="Save a copy of the model before the cycle consistency step")
    argparser.add_argument("--batch-size", type=int, default=-1, help="Split fitting MLPs into batches")
    args = argparser.parse_args()

    # We'll populate this dictionary and save it as output
    output_dict = {
        "pre_cycle_consistency_model": None,
        "final_model": None,
        "patch_uvs": None,
        "patch_idx": None,
        "patch_txs": None,
        "radius": args.radius,
        "padding": args.padding,
        "min_pts_per_patch": args.min_pts_per_patch,
        "angle_threshold": args.angle_threshold,
        "interpolate": args.interpolate,
        "global_epochs": args.global_epochs,
        "local_epochs": args.local_epochs,
        "learning_rate": args.learning_rate,
        "devices": args.devices,
        "sinkhorn_epsilon": args.sinkhorn_epsilon,
        "max_sinkhorn_iters": args.max_sinkhorn_iters,
        "seed": utils.seed_everything(args.seed),
        "batch_size": args.batch_size
    }

    # Read a point cloud and normals from a file, center it about its mean, and align it along its principle vectors
    x, n = utils.load_point_cloud_by_file_extension(args.mesh_filename, compute_normals=True)

    # Compute a set of neighborhood (patches) and a uv samples for each neighborhood. Store the result in a list
    # of pairs (uv_j, xi_j) where uv_j are 2D uv coordinates for the j^th patch, and xi_j are the indices into x of
    # the j^th patch. We will try to reconstruct a function phi, such that phi(uv_j) = x[xi_j].
    print("Computing neighborhoods...")
    bbox_diag = np.linalg.norm(np.max(x, axis=0) - np.min(x, axis=0))
    patch_idx, patch_uvs, patch_xs, patch_tx = compute_patches(x, n, args.radius*bbox_diag, args.padding,
                                                               angle_thresh=args.angle_threshold,
                                                               min_pts_per_patch=args.min_pts_per_patch)
    num_patches = len(patch_uvs)
    output_dict["patch_uvs"] = patch_uvs
    output_dict["patch_idx"] = patch_idx
    output_dict["patch_txs"] = patch_tx

    if args.plot:
        plot_patches(x, patch_idx)

    # Initialize one model per patch and convert the input data to a pytorch tensor
    print("Creating models...")
    if args.batch_size > 0:
        num_batches = int(np.ceil(num_patches / args.batch_size))
        batch_size = args.batch_size
        print("Splitting fitting into %d batches" % num_batches)
    else:
        num_batches = 1
        batch_size = num_patches
    phi = nn.ModuleList([MLP(2, 3) for i in range(num_patches)])
    # x = torch.from_numpy(x.astype(np.float32)).to(args.device)

    phi_optimizers = []
    phi_optimizers_devices = []
    uv_optimizer = torch.optim.Adam(patch_uvs, lr=args.learning_rate)
    sinkhorn_loss = SinkhornLoss(max_iters=args.max_sinkhorn_iters, return_transport_matrix=True)
    mse_loss = nn.MSELoss()

    # Fit a function, phi_i, for each patch so that phi_i(patch_uvs[i]) = x[patch_idx[i]]. i.e. so that the function
    # phi_i "agrees" with the point cloud on each patch.
    #
    # We also store the correspondences between the uvs and points which we use later for the consistency step. The
    # correspondences are stored in a list, pi where pi[i] is a vector of integers used to permute the points in
    # a patch.
    pi = [None for _ in range(num_patches)]

    # Cache model with the lowest loss if --use-best is passed
    best_models = [None for _ in range(num_patches)]
    best_losses = [np.inf for _ in range(num_patches)]

    print("Training local patches...")
    for b in range(num_batches):
        print("Fitting batch %d/%d" % (b + 1, num_batches))
        start_idx = b * batch_size
        end_idx = min((b + 1) * batch_size, num_patches)
        optimizer_batch = torch.optim.Adam(phi[start_idx:end_idx].parameters(), lr=args.learning_rate)
        phi_optimizers.append(optimizer_batch)
        for i in range(start_idx, end_idx):
            dev_i = args.devices[i % len(args.devices)]
            phi[i] = phi[i].to(dev_i)
            patch_uvs[i] = patch_uvs[i].to(dev_i)
            patch_xs[i] = patch_xs[i].to(dev_i)
            
        for epoch in range(args.local_epochs):
            optimizer_batch.zero_grad()
            uv_optimizer.zero_grad()

            # sum_loss = torch.tensor([0.0]).to(args.devices[0])
            losses = []
            torch.cuda.synchronize()
            epoch_start_time = time.time()
            for i in range(start_idx, end_idx):
                uv_i = patch_uvs[i]
                x_i = patch_xs[i]
                y_i = phi[i](uv_i)

                with torch.no_grad():
                    if args.exact_emd:
                        M_i = pairwise_distances(x_i.unsqueeze(0), y_i.unsqueeze(0)).squeeze().cpu().numpy()
                        p_i = ot.emd(np.ones(x_i.shape[0]), np.ones(y_i.shape[0]), M_i)
                        p_i = torch.from_numpy(p_i.astype(np.float32)).to(args.devices[0])
                    else:
                        _, p_i = sinkhorn_loss(x_i.unsqueeze(0), y_i.unsqueeze(0))
                    pi_i = p_i.squeeze().max(0)[1]
                    pi[i] = pi_i

                loss_i = mse_loss(x_i[pi_i].unsqueeze(0), y_i.unsqueeze(0))

                if args.use_best and loss_i.item() < best_losses[i]:
                    best_losses[i] = loss_i.item()
                    model_copy = copy.deepcopy(phi[i]).to('cpu')
                    best_models[i] = copy.deepcopy(model_copy.state_dict())
                loss_i.backward()
                losses.append(loss_i)
                # sum_loss += loss_i.to(args.devices[0])

            # sum_loss.backward()
            sum_loss = sum([l.item() for l in losses])
            torch.cuda.synchronize()
            epoch_end_time = time.time()

            print("%d/%d: [Total = %0.5f] [Mean = %0.5f] [Time = %0.3f]" %
                  (epoch, args.local_epochs, sum_loss,
                   sum_loss / (end_idx - start_idx), epoch_end_time - epoch_start_time))
            optimizer_batch.step()
            uv_optimizer.step()
            
        for i in range(start_idx, end_idx):
            dev_i = 'cpu'
            phi[i] = phi[i].to(dev_i)
            patch_uvs[i] = patch_uvs[i].to(dev_i)
            patch_xs[i] = patch_xs[i].to(dev_i)
            pi[i] = pi[i].to(dev_i)
        optimizer_batch_devices = move_optimizer_to_device(optimizer_batch, 'cpu')
        phi_optimizers_devices.append(optimizer_batch_devices)
                    
        print("Done batch %d/%d" % (b + 1, num_batches))

    print("Mean best losses:", np.mean(best_losses[i]))
    
    if args.use_best:
        for i, phi_i in enumerate(phi):
            phi_i.load_state_dict(best_models[i])

    if args.save_pre_cc:
        output_dict["pre_cycle_consistency_model"] = copy.deepcopy(phi.state_dict())

    if args.plot:
        raise NotImplementedError("TODO: Fix plotting code")
        plot_reconstruction(x, patch_uvs, patch_tx, phi, scale=1.0/args.padding)

    # Do a second, global, stage of fitting where we ask all patches to agree with each other on overlapping points.
    # If the user passed --interpolate, we ask that the patches agree on the original input points, otherwise we ask
    # that they agree on the average of predictions from patches overlapping a given point.
    if not args.interpolate:
        print("Computing patch means...")
        with torch.no_grad():
            patch_xs = patch_means(pi, patch_uvs, patch_idx, patch_tx, phi, x, args.devices, num_batches)

    print("Training cycle consistency...")
    for b in range(num_batches):
        print("Fitting batch %d/%d" % (b + 1, num_batches))
        start_idx = b * batch_size
        end_idx = min((b + 1) * batch_size, num_patches)
        for i in range(start_idx, end_idx):
            dev_i = args.devices[i % len(args.devices)]
            phi[i] = phi[i].to(dev_i)
            patch_uvs[i] = patch_uvs[i].to(dev_i)
            patch_xs[i] = patch_xs[i].to(dev_i)
            pi[i] = pi[i].to(dev_i)
        optimizer = phi_optimizers[b]
        move_optimizer_to_device(optimizer, phi_optimizers_devices[b])
        for epoch in range(args.global_epochs):
            optimizer.zero_grad()
            uv_optimizer.zero_grad()

            sum_loss = torch.tensor([0.0]).to(args.devices[0])
            epoch_start_time = time.time()
            for i in range(start_idx, end_idx):
                uv_i = patch_uvs[i]
                x_i = patch_xs[i]
                y_i = phi[i](uv_i)
                pi_i = pi[i]
                loss_i = mse_loss(x_i[pi_i].unsqueeze(0), y_i.unsqueeze(0))

                if loss_i.item() < best_losses[i]:
                    best_losses[i] = loss_i.item()
                    model_copy = copy.deepcopy(phi[i]).to('cpu')
                    best_models[i] = copy.deepcopy(model_copy.state_dict())

                sum_loss += loss_i.to(args.devices[0])

            sum_loss.backward()
            epoch_end_time = time.time()

            print("%d/%d: [Total = %0.5f] [Mean = %0.5f] [Time = %0.3f]" %
                  (epoch, args.global_epochs, sum_loss.item(),
                   sum_loss.item() / (end_idx - start_idx), epoch_end_time-epoch_start_time))
            optimizer.step()
            uv_optimizer.step()
        for i in range(start_idx, end_idx):
            dev_i = 'cpu'
            phi[i] = phi[i].to(dev_i)
            patch_uvs[i] = patch_uvs[i].to(dev_i)
            patch_xs[i] = patch_xs[i].to(dev_i)
            pi[i] = pi[i].to(dev_i)
        move_optimizer_to_device(optimizer, 'cpu')
                    
    print("Mean best losses:", np.mean(best_losses[i]))
    for i, phi_i in enumerate(phi):
        phi_i.load_state_dict(best_models[i])

    output_dict["final_model"] = phi.state_dict()

    print("Generating dense point cloud...")
    v, n = upsample_surface(patch_uvs, patch_tx, phi, args.devices,
                            scale=(1.0/args.padding),
                            num_samples=args.upsamples_per_patch,
                            normal_samples=args.normal_neighborhood_size,
                            num_batches=num_batches,
                            compute_normals=False)

    print("Saving dense point cloud...")
    pcu.write_ply(args.output + ".ply", v, np.zeros([], dtype=np.int32), n, np.zeros([], dtype=v.dtype))

    print("Saving metadata...")
    torch.save(output_dict, args.output + ".pt")

    if args.plot:
        plot_reconstruction(x, patch_uvs, patch_tx, phi, scale=1.0/args.padding)
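The local fitting loop above turns the soft transport matrix returned by SinkhornLoss into a hard correspondence with p_i.squeeze().max(0)[1], then permutes the patch points before the MSE. A minimal sketch of just that step (separate from the snippet above, with a hand-made 3x3 transport matrix):

import torch
import torch.nn.functional as F

# Hand-made transport matrix between 3 input points (rows) and 3 generated points (columns);
# in the script it would come from SinkhornLoss or exact EMD.
P = torch.tensor([[0.05, 0.90, 0.05],
                  [0.80, 0.10, 0.10],
                  [0.10, 0.05, 0.85]])

# For each generated point (column), pick the input point receiving the most mass.
pi = P.max(0)[1]  # tensor([1, 0, 2])

x = torch.tensor([[0.0, 0.0, 0.0],   # input patch points
                  [1.0, 1.0, 1.0],
                  [2.0, 2.0, 2.0]])
y = torch.tensor([[1.1, 1.0, 0.9],   # points produced by phi_i(uv_i)
                  [0.1, 0.0, -0.1],
                  [2.0, 2.1, 1.9]])

# x[pi] reorders the inputs so that row j matches row j of y, mirroring mse_loss(x_i[pi_i], y_i).
print(pi, F.mse_loss(x[pi], y).item())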
Esempio n. 19
0
def main():

    ########################################################################
    ######################## training parameters ###########################
    ########################################################################

    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset',
                        type=str,
                        default='ImageNet',
                        metavar='N',
                        help='dataset to run experiments on')
    parser.add_argument(
        '--batch_size',
        type=int,
        default=256,
        metavar='N',
        help=
        'input batch size for training (default: 256; note that batch_size 64 gives worse performance for imagenet, so don\'t change this. )'
    )
    parser.add_argument('--exp',
                        type=str,
                        default='default',
                        metavar='N',
                        help='name of experiment')
    parser.add_argument('--logits_exp',
                        type=str,
                        default='default',
                        metavar='N',
                        help='name of experiment containing logits')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed',
                        type=int,
                        default=5021,
                        metavar='S',
                        help='random seed (default: 5021)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.1,
                        metavar='LR',
                        help='learning rate (default: 0.1)')
    parser.add_argument('--weight_decay',
                        type=float,
                        default=5 * 1e-4,
                        help='weight decay (default: 5e-4)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        metavar='M',
                        help='SGD momentum (default: 0.9)')
    parser.add_argument('--step_size',
                        type=float,
                        default=30,
                        metavar='M',
                        help='StepLR step size in epochs (default: 30)')
    parser.add_argument('--gamma',
                        type=float,
                        default=0.1,
                        metavar='M',
                        help='StepLR decay factor (default: 0.1)')
    parser.add_argument(
        '--log-interval',
        type=int,
        default=10,
        metavar='N',
        help='how many batches to wait before logging training status')
    parser.add_argument(
        '--stopping_criterion',
        type=int,
        default=30,
        metavar='N',
        help='epochs to wait without improvement before early stopping (default: 30)')
    parser.add_argument('--test',
                        action='store_true',
                        default=False,
                        help='test mode')
    parser.add_argument('--load_model',
                        type=str,
                        default=None,
                        help='model to initialise from')

    args = parser.parse_args()

    print("\n==================Options=================")
    pprint(vars(args), indent=4)
    print("==========================================\n")

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    # make everything deterministic, reproducible
    if (args.seed is not None):
        print('Seeding everything with seed {}.'.format(args.seed))
        seed_everything(args.seed)
    else:
        print('Note : Seed is random.')

    device = torch.device("cuda" if use_cuda else "cpu")

    exp_dir = os.path.join('checkpoint', args.exp)
    if not os.path.isdir(exp_dir):
        os.makedirs(exp_dir)

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    dataset = args.dataset
    num_classes = 1000 if dataset.lower() == 'imagenet' else 365

    ########################################################################
    ########################         load data  		####################
    ########################################################################

    datadir = './checkpoint/{}'.format(args.logits_exp)

    if (not args.test):
        data_manyshot = torch.load('{}/results_val_manyshot.pickle'.format(
            datadir))  # for experts with reject option
        data_mediumshot = torch.load('{}/results_val_mediumshot.pickle'.format(
            datadir))  # for experts with reject option
        data_fewshot = torch.load('{}/results_val_fewshot.pickle'.format(
            datadir))  # for experts with reject option

    else:
        data_manyshot = torch.load(
            '{}/results_test_aligned_manyshot.pickle'.format(
                datadir))  # for experts with reject option
        data_mediumshot = torch.load(
            '{}/results_test_aligned_mediumshot.pickle'.format(
                datadir))  # for experts with reject option
        data_fewshot = torch.load(
            '{}/results_test_aligned_fewshot.pickle'.format(
                datadir))  # for experts with reject option
        data_general = torch.load(
            '{}/results_test_aligned_general.pickle'.format(dataset.lower()))

    manyshot_logits = data_manyshot['logits'].clone().detach()
    mediumshot_logits = data_mediumshot['logits'].clone().detach()
    fewshot_logits = data_fewshot['logits'].clone().detach()
    labels = data_manyshot['labels'] if not args.test else data_general[
        'labels']

    manyshotClassMask, mediumshotClassMask, fewshotClassMask = data_manyshot[
        'class_mask'], data_mediumshot['class_mask'], data_fewshot[
            'class_mask']

    # logit tuning to correct for open set sampling ratio
    if (dataset.lower() == 'imagenet'):
        manyshot_logits[:, -1] = manyshot_logits[:, -1] - np.log(2 / (1 + 16))
        mediumshot_logits[:,
                          -1] = mediumshot_logits[:, -1] - np.log(2 / (1 + 16))
        fewshot_logits[:, -1] = fewshot_logits[:, -1] - np.log(2 / (1 + 16))

    else:
        manyshot_logits[:, -1] = manyshot_logits[:, -1] - np.log(2 / (1 + 16))
        mediumshot_logits[:,
                          -1] = mediumshot_logits[:, -1] - np.log(2 / (1 + 8))
        fewshot_logits[:, -1] = fewshot_logits[:, -1] - np.log(2 / (1 + 8))

    manyshot_features = manyshot_logits.data.cpu().numpy()
    mediumshot_features = mediumshot_logits.data.cpu().numpy()
    fewshot_features = fewshot_logits.data.cpu().numpy()
    labels = labels.data.cpu().numpy()

    if (not args.test):
        train_loader = torch.utils.data.DataLoader(Calibration_Dataset(
            orig_txt='./data/{}_LT/{}_LT_train.txt'.format(
                args.dataset, args.dataset),
            manyshot_features=manyshot_features,
            mediumshot_features=mediumshot_features,
            fewshot_features=fewshot_features,
            labels=labels),
                                                   batch_size=args.batch_size,
                                                   shuffle=True,
                                                   **kwargs)
    else:
        test_loader = torch.utils.data.DataLoader(
            Calibration_Dataset(orig_txt='./data/{}_LT/{}_LT_train.txt'.format(
                args.dataset, args.dataset),
                                manyshot_features=manyshot_features,
                                mediumshot_features=mediumshot_features,
                                fewshot_features=fewshot_features,
                                labels=labels),
            batch_size=args.batch_size,
            shuffle=False,
            **kwargs)  # don't shuffle the test set

    ########################################################################
    ######################## initialise model and optimizer ################
    ########################################################################

    model = CalibrateExperts(args.dataset.lower(), manyshotClassMask,
                             mediumshotClassMask, fewshotClassMask).cuda()
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.StepLR(optimizer,
                                          step_size=args.step_size,
                                          gamma=args.gamma)
    print(
        'Using StepLR scheduler with params, stepsize : {}, gamma : {}'.format(
            args.step_size, args.gamma))

    if (args.test):
        pretrained_model = torch.load(args.load_model)
        weights = pretrained_model['state_dict_best']['model']
        weights = {
            k:
            weights['module.' + k] if 'module.' + k in weights else weights[k]
            for k in model.state_dict()
        }
        model.load_state_dict(weights)  # loading model weights
        print('Loaded pretrained model.')

    ########################################################################
    ######################## training with early stopping ##################
    ########################################################################

    if (not args.test):

        results = vars(args)
        results['train_losses'], results['train_accuracies'] = [], []
        best_acc, best_epoch = 0, 0

        epoch = 1
        while (True):

            sys.stdout.flush()

            train_loss, train_acc = train(args, model, device, train_loader,
                                          optimizer, scheduler, epoch)

            results['train_losses'].append(train_loss)
            results['train_accuracies'].append(train_acc)

            if (train_acc > best_acc):
                best_acc = train_acc
                best_epoch = epoch
                results['best_acc'], results[
                    'best_epoch'] = best_acc, best_epoch

                # save best model
                best_model_weights = {}
                best_model_weights['model'] = copy.deepcopy(model.state_dict())
                model_states = {
                    'epoch': epoch,
                    'best_epoch': best_epoch,
                    'state_dict_best': best_model_weights,
                    'best_acc': best_acc,
                }
                torch.save(model_states, os.path.join(exp_dir,
                                                      "best_model.pt"))

            elif (epoch > best_epoch + args.stopping_criterion):
                print('Best model obtained. Accuracy : ', best_acc)
                plot_curves(results, exp_dir)  # plot
                break

            savepath = os.path.join(exp_dir, 'results.pickle')
            with open(savepath, 'wb') as f:
                pickle.dump(results, f)
            plot_curves(results, exp_dir)  # plot
            epoch = epoch + 1

    ########################################################################
    ########################        testing         ########################
    ########################################################################

    else:

        loss, acc, preds = test(args, model, device, test_loader)

        if (dataset == 'ImageNet'):
            split_ranges = {
                'manyshot': [0, 19550],
                'medianshot': [19550, 43200],
                'fewshot': [43200, 50000],
                'all': [0, 50000]
            }  # imagenet
        else:
            split_ranges = {
                'manyshot': [0, 13200],
                'medianshot': [13200, 29400],
                'fewshot': [29400, 36500],
                'all': [0, 36500]
            }  # places

        for split_name, split_range in split_ranges.items():

            gt_target = torch.from_numpy(
                labels[int(split_range[0]):int(split_range[1])]).cuda()
            split_preds = preds[int(split_range[0]):int(split_range[1])]

            correct = split_preds.eq(
                gt_target.view_as(split_preds)).sum().item()
            accuracy = 100 * (correct / (split_range[1] - split_range[0]))

            print('{} accuracy : {:.2f}'.format(split_name, accuracy))
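The logit tuning block above subtracts np.log(2 / (1 + k)) from the reject column of each expert, where k is the dataset-dependent sampling ratio used when the experts were trained. Subtracting log(a) from a logit multiplies that class's unnormalized softmax score by 1/a before renormalization, which is what undoes the sampling bias. A toy illustration (separate from the snippet above, with made-up logits):

import numpy as np

def softmax(z):
    e = np.exp(z - z.max())
    return e / e.sum()

logits = np.array([2.0, 1.0, 1.5])    # hypothetical [class_0, class_1, reject] logits

adjusted = logits.copy()
adjusted[-1] -= np.log(2 / (1 + 16))  # same correction as the imagenet branch above

# The reject class's unnormalized score is scaled by (1 + 16) / 2 = 8.5 before renormalizing.
print(softmax(logits)[-1], softmax(adjusted)[-1])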
Esempio n. 20
0
def run_single_nn(cfg,
                  train,
                  test,
                  folds,
                  num_features,
                  cat_features,
                  target,
                  device,
                  logger,
                  fold_num=0,
                  seed=7):

    # Set seed
    logger.info(f'Set seed {seed}')
    seed_everything(seed=seed)

    # loader
    trn_idx = folds[folds['fold'] != fold_num].index
    val_idx = folds[folds['fold'] == fold_num].index
    train_folds = train.loc[trn_idx].reset_index(drop=True)
    valid_folds = train.loc[val_idx].reset_index(drop=True)
    train_target = target[trn_idx]
    valid_target = target[val_idx]
    train_dataset = TrainDataset(train_folds, num_features, cat_features,
                                 train_target)
    valid_dataset = TrainDataset(valid_folds, num_features, cat_features,
                                 valid_target)
    train_loader = DataLoader(train_dataset,
                              batch_size=cfg.batch_size,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=True,
                              drop_last=True)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=cfg.batch_size,
                              shuffle=False,
                              num_workers=4,
                              pin_memory=True,
                              drop_last=False)

    # model
    if cfg.ex_name == "baseline":
        model = TabularNN(cfg)
    if "add_cate_x" in cfg.ex_name:
        model = TabularNNV2(cfg)
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=cfg.lr,
                                 weight_decay=cfg.weight_decay)
    scheduler = torch.optim.lr_scheduler.OneCycleLR(
        optimizer=optimizer,
        pct_start=0.1,
        div_factor=1e3,
        max_lr=1e-2,
        epochs=cfg.epochs,
        steps_per_epoch=len(train_loader))
    if "ema" in cfg.ex_name:
        ema = ExponentialMovingAverage(model.parameters(), decay=0.995)
    else:
        ema = None

    # log
    log_df = pd.DataFrame(columns=(['EPOCH'] + ['TRAIN_LOSS'] +
                                   ['VALID_LOSS']))

    # train & validate
    best_loss = np.inf
    for epoch in range(cfg.epochs):
        train_loss = train_fn(train_loader, model, optimizer, scheduler,
                              device, ema)
        valid_loss, val_preds = validate_fn(valid_loader, model, device)
        log_row = {
            'EPOCH': epoch,
            'TRAIN_LOSS': train_loss,
            'VALID_LOSS': valid_loss,
        }
        log_df = pd.concat([log_df, pd.DataFrame(log_row, index=[0])], ignore_index=True)
        # logger.info(log_df.tail(1))
        if valid_loss < best_loss:
            logger.info(f'epoch{epoch} save best model... {valid_loss}')
            best_loss = valid_loss
            oof = np.zeros((len(train), len(cfg.target_cols)))
            oof[val_idx] = val_preds
            if ema is not None:
                ema.copy_to(model.parameters())
            torch.save(
                model.state_dict(),
                os.path.join(cfg.ex_name, f"fold{fold_num}_seed{seed}.pth"))

    # predictions
    test_dataset = TestDataset(test, num_features, cat_features)
    test_loader = DataLoader(test_dataset,
                             batch_size=cfg.batch_size,
                             shuffle=False,
                             num_workers=4,
                             pin_memory=True)
    if cfg.ex_name == "baseline":
        model = TabularNN(cfg)
    if "add_cate_x" in cfg.ex_name:
        model = TabularNNV2(cfg)
    model.load_state_dict(
        torch.load(os.path.join(cfg.ex_name,
                                f"fold{fold_num}_seed{seed}.pth")))
    model.to(device)
    predictions = inference_fn(test_loader, model, device)

    # del
    torch.cuda.empty_cache()

    return oof, predictions
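run_single_nn above only calls ema.copy_to before saving and leaves the per-step updates to train_fn. A minimal sketch of the usual pattern (separate from the snippet above, assuming ExponentialMovingAverage here comes from the torch_ema package, with a toy linear model):

import torch
import torch.nn as nn
from torch_ema import ExponentialMovingAverage

model = nn.Linear(10, 1)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
ema = ExponentialMovingAverage(model.parameters(), decay=0.995)

for step in range(5):
    x, y = torch.randn(8, 10), torch.randn(8, 1)
    loss = nn.functional.mse_loss(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update()                      # keep the shadow weights up to date after every step

ema.store()                           # stash the raw weights
ema.copy_to(model.parameters())       # evaluate / save with the averaged weights
torch.save(model.state_dict(), "ema_model.pth")
ema.restore()                         # put the raw weights back for further training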
Esempio n. 21
0
 def __init__(self, inp, out,SEED,bias=True):
     super().__init__()
     seed_everything(SEED)  # seeding here should keep the model's initial weights fixed across runs
     self.encoder = nn.Linear(inp, out, bias=bias)
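The tiny __init__ above seeds immediately before creating the layer so the encoder always starts from the same weights. A self-contained sketch of the same idea (using torch.manual_seed as a stand-in for seed_everything):

import torch
import torch.nn as nn

def make_encoder(inp, out, seed, bias=True):
    torch.manual_seed(seed)                 # stand-in for seed_everything(SEED)
    return nn.Linear(inp, out, bias=bias)

a = make_encoder(16, 8, seed=42)
b = make_encoder(16, 8, seed=42)
print(torch.equal(a.weight, b.weight))      # True: same seed, same initial weights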
Esempio n. 22
0
                temp_mask.append(mask)

            oms = np.array(temp_mask)

            oms = oms.reshape([oms.shape[0], size * size]).astype(int)
            preds_array = np.vstack((preds_array, oms))

            file_name_list.append([i['file_name'] for i in image_infos])
        print("End prediction.")
        file_names = [y for x in file_name_list for y in x]

    return file_names, preds_array


if __name__ == '__main__':
    seed_everything(21)
    # path to the saved best model
    model_path = './saved/psudo_test_best_moiu.pt'
    # path where the submission is saved
    output_file = "./submission/psudo_test_best_miou.csv"
    dataset_path = '../../input/data'
    test_path = dataset_path + '/test.json'
    batch_size = 16  # Mini-batch size

    # model
    # model = DeepLabV3_vgg16pretrained(
    #     n_classes=12, n_blocks=[3, 4, 23, 3], atrous_rates=[6, 12, 18, 24])
    model = get_smp_model('FPN', 'efficientnet-b0')
    device = "cuda" if torch.cuda.is_available() else "cpu"

    category_names = [
Esempio n. 23
0
    dataset_test = SurgicalDataset(data_root=args.data_root,
                                   seq_set=[4, 7],
                                   is_train=False)
    test_loader = DataLoader(dataset=dataset_test,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=2,
                             drop_last=True)

    print('Sample size of test dataset:', dataset_test.__len__())
    model = SalSegNet(num_classes=args.num_classes).to(device)
    model = torch.nn.parallel.DataParallel(model)
    model.load_state_dict(torch.load('epoch_75.pth.tar'))
    dices_per_class, SIM_SCORE, EMD_SCORE = validate(test_loader, model, args)
    print(
        'Mean Avg Dice:%.4f [Bipolar Forceps:%.4f, Prograsp Forceps:%.4f, Large Needle Driver:%.4f, Vessel Sealer:%.4f]'
        % (dices_per_class[:4].mean(), dices_per_class[0], dices_per_class[1],
           dices_per_class[2], dices_per_class[3]))
    print('Saliency Metrics: SIM:%.4f, EMD:%.4f' %
          (np.mean(SIM_SCORE), np.mean(EMD_SCORE)))


if __name__ == '__main__':
    class_names = [
        "Bipolar Forceps", "Prograsp Forceps", "Large Needle Driver",
        "Vessel Sealer", "Grasping Retractor", "Monopolar Curve, Scissors",
        "Other"
    ]
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    seed_everything()
    main()
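The checkpoint above is loaded after wrapping the model in DataParallel because its keys carry the "module." prefix added by the wrapper. A small sketch (separate from the snippet above) of the two ways to make the names line up:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 4))
state = nn.DataParallel(net).state_dict()   # keys look like "module.0.weight"

# Option 1 (as above): wrap the model in DataParallel before load_state_dict.
# Option 2: strip the prefix and load into the bare model.
stripped = {k.replace('module.', '', 1): v for k, v in state.items()}
net.load_state_dict(stripped)
print(list(state)[0], '->', list(stripped)[0])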
Esempio n. 24
0
def main():
    
    seed_everything(seed_value=42)
    cfg = Config()
    
    data_dir = '../../data'
    save_path = './'
    load_path = './'
    runty = 'traineval'
    assert runty == 'traineval' or runty == 'eval',  \
        "Run type is wrong. Should be 'traineval' or 'eval'"
    
    train_features = pd.read_csv(os.path.join(data_dir, 'train_features.csv'))
    train_targets_scored = pd.read_csv(os.path.join(data_dir, 'train_targets_scored.csv'))
    train_targets_nonscored = pd.read_csv(os.path.join(data_dir, 'train_targets_nonscored.csv'))
    train_drug = pd.read_csv(os.path.join(data_dir, 'train_drug.csv'))
    test_features = pd.read_csv(os.path.join(data_dir, 'test_features.csv'))
    submission = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))

    train_features, test_features = rankGauss(
        train_features=train_features, test_features=test_features)

    train, test, targets_scored, targets_nonscored =   \
        process(train_features=train_features, test_features=test_features, 
                train_targets_scored=train_targets_scored,
                train_targets_nonscored=train_targets_nonscored,
                train_drug=train_drug, runty=runty, save_path=save_path, 
                load_path=load_path)
    
    target_cols = [x for x in train_targets_scored.columns if x != 'sig_id']
    
    train = make_cv_folds(train, cfg.seeds, cfg.nfolds, cfg.drug_thresh, target_cols)
    
    oof = np.zeros((len(train), len(target_cols)))
    predictions = np.zeros((len(test), len(target_cols)))
    
    trte = train_test(train, test, targets_scored, targets_nonscored, save_path, load_path, runty)
    
    time_begin = time()
    
    for seed in cfg.seeds:
        if (runty == 'traineval'):
            oof_, predictions_ = trte.run_k_fold(seed)
            oof += oof_ / len(cfg.seeds)
            predictions += predictions_ / len(cfg.seeds)
        elif (runty == 'eval'):
            predictions_ = trte.run_k_fold(seed)
            predictions += predictions_ / len(cfg.seeds)
        
    time_diff = time() - time_begin
    
    train[target_cols] = oof
    test[target_cols] = predictions
    
    valid_results = train_targets_scored.drop(columns=target_cols).merge(
        train[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)

    y_true = train_targets_scored[target_cols].values
    y_pred = valid_results[target_cols].values

    if (runty == 'traineval'):
        score = 0
        for i in range(len(target_cols)):
            score += log_loss(y_true[:, i], y_pred[:, i])

        print("CV log_loss: ", score / y_pred.shape[1])
    
    sub = submission.drop(columns=target_cols).merge(test[['sig_id']+target_cols], on='sig_id', how='left').fillna(0)
    sub.to_csv('submission.csv', index=False)
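The CV score above is the mean of the per-column binary log losses over all scored targets. A toy illustration of that reduction (separate from the snippet above, with made-up labels and predictions):

import numpy as np
from sklearn.metrics import log_loss

y_true = np.array([[1, 0, 0],      # 4 samples x 3 target columns
                   [0, 1, 0],
                   [0, 0, 1],
                   [1, 1, 0]])
y_pred = np.array([[0.9, 0.1, 0.2],
                   [0.2, 0.8, 0.1],
                   [0.1, 0.3, 0.7],
                   [0.6, 0.7, 0.2]])

# Mean of the per-column binary log losses, as in the CV metric above.
score = np.mean([log_loss(y_true[:, i], y_pred[:, i]) for i in range(y_true.shape[1])])
print(score)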
Esempio n. 25
0
def main():

    seed_everything(seed_value=42)
    cfg = Config()

    data_dir = '../../data'
    save_path = './'
    load_path = './'
    runty = 'traineval'
    assert runty == 'traineval' or runty == 'eval',  \
        "Run type is wrong. Should be 'traineval' or 'eval'"

    train_features = pd.read_csv(os.path.join(data_dir, 'train_features.csv'))
    train_targets_scored = pd.read_csv(
        os.path.join(data_dir, 'train_targets_scored.csv'))
    train_targets_nonscored = pd.read_csv(
        os.path.join(data_dir, 'train_targets_nonscored.csv'))
    test_features = pd.read_csv(os.path.join(data_dir, 'test_features.csv'))
    submission = pd.read_csv(os.path.join(data_dir, 'sample_submission.csv'))

    train_features2 = train_features.copy()
    test_features2 = test_features.copy()

    if (runty == 'traineval'):
        test_features_private = test_features.copy()
    elif (runty == 'eval'):
        test_features_private = pd.read_csv(
            os.path.join(data_dir, 'test_features_private_fake.csv'))

    test_features_private2 = test_features_private.copy()

    train_features, test_features, test_features_private =  \
        rankGauss(train_features=train_features, test_features=test_features,
                  test_features_p=test_features_private, runty=runty)

    train_features, test_features, test_features_private, train_pca, test_pca, test_pca_p =    \
        _pca(train_features=train_features, test_features=test_features,
             runty=runty, test_features_private=test_features_private,
             ncomp_g=cfg.ncomp_g, ncomp_c=cfg.ncomp_c)

    train_features, test_features, test_features_private =   \
        _pca_select(train_features, test_features, test_features_private)

    train_features, test_features, test_features_private =   \
        fe_cluster_all(train_features=train_features, test_features=test_features,
                       test_features_private=test_features_private,
                       train_features2=train_features2, test_features2=test_features2,
                       test_features_private2=test_features_private2,
                       train_pca=train_pca, test_pca=test_pca, test_pca_p=test_pca_p)

    if (runty == 'traineval'):
        train, test, target = process(train_features, test_features,
                                      train_targets_scored)
    elif (runty == 'eval'):
        train, test, target = process(train_features, test_features_private,
                                      train_targets_scored)

    folds = train.copy()

    target_cols = target.drop('sig_id', axis=1).columns.values.tolist()

    oof = np.zeros((len(train), len(target_cols)))
    predictions = np.zeros((len(test), len(target_cols)))

    for seed in cfg.seeds:
        mskf = MultilabelStratifiedKFold(n_splits=cfg.nfolds,
                                         shuffle=True,
                                         random_state=seed)
        for fold, (t_idx, v_idx) in enumerate(mskf.split(X=train, y=target)):
            folds.loc[v_idx, 'kfold'] = int(fold)
        folds['kfold'] = folds['kfold'].astype(int)

        trte = train_test(folds,
                          test,
                          target,
                          save_path,
                          load_path,
                          runty=runty)

        if (runty == 'train'):
            oof_ = trte.run_k_fold(seed)
            oof += oof_ / len(cfg.seeds)
        elif (runty == 'eval'):
            predictions_ = trte.run_k_fold(seed)
            predictions += predictions_ / len(cfg.seeds)
        elif (runty == 'traineval'):
            oof_, predictions_ = trte.run_k_fold(seed)
            oof += oof_ / len(cfg.seeds)
            predictions += predictions_ / len(cfg.seeds)

        # oof_, predictions_ = trte.run_k_fold(seed)
        # oof += oof_ / len(cfg.seed)
        # predictions += predictions_ / len(cfg.seed)

    if (runty == 'train'):
        train[target_cols] = oof
        valid_results = train_targets_scored.drop(columns=target_cols).merge(
            train[['sig_id'] + target_cols], on='sig_id', how='left').fillna(0)

        y_true = train_targets_scored[target_cols].values
        y_pred = valid_results[target_cols].values

        score = 0
        for i in range(len(target_cols)):
            score_ = log_loss(y_true[:, i], y_pred[:, i])
            score += score_ / (target.shape[1] - 1)

        print("CV log_loss: ", score)

    elif (runty == 'eval'):
        test[target_cols] = predictions

        sub = submission.drop(columns=target_cols).merge(test[['sig_id'] +
                                                              target_cols],
                                                         on='sig_id',
                                                         how='left').fillna(0)

        # clip the submission
        # sub_c = sub_clip(sub, test_features)
        # sub_c.to_csv('submission.csv', index=False)

        sub.to_csv('submission.csv', index=False)

    elif (runty == 'traineval'):
        train[target_cols] = oof
        valid_results = train_targets_scored.drop(columns=target_cols).merge(
            train[['sig_id'] + target_cols], on='sig_id', how='left').fillna(0)

        y_true = train_targets_scored[target_cols].values
        y_pred = valid_results[target_cols].values

        score = 0
        for i in range(len(target_cols)):
            score_ = log_loss(y_true[:, i], y_pred[:, i])
            score += score_ / (target.shape[1] - 1)

        print("CV log_loss: ", score)

        test[target_cols] = predictions

        sub = submission.drop(columns=target_cols).merge(test[['sig_id'] +
                                                              target_cols],
                                                         on='sig_id',
                                                         how='left').fillna(0)

        # clip the submission
        # sub_c = sub_clip(sub, test_features)
        # sub_c.to_csv('submission.csv', index=False)

        sub.to_csv('submission.csv', index=False)
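Fold assignment above relies on MultilabelStratifiedKFold so that every rare target stays balanced across folds. A minimal, self-contained sketch of the same pattern (assuming the iterative-stratification package that provides MultilabelStratifiedKFold, with toy data):

import numpy as np
import pandas as pd
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold

rng = np.random.RandomState(0)
folds = pd.DataFrame({'feature': rng.randn(20)})                               # toy features
target = pd.DataFrame(rng.randint(0, 2, size=(20, 2)), columns=['t1', 't2'])   # toy multilabel targets

mskf = MultilabelStratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for fold, (t_idx, v_idx) in enumerate(mskf.split(X=folds, y=target)):
    folds.loc[v_idx, 'kfold'] = int(fold)
folds['kfold'] = folds['kfold'].astype(int)
print(folds['kfold'].value_counts().sort_index())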
Esempio n. 26
0
import numpy as np

import data
import metrics
import opts
import query_methods
import trainer
import util_classes
import utils

if __name__ == '__main__':
    global opt, best_prec1
    parser = opts.myargparser()
    opt = parser.parse_args()
    utils.opt_assert(opt=opt)
    utils.seed_everything(seed=opt.seed)

    print(opt)

    if not os.path.exists(opt.logpath + opt.exp_name + '/logger/'):
        os.makedirs(opt.logpath + opt.exp_name + '/logger/')
    logger = utils.get_logger(opt.logpath + opt.exp_name + '/logger/')
    logger.debug(f"==> Starting experiment..")

    logger.debug(f"==> Initializing data..")
    dset = data.TextData(opt)
    num_acq, num_del = int(opt.num_acquise_percent * opt.num_points), 0
    if opt.num_delete_percent is not None:
        num_del = int(opt.num_delete_percent * opt.num_points)

    logger.debug(f"==> Initializing data loggers..")
Esempio n. 27
0
def pytorch_model_run_cv(x_train,
                         y_train,
                         y_train_lin,
                         features,
                         test_features,
                         x_test,
                         model_obj,
                         params,
                         feats=False,
                         clip=True):
    seed_everything()
    avg_losses_f = []
    avg_val_losses_f = []

    x_test_cuda = torch.tensor(x_test, dtype=torch.long)
    test = torch.utils.data.TensorDataset(x_test_cuda)
    test_loader = torch.utils.data.DataLoader(test,
                                              batch_size=params.batch_size,
                                              shuffle=False)

    splits = list(
        StratifiedKFold(n_splits=params.n_splits,
                        shuffle=True,
                        random_state=params.SEED).split(x_train, y_train_lin))
    for i, (train_idx, valid_idx) in enumerate(splits):
        seed_everything(i * 1000 + i)
        x_train = np.array(x_train)
        y_train = np.array(y_train)

        if feats:
            features = np.array(features)
        x_train_fold = torch.tensor(x_train[train_idx.astype(int)],
                                    dtype=torch.long)
        y_train_fold = torch.tensor(y_train[train_idx.astype(int)],
                                    dtype=torch.float32)
        if feats:
            kfold_X_features = features[train_idx.astype(int)]
            kfold_X_valid_features = features[valid_idx.astype(int)]
        x_val_fold = torch.tensor(x_train[valid_idx.astype(int)],
                                  dtype=torch.long)
        y_val_fold = torch.tensor(y_train[valid_idx.astype(int)],
                                  dtype=torch.float32)

        model = copy.deepcopy(model_obj)

        loss_fn = torch.nn.BCEWithLogitsLoss(reduction='sum')

        step_size = 300
        base_lr, max_lr = 0.001, 0.003
        optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                            model.parameters()),
                                     lr=max_lr)

        scheduler = CyclicLR(optimizer,
                             base_lr=base_lr,
                             max_lr=max_lr,
                             step_size=step_size,
                             mode='exp_range',
                             gamma=0.99994)

        train = MyDataset(
            torch.utils.data.TensorDataset(x_train_fold, y_train_fold))
        valid = MyDataset(
            torch.utils.data.TensorDataset(x_val_fold, y_val_fold))

        train_loader = torch.utils.data.DataLoader(
            train, batch_size=params.batch_size, shuffle=True)
        valid_loader = torch.utils.data.DataLoader(
            valid, batch_size=params.batch_size, shuffle=False)

        print(f'Fold {i + 1}')
        for epoch in range(params.n_epochs):
            start_time = time.time()
            model.train()

            avg_loss = 0.
            for x_batch, y_batch, index in train_loader:
                if feats:
                    f = kfold_X_features[index]
                    y_pred = model([x_batch, f])
                else:
                    y_pred = model(x_batch)

                if scheduler:
                    scheduler.batch_step()

                # Compute and print loss.
                loss = loss_fn(y_pred, y_batch)
                optimizer.zero_grad()
                loss.backward()
                if clip:
                    nn.utils.clip_grad_norm_(model.parameters(), 1)
                optimizer.step()
                avg_loss += loss.item() / len(train_loader)

            model.eval()
            # valid_preds_fold = np.zeros(x_val_fold.size(0))
            # test_preds_fold = np.zeros(len(x_test))

            avg_val_loss = 0.
            for x_batch, y_batch, index in valid_loader:
                if feats:
                    f = kfold_X_valid_features[index]
                    y_pred = model([x_batch, f]).detach()
                else:
                    y_pred = model(x_batch).detach()

                avg_val_loss += loss_fn(y_pred,
                                        y_batch).item() / len(valid_loader)
                # valid_preds_fold[index] = y_pred.cpu().numpy()

            elapsed_time = time.time() - start_time
            print(
                'Epoch {}/{} \t loss={:.4f} \t val_loss={:.4f} \t time={:.2f}s'
                .format(epoch + 1, params.n_epochs, avg_loss, avg_val_loss,
                        elapsed_time))

        avg_losses_f.append(avg_loss)
        avg_val_losses_f.append(avg_val_loss)

    # predict all samples in the test set batch per batch
    # (inference uses the model from the last CV fold)
    test_preds_batches = []
    for i, (x_batch, ) in enumerate(test_loader):
        if feats:
            f = test_features[i * params.batch_size:(i + 1) *
                              params.batch_size]
            y_pred = model([x_batch, f]).detach()
        else:
            y_pred = model(x_batch).detach()
        test_preds_batches.append(y_pred.cpu().numpy())
    test_preds = np.concatenate(test_preds_batches, axis=0)

    print('All \t loss={:.4f} \t val_loss={:.4f} \t '.format(
        np.average(avg_losses_f), np.average(avg_val_losses_f)))
    return test_preds
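A hedged usage sketch for the cross-validation helper above. The padded input arrays, the NeuralNet class and the params namespace are hypothetical placeholders, not part of the original example:

from types import SimpleNamespace

# hypothetical hyper-parameter container exposing the attributes the function reads
params = SimpleNamespace(batch_size=512, n_splits=5, n_epochs=4, SEED=42)

# x_train_pad / x_test_pad: integer-encoded, padded token sequences (np.ndarray)
# y_train: float target matrix; y_train_lin: one integer label per row, used only for stratification
test_preds = pytorch_model_run_cv(x_train_pad, y_train, y_train_lin,
                                  features=None, test_features=None,
                                  x_test=x_test_pad, model_obj=NeuralNet(),
                                  params=params, feats=False, clip=True)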
Esempio n. 28
0
    return acc


def save_as_feather(feat, suffix, X_train, X_test):
    X_train[[feat]].reset_index(
        drop=True).to_feather(f'{FEATURE_DIR}/{feat}_{suffix}_train.feather')
    X_test[[feat]].reset_index(
        drop=True).to_feather(f'{FEATURE_DIR}/{feat}_{suffix}_test.feather')
    return
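A quick illustration of the caching helper above, with made-up data (FEATURE_DIR is assumed to point at an existing directory):

import pandas as pd

X_train_example = pd.DataFrame({'age': [21, 35, 47]})
X_test_example = pd.DataFrame({'age': [29, 52]})
save_as_feather('age', 'raw', X_train_example, X_test_example)
# writes {FEATURE_DIR}/age_raw_train.feather and {FEATURE_DIR}/age_raw_test.feather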


if __name__ == "__main__":

    t = Timer()
    with t.timer(f'fix seed RANDOM_STATE:{RANDOM_STATE}'):
        seed_everything(RANDOM_STATE)

    with t.timer(f'read label'):
        data_path = f'{INPUT_DIR}/train_data/train_task_1_2.csv'
        y_train = pd.read_csv(data_path, usecols=['IsCorrect', 'AnswerValue'])
        y_train_t1 = y_train['IsCorrect'].values
        y_train_t2 = (y_train['AnswerValue'] - 1).values  # starting at zero

    with t.timer(f'apply mms'):
        for feat in dense_features:
            if os.path.exists(f'{FEATURE_DIR}/{feat}_mms_train.feather'):
                continue
            # MMS
            f_train = pd.read_feather(f'{FEATURE_DIR}/{feat}_train.feather')
            f_test = pd.read_feather(f'{FEATURE_DIR}/{feat}_test.feather')
            tmp = pd.concat([f_train[feat], f_test[feat]])
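The loop body is cut off at this point; a minimal sketch of how the min-max scaling ("MMS") step is typically completed, assuming scikit-learn's MinMaxScaler and the save_as_feather helper above (this continuation is an assumption, not the original code):

            from sklearn.preprocessing import MinMaxScaler
            scaler = MinMaxScaler()
            scaler.fit(tmp.values.reshape(-1, 1))
            f_train[feat] = scaler.transform(f_train[[feat]].values).ravel()
            f_test[feat] = scaler.transform(f_test[[feat]].values).ravel()
            save_as_feather(feat, 'mms', f_train, f_test)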
Esempio n. 29
0
from scipy.sparse import csr_matrix
from sklearn.metrics import mean_squared_error

import time
import tqdm
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader,Dataset
import utils

NUM_ITEMS = 30490
DAYS_PRED = 28


utils.seed_everything()    


class M5Dataset_train(Dataset):
    def __init__(self, X, cont_cols, cat_cols, id_cols, lag_cols,
                 target='demand', e_days=28 * 4, d_days=28):
        
        self.e_days = e_days
        self.d_days = d_days
        cat_cols = id_cols + cat_cols 
        self.cat_cols = cat_cols
        
        self.X_cont = X[cont_cols].values
        self.X_cat = X[cat_cols].values
        self.X_lags = X[lag_cols].values
        self.ids = X[id_cols].values
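A hedged construction example for the dataset above; the DataFrame df_sales and the column names are placeholders, and the visible __init__ is truncated, so the remaining attributes are assumed to be set further down:

ds = M5Dataset_train(df_sales,
                     cont_cols=['sell_price'], cat_cols=['weekday', 'event_name_1'],
                     id_cols=['item_id', 'store_id'], lag_cols=['lag_t28'],
                     e_days=28 * 4, d_days=DAYS_PRED)
loader = DataLoader(ds, batch_size=64, shuffle=True, drop_last=True)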
Esempio n. 30
0
    def _run_training(self, fold, seed):

        seed_everything(seed)

        train = process_data(self.folds)
        test_ = process_data(self.test)

        kfold_col = f'kfold_{seed}'
        trn_idx = train[train[kfold_col] != fold].index
        val_idx = train[train[kfold_col] == fold].index

        train_df = train[train[kfold_col] != fold].reset_index(drop=True)
        valid_df = train[train[kfold_col] == fold].reset_index(drop=True)
        
        target_cols = self.targets_scored.drop('sig_id', axis=1).columns.values.tolist()
        aux_target_cols = [x for x in self.targets_nonscored.columns if x != 'sig_id']
        all_target_cols = target_cols + aux_target_cols
        
        num_targets = len(target_cols)
        num_aux_targets = len(aux_target_cols)
        num_all_targets = len(all_target_cols)

        feature_cols = [c for c in train.columns if c not in all_target_cols]
        feature_cols = [c for c in feature_cols if (str(c)[0:5] != 'kfold' and c not in ['sig_id', 'drug_id'])]
        num_features = len(feature_cols)
        
        def train_model(model, tag_name, target_cols_now, fine_tune_scheduler=None):
            x_train, y_train  = train_df[feature_cols].values, train_df[target_cols_now].values
            x_valid, y_valid =  valid_df[feature_cols].values, valid_df[target_cols_now].values
        
            train_dataset = MoADataset(x_train, y_train)
            valid_dataset = MoADataset(x_valid, y_valid)
            
            trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=self.cfg.batch_size, shuffle=True)
            validloader = torch.utils.data.DataLoader(valid_dataset, batch_size=self.cfg.batch_size, shuffle=False)
        
            optimizer = torch.optim.Adam(model.parameters(), lr=1e-3, weight_decay=self.cfg.weight_decay[tag_name])
            scheduler = optim.lr_scheduler.OneCycleLR(optimizer=optimizer,
                                                      steps_per_epoch=len(trainloader),
                                                      pct_start=self.cfg.pct_start,
                                                      div_factor=self.cfg.div_factor[tag_name], 
                                                      max_lr=self.cfg.max_lr[tag_name],
                                                      epochs=self.cfg.epochs)
        
            loss_fn = nn.BCEWithLogitsLoss()
            loss_tr = SmoothBCEwLogits(smoothing=self.cfg.loss_smooth)

            oof = np.zeros((len(train), len(target_cols_now)))
            best_loss = np.inf
            
            for epoch in range(self.cfg.epochs):
                if fine_tune_scheduler is not None:
                    fine_tune_scheduler.step(epoch, model)

                train_loss = train_fn(model, optimizer, scheduler, loss_tr, trainloader, self.cfg.device)
                valid_loss, valid_preds = valid_fn(model, loss_fn, validloader, self.cfg.device)
                print(f"SEED: {seed}, FOLD: {fold}, {tag_name}, EPOCH: {epoch}, train_loss: {train_loss:.6f}, valid_loss: {valid_loss:.6f}")

                if np.isnan(valid_loss):
                    break
            
                if valid_loss < best_loss:
                    best_loss = valid_loss
                    oof[val_idx] = valid_preds
                    os.makedirs(os.path.join(self.save_path, f"seed{seed}"), exist_ok=True)
                    torch.save(model.state_dict(), 
                               os.path.join(self.save_path, f"seed{seed}", f"{tag_name}_FOLD{fold}_.pth"))

            return oof
            
        fine_tune_scheduler = FineTuneScheduler(self.cfg.epochs)

        pretrained_model = Model(num_features, num_all_targets)
        pretrained_model.to(self.cfg.device)
        
        # Train on scored + nonscored targets
        train_model(pretrained_model, 'ALL_TARGETS', all_target_cols)

        # Load the pretrained model with the best loss
        pretrained_model = Model(num_features, num_all_targets)
        pretrained_model.load_state_dict(torch.load(os.path.join(
            self.load_path, f"seed{seed}", f"ALL_TARGETS_FOLD{fold}_.pth"),
                                                    map_location=torch.device(self.cfg.device)))
        pretrained_model.to(self.cfg.device)
        
        # Copy model without the top layer
        final_model = fine_tune_scheduler.copy_without_top(pretrained_model, num_features, 
                                                           num_all_targets, num_targets)

        # Fine-tune the model on scored targets only
        oof = train_model(final_model, 'SCORED_ONLY', target_cols, fine_tune_scheduler)

        # Load the fine-tuned model with the best loss
        model = Model(num_features, num_targets)
        model.load_state_dict(torch.load(os.path.join(
            self.load_path, f"seed{seed}", f"SCORED_ONLY_FOLD{fold}_.pth"), 
                                         map_location=torch.device(self.cfg.device)))
        # model.load_state_dict(torch.load(f"SCORED_ONLY_FOLD{fold}_.pth"))
        model.to(self.cfg.device)

        #--------------------- PREDICTION---------------------
        x_test = test_[feature_cols].values
        testdataset = TestDataset(x_test)
        testloader = torch.utils.data.DataLoader(testdataset, batch_size=self.cfg.batch_size, shuffle=False)
    
        predictions = inference_fn(model, testloader, self.cfg.device)
        return oof, predictions
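A hedged sketch of how the per-fold outputs returned above are usually aggregated; the wrapper method, its arguments and the averaging scheme are assumptions, not part of the original class:

    def run_all(self, seeds, n_folds, n_train, n_test, num_targets):
        # hypothetical aggregation wrapper: average out-of-fold and test predictions
        # over every (seed, fold) combination
        oof = np.zeros((n_train, num_targets))
        predictions = np.zeros((n_test, num_targets))
        for seed in seeds:
            for fold in range(n_folds):
                oof_, pred_ = self._run_training(fold, seed)
                oof += oof_ / len(seeds)                      # each fold fills only its own rows
                predictions += pred_ / (n_folds * len(seeds))
        return oof, predictions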