def train(args: argparse.Namespace):
    """Train a model

    Parameters
    ----------
    args : argparse.Namespace
        Collection of user-defined command-line arguments.
    """

    # seed used in finding the best model
    seed = args.seed
    torch.manual_seed(seed)

    gpu = 1  # use the 2nd GPU (index 1)
    device = torch.device(gpu if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        torch.cuda.set_device(gpu)

    classes = ["dolphin", "not dolphin"]
    num_classes = len(classes)

    cnn_arch = args.arch
    modeltype = args.type
    features = args.features

    model = select_model(modeltype, cnn_arch, features, device)

    lr = args.lr
    optimizer = getattr(optim, args.optim)(model.parameters(), lr=lr)
    weight = args.weight
    batch_size = args.batchsize

    train_loader, test_loader = get_dataset(batch_size)

    weights = torch.FloatTensor([weight, 1.]).to(device)
    criterion = nn.CrossEntropyLoss(weight=weights)  # apply the class weights

    experiment = f"{cnn_arch}, batchsize={batch_size}, weights[{weight:.3f},{1.}], lr={lr:.3E}, {optimizer}, features={features}, seed={seed}"

    writer = SummaryWriter(f"runs/{experiment}")
    num_epochs = 30

    if modeltype == "Triton":
        acc = train_Triton(None, model, criterion, optimizer, train_loader,
                           test_loader, device, num_epochs, writer, 0)
    elif modeltype == "CNN":
        acc = train_CNN(None, model, criterion, optimizer, train_loader,
                        test_loader, device, num_epochs, writer, 0)

    torch.save(model, args.modelpath)
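

# Hedged usage sketch (not from the original repo): a minimal argparse driver
# for train() above. The flag names mirror the attributes the function reads
# (args.seed, args.arch, args.type, ...); the defaults are illustrative only.
def _train_cli():
    parser = argparse.ArgumentParser(description="Train a dolphin classifier")
    parser.add_argument("--seed", type=int, default=1)
    parser.add_argument("--arch", default="resnet", help="CNN backbone")
    parser.add_argument("--type", default="Triton", help="'CNN' or 'Triton'")
    parser.add_argument("--features", type=int, default=8)
    parser.add_argument("--lr", type=float, default=1e-3)
    parser.add_argument("--optim", default="Adam", help="torch.optim class name")
    parser.add_argument("--weight", type=float, default=1.0)
    parser.add_argument("--batchsize", type=int, default=32)
    parser.add_argument("--modelpath", default="model.pth")
    train(parser.parse_args())
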
def objective(trial):
    """Function that optuna will optimise. Basically a wrapper/loader for the
       whole train/evaluate loop.
    """

    torch.manual_seed(1)
    gpu = 1
    device = torch.device(gpu if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        torch.cuda.set_device(gpu)

    classes = ["dolphin", "not dolphin"]
    num_classes = len(classes)

    cnn_arch = trial.suggest_categorical("cnn_arch",
                                         ["resnet", "vgg", "densenet"])

    modeltype = "Triton"
    features = trial.suggest_int("features", 4, 16)

    model = select_model(modeltype, cnn_arch, features, device)

    optimizer_name = trial.suggest_categorical(
        "optimizer", ["Adam", "SGD", "AdamW", "Adadelta"])
    lr = trial.suggest_loguniform("lr", 1e-5, 1e-1)
    optimizer = getattr(optim, optimizer_name)(model.parameters(), lr=lr)

    batch_size = trial.suggest_categorical("batch_size", [16, 32, 64])
    train_loader, test_loader = get_dataset(batch_size)

    weight = trial.suggest_uniform("weight", 1., 5.)
    weights = torch.FloatTensor([weight, 1.]).to(device)
    criterion = nn.CrossEntropyLoss(weight=weights)

    experiment = f"batchsize={batch_size}, weight={weight:.3f}, lr={lr:.3E}, {optimizer_name}, features={features}, arch={cnn_arch}"

    writer = SummaryWriter(f"dolphin/triton-resnet-knn/{experiment}")
    num_epochs = 30

    acc = train_Triton(trial, model, criterion, optimizer, train_loader,
                       test_loader, device, num_epochs, writer, 0)

    return acc
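
# Hedged sketch (not from the original example): a minimal Optuna driver for
# the objective above. `run_study` is a hypothetical helper; the trial count
# is arbitrary.
def run_study(n_trials: int = 100):
    import optuna  # assumed available, since objective() takes an optuna Trial

    study = optuna.create_study(direction="maximize")  # objective returns accuracy
    study.optimize(objective, n_trials=n_trials)
    return study.best_params
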
Example #3
    _, test_loader = data_load.data_loaders(args.data,
                                            args.test_batch_size,
                                            augmentation=args.augmentation,
                                            normalization=args.normalization,
                                            drop_last=args.drop_last,
                                            shuffle=args.shuffle)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # grab a single batch from the training loader
    for X, y in train_loader:
        break

    best_err = 1
    err = 1
    sampler_indices = []
    model = [utils.select_model(args.data, args.model)]
    print(model[-1])
    if args.opt == 'adam':
        opt = optim.Adam(model[-1].parameters(), lr=args.lr)
    elif args.opt == 'sgd':
        opt = optim.SGD(model[-1].parameters(),
                        lr=args.lr,
                        momentum=args.momentum,
                        weight_decay=args.weight_decay)
    print(opt)
    if args.lr_scheduler == 'step':
        lr_scheduler = optim.lr_scheduler.StepLR(opt,
                                                 step_size=args.step_size,
                                                 gamma=args.gamma)
    elif args.lr_scheduler == 'multistep':
        # the original snippet is truncated here; the arguments below are an
        # assumed completion of the cut-off MultiStepLR call
        lr_scheduler = MultiStepLR(opt,
                                   milestones=args.milestones,
                                   gamma=args.gamma)
Example #4
def infer(modelpath: str, cnn_arch: str, modeltype: str, weight: float,
          batch_size: int, test: bool, features: int):
    """Summary

    Parameters
    ----------
    modelpath : str
        Path to the saved model
    cnn_arch : str
        CNN architecture to use in the model.
    modeltype : str
        Which model to infer on, CNN or Triton.
    weight : float
        Class weight applied to the "dolphin" class in the loss function.
    batch_size : int
        Batch size.
    test : bool
        If True, infer on the test dataset; if False, infer on the
        validation set.
    features : int
        Number of features to use in the Triton model.
    """

    # setup seed and device to use for infer
    torch.manual_seed(1)
    gpu = 1  # use only the 2nd GPU
    device = torch.device(gpu if torch.cuda.is_available() else "cpu")
    if torch.cuda.is_available():
        torch.cuda.set_device(gpu)

    # get data and setup criterion
    train_loader, test_loader = get_dataset(batch_size, test)
    weights = torch.FloatTensor([weight, 1.]).to(device)
    criterion = nn.CrossEntropyLoss(weight=weights)

    # load the pretrained model for inference; the full model object was
    # saved with torch.save, so it does not need to be rebuilt first
    model = torch.load(modelpath)
    model.eval()

    # infer
    if modeltype == "CNN":
        _, bacc, cm = evaluate_CNN(model,
                                   test_loader,
                                   criterion,
                                   device,
                                   0,
                                   writer=None,
                                   infer=True)
        cmname = "CNN-cm"
    elif modeltype == "Triton":
        _, bacc, cm = evaluate_Triton(model,
                                      test_loader,
                                      criterion,
                                      device,
                                      0,
                                      writer=None,
                                      infer=True)
        cmname = "Triton-cm"
    else:
        raise NotImplementedError(f"Model type '{modeltype}' not implemented")

    print(bacc)
    plot_confusion_matrix(cm, ["Dolphin", "Not Dolphin"],
                          cmname,
                          title='cm')
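
# Hedged usage sketch (not from the original repo): invoking infer() with
# illustrative values; the model path and hyperparameters are hypothetical.
# infer("models/triton-resnet.pth", cnn_arch="resnet", modeltype="Triton",
#       weight=2.0, batch_size=32, test=True, features=8)
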
Example #5
        ret, frame = video_source.read()
        if not ret:  # stop once the stream runs out of frames
            break
        frame_show = frame.copy()

        frame_show = inference_image(model, logger, img=frame_show, compare=False, record=False, dpi=dpi)
        
        if record:
            video_out.write(frame_show)
            if i % 20 == 19:  # log progress every 20 frames
                logger.info("Frame {0}/{1} processed.".format(i+1, frame_num+1))
        
        if cv2.waitKey(1) == 27:
            break

    video_source.release()
    if record:
        video_out.release()
        logger.info("(4) Inference Finished. Output video: {0}".format(inf_out_vid_src))


if __name__ == "__main__":
    # init model
    init_msg = "(1) Initiating Inference ... "
    logger, model = select_model(model_name=net, init_msg=init_msg)

    # load model weights
    load_weights(model, best_ckpt_src, logger)
    model = model.to(device)

    # inference 
    # inference_image(model, logger, compare=True)
    inference_video(model, logger)
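
# Hedged sketch (not from the original repo): the video loop above assumes
# video_source, video_out and frame_num already exist; with OpenCV they would
# typically be set up like this (paths, codec and fps are illustrative).
# import cv2
# video_source = cv2.VideoCapture("input.mp4")
# frame_num = int(video_source.get(cv2.CAP_PROP_FRAME_COUNT))
# fps = video_source.get(cv2.CAP_PROP_FPS)
# size = (int(video_source.get(cv2.CAP_PROP_FRAME_WIDTH)),
#         int(video_source.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# video_out = cv2.VideoWriter("output.mp4", cv2.VideoWriter_fourcc(*"mp4v"),
#                             fps, size)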
Example #6
def main():
    # Argument Settings
    parser = argparse.ArgumentParser(
        description='Image Tagging Classification from Naver Shopping Reviews')
    parser.add_argument('--sess_name',
                        default='example',
                        type=str,
                        help='Session name that is loaded')
    parser.add_argument('--checkpoint',
                        default='best',
                        type=str,
                        help='Checkpoint')
    parser.add_argument('--batch_size',
                        default=256,
                        type=int,
                        help='batch size')
    parser.add_argument('--num_workers',
                        default=16,
                        type=int,
                        help='The number of workers')
    parser.add_argument('--num_epoch',
                        default=100,
                        type=int,
                        help='The number of epochs')
    parser.add_argument('--model_name',
                        default='mobilenet_v2',
                        type=str,
                        help='[resnet50, rexnet, dnet1244, dnet1222]')
    parser.add_argument('--weight_file', default='model.pth', type=str)
    parser.add_argument('--optimizer', default='SGD', type=str)
    parser.add_argument('--lr', default=1e-2, type=float)
    parser.add_argument('--weight_decay', default=1e-5, type=float)
    parser.add_argument('--learning_anneal', default=1.1, type=float)
    parser.add_argument('--annealing_period', default=10, type=int)
    parser.add_argument('--num_gpu', default=1, type=int)
    parser.add_argument('--pretrain', action='store_true', default=False)
    parser.add_argument('--mode', default='train', help="Mode: 'train' or 'test'")
    parser.add_argument('--pause', default=0, type=int)
    parser.add_argument('--iteration', default=0, type=str)
    args = parser.parse_args()

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Model
    logger.info('Build Model')
    model = select_model(args.model_name, pretrain=args.pretrain, n_class=41)
    total_param = sum([p.numel() for p in model.parameters()])
    logger.info(f'Model size: {total_param} parameters')
    load_weight(model, args.weight_file)
    model = model.to(device)

    nu.bind_model(model)
    nsml.save('best')

    if args.pause:
        nsml.paused(scope=locals())

    if args.num_epoch == 0:
        return

    # Set the dataset
    logger.info('Set the dataset')
    df = pd.read_csv(f'{DATASET_PATH}/train/train_label')
    train_size = int(len(df) * 0.8)

    trainset = TagImageDataset(data_frame=df[:train_size],
                               root_dir=f'{DATASET_PATH}/train/train_data',
                               transform=train_transform)
    testset = TagImageDataset(data_frame=df[train_size:],
                              root_dir=f'{DATASET_PATH}/train/train_data',
                              transform=test_transform)

    train_loader = DataLoader(dataset=trainset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers)
    test_loader = DataLoader(dataset=testset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)

    criterion = nn.CrossEntropyLoss(reduction='mean')
    optimizer = select_optimizer(model.parameters(), args.optimizer, args.lr,
                                 args.weight_decay)

    criterion = criterion.to(device)

    if args.mode == 'train':
        logger.info('Start to train!')
        train_process(args=args,
                      model=model,
                      train_loader=train_loader,
                      test_loader=test_loader,
                      optimizer=optimizer,
                      criterion=criterion,
                      device=device)

    elif args.mode == 'test':
        nsml.load(args.checkpoint, session=args.sess_name)
        logger.info('[NSML] Model loaded from {}'.format(args.checkpoint))

        model.eval()
        logger.info('Start to test!')
        test_loss, test_acc, test_f1 = evaluate(model=model,
                                                test_loader=test_loader,
                                                device=device,
                                                criterion=criterion)
        logger.info(f'Test loss: {test_loss}, test acc: {test_acc}, test F1: {test_f1}')
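
# Hedged sketch (not the repo's actual helper): one plausible shape for the
# select_optimizer() used above, following the getattr(optim, name) pattern
# from the first example; the uniform weight_decay handling is an assumption.
def select_optimizer_sketch(params, name: str, lr: float, weight_decay: float):
    # resolve the optimizer class by name, e.g. 'SGD' -> torch.optim.SGD
    return getattr(optim, name)(params, lr=lr, weight_decay=weight_decay)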
Example #7
    data = preprocess(train, test, target, model_name)

    print('columns_length: ', len(data.columns))
    train = data[:len(train)]  # the slice keeps len(train) unchanged,
    test = data[len(train):]   # so this cleanly re-splits the concatenated frame

    """モデルとパラメータの設定
    """
    # TODO: txtファイルで重み保存フォルダに一緒に保存

    """学習と推論
    """
    train_x = train.drop(target, axis=1)
    y = train[target]
    test = test.drop(target, axis=1)
    model = select_model(model_name, train_x, y, test, output_path, fold_type, n_splits)
    pred = model.trainer()

    # various inference runs
    # model_name = 'cat'
    # fold_type = 'skfold'
    # n_splits = 4
    # target = 'Global_Sales'

    # test_sales = pd.DataFrame()
    # special_cols = ['NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
    # for target in special_cols:
    #     if target == 'Global_Sales':
    #         continue
    #     train_x = train.drop(special_cols, axis=1)
    #     y = train[target]