# Example #1
# 0
def main():
    """Retrain a SqueezeNet classifier on neodroid mixed observations, optionally
    resuming from the most recent checkpoint, then trace the model with TorchScript
    and save it to disk.

    Command-line flags:
        --inference/-i, --real_data/-r, --export/-e: parsed but not used in this body.
        --continue_training/-c: resume from the newest ``*.model`` checkpoint.
        --no_cuda/-k: uses ``store_false`` so the attribute defaults to True and the
            flag sets it False — NOTE(review): confirm this inversion is intended.

    Relies on module-level config (``seed``, ``batch_size``, ``learning_rate``,
    ``momentum``, ``wd``, ``num_updates``) defined elsewhere in the file.
    """
    args = argparse.ArgumentParser()
    args.add_argument("--inference", "-i", action="store_true")
    args.add_argument("--continue_training", "-c", action="store_true")
    args.add_argument("--real_data", "-r", action="store_true")
    args.add_argument("--no_cuda", "-k", action="store_false")
    args.add_argument("--export", "-e", action="store_true")
    options = args.parse_args()

    # Per-run directories keyed by wall-clock timestamp.
    timeas = str(time.time())
    this_model_path = PROJECT_APP_PATH.user_data / timeas
    this_log = PROJECT_APP_PATH.user_log / timeas
    ensure_directory_exist(this_model_path)
    ensure_directory_exist(this_log)

    best_model_name = "best_validation_model.model"
    interrupted_path = str(this_model_path / best_model_name)

    torch.manual_seed(seed)

    if not options.no_cuda:
        global_torch_device("cpu")

    env = MixedObservationWrapper()
    env.seed(seed)
    train_iter = batch_generator(iter(env), batch_size)
    num_categories = env.sensor("Class").space.discrete_steps
    # NOTE(review): validation iterator aliases the training iterator, so
    # "validation" metrics are computed on training batches — confirm intended.
    val_iter = train_iter

    model, params_to_update = squeezenet_retrain(num_categories)
    print(params_to_update)

    model = model.to(global_torch_device())

    if options.continue_training:
        _list_of_files = list(PROJECT_APP_PATH.user_data.rglob("*.model"))
        # Fixed: the old code guarded `str(max(...))` against None, which can
        # never happen, while max() on an empty list raised ValueError. Guard
        # the list itself and skip resuming when no checkpoint exists.
        if _list_of_files:
            latest_model_path = str(max(_list_of_files, key=os.path.getctime))
            print(f"loading previous model: {latest_model_path}")
            model.load_state_dict(torch.load(latest_model_path))

    criterion = torch.nn.CrossEntropyLoss().to(global_torch_device())

    optimizer_ft = optim.SGD(model.parameters(),
                             lr=learning_rate,
                             momentum=momentum,
                             weight_decay=wd)
    exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer_ft,
                                                       step_size=7,
                                                       gamma=0.1)

    writer = TensorBoardPytorchWriter(this_log)

    # Named toggle instead of the previous dead-literal `if True:`.
    train_model = True
    if train_model:
        model = predictor_response_train_model_neodroid_observations(
            model,
            train_iterator=train_iter,
            criterion=criterion,
            optimizer=optimizer_ft,
            scheduler=exp_lr_scheduler,
            writer=writer,
            interrupted_path=interrupted_path,
            val_data_iterator=val_iter,
            num_updates=num_updates,
        )

    # Sanity-check one batch: predict, print, and show images with p(red)/t(rue) labels.
    inputs, true_label = zip(*next(train_iter))
    rgb_imgs = torch_vision_normalize_batch_nchw(
        uint_hwc_to_chw_float_tensor(
            rgb_drop_alpha_batch_nhwc(to_tensor(inputs))))

    pred = model(rgb_imgs)
    predicted = torch.argmax(pred, -1)
    true_label = to_tensor(true_label, dtype=torch.long)
    print(predicted, true_label)
    horizontal_imshow(
        inputs,
        [f"p:{int(p)},t:{int(t)}" for p, t in zip(predicted, true_label)])
    pyplot.show()

    writer.close()
    env.close()

    torch.cuda.empty_cache()

    # Export: trace on CPU with a dummy input; assumes the model takes
    # (1, 3, 256, 256) images — TODO confirm input size.
    model.eval()
    example = torch.rand(1, 3, 256, 256)
    traced_script_module = torch.jit.trace(model.to("cpu"), example)
    # NOTE(review): filename says resnet18 but the model is a squeezenet — confirm.
    traced_script_module.save("resnet18_v.model")
# Example #2
# 0
def main():
    """Retrain a SqueezeNet classifier on MNIST, optionally resuming from the most
    recent checkpoint, then run one batch through the model and display predictions.

    Command-line flags:
        --inference/-i, --real_data/-r, --export/-e: parsed but not used in this body.
        --continue_training/-c: resume from the newest ``*.model`` checkpoint.
        --no_cuda/-k: uses ``store_false`` so the attribute defaults to True and the
            flag sets it False — NOTE(review): confirm this inversion is intended.

    Relies on module-level config (``seed``, ``batch_size``, ``learning_rate``,
    ``momentum``, ``wd``, ``NUM_UPDATES``) defined elsewhere in the file.
    """
    args = argparse.ArgumentParser()
    args.add_argument("--inference", "-i", action="store_true")
    args.add_argument("--continue_training", "-c", action="store_true")
    args.add_argument("--real_data", "-r", action="store_true")
    args.add_argument("--no_cuda", "-k", action="store_false")
    args.add_argument("--export", "-e", action="store_true")
    options = args.parse_args()

    train_model_flag = True  # hard-coded training toggle
    # Per-run directories keyed by wall-clock timestamp.
    timeas = str(time.time())
    this_model_path = PROJECT_APP_PATH.user_data / timeas
    this_log = PROJECT_APP_PATH.user_log / timeas
    ensure_directory_exist(this_model_path)
    ensure_directory_exist(this_log)

    best_model_name = "best_validation_model.model"
    interrupted_path = str(this_model_path / best_model_name)

    torch.manual_seed(seed)

    if not options.no_cuda:
        global_torch_device("cpu")

    # `recycle` presumably re-yields the loader forever so `next()` never
    # exhausts it — verify against its definition.
    dataset = MNISTDataset2(PROJECT_APP_PATH.user_cache / "mnist", split=Split.Training)
    train_iter = iter(
        recycle(
            DataLoader(dataset, batch_size=batch_size, shuffle=True, pin_memory=True)
        )
    )

    val_iter = iter(
        recycle(
            DataLoader(
                MNISTDataset2(
                    PROJECT_APP_PATH.user_cache / "mnist", split=Split.Validation
                ),
                batch_size=batch_size,
                shuffle=True,
                pin_memory=True,
            )
        )
    )

    model, params_to_update = squeezenet_retrain(len(dataset.categories))
    print(params_to_update)
    model = model.to(global_torch_device())

    if options.continue_training:
        _list_of_files = list(PROJECT_APP_PATH.user_data.rglob("*.model"))
        # Fixed: the old code guarded `str(max(...))` against None, which can
        # never happen, while max() on an empty list raised ValueError. Guard
        # the list itself and skip resuming when no checkpoint exists.
        if _list_of_files:
            latest_model_path = str(max(_list_of_files, key=os.path.getctime))
            print(f"loading previous model: {latest_model_path}")
            model.load_state_dict(torch.load(latest_model_path))

    criterion = torch.nn.CrossEntropyLoss().to(global_torch_device())

    optimizer_ft = optim.SGD(
        model.parameters(), lr=learning_rate, momentum=momentum, weight_decay=wd
    )
    exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(
        optimizer_ft, step_size=7, gamma=0.1
    )

    writer = TensorBoardPytorchWriter(this_log)

    if train_model_flag:
        model = predictor_response_train_model(
            model,
            train_iterator=train_iter,
            criterion=criterion,
            optimizer=optimizer_ft,
            scheduler=exp_lr_scheduler,
            writer=writer,
            interrupted_path=interrupted_path,
            val_data_iterator=val_iter,
            num_updates=NUM_UPDATES,
        )

    # Sanity-check one batch: tile the single MNIST channel to 3 channels for
    # the squeezenet input, predict, print, and show p(red)/t(rue) labels.
    inputs, true_label = next(train_iter)
    inputs = to_tensor(inputs, dtype=torch.float, device=global_torch_device()).repeat(
        1, 3, 1, 1
    )
    true_label = to_tensor(true_label, dtype=torch.long, device=global_torch_device())

    pred = model(inputs)
    predicted = torch.argmax(pred, -1)
    # (removed a redundant second to_tensor(true_label, ...) — it was already
    # converted to a long tensor above, and only int(t) is taken from it below)
    print(predicted, true_label)
    horizontal_imshow(
        inputs, [f"p:{int(p)},t:{int(t)}" for p, t in zip(predicted, true_label)]
    )
    pyplot.show()

    writer.close()
    torch_clean_up()
# Example #3
# 0
def main():
    """Train (default) or evaluate a SkipHourglassFission model on camera
    observations from a neodroid environment.

    The ``-i`` flag uses ``store_false``: with no flag the option is True and the
    model is trained then tested; passing ``-i`` skips training and instead tests
    the most recently created model directory's checkpoint.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", action="store_false")
    options = parser.parse_args()

    # Hyper-parameters / run configuration.
    seed = 42
    batch_size = 8  # 12
    depth = 4  # 5
    segmentation_channels = 3
    tqdm.monitor_interval = 0
    learning_rate = 3e-3
    lr_sch_step_size = 1000 // batch_size + 4
    lr_sch_gamma = 0.1
    model_start_channels = 16

    # Per-run output directory keyed by wall-clock timestamp.
    models_root = Path.home() / "Models" / "Vision"
    run_dir = models_root / str(time.time())
    best_model_path = "INTERRUPTED_BEST.pth"
    interrupted_path = str(run_dir / best_model_path)

    writer = TensorBoardPytorchWriter(str(run_dir))
    env = CameraObservationWrapper()

    torch.manual_seed(seed)
    env.seed(seed)

    device = global_torch_device()

    aeu_model = SkipHourglassFission(
        segmentation_channels,
        (segmentation_channels,),
        encoding_depth=depth,
        start_channels=model_start_channels,
    ).to(global_torch_device())

    optimizer_ft = optim.Adam(aeu_model.parameters(), lr=learning_rate)
    exp_lr_scheduler = lr_scheduler.StepLR(
        optimizer_ft,
        step_size=lr_sch_step_size,
        gamma=lr_sch_gamma,
    )

    data_iter = iter(neodroid_camera_data_iterator(env, device, batch_size))

    if not options.i:
        # Evaluate the newest run directory's best checkpoint.
        # (presumably a previous run exists; max() would raise on an empty dir)
        previous_runs = list(models_root.glob("*"))
        latest_model_path = (
            str(max(previous_runs, key=os.path.getctime)) + f"/{best_model_path}"
        )
        print("loading previous model: " + latest_model_path)
        test_model(aeu_model, data_iter, load_path=latest_model_path)
    else:
        trained_aeu_model = train_model(
            aeu_model,
            data_iter,
            optimizer_ft,
            exp_lr_scheduler,
            writer,
            interrupted_path,
        )
        test_model(trained_aeu_model, data_iter)

    # Tear down in the original order: free CUDA cache, close env, close writer.
    torch.cuda.empty_cache()
    env.close()
    writer.close()