Example #1
def predict(config_file):
    """
    Main function that runs predictions
    Args:
        config_file [str]: path to config file
    Returns:
        None
    """
    ##################
    # configure logger
    ##################
    logger = set_logger("../log/predict.log")

    ##################
    # Load config from config file
    ##################
    logger.info(f"Load config from {config_file}")
    config = parse_config(config_file)
    image_width = config['common']['in_image_width']
    image_height = config['common']['in_image_height']
    predict_img = config['predict']['folder_path']
    weights_path = config['common']['weights_path']

    X, img_names = preprocess(predict_img, image_width, image_height)

    model = KeyPointModel().getModel()
    logger.info(f"Loading weights from {weights_path}")
    model.load_weights(weights_path)
    # logger.info("-----------Model Summary------------")
    # logger.info(model.summary())

    predicted_keypoints = model.predict(X)
    logger.info("Prediction Completed. Writing output to predicted.csv")
    write_output(predicted_keypoints, img_names)
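
The keys read above imply the shape of the parsed config. Below is a minimal sketch of what parse_config would need to return for predict() to work; the values are illustrative, not the project's real settings:

# Hypothetical config contents, mirroring the keys predict() reads; values made up.
config = {
    'common': {
        'in_image_width': 224,       # illustrative value
        'in_image_height': 224,      # illustrative value
        'weights_path': '../weights/model.h5',
    },
    'predict': {
        'folder_path': '../data/predict',
    },
}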
Example #2
import glob
import os

import torch
from torch.utils.data import DataLoader
from torchvision import transforms

# KeyPointDatasets and KeyPointModel are this project's own dataset and model classes.

transforms_test = transforms.Compose([
    transforms.ToPILImage(),
    transforms.Resize((360, 480)),  # torchvision Resize takes (height, width)
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.4372, 0.4372, 0.4373],
                         std=[0.2479, 0.2475, 0.2485])
])

datasets_test = KeyPointDatasets(root_dir="./data", transforms=transforms_test)

dataloader_test = DataLoader(datasets_test,
                             batch_size=4,
                             shuffle=True,  # harmless here, but unnecessary for evaluation
                             collate_fn=datasets_test.collect_fn)

model = KeyPointModel()

model.load_state_dict(torch.load("weights/epoch_290_0.232.pt",
                                 map_location="cpu"))  # map_location keeps the snippet runnable without a GPU

img_list = glob.glob(os.path.join("./data/images", "*.jpg"))

save_path = "./output"

img_tensor_list = []
img_name_list = []

for img_path in img_list:
    img_name = os.path.basename(img_path)
    img_name_list.append(img_name)
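
The example cuts off before img_tensor_list is filled. A hedged continuation sketch, assuming the images are loaded with OpenCV and pushed through the same test transforms before batching:

import cv2  # assumption: OpenCV is used for loading; ToPILImage accepts ndarrays directly

model.eval()  # inference mode for BatchNorm/Dropout layers
for img_path in img_list:
    img = cv2.imread(img_path)                    # HWC uint8 array
    img_tensor_list.append(transforms_test(img))

batch = torch.stack(img_tensor_list)              # (N, 3, 360, 480)
with torch.no_grad():
    heatmaps = model(batch)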
Example #3
def train(config_file):
    """
    Main function that trains and persists the model based on the training set.

    Args:
        config_file [str]: path to config file

    Returns:
        None
    """
    ################
    # config logger
    ################
    logger = set_logger("../log/train.log")

    ###############################
    # Load config from config file
    ###############################
    logger.info(f"Load config from {config_file}")
    config = parse_config(config_file)

    keypoints_csv = Path(config['common']['labels_csv_path'])
    val_split = config['common']['val_split']
    train_img_scr_path = config['common']['img_source_path']
    test_img_scr_path = config['common']['img_source_path']  # train and test images share the same source folder
    image_width = config['common']['in_image_width']
    image_height = config['common']['in_image_height']

    epochs = config['train']['epochs']
    train_batch_size = config['train']['batch_size']
    weight_path = config['common']['weight_path']
    no_of_aug = config['train']['no_of_aug']
    test_batch_size = config['test']['batch_size']

    ############
    # Load Data
    ############
    logger.info(f"----------------Load the data----------------")
    selected_img, keypoint_df = load_data(keypoints_csv)
    logger.info(f"Number of selected images are {selected_img.shape}")
    logger.info(f"Few of the selected images are {selected_img[0:5]}")

    ####################################
    # Get train and test data generators
    ####################################
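    # NOTE: this is a project-local split helper, not sklearn's train_test_split;
    # it takes a single split fraction and returns (X_train, y_train, X_test, y_test).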

    X_train, y_train, X_test, y_test = train_test_split(
        selected_img, keypoint_df, val_split)

    train_gen = Car(x_set=X_train,
                    y_set=y_train,
                    mode='Train',
                    data_path=train_img_scr_path,
                    image_width=image_width,
                    image_height=image_height,
                    batch_size=train_batch_size,
                    augmentations='Self',
                    no_of_aug=no_of_aug)
    test_gen = Car(
        x_set=X_test,
        y_set=y_test,
        mode='Test',
        data_path=test_img_scr_path,
        image_width=image_width,
        image_height=image_height,
        batch_size=test_batch_size,
    )

    #####################
    # Set and train model
    #####################

    logger.info(
        "-------------------------Initiate Model---------------------")
    model = KeyPointModel().getModel()

    logger.info(
        "--------------------Model Summary---------------------------")
    model.summary(print_fn=logger.info)  # summary() must be called; print_fn routes each line to the logger

    # compile the model
    model.compile(loss='mean_squared_error',
                  optimizer='adam',
                  metrics=['mean_absolute_error'])

    # modelCheckPoint = ModelCheckpoint('car-{val_loss:.2f}.h5', monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True)
    earlyS = EarlyStopping(monitor='val_loss',
                           min_delta=1,
                           patience=3,
                           restore_best_weights=True)
    reducelr = ReduceLROnPlateau(monitor='val_loss',
                                 factor=0.1,
                                 patience=2,
                                 min_lr=1e-7)

    history = model.fit(x=train_gen,
                        validation_data=test_gen,
                        callbacks=[earlyS, reducelr],
                        epochs=epochs)
    logger.info(history.history)  # history.history holds the per-epoch loss/metric values
    logger.info("------------Saving Weights--------------")
    model.save_weights(weight_path)
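
The Car generator itself is not shown. Since instances are passed straight to model.fit, it is presumably a keras.utils.Sequence; a minimal hypothetical sketch of that shape (class name and internals assumed, not the project's real code):

import math
import numpy as np
from tensorflow import keras

class CarLikeSequence(keras.utils.Sequence):  # hypothetical stand-in for Car
    def __init__(self, x_set, y_set, batch_size):
        self.x, self.y, self.batch_size = x_set, y_set, batch_size

    def __len__(self):
        # number of batches per epoch
        return math.ceil(len(self.x) / self.batch_size)

    def __getitem__(self, idx):
        lo = idx * self.batch_size
        hi = lo + self.batch_size
        # the real Car would load, resize, and (in 'Train' mode) augment images here
        return np.asarray(self.x[lo:hi]), np.asarray(self.y[lo:hi])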
Example #4
    transforms_all = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((480, 360)),  # note: Resize takes (height, width); the other snippets use (360, 480)
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.4372, 0.4372, 0.4373],
                             std=[0.2479, 0.2475, 0.2485])
    ])

    datasets = KeyPointDatasets(root_dir="./data", transforms=transforms_all)

    data_loader = DataLoader(datasets,
                             shuffle=True,
                             batch_size=bs,  # bs is defined earlier in the script (not shown)
                             collate_fn=datasets.collect_fn)

    model = KeyPointModel()

    optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
    # criterion = torch.nn.SmoothL1Loss()
    criterion = torch.nn.MSELoss()
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=30,
                                                gamma=0.1)

    for epoch in range(total_epoch):  # total_epoch likewise comes from outside this snippet
        train(model, epoch, data_loader, optimizer, criterion)
        loss = test(model, epoch, data_loader, criterion)

        if epoch % 10 == 0:
            # produces names like weights/epoch_290_0.232.pt, the checkpoint loaded in Example #2
            torch.save(model.state_dict(),
                       "weights/epoch_%d_%.3f.pt" % (epoch, loss * 1000))
Example #5
    transforms_all = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((360, 480)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.4372, 0.4372, 0.4373],
                             std=[0.2479, 0.2475, 0.2485])
    ])

    dataset = KeyPointDatasets(root_dir="./data", transforms=transforms_all)

    dataloader = DataLoader(dataset,
                            shuffle=True,
                            batch_size=1,
                            collate_fn=dataset.collect_fn)

    model = KeyPointModel()
    model.load_state_dict(torch.load(args.model))  # args.model: checkpoint path from the script's argparse setup (not shown)
    model.eval()  # disable BatchNorm/Dropout training behaviour for inference

    for batch_idx, (image, label) in enumerate(dataloader):
        # print(image.shape)
        bs = image.shape[0]
        hm = model(image)

        hm = _nms(hm)

        scores, inds, clses, ys, xs = _topk(hm, K=1)

        print(scores, '\n', inds, '\n', clses, '\n', ys, '\n', xs)

        hm = hm.detach().numpy()
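
_nms and _topk are project helpers in the CenterNet style. Their canonical forms look roughly like the sketch below (a simplified version, not necessarily this repo's exact code):

import torch
import torch.nn.functional as F

def _nms(heat, kernel=3):
    # keep only local maxima: a cell survives if it equals the max of its
    # kernel x kernel neighbourhood; everything else is zeroed
    pad = (kernel - 1) // 2
    hmax = F.max_pool2d(heat, (kernel, kernel), stride=1, padding=pad)
    keep = (hmax == heat).float()
    return heat * keep

def _topk(scores, K=40):
    # pick the K highest peaks over the flattened (class, y, x) volume and
    # recover class ids and spatial coordinates from the flat indices
    batch, cat, height, width = scores.size()
    topk_scores, topk_inds = torch.topk(scores.view(batch, -1), K)
    topk_clses = torch.div(topk_inds, height * width, rounding_mode='floor')
    topk_inds = topk_inds % (height * width)
    topk_ys = torch.div(topk_inds, width, rounding_mode='floor').float()
    topk_xs = (topk_inds % width).float()
    return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs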