def visualize_shanghaitech_nonoverlap_downsample():
    HARD_CODE = HardCodeVariable()
    shanghaitech_data = ShanghaiTechDataPath(root=HARD_CODE.SHANGHAITECH_PATH)
    shanghaitech_data_part_a_train = shanghaitech_data.get_a().get_train().get()
    saved_folder = "visualize/test_dataloader"
    os.makedirs(saved_folder, exist_ok=True)
    DATA_PATH = HARD_CODE.SHANGHAITECH_PATH_PART_B
    train_list, val_list = get_train_val_list(shanghaitech_data_part_a_train,
                                              test_size=0.2)
    test_list = None

    # create data loader
    train_loader, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name="shanghaitech_non_overlap_downsample",
        visualize_mode=True)

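    # pull a single batch to inspect tensor shapes and the density-map count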
    img, label = next(iter(train_loader))

    print(img.shape)
    save_img(img, os.path.join(saved_folder,
                               "overlap_downsample_loader_1.png"))
    save_density_map(
        label[0].numpy()[0],
        os.path.join(saved_folder,
                     "overlap_downsample_loader_with_p_density1.png"))

    print("count1 ", label.numpy()[0].sum())
    print("count2 ", label.numpy()[0].sum())
    print("count3 ", label.numpy()[0].sum())

    print("s1 ", label.shape)


def visualize_shanghaitech_keepfull():
    HARD_CODE = HardCodeVariable()
    shanghaitech_data = ShanghaiTechDataPath(root=HARD_CODE.SHANGHAITECH_PATH)
    shanghaitech_data_part_a_train = shanghaitech_data.get_a().get_train().get()
    saved_folder = "visualize/test_dataloader_shanghaitech"
    os.makedirs(saved_folder, exist_ok=True)
    train_list, val_list = get_train_val_list(shanghaitech_data_part_a_train,
                                              test_size=0.2)
    test_list = None
    train_loader, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name="shanghaitech_keepfull",
        visualize_mode=True)

    # do with train loader
    train_loader_iter = iter(train_loader)
    for i in range(10):
        img, label = next(train_loader_iter)
        save_img(img, os.path.join(saved_folder,
                                   "train_img" + str(i) + ".png"))
        save_density_map(
            label.numpy()[0][0],
            os.path.join(saved_folder, "train_label" + str(i) + ".png"))


def visualize_shanghaitech_pacnn_with_perspective():
    HARD_CODE = HardCodeVariable()
    shanghaitech_data = ShanghaiTechDataPath(root=HARD_CODE.SHANGHAITECH_PATH)
    shanghaitech_data_part_a_train = shanghaitech_data.get_a().get_train().get()
    saved_folder = "visualize/test_dataloader"
    os.makedirs(saved_folder, exist_ok=True)
    DATA_PATH = HARD_CODE.SHANGHAITECH_PATH_PART_A
    train_list, val_list = get_train_val_list(shanghaitech_data_part_a_train,
                                              test_size=0.2)
    test_list = None

    # create data loader
    train_loader, val_loader, test_loader = get_dataloader(
        train_list, val_list, test_list, dataset_name="ucf_cc_50")
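    # note: the loaders above are not used below; a PACNN-specific ListDataset
    # loader is constructed instead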
    train_loader_pacnn = torch.utils.data.DataLoader(ListDataset(
        train_list,
        shuffle=True,
        transform=transforms.Compose([transforms.ToTensor()]),
        train=True,
        batch_size=1,
        num_workers=4,
        dataset_name="shanghaitech_pacnn_with_perspective",
        debug=True),
                                                     batch_size=1,
                                                     num_workers=4)

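    # the pacnn_with_perspective loader yields one image and, judging by the
    # file names below, five label maps: three density scales plus two
    # perspective maps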
    img, label = next(iter(train_loader_pacnn))

    print(img.shape)
    save_img(img, os.path.join(saved_folder, "pacnn_loader_img.png"))
    save_density_map(
        label[0].numpy()[0],
        os.path.join(saved_folder, "pacnn_loader_with_p_density1.png"))
    save_density_map(
        label[1].numpy()[0],
        os.path.join(saved_folder, "pacnn_loader_with_p_density2.png"))
    save_density_map(
        label[2].numpy()[0],
        os.path.join(saved_folder, "pacnn_loader_with_p_density3.png"))
    save_density_map(label[3].numpy()[0],
                     os.path.join(saved_folder, "pacnn_loader_p_s_4.png"))
    save_density_map(label[4].numpy()[0],
                     os.path.join(saved_folder, "pacnn_loader_p_5.png"))
    print("count1 ", label[0].numpy()[0].sum())
    print("count2 ", label[1].numpy()[0].sum())
    print("count3 ", label[2].numpy()[0].sum())
    print("count4 ", label[3].numpy()[0].sum())
    print("count5 ", label[4].numpy()[0].sum())

    print("s1 ", label[0].shape)
    print("s2 ", label[1].shape)
    print("s3 ", label[2].shape)
    print("s4 ", label[3].shape)
    print("s5 ", label[4].shape)
Example #4
def visualize_evaluation_shanghaitech_keepfull(
        path=None,
        dataset="shanghaitech_keepfull_r50",
        output="visualize/verify_dataloader_shanghaitech",
        meta_data="data_info.txt"):
    HARD_CODE = HardCodeVariable()
    if path is None:
        shanghaitech_data = ShanghaiTechDataPath(
            root=HARD_CODE.SHANGHAITECH_PATH)
        shanghaitech_data_part_a_train = shanghaitech_data.get_a().get_train().get()
        path = shanghaitech_data_part_a_train
    saved_folder = output
    os.makedirs(saved_folder, exist_ok=True)
    train_list, val_list = get_train_val_list(path, test_size=0.2)
    test_list = None
    train_loader, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name=dataset,
        visualize_mode=True,
        debug=True)

    # do with train loader
    train_loader_iter = iter(train_loader)
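    # write one line per image: <image id> <density-map sum> <point count>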
    f = open(meta_data, "w")
    total = len(train_loader)
    for i in range(len(train_loader)):
        img, label, debug_data = next(train_loader_iter)
        p_count = debug_data["p_count"]
        name = debug_data["name"][0]
        item_number = img_name_to_int(name)
        density_map_count = label.sum()
        log_str = str(item_number) + " " + str(
            density_map_count.item()) + " " + str(p_count.item())
        print(log_str)
        f.write(log_str + "\n")
        save_img(
            img,
            os.path.join(saved_folder,
                         "train_img_" + str(item_number) + ".png"))
        save_path = os.path.join(saved_folder,
                                 "train_label_" + str(item_number) + ".png")
        save_density_map(label.numpy()[0][0], save_path)
        print(str(i) + "/" + str(total))
    f.close()
Example #5
def visualize_evaluation_shanghaitech_keepfull(model):
    model = model.cuda()
    model.eval()
    HARD_CODE = HardCodeVariable()
    shanghaitech_data = ShanghaiTechDataPath(root=HARD_CODE.SHANGHAITECH_PATH)
    shanghaitech_data_part_a_train = shanghaitech_data.get_a().get_train().get()
    saved_folder = "visualize/evaluation_dataloader_shanghaitech"
    os.makedirs(saved_folder, exist_ok=True)
    train_list, val_list = get_train_val_list(shanghaitech_data_part_a_train,
                                              test_size=0.2)
    test_list = None
    train_loader, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name="shanghaitech_keepfull",
        visualize_mode=False,
        debug=True)

    # do with train loader
    train_loader_iter = iter(train_loader)
    for i in range(10):
        img, label, count = next(train_loader_iter)
        # save_img(img, os.path.join(saved_folder, "train_img_" + str(i) +".png"))
        save_path = os.path.join(saved_folder,
                                 "train_label_" + str(i) + ".png")
        save_pred_path = os.path.join(saved_folder,
                                      "train_pred_" + str(i) + ".png")
        save_density_map(label.numpy()[0][0], save_path)
        pred = model(img.cuda())
        predicted_density_map = pred.detach().cpu().clone().numpy()
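        # the prediction is at 1/8 input resolution; resizing by 8x per side
        # scales the pixel count by 64, so divide by 64 to keep the map's sum
        # (the predicted count) unchanged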
        predicted_density_map_enlarge = cv2.resize(
            np.squeeze(predicted_density_map[0][0]),
            (int(predicted_density_map.shape[3] * 8),
             int(predicted_density_map.shape[2] * 8)),
            interpolation=cv2.INTER_CUBIC) / 64
        save_density_map(predicted_density_map_enlarge, save_pred_path)
        print("pred " + save_pred_path + " value " +
              str(predicted_density_map.sum()))
        print("cont compare " + str(predicted_density_map.sum()) + " " +
              str(predicted_density_map_enlarge.sum()))
        print("shape compare " + str(predicted_density_map.shape) + " " +
              str(predicted_density_map_enlarge.shape))


def visualize_ucf_cc_50_pacnn():
    HARD_CODE = HardCodeVariable()
    saved_folder = "visualize/test_dataloader"
    os.makedirs(saved_folder, exist_ok=True)
    DATA_PATH = HARD_CODE.UCF_CC_50_PATH
    train_list, val_list = get_train_val_list(DATA_PATH, test_size=0.2)
    test_list = None

    # create data loader
    train_loader, val_loader, test_loader = get_dataloader(
        train_list, val_list, test_list, dataset_name="ucf_cc_50")
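    # note: the loaders above are not used below; a PACNN-specific ListDataset
    # loader is constructed instead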
    train_loader_pacnn = torch.utils.data.DataLoader(ListDataset(
        train_list,
        shuffle=True,
        transform=transforms.Compose([transforms.ToTensor()]),
        train=True,
        batch_size=1,
        num_workers=4,
        dataset_name="shanghaitech_pacnn",
        debug=True),
                                                     batch_size=1,
                                                     num_workers=4)

    img, label = next(iter(train_loader_pacnn))

    print(img.shape)
    save_img(img, os.path.join(saved_folder, "pacnn_loader_img.png"))
    save_density_map(label[0].numpy()[0],
                     os.path.join(saved_folder, "pacnn_loader_density1.png"))
    save_density_map(label[1].numpy()[0],
                     os.path.join(saved_folder, "pacnn_loader_density2.png"))
    save_density_map(label[2].numpy()[0],
                     os.path.join(saved_folder, "pacnn_loader_density3.png"))
    print("count1 ", label[0].numpy()[0].sum())
    print("count2 ", label[1].numpy()[0].sum())
    print("count3 ", label[2].numpy()[0].sum())
        print("will use shanghaitech dataset with crop ")
    elif dataset_name == "shanghaitech_keepfull":
        print("will use shanghaitech_keepfull")
    else:
        print("cannot detect dataset_name")
        print("current dataset_name is ", dataset_name)

    # create list
    train_list = create_image_list(TRAIN_PATH)
    test_list = create_image_list(TEST_PATH)

    # create data loader
    train_loader, train_loader_for_eval, test_loader = get_dataloader(
        train_list,
        train_list,
        test_list,
        dataset_name=dataset_name,
        batch_size=args.batch_size,
        visualize_mode=args.no_norm)

    print("len train_loader ", len(train_loader))

    # model
    model_name = args.model
    experiment.log_other("model", model_name)
    if model_name == "CompactCNNV2":
        model = CompactCNNV2()
    elif model_name == "CompactCNNV7":
        model = CompactCNNV7()
    else:
        print("error: you didn't pick a model")
from models import CSRNet
import os

if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    args = real_args_parse()
    print(args)
    DATA_PATH = args.input

    # create list
    train_list, val_list = get_train_val_list(DATA_PATH, test_size=0.2)
    test_list = None

    # create data loader
    train_loader, val_loader, test_loader = get_dataloader(
        train_list, val_list, test_list, dataset_name="ucf_cc_50")

    # model
    model = CSRNet()
    model = model.to(device)

    # loss function
    loss_fn = nn.MSELoss(reduction='sum').cuda()  # size_average=False is deprecated

    optimizer = torch.optim.SGD(model.parameters(),
                                args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    trainer = create_supervised_trainer(model,
                                        optimizer,
                                        loss_fn,
                                        device=device)

    DATA_PATH = "/data/ShanghaiTech_fixed_sigma/part_B/"
    TRAIN_PATH = os.path.join(DATA_PATH, "train_data_train_split")
    VAL_PATH = os.path.join(DATA_PATH, "train_data_validate_split")
    TEST_PATH = os.path.join(DATA_PATH, "test_data")

    # create list
    train_list = create_image_list(TRAIN_PATH)
    val_list = create_image_list(VAL_PATH)
    test_list = create_image_list(TEST_PATH)

    # train_loader, train_loader_eval, val_loader, test_loader = get_dataloader(train_list, val_list, test_list,
    #                                                                           dataset_name="shanghaitech_more_random"
    #                                                                           , batch_size=1,
    #                                                                           train_loader_for_eval_check=True)

    train_loader, train_loader_eval, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name="shanghaitech_non_overlap",
        batch_size=1,
        train_loader_for_eval_check=True,
        cache=True)
    print(len(train_loader))
    print(len(val_loader))

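    # iterate twice: with cache=True the second pass should be served from the cache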
    for img, label in train_loader:
        print(img.shape, label)

    for img, label in train_loader:
        print(img.shape, label)
Example #10
    TEST_PATH = os.path.join(DATA_PATH, "test_data")
    dataset_name = args.datasetname
    if dataset_name=="shanghaitech":
        print("will use shanghaitech dataset with crop ")
    elif dataset_name == "shanghaitech_keepfull":
        print("will use shanghaitech_keepfull")
    else:
        print("cannot detect dataset_name")
        print("current dataset_name is ", dataset_name)

    # create list
    train_list = create_image_list(TRAIN_PATH)
    test_list = create_image_list(TEST_PATH)

    # create data loader
    train_loader, train_loader_for_eval, test_loader = get_dataloader(
        train_list,
        train_list,
        test_list,
        dataset_name=dataset_name,
        batch_size=args.batch_size)

    print("len train_loader ", len(train_loader))

    # model
    model_name = args.model
    experiment.log_other("model", model_name)
    if model_name == "M1":
        model = M1()
    elif model_name == "M2":
        model = M2()
    elif model_name == "M3":
        model = M3()
    elif model_name == "M4":
        model = M4()
    elif model_name == "CustomCNNv2":
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    args = real_args_parse()
    print(args)
    DATA_PATH = args.input
    TRAIN_PATH = os.path.join(DATA_PATH, "train_data")
    TEST_PATH = os.path.join(DATA_PATH, "test_data")


    # create list
    train_list, val_list = get_train_val_list(TRAIN_PATH)
    test_list = create_training_image_list(TEST_PATH)

    # create data loader
    train_loader, val_loader, test_loader = get_dataloader(train_list, val_list, test_list)


    # model
    model = CSRNet()
    model = model.to(device)

    # loss function
    loss_fn = nn.MSELoss(reduction='sum').cuda()  # size_average=False is deprecated

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.decay)

    trainer = create_supervised_trainer(model, optimizer, loss_fn, device=device)
    evaluator = create_supervised_evaluator(model, device=device)

    dataset_name = args.datasetname
    if dataset_name=="shanghaitech":
        print("will use shanghaitech dataset with crop ")
    elif dataset_name == "shanghaitech_keepfull":
        print("will use shanghaitech_keepfull")
    else:
        print("cannot detect dataset_name")
        print("current dataset_name is ", dataset_name)

    # create list
    train_list = create_image_list(TRAIN_PATH)
    val_list = create_image_list(VAL_PATH)
    test_list = create_image_list(TEST_PATH)
    train_loader, train_loader_eval, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name=dataset_name,
        batch_size=args.batch_size,
        train_loader_for_eval_check=True,
        cache=args.cache,
        pin_memory=args.pin_memory,
        test_size=1)

    print("len train_loader ", len(train_loader))

    # model
    model_name = args.model

    if model_name == "M1":
        model = M1()
    elif model_name == "M2":
        model = M2()
    elif model_name == "M3":
        model = M3()
    elif model_name == "M4":
if __name__ == "__main__":
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(device)
    args = context_aware_network_args_parse()
    print(args)
    DATA_PATH = args.input
    TRAIN_PATH = os.path.join(DATA_PATH, "train_data")
    TEST_PATH = os.path.join(DATA_PATH, "test_data")

    # create list
    train_list, val_list = get_train_val_list(TRAIN_PATH)
    test_list = None

    # create data loader
    train_loader, val_loader, test_loader = get_dataloader(
        train_list, val_list, test_list, dataset_name="shanghaitech")

    # model
    model = AttnCanAdcrowdNetFreezeVgg()
    model = model.to(device)

    # loss function
    loss_fn = nn.MSELoss(reduction='sum').to(device)  # size_average=False is deprecated

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 weight_decay=args.decay)

    trainer = create_supervised_trainer(model,
                                        optimizer,
                                        loss_fn,
                                        device=device)
Example #14
from models.meow_experiment.kitten_meow_1 import H1_Bigtail3
from models import CustomCNNv2, CompactCNNV7
import os
from model_util import get_lr, BestMetrics
"""
shanghaitech_more_random
"""

if __name__ == "__main__":
    DATA_PATH = "/data/ShanghaiTech_fixed_sigma/part_B/"
    TRAIN_PATH = os.path.join(DATA_PATH, "train_data_train_split")
    VAL_PATH = os.path.join(DATA_PATH, "train_data_validate_split")
    TEST_PATH = os.path.join(DATA_PATH, "test_data")

    # create list
    train_list = create_image_list(TRAIN_PATH)
    val_list = create_image_list(VAL_PATH)
    test_list = create_image_list(TEST_PATH)

    train_loader, train_loader_eval, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name="shanghaitech_more_random",
        batch_size=20,
        train_loader_for_eval_check=True)
    print(len(train_loader))
    print(len(val_loader))

    for data, label in val_loader:
        print(label)
from models import CustomCNNv2, CompactCNNV7
import os
from model_util import get_lr, BestMetrics
"""
shanghaitech_more_random
"""

if __name__ == "__main__":
    DATA_PATH = "/data/my_crowd_image/dataset_batch1245/mybikedata"
    TRAIN_PATH = os.path.join(DATA_PATH, "train_data")
    VAL_PATH = os.path.join(DATA_PATH, "train_data")
    TEST_PATH = os.path.join(DATA_PATH, "test_data")

    # create list
    train_list = create_image_list(TRAIN_PATH)
    val_list = create_image_list(VAL_PATH)
    test_list = create_image_list(TEST_PATH)

    train_loader, train_loader_eval, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name="my_bike_non_overlap",
        batch_size=1,
        debug=True,
        train_loader_for_eval_check=True)
    print(len(train_loader))
    print(len(val_loader))

    for batch in val_loader:
        print(batch[0].shape)
    TEST_PATH = os.path.join(DATA_PATH, "test_data")
    dataset_name = args.datasetname
    if dataset_name=="shanghaitech":
        print("will use shanghaitech dataset with crop ")
    elif dataset_name == "shanghaitech_keepfull":
        print("will use shanghaitech_keepfull")
    else:
        print("cannot detect dataset_name")
        print("current dataset_name is ", dataset_name)

    # create list
    train_list = create_image_list(TRAIN_PATH)
    test_list = create_image_list(TEST_PATH)

    # create data loader
    train_loader, val_loader, test_loader = get_dataloader(
        train_list,
        None,
        test_list,
        dataset_name=dataset_name,
        batch_size=args.batch_size)

    print("len train_loader ", len(train_loader))

    # model
    model = CustomCNNv2()
    n_param = very_simple_param_count(model)
    experiment.log_other("n_param", n_param)
    if hasattr(model, 'model_note'):
        experiment.log_other("model_note", model.model_note)
    model = model.to(device)

    # loss function
    loss_fn = nn.MSELoss(reduction='sum').to(device)

    optimizer = torch.optim.Adam(model.parameters(), args.lr,
                                 weight_decay=args.decay)

    print(args)
    DATA_PATH = args.input
    TRAIN_PATH = os.path.join(DATA_PATH, "train_data")
    TEST_PATH = os.path.join(DATA_PATH, "test_data")
    dataset_name = args.datasetname
    dataset_name = "shanghaitech_keepfull"  # hard-coded override of the CLI value

    count_below_256 = 0
    # create list
    train_list, val_list = get_train_val_list(TRAIN_PATH)
    test_list = None

    # create data loader
    train_loader, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name=dataset_name,
        batch_size=5)

    print(
        "============== TRAIN LOADER ===================================================="
    )
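    # scan the whole train loader and track the smallest height/width seen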
    min_1 = 500
    min_2 = 500
    for img, label in train_loader:
        print("img shape:" + str(img.shape) + " == " + "label shape " +
              str(label.shape))
        size_1 = img.shape[2]
        size_2 = img.shape[3]
        if min_1 > size_1:
            min_1 = size_1
        if min_2 > size_2:
            min_2 = size_2
Example #18
from data_flow import get_dataloader, create_image_list
from hard_code_variable import HardCodeVariable
import os

hard_code = HardCodeVariable()

TRAIN_PATH = os.path.join(hard_code.SHANGHAITECH_PATH_PART_B,
                          hard_code.SHANGHAITECH_PATH_TRAIN_POSTFIX)
TEST_PATH = os.path.join(hard_code.SHANGHAITECH_PATH_PART_B,
                         hard_code.SHANGHAITECH_PATH_TEST_POSTFIX)

train_list = create_image_list(TRAIN_PATH)
test_list = create_image_list(TEST_PATH)

train, valid, test = get_dataloader(train_list,
                                    None,
                                    test_list,
                                    dataset_name="shanghaitech",
                                    batch_size=5)

for img, label in train:
    print("img shape:" + str(img.shape) + " == " + "label shape " +
          str(label.shape))
Example #19
def visualize_evaluation_shanghaitech_keepfull(model, args):
    """

    :param model: model with param, if not model then do not output pred
    :param args:
    :return:
    """
    if model is not None:
        model = model.cuda()
        model.eval()
    saved_folder = args.output
    os.makedirs(saved_folder, exist_ok=True)
    train_list, val_list = get_train_val_list(args.input, test_size=0.2)
    test_list = create_image_list(args.input)
    train_loader, val_loader, test_loader = get_dataloader(
        train_list,
        val_list,
        test_list,
        dataset_name="shanghaitech_keepfull_r50",
        visualize_mode=False,
        debug=True)

    log_f = open(args.meta_data, "w")
    mae_s = 0
    mse_s = 0
    n = 0
    train_loader_iter = iter(train_loader)
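    # pull one training batch first, apparently as a format sanity check;
    # gt_density is immediately overwritten in the loop below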
    _, gt_density, _ = next(train_loader_iter)
    with torch.no_grad():
        for item in test_loader:
            img, gt_density, debug_info = item
            gt_count = debug_info["p_count"]
            file_name = debug_info["name"]
            file_name_only = file_name[0].split(".")[0]
            print(file_name_only)
            save_path = os.path.join(saved_folder,
                                     "label_" + file_name_only + ".png")
            save_pred_path = os.path.join(saved_folder,
                                          "pred_" + file_name_only + ".png")
            save_density_map(gt_density.numpy()[0], save_path)
            if model is not None:
                pred = model(img.cuda())
                predicted_density_map = pred.detach().cpu().clone().numpy()
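                # upsample the 1/8-resolution prediction by 8x per side and
                # divide by 64 (the area factor) so the density sum is preserved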
                predicted_density_map_enlarge = cv2.resize(
                    np.squeeze(predicted_density_map[0][0]),
                    (int(predicted_density_map.shape[3] * 8),
                     int(predicted_density_map.shape[2] * 8)),
                    interpolation=cv2.INTER_CUBIC) / 64
                save_density_map(predicted_density_map_enlarge, save_pred_path)
                print("pred " + save_pred_path + " value " +
                      str(predicted_density_map.sum()))

                print("cont compare " + str(predicted_density_map.sum()) +
                      " " + str(predicted_density_map_enlarge.sum()))
                print("shape compare " + str(predicted_density_map.shape) +
                      " " + str(predicted_density_map_enlarge.shape))

                pred_count = pred.detach().cpu().sum()
                pred_count_num = pred_count.item()

                error = abs(pred_count_num - gt_count.item())
            else:
                error = 0
                pred_count = 0

            mae_s += error
            mse_s += error * error
            n += 1
            density_map_count = gt_density.detach().sum()
            density_map_count_num = density_map_count.item()
            gt_count_num = gt_count.item()
            if model is not None:
                log_str = str(file_name_only) + " " + str(
                    density_map_count_num) + " " + str(
                        gt_count.item()) + " " + str(pred_count.item())
            else:
                log_str = str(file_name_only) + " " + str(
                    density_map_count_num) + " " + str(gt_count.item())
            print(log_str)
            log_f.write(log_str + "\n")
    log_f.close()
    mae = mae_s / n
    mse = math.sqrt(mse_s / n)
    print("mae ", mae)
    print("mse", mse)
Example #20
    TEST_PATH = os.path.join(DATA_PATH, "test_data")
    dataset_name = args.datasetname
    if dataset_name == "shanghaitech":
        print("will use shanghaitech dataset with crop ")
    elif dataset_name == "shanghaitech_keepfull":
        print("will use shanghaitech_keepfull")
    else:
        print("cannot detect dataset_name")
        print("current dataset_name is ", dataset_name)

    # create list
    train_list = create_image_list(TRAIN_PATH)
    test_list = create_image_list(TEST_PATH)

    # create data loader
    train_loader, val_loader, test_loader = get_dataloader(
        train_list, None, test_list, dataset_name=dataset_name)

    print("len train_loader ", len(train_loader))

    # model
    model = AttnCanAdcrowdNetSimpleV5()
    experiment.log_other("model_summary",
                         summary(model, (3, 128, 128), device="cpu"))
    model = model.to(device)

    # loss function
    loss_fn = nn.MSELoss(reduction='sum').to(device)

    optimizer = torch.optim.Adam(model.parameters(),
                                 args.lr,
                                 weight_decay=args.decay)