Example no. 1 (score: 0)
    # Parse command-line arguments (init_args defined elsewhere in this project).
    args = init_args()
    # Seed all RNGs with a fixed value for reproducible runs; the random-seed
    # alternative is kept commented out.
    # manualSeed = random.randint(1, 10000)  # fix seed
    manualSeed = 10
    random.seed(manualSeed)
    np.random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    # Let cuDNN benchmark and pick the fastest convolution algorithms.
    # NOTE(review): this assumes input sizes stay fixed across batches — confirm.
    cudnn.benchmark = True
    # alphabet = alphabet = utils.to_alphabet("H:/DL-DATASET/BaiduTextR/train.list")

    # store model path
    # Ensure the experiment/checkpoint directory exists before training starts.
    if not os.path.exists('./expr'):
        os.mkdir('./expr')
    # read train set
    # dataset = baiduDataset("H:/DL-DATASET/BaiduTextR/train_images/train_images", "H:/DL-DATASET/BaiduTextR/train.list", params.alphabet, True)
    # Training split: image root + label list file, decoded with params.alphabet,
    # resized to (params.imgW, params.imgH). The False flag's meaning is defined
    # by baiduDataset (not visible here) — presumably a train/eval or charset
    # toggle; verify against the dataset class.
    dataset = baiduDataset(
        "H:/DL-DATASET/360M/images",
        "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/train.txt",
        params.alphabet, False, (params.imgW, params.imgH))
    # Validation split: same image root, separate label file.
    val_dataset = baiduDataset(
        "H:/DL-DATASET/360M/images",
        "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/test.txt",
        params.alphabet, False, (params.imgW, params.imgH))
    # dataset = baiduDataset("/media/hane/DL-DATASET/360M/images", "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/train.txt", params.alphabet, False)
    # val_dataset = baiduDataset("/media/hane/DL-DATASET/360M/images", "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/test.txt", params.alphabet, False)

    # Batched, shuffled training loader; worker count comes from params.
    train_loader = DataLoader(dataset,
                              batch_size=params.batchSize,
                              shuffle=True,
                              num_workers=params.workers)
    # shuffle=True, just for time consuming.
    val_loader = DataLoader(val_dataset,
                            batch_size=params.val_batchSize,
                            # NOTE(review): fragment is truncated here — the
                            # remaining DataLoader kwargs are not visible in
                            # this chunk.
Example no. 2 (score: 0)
    # device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # Force CPU execution; the CUDA-autodetect line above is intentionally
    # disabled in this variant.
    device = torch.device("cpu")
    # alphabet = alphabet = utils.to_alphabet("H:/DL-DATASET/BaiduTextR/train.list")

    # store model path
    # Create the per-experiment output directory named by params.experiment.
    if not os.path.exists(params.experiment):
        os.mkdir(params.experiment)
    # read train set
    # dataset = baiduDataset("H:/DL-DATASET/BaiduTextR/train_images/train_images", "H:/DL-DATASET/BaiduTextR/train.list", params.alphabet, True)
    # dataset = baiduDataset("H:/DL-DATASET/360M/images", "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/train.txt", params.alphabet, False, (params.imgW, params.imgH))
    # val_dataset = baiduDataset("H:/DL-DATASET/360M/images", "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/test.txt", params.alphabet, False, (params.imgW, params.imgH))
    # dataset = baiduDataset("/media/hane/DL-DATASET/360M/images", "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/train.txt", params.alphabet, False)
    # val_dataset = baiduDataset("/media/hane/DL-DATASET/360M/images", "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/test.txt", params.alphabet, False)
    # Training split: the label file is selected per experiment via
    # params.experiment (e.g. train_label_<experiment>.txt).
    dataset = baiduDataset(
        "/uuz/song/datasets/ocr/train_items",
        "/home/song/workplace/OCR/ocr_idcard/label/train/train_label_{}.txt".
        format(params.experiment), params.alphabet, False,
        (params.imgW, params.imgH))
    # Validation split: same image root, matching per-experiment val label file.
    val_dataset = baiduDataset(
        "/uuz/song/datasets/ocr/train_items",
        "/home/song/workplace/OCR/ocr_idcard/label/val/val_label_{}.txt".
        format(params.experiment), params.alphabet, False,
        (params.imgW, params.imgH))
    #   /home/song/workplace/OCR/ocr_idcard/label/train/train_label_birth_d.txt
    # Batched, shuffled training loader; worker count comes from params.
    train_loader = DataLoader(dataset,
                              batch_size=params.batchSize,
                              shuffle=True,
                              num_workers=params.workers)
    # shuffle=True, just for time consuming.
    val_loader = DataLoader(val_dataset,
                            batch_size=params.val_batchSize,
                            # NOTE(review): fragment is truncated here — the
                            # remaining DataLoader kwargs are not visible in
                            # this chunk.
Example no. 3 (score: 0)
    # args = init_args()
    # Seed all RNGs with a fixed value for reproducible runs; the random-seed
    # alternative is kept commented out.
    # manualSeed = random.randint(1, 10000)  #fix seed
    manualSeed = 10
    random.seed(manualSeed)
    np.random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    # Let cuDNN benchmark and pick the fastest convolution algorithms.
    # NOTE(review): this assumes input sizes stay fixed across batches — confirm.
    cudnn.benchmark = True
    # Use the first GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # alphabet = alphabet = utils.to_alphabet("H:/DL-DATASET/BaiduTextR/train.list")

    # store model path
    # Ensure the experiment/checkpoint directory exists before training starts.
    if not os.path.exists('./expr'):
        os.mkdir('./expr')
    # read train set
    # dataset = baiduDataset("H:/DL-DATASET/BaiduTextR/train_images/train_images", "H:/DL-DATASET/BaiduTextR/train.list", params.alphabet, True)
    # Training split over the vehicle dataset (relative project paths),
    # decoded with params.alphabet and resized to (params.imgW, params.imgH).
    dataset = baiduDataset("data/vehicle/Image", "data/vehicle/Main/train.txt",
                           params.alphabet, False, (params.imgW, params.imgH))
    # Validation split: same image root, separate label file.
    val_dataset = baiduDataset("data/vehicle/Image",
                               "data/vehicle/Main/val.txt", params.alphabet,
                               False, (params.imgW, params.imgH))
    # val_dataset = baiduDataset("H:/DL-DATASET/360M/images", "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/test.txt", params.alphabet, False, (params.imgW, params.imgH))
    # dataset = baiduDataset("/media/hane/DL-DATASET/360M/images", "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/train.txt", params.alphabet, False)
    # val_dataset = baiduDataset("/media/hane/DL-DATASET/360M/images", "E:/08-Github-resources/00-MY-GitHub-Entries/crnn_chinese_characters_rec-master/crnn_chinese_characters_rec-master/label/test.txt", params.alphabet, False)

    # Batched, shuffled training loader; worker count comes from params.
    train_loader = DataLoader(dataset,
                              batch_size=params.batchSize,
                              shuffle=True,
                              num_workers=params.workers)
    # shuffle=True, just for time consuming.
    val_loader = DataLoader(val_dataset,
                            batch_size=params.val_batchSize,
                            shuffle=True,
                            # NOTE(review): fragment is truncated here — the
                            # remaining DataLoader kwargs are not visible in
                            # this chunk.
Example no. 4 (score: 0)
    # args = init_args()
    # manualSeed = random.randint(1, 10000)  #fix seed
    # Restrict this process to physical GPU 7; it is then addressed below as
    # "cuda:0" because device numbering is relative to the visible set.
    # NOTE(review): must be set before CUDA is initialized to take effect.
    os.environ["CUDA_VISIBLE_DEVICES"] = "7"
    # Seed all RNGs with a fixed value for reproducible runs.
    manualSeed = 10
    random.seed(manualSeed)
    np.random.seed(manualSeed)
    torch.manual_seed(manualSeed)
    # Let cuDNN benchmark and pick the fastest convolution algorithms.
    # NOTE(review): this assumes input sizes stay fixed across batches — confirm.
    cudnn.benchmark = True
    # Use the (single visible) GPU when available, otherwise fall back to CPU.
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    # store model path
    # Ensure the experiment/checkpoint directory exists before training starts.
    if not os.path.exists('./expr'):
        os.mkdir('./expr')
    # read train set
    # Training split: images plus real+synthetic ("fake") labels, decoded with
    # params.alphabet and resized to (params.imgW, params.imgH). The False
    # flag's meaning is defined by baiduDataset (not visible here) — verify
    # against the dataset class.
    dataset = baiduDataset(
        "./data_chinese_tra/images_add_fake",
        "./data_chinese_tra/label_add_fake/train_add_fake.txt",
        params.alphabet, False, (params.imgW, params.imgH))
    # Validation split: same image root, separate label file.
    val_dataset = baiduDataset(
        "./data_chinese_tra/images_add_fake",
        "./data_chinese_tra/label_add_fake/val_add_fake.txt", params.alphabet,
        False, (params.imgW, params.imgH))

    # Batched, shuffled training loader; worker count comes from params.
    train_loader = DataLoader(dataset,
                              batch_size=params.batchSize,
                              shuffle=True,
                              num_workers=params.workers)
    # shuffle=True, just for time consuming.
    val_loader = DataLoader(val_dataset,
                            batch_size=params.val_batchSize,
                            shuffle=True,
                            num_workers=params.workers)