Example #1
def run():
    # Load Dataset
    train_dataset = dataset.autoencoderDataset("bibtex_train.embeddings")
    test_dataset  = dataset.autoencoderDataset("bibtex_test.embeddings")

    # Dataloaders
    train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=True, num_workers=4)
    test_dataloader  = torch.utils.data.DataLoader(test_dataset, batch_size=1, num_workers=1)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    autoencoder_model = model.autoEncoder(100) # 100 is the input dimension
    autoencoder_model.to(device)

    # Creating the optimizer
    optimizer = torch.optim.Adam(autoencoder_model.parameters(), lr=1e-3)

    for epoch in range(0,10):
        training_loss = engine.train(train_dataloader, autoencoder_model, optimizer, device)
        print("Epoch: {} Loss: {}".format(epoch+1, training_loss))

    # Model evaluation
    engine.eval(test_dataloader, autoencoder_model, device)

    # Generating the embeddings now
    train_set_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=1, shuffle=False)
    test_set_dataloader  = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=False)

    embed_list = engine.generate_low_dimensional_embeddings(train_set_dataloader, autoencoder_model, device)
    embed_list.extend(engine.generate_low_dimensional_embeddings(test_set_dataloader, autoencoder_model, device))

    with open("bibtex_low_dimension.embeddings", mode='w+') as file:
        for each_elem in embed_list:
            line_to_write = " ".join(str(v) for v in each_elem[0])+'\n'
            file.write(line_to_write)
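The autoEncoder module itself is not shown above. Below is a minimal sketch of what such a module might look like, assuming a plain fully connected bottleneck; the layer widths, the bottleneck size, and the (reconstruction, code) return signature are illustrative assumptions, not taken from the original project.

import torch.nn as nn

class autoEncoder(nn.Module):
    # Hypothetical layer widths; only the 100-dim input matches the call above.
    def __init__(self, input_dim, bottleneck_dim=16):
        super().__init__()
        self.encoder = nn.Sequential(
            nn.Linear(input_dim, 64), nn.ReLU(),
            nn.Linear(64, bottleneck_dim),
        )
        self.decoder = nn.Sequential(
            nn.Linear(bottleneck_dim, 64), nn.ReLU(),
            nn.Linear(64, input_dim),
        )

    def forward(self, x):
        code = self.encoder(x)            # low-dimensional embedding
        return self.decoder(code), code   # reconstruction and code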
Example #2
def evaluate(model, dataset, device, load_model=True, load_dataset=True, draw_results=False):
    try:
        '''
            If the load_* parameters are True, the method accepts paths for the
            model and dataset; otherwise it accepts the objects themselves.
            The dataset path is passed without the '.pkl' extension.
        '''

        if load_model is True:
            model = torch.load(model)  # model is expected to be the model path
        if load_dataset is True:  # independent of load_model, so not elif
            dataset = load_obj(dataset)
        
        eval(model, dataset, device, draw=draw_results)

    except Exception as e:
        print(e)
        return False
    
    return True
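A hypothetical call to evaluate(), for illustration only; the checkpoint path and dataset name are placeholders, and load_obj is assumed to append the '.pkl' extension itself.

import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Placeholder paths; the dataset name is passed without '.pkl'.
ok = evaluate("autoencoder_model.pt", "bibtex_eval_set", device,
              load_model=True, load_dataset=True, draw_results=False)
print("evaluation succeeded:", ok)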
Example #3
def run_training():
    image_files = glob.glob(os.path.join(config.DATA_DIR, '*.png'))
    targets_orig = [x.split('/')[-1][:-4] for x in image_files]
    targets = [[c for c in x] for x in targets_orig]
    targets_flat = [c for clist in targets for c in clist]

    label_enc = preprocessing.LabelEncoder()
    label_enc.fit(targets_flat)
    targets_enc = [label_enc.transform(x) for x in targets]
    targets_enc = np.array(targets_enc) + 1  # reserve 0 for the unknown/blank class

    train_imgs, test_imgs, train_targets, test_targets, train_orig_targets, test_orig_targets = model_selection.train_test_split(
        image_files, targets_enc, targets_orig, test_size=0.1, random_state=42)

    train_dataset = dataset.Classification(image_paths=train_imgs,
                                           targets=train_targets,
                                           resize=(config.IMAGE_HEIGHT,
                                                   config.IMAGE_WIDTH))

    train_loader = torch.utils.data.DataLoader(train_dataset,
                                               batch_size=config.BATCH_SIZE,
                                               shuffle=True)

    test_dataset = dataset.Classification(image_paths=test_imgs,
                                          targets=test_targets,
                                          resize=(config.IMAGE_HEIGHT,
                                                  config.IMAGE_WIDTH))

    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=config.BATCH_SIZE,
                                              shuffle=False)

    model = CaptchaModel(num_chars=len(label_enc.classes_))
    model = model.to(config.DEVICE)

    optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           factor=0.8,
                                                           patience=5,
                                                           verbose=True)

    for epoch in range(config.EPOCHS):
        train_loss = engine.train(model, train_loader, optimizer)
        val_preds, valid_loss = engine.eval(model, test_loader)
        print(
            f"Epoch: {epoch}: Train loss: {train_loss},  Valid loss: {valid_loss}"
        )
        valid_cap_preds = []
        for vp in val_preds:
            current_preds = decode_predictions(vp, label_enc)
            valid_cap_preds.extend(current_preds)
        print(list(zip(test_orig_targets, valid_cap_preds))[6:11])
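decode_predictions is used here but not defined in the snippet. A minimal sketch of a CTC-style decoder is shown below, assuming preds is a tensor of shape (timesteps, batch, num_classes) and that class 0 is the blank/unknown slot created by the +1 shift above; the shapes and helper internals are assumptions.

import torch

def decode_predictions(preds, encoder):
    preds = preds.permute(1, 0, 2)              # (batch, timesteps, classes), assumed layout
    preds = torch.softmax(preds, dim=2)
    preds = torch.argmax(preds, dim=2).cpu().numpy()
    decoded = []
    for sample in preds:
        chars = []
        for k in sample:
            k = int(k) - 1                      # undo the +1 shift; -1 now means blank
            chars.append("" if k == -1 else str(encoder.inverse_transform([k])[0]))
        decoded.append("".join(chars))
    return decoded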
Example #4
def main():
    image_paths = glob.glob(os.path.join(config.PATH, "*.png"))
    image_paths = [path.replace("\\", "/") for path in image_paths]
    targets = [path.split("/")[-1][:-4] for path in image_paths]
    targets_listed = [[char for char in target] for target in targets]
    targets_flattened = [char for target in targets_listed for char in target]

    label_enc = preprocessing.LabelEncoder()
    label_enc.fit(targets_flattened)
    targets_encoded = np.array(
        [label_enc.transform(target) for target in targets_listed])
    targets_encoded += 1  # to keep the "0" class for UNK chars

    (
        train_imgs,
        valid_imgs,
        train_enc_targets,
        valid_enc_targets,
        _,
        valid_targets,
    ) = model_selection.train_test_split(image_paths,
                                         targets_encoded,
                                         targets,
                                         test_size=0.1,
                                         random_state=42)

    train_loader = make_loader(
        mode="train",
        image_paths=train_imgs,
        targets=train_enc_targets,
        size=(config.HEIGHT, config.WIDTH),
        resize=True,
    )

    valid_loader = make_loader(
        mode="valid",
        image_paths=valid_imgs,
        targets=valid_enc_targets,
        size=(config.HEIGHT, config.WIDTH),
        resize=True,
    )

    model = OCRModel(num_classes=len(label_enc.classes_),
                     dropout=config.DROPOUT).to(config.DEVICE)
    optimizer = torch.optim.Adam(model.parameters(), lr=config.LR)
    lr_scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer,
        mode="min",
        patience=config.PATIENCE,
        factor=config.FACTOR,
        verbose=True,
    )

    if config.MODE == "train":
        best_loss = float("inf")
        for epoch in range(config.EPOCHS):
            model.train()
            _ = engine.train(model, train_loader, optimizer)

            model.eval()
            with torch.no_grad():
                valid_preds, valid_loss = engine.eval(model, valid_loader)

            captcha_preds = []
            for preds in valid_preds:
                preds_ = decode_predictions(preds, label_enc)
                captcha_preds.extend(preds_)

            print(f"Epoch: {epoch}")
            pprint(list(zip(valid_targets, captcha_preds))[:10])

            lr_scheduler.step(valid_loss.avg)
            if valid_loss.avg < best_loss:
                best_loss = valid_loss.avg
                torch.save(model.state_dict(), "model.pt")
    else:
        model.load_state_dict(
            torch.load("./models/model.pt", map_location=config.DEVICE))
        model.eval()
        with torch.no_grad():
            valid_preds, valid_loss = engine.eval(model, valid_loader)
        captcha_preds = []
        for preds in valid_preds:
            preds_ = decode_predictions(preds, label_enc)
            captcha_preds.extend(preds_)

        pprint(list(zip(valid_targets, captcha_preds))[:10])
        return valid_loader, captcha_preds, valid_targets
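make_loader is not shown in this example. A minimal sketch is given below, assuming it wraps a Classification-style dataset like the one in the previous example; the dataset class, the config names, and the shuffling policy are assumptions, and the project modules dataset and config are taken as given.

import torch

def make_loader(mode, image_paths, targets, size, resize=True):
    # Assumed project dataset class; resize is passed through when requested.
    ds = dataset.Classification(image_paths=image_paths,
                                targets=targets,
                                resize=size if resize else None)
    return torch.utils.data.DataLoader(ds,
                                       batch_size=config.BATCH_SIZE,
                                       shuffle=(mode == "train"))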
Example #5
        ecgs, y_true, y_pred = test(
                net=net,
                test_loader=loader,
                device=DEVICE,
                batch_size=BATCH_SIZE,
                plot_ecg=False,
                plot_ecg_windows_size=WINDOWS_SIZE
        )

        eval(
            ecgs=ecgs,
            y_true=y_true,
            y_pred=y_pred,
            labels=[0, 1, 2, 3, 4],
            target_names=['none', 'p_wave', 'qrs', 't_wave', 'extrasystole'],
            plot_acc=True,
            plot_loss=True,
            plot_conf_matrix=True,
            plot_ecg=True,
            plot_ecg_windows_size=WINDOWS_SIZE
        )
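The eval() call above is project-specific. As a rough sketch of the reporting part (plots omitted), the same labels and target_names could be fed to scikit-learn; the function name below is hypothetical.

from sklearn.metrics import classification_report, confusion_matrix

def report(y_true, y_pred, labels, target_names):
    # Per-class precision/recall/F1 plus the raw confusion matrix.
    print(classification_report(y_true, y_pred, labels=labels,
                                target_names=target_names))
    print(confusion_matrix(y_true, y_pred, labels=labels))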
Example #6
def run():
    trainDataset, testDataset, labelGenerator = utils.loadDataset()

    # Making DataLoaders
    trainDataLoader = torch.utils.data.DataLoader(
        trainDataset,
        batch_size=config.TRAIN_BATCH_SIZE,
        shuffle=True,
        num_workers=4,
        pin_memory=True)
    testDataLoader = torch.utils.data.DataLoader(
        testDataset, batch_size=config.TEST_BATCH_SIZE, num_workers=1)

    totalNOsOfLabels = len(labelGenerator.classes_)

    device = torch.device(config.DEVICE)

    # Defining Model
    print("Making model:- ", config.modelName)
    citeModel = None
    if config.modelName == "BertBase":
        citemodel = model.BERTBaseUncased(numOfLabels=totalNOsOfLabels,
                                          dropout=config.DROPOUT)
    elif config.modelName == "SciBert":
        citemodel = model.SciBertUncased(numOfLabels=totalNOsOfLabels,
                                         dropout=config.DROPOUT)
    citemodel.to(device)

    param_optimizer = list(citemodel.named_parameters())
    '''
        There is generally no need to apply an L2 penalty (i.e. weight decay) to
        biases and LayerNorm.weight, hence the no_decay list below.
        Update: L2 should be applied to LayerNorm.weight as per the Google TF
        implementation, so this is being reverted ;)
    '''
    no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]

    optimizer_parameters = [
        {
            "params": [p for n, p in param_optimizer
                       if not any(nd in n for nd in no_decay)],
            "weight_decay": 0.01,  # raised from 0.001 to 0.01
        },
        {
            "params": [p for n, p in param_optimizer
                       if any(nd in n for nd in no_decay)],
            "weight_decay": 0.0,
        },
    ]

    num_train_steps = int(len(trainDataLoader) * config.EPOCHS)
    optimizer = AdamW(optimizer_parameters, lr=config.LEARNING_RATE)
    scheduler = get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=num_train_steps * config.WARMUP_PROPORTION,
        num_training_steps=num_train_steps)

    if config.dotrain:
        print('In Training')
        for epoch in range(config.EPOCHS):
            trainingLoss = engine.train(trainDataLoader, citemodel, optimizer,
                                        device, scheduler)
            print("Epoch: ", epoch, " Loss: ", trainingLoss, '\n')

        # Saving the model
        os.makedirs(os.path.dirname(config.MODEL_SAVED.format(
            config.modelName)),
                    exist_ok=True)
        torch.save(citemodel.state_dict(),
                   config.MODEL_SAVED.format(config.modelName))
        print('Model is saved at: ',
              config.MODEL_SAVED.format(config.modelName))
    # Evaluating the model

    print("Loading the model")
    #citemodel = model.BERTBaseUncased(*args, **kwargs)
    citemodel.load_state_dict(
        torch.load(config.MODEL_SAVED.format(config.modelName)))
    outputs, targets = engine.eval(testDataLoader, citemodel, device)

    # Saving the results with corresponding targets
    os.makedirs(os.path.dirname(
        config.PREDICTIONS_PATH.format(config.modelName)),
                exist_ok=True)
    with open(config.PREDICTIONS_PATH.format(config.modelName), 'wb') as f:
        pickle.dump(outputs, f)  # First saved the predicted outputs
        pickle.dump(targets, f)  # Then saved the corresponding targets

    print('Starting Evaluation...')
    utils.metric(outputs, targets)
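utils.metric is not included in the snippet. A minimal sketch for multi-label outputs is below, assuming outputs are sigmoid probabilities and targets are multi-hot vectors; the 0.5 threshold and the choice of F1 averages are assumptions.

import numpy as np
from sklearn import metrics

def metric(outputs, targets, threshold=0.5):
    # Binarize the predicted probabilities before scoring.
    preds = (np.array(outputs) >= threshold).astype(int)
    targets = np.array(targets).astype(int)
    print("micro F1:", metrics.f1_score(targets, preds, average="micro"))
    print("macro F1:", metrics.f1_score(targets, preds, average="macro"))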
Example #7
def run():
    df = pd.read_csv('input/IMDB Dataset.csv').sample(frac=1).reset_index(drop=True)
    df.sentiment = df.sentiment.apply(
        lambda x:
        1 if x == 'positive' else 0
    )
    df_train, df_val = train_test_split(df, test_size=0.1)

    train_data = DataLoader.DataLoader(df_train)
    train_loader = torch.utils.data.DataLoader(
        train_data,
        batch_size=config.Batch_Size,
        num_workers=4,
        pin_memory=True,
        collate_fn=DataLoader.MyCollate(config.pad_idx)
    )

    val_data = DataLoader.DataLoader(df_val)

    val_loader = torch.utils.data.DataLoader(
        val_data,
        batch_size=config.Batch_Size,
        num_workers=4,
        pin_memory=True,
        collate_fn=DataLoader.MyCollate(config.pad_idx)
    )
    

    if torch.cuda.is_available():
        accelerator = 'cuda'
        torch.backends.cudnn.benchmark = True
    else:
        accelerator = 'cpu'

    device = torch.device(accelerator)


    model = BertSentimentModel.BERTMODEL()

    model = model.to(device)

    model_param = list(model.named_parameters())

    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    
    
    optimizer_param = [
        {'params': [p for n, p in model_param if not any(nd in n for nd in no_decay)], 'weight_decay':0.001},
        {'params': [p for n, p in model_param if any(nd in n for nd in no_decay)], 'weight_decay':0.0 }
    ]

    num_training_steps = len(df_train)*config.Epochs//config.Batch_Size

    optimizer = transformers.AdamW(optimizer_param, lr=3e-5)
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer,
        num_warmup_steps=0,
        num_training_steps=num_training_steps
    )

    best_loss = 1e32
    best_model = None
    print('---------- [INFO] STARTING TRAINING ---------')
    for epoch in range(config.Epochs):
        train_acc, train_loss = engine.train(
            model, 
            train_loader, 
            optimizer, 
            scheduler, 
            device
        )

        val_acc, val_loss = engine.eval(
            model,
            val_loader,
            device
        )

        print(f'EPOCH : {epoch+1}/{config.Epochs}')
        print(f'TRAIN ACC = {train_acc}% | TRAIN LOSS = {train_loss}')
        print(f'VAL ACC = {val_acc}% | VAL LOSS = {val_loss}')

        if best_loss > val_loss:
            best_loss = val_loss
            best_model = model.state_dict()
    torch.save(best_model, config.Model_Path)
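BertSentimentModel.BERTMODEL is referenced but not shown. A minimal sketch of such a classifier is below, assuming a recent transformers version, the pooled [CLS] representation, and a single logit for binary sentiment; the dropout value and hidden size are assumptions.

import torch.nn as nn
import transformers

class BERTMODEL(nn.Module):
    def __init__(self, dropout=0.3):
        super().__init__()
        self.bert = transformers.BertModel.from_pretrained("bert-base-uncased")
        self.drop = nn.Dropout(dropout)
        self.out = nn.Linear(768, 1)   # bert-base hidden size -> one sentiment logit

    def forward(self, ids, mask, token_type_ids):
        # Classify from the pooled [CLS] representation.
        out = self.bert(ids, attention_mask=mask, token_type_ids=token_type_ids)
        return self.out(self.drop(out.pooler_output))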
Example #8
def run():
    df = pd.read_csv('input/train.csv').sample(frac=1).dropna().reset_index(
        drop=True)

    df_train, df_val = train_test_split(df,
                                        test_size=0.1,
                                        stratify=df.sentiment.values)

    train_data = DataLoader.DataLoader(df_train)
    val_data = DataLoader.DataLoader(df_val)

    train_loader = torch.utils.data.DataLoader(
        train_data,
        num_workers=0,
        batch_size=CONFIG.Batch_Size,
        pin_memory=True,
        collate_fn=DataLoader.MyCollate())

    val_loader = torch.utils.data.DataLoader(val_data,
                                             num_workers=0,
                                             batch_size=CONFIG.Batch_Size,
                                             pin_memory=True,
                                             collate_fn=DataLoader.MyCollate())

    if torch.cuda.is_available():
        compute = 'cuda'
        torch.backends.cudnn.benchmark = True
    else:
        compute = 'cpu'

    device = torch.device(compute)
    model = ROBERTA_MODEL.QnAModel(CONFIG.Dropout)
    model = model.to(device)

    parameters = list(model.parameters())

    # Parameter name patterns that should be excluded from weight decay.
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']

    optimized_parameters = [
        {
            'params': [p for n, p in model.named_parameters()
                       if not any(nd in n for nd in no_decay)],
            'weight_decay': 0.001
        },
        {
            'params': [p for n, p in model.named_parameters()
                       if any(nd in n for nd in no_decay)],
            'weight_decay': 0.0
        }
    ]

    optimizer = transformers.AdamW(optimized_parameters, lr=CONFIG.LR)

    num_training_steps = len(df_train) * CONFIG.Epochs / CONFIG.Batch_Size
    scheduler = transformers.get_linear_schedule_with_warmup(
        optimizer, num_training_steps=num_training_steps, num_warmup_steps=0)

    best_jaccard = 0
    best_model = model.state_dict()
    print('-------------[INFO] STARTING TRAINING--------------\n')
    for epoch in range(CONFIG.Epochs):
        train_loss = engine.train(model, train_loader, optimizer, scheduler,
                                  device)
        val_loss, jaccard_score = engine.eval(model, val_loader, device)
        print(
            f'EPOCHS -> {epoch+1}/{CONFIG.Epochs} | TRAINING LOSS = {train_loss} | VAL LOSS = {val_loss} | JACCARD SCORE = {jaccard_score}\n'
        )
        if jaccard_score > best_jaccard:
            best_jaccard = jaccard_score
            best_model = model.state_dict()

    torch.save(best_model, CONFIG.MODLE_PATH)
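The jaccard_score returned by engine.eval above is presumably the word-overlap Jaccard metric that is standard for extracted answer spans. A minimal sketch of that metric:

def jaccard(str1, str2):
    # Word-level intersection over union of the two strings.
    a = set(str(str1).lower().split())
    b = set(str(str2).lower().split())
    if not a and not b:
        return 1.0
    return len(a & b) / len(a | b)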
Example #9
optimizer_parameters = [
    {
        'params': [p for n, p in param_optimizer
                   if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.001
    },
    {
        'params': [p for n, p in param_optimizer
                   if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0
    },
]

num_train_steps = int(len(df_train) / config.TRAIN_BATCH_SIZE * config.EPOCHS)
optimizer = AdamW(optimizer_parameters, lr=3e-5)
scheduler = get_linear_schedule_with_warmup(optimizer,
                                            num_warmup_steps=0,
                                            num_training_steps=num_train_steps)

model = nn.DataParallel(model)

best_accuracy = 0
for epoch in range(config.EPOCHS):
    print('======== Epoch {} ========'.format(epoch))
    engine.train(train_data_loader, model, optimizer, scheduler, device)
    outputs, targets = engine.eval(valid_data_loader, model, device)
    outputs = np.array(outputs) >= 0.5
    accuracy = metrics.accuracy_score(targets, outputs)
    print(f"Accuracy Score = {accuracy}")
    if accuracy > best_accuracy:
        torch.save(model.state_dict(), config.MODEL_PATH)
        best_accuracy = accuracy
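engine.eval here returns raw outputs and targets that are thresholded at 0.5, which suggests a sigmoid multi-label head. A minimal sketch of such an eval loop is below; the batch keys and the model's keyword arguments are assumptions.

import torch

def eval(data_loader, model, device):
    model.eval()
    fin_outputs, fin_targets = [], []
    with torch.no_grad():
        for batch in data_loader:
            # Assumed batch layout: token ids, attention mask, multi-hot targets.
            ids = batch["ids"].to(device)
            mask = batch["mask"].to(device)
            targets = batch["targets"].to(device)
            logits = model(ids=ids, mask=mask)
            fin_outputs.extend(torch.sigmoid(logits).cpu().numpy().tolist())
            fin_targets.extend(targets.cpu().numpy().tolist())
    return fin_outputs, fin_targets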
Example #10
def run_training():
    image_files = glob.glob("../input/dl_captcha/train_images/*.png")
    print(image_files.__len__())

    targets = [int(i.split("/")[-1].split(".")[0].split("_")[1])
               for i in image_files]
    print(targets.__len__())

    lbl_encoder = preprocessing.LabelEncoder()
    lbl_encoder.fit(targets)

    enc_targets = np.array(lbl_encoder.transform(targets))
    print(len(lbl_encoder.classes_))

    train_imgs, val_imgs, train_enc_targets, val_enc_targets, train_orig_targets, val_orig_targets = model_selection.train_test_split(
        image_files, enc_targets, targets, test_size=0.1, random_state=42)

    train_dataset = dataset.RotnetDataset(
        train_imgs, train_enc_targets, resize=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH))
    
    print(len(train_dataset))

    val_dataset = dataset.RotnetDataset(
        val_imgs, val_enc_targets, resize=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH))

    print(len(val_dataset))

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=config.BATCH_SIZE,
        shuffle=True, num_workers=config.NUM_WORKERS
    )

    val_loader = torch.utils.data.DataLoader(
        val_dataset, batch_size=config.BATCH_SIZE,
        shuffle=False, num_workers=config.NUM_WORKERS
    )

    model = RotNetclassifier(num_classes=len(lbl_encoder.classes_))
    model.to(config.DEVICE)

    optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
        optimizer, factor=0.8, patience=5, verbose=True
    )

    for epoch in range(config.EPOCHS):
        train_loss = engine.train(model, train_loader, optimizer)
        valid_preds, valid_loss = engine.eval(model, val_loader)
        final_val_preds = []
        for vp in valid_preds:
            current_preds = decode_preds(vp, lbl_encoder)
            final_val_preds.extend(current_preds)

        combined = list(zip(val_orig_targets, final_val_preds))
        print(combined[:20])
        accuracy = metrics.accuracy_score(final_val_preds, val_orig_targets)
        print(
            f"Epoch={epoch}, Train Loss={train_loss}, Test Loss={valid_loss} Accuracy={accuracy}"
        )
        scheduler.step(valid_loss)
        joblib.dump(lbl_encoder, "../input/pickles/lbl_encoder.pkl")
        torch.save(model.state_dict(), "../input/pickles/captcha.pth")
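decode_preds for this rotation classifier only needs an argmax followed by inverse_transform, since no CTC alignment is involved. A minimal sketch, assuming preds is a (batch, num_classes) tensor:

import torch

def decode_preds(preds, encoder):
    # Pick the most likely class per sample and map it back to its label.
    idx = torch.argmax(torch.softmax(preds, dim=1), dim=1).cpu().numpy()
    return encoder.inverse_transform(idx).tolist()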
Example #11
def run():
    df = pd.read_csv('input/ner_dataset.csv', encoding='latin-1')
    df['Sentence #'] = df['Sentence #'].fillna(method='ffill')

    if os.path.exists('input/pos_lb.pickle'):
        pos_lb = pickle.load(open('input/pos_lb.pickle', 'rb'))
    else:
        pos_lb = LabelEncoder().fit(df.POS.values)
        pickle.dump(pos_lb, open('input/pos_lb.pickle', 'wb'))

    df['POS'] = pos_lb.transform(df.POS.values)

    pos_pad_idx = pos_lb.transform(['.'])[0]

    sentence = df.groupby('Sentence #')['Word'].apply(list).values
    pos = df.groupby('Sentence #')['POS'].apply(list).values

    print('-------- [INFO] TOKENIZING --------\n')
    data = DataLoader.DataLoader(sentence, pos)

    data_len = len(data)
    indices = np.arange(0, data_len)
    valid_len = int(data_len * CONFIG.Valid_split)

    train_index = indices[valid_len:]
    valid_index = indices[:valid_len]

    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_index)
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_index)

    if not os.path.exists('input/word_to_idx.pickle'):
        pickle.dump(data.vocab.word_to_idx,
                    open('input/word_to_idx.pickle', 'wb'))

    pad_idx = data.vocab.word_to_idx['<PAD>']

    train_loader = torch.utils.data.DataLoader(data,
                                               num_workers=4,
                                               batch_size=CONFIG.BATCH_SIZE,
                                               pin_memory=True,
                                               collate_fn=DataLoader.MyCollate(
                                                   pad_idx, pos_pad_idx),
                                               sampler=train_sampler)

    valid_loader = torch.utils.data.DataLoader(data,
                                               num_workers=4,
                                               batch_size=CONFIG.BATCH_SIZE,
                                               pin_memory=True,
                                               collate_fn=DataLoader.MyCollate(
                                                   pad_idx, pos_pad_idx),
                                               sampler=valid_sampler)

    vocab_size = len(data.vocab.word_to_idx)

    num_pos_class = len(list(pos_lb.classes_))
    tag_to_idx = {str(x): num for num, x in enumerate(list(pos_lb.classes_))}
    tag_to_idx['start_tag'] = num_pos_class
    tag_to_idx['stop_tag'] = num_pos_class + 1

    if torch.cuda.is_available():
        accelerator = 'cuda'
        torch.backends.cudnn.benchmark = True
    else:
        accelerator = 'cpu'

    device = torch.device(accelerator)

    model = NERCRFModel.NER(vocab_size=vocab_size,
                            embed_dims=CONFIG.EMBED_DIMS,
                            hidden_dims=CONFIG.HIDDEN_DIMS,
                            num_layers=CONFIG.NUM_HIDDEN_LAYER,
                            num_classes=len(tag_to_idx),
                            dropout=CONFIG.DROPOUT,
                            bidirectional=CONFIG.BIDIRECTIONAL,
                            tag_to_idx=tag_to_idx,
                            device=device)

    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=CONFIG.LR)
    best_loss = 1e32

    print('\n-----------[INFO] STARTING TRAINING ----------------\n')
    for epoch in range(CONFIG.EPOCHS):
        train_loss = engine.train(model, train_loader, optimizer, device)
        eval_loss, val_pos_acc = engine.eval(model, valid_loader, device)
        print(f'EPOCH -> {epoch+1}/{CONFIG.EPOCHS}')
        print(f'TRAIN LOSS = {np.round(train_loss, 5)}')
        print(
            f'VAL LOSS   = {np.round(eval_loss, 5)} | VAL POS ACC   = {np.round(val_pos_acc*100, 5)}%'
        )
        if best_loss > eval_loss:
            best_loss = eval_loss
            best_model = model.state_dict()

    torch.save(best_model, CONFIG.Model_Path)
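DataLoader.MyCollate pads variable-length sentences and tag sequences to a common length per batch. A minimal sketch is below, assuming each dataset item is a (word_ids, pos_ids) pair of 1-D tensors; that item layout is an assumption.

import torch
from torch.nn.utils.rnn import pad_sequence

class MyCollate:
    def __init__(self, pad_idx, pos_pad_idx):
        self.pad_idx = pad_idx
        self.pos_pad_idx = pos_pad_idx

    def __call__(self, batch):
        words = [item[0] for item in batch]
        tags = [item[1] for item in batch]
        # Pad tokens and tags with their respective pad indices.
        words = pad_sequence(words, batch_first=True, padding_value=self.pad_idx)
        tags = pad_sequence(tags, batch_first=True, padding_value=self.pos_pad_idx)
        return words, tags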
Example #12
def run():
    # Note: the [:10] slice limits the run to ten rows, likely for quick testing.
    df = pd.read_csv(CONFIG.Path).sample(frac=1).reset_index(drop=True)[:10]

    # ImageNet normalization statistics.
    mean = (0.485, 0.456, 0.406)
    std = (0.229, 0.224, 0.225)

    aug = albumentations.Compose([
        albumentations.Normalize(mean, std, always_apply=True),
        albumentations.RandomBrightness(),
        albumentations.HueSaturationValue(),
        albumentations.Resize(224, 224, always_apply=True),
        AT.ToTensor()
    ])

    print('-------[INFO] TOKENIZING CAPTIONS -------')
    dataset = DataLoader.DataLoader(df, aug)
    dataset_size = len(dataset)
    indices = list(range(dataset_size))
    split = int(np.floor(0.1 * dataset_size))
    train_indices, val_indices = indices[split:], indices[:split]
    train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indices)
    valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(val_indices)

    pickle.dump(dataset.vocab.word_to_idx,
                open('model/word_to_idx.pickle', 'wb'))
    pickle.dump(dataset.vocab.idx_to_word,
                open('model/idx_to_word.pickle', 'wb'))

    pad_idx = dataset.vocab.word_to_idx['<PAD>']

    train_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=CONFIG.BATCH_SIZE,
        sampler=train_sampler,
        pin_memory=True,
        num_workers=8,
        collate_fn=DataLoader.MyCollate(pad_idx=pad_idx))

    val_loader = torch.utils.data.DataLoader(
        dataset,
        batch_size=CONFIG.BATCH_SIZE,
        sampler=valid_sampler,
        pin_memory=True,
        num_workers=8,
        collate_fn=DataLoader.MyCollate(pad_idx=pad_idx))

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    torch.backends.cudnn.benchmark = True

    model = ImageCaptioningModel.EncoderDecoder(
        embedding_dims=CONFIG.embedding_dims,
        vocab_size=len(dataset.vocab.word_to_idx),
        hidden_dims=CONFIG.hidden_dims,
        num_layers=CONFIG.num_layer,
        bidirectional=CONFIG.bidirectional,
        dropout=CONFIG.dropout)

    model = model.to(device)

    for name, param in model.encoder.base_model.named_parameters():
        if "linear" in name:
            param.requires_grad = True
        else:
            param.requires_grad = False

    optimizer = torch.optim.AdamW(model.parameters(), lr=CONFIG.LR)

    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           patience=2,
                                                           threshold=0.001,
                                                           mode='min')

    best_loss = 1e4

    print(
        '------------------------------ [INFO] STARTING TRAINING --------------------------------'
    )
    for epoch in range(CONFIG.EPOCHS):
        print(f'-----EPOCH - {epoch+1}/ {CONFIG.EPOCHS} -----')
        train_loss = engine.train(model, train_loader, optimizer, device,
                                  pad_idx)
        val_loss = engine.eval(model, val_loader, device, pad_idx)
        scheduler.step(val_loss)
        print(f'Train Loss = {train_loss} | Eval Loss = {val_loss}\n')
        if best_loss > val_loss:
            best_loss = val_loss
            best_model = model.state_dict()
            torch.save(best_model, CONFIG.MODEL_PATH)
            predict.predict('1.jpg')
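For reference, applying an albumentations pipeline like the one above to a single image looks like this; the image path is a placeholder, and ToTensor is omitted because its import location differs between albumentations versions.

import albumentations
import numpy as np
from PIL import Image

img = np.array(Image.open("1.jpg").convert("RGB"))   # placeholder path
pipeline = albumentations.Compose([
    albumentations.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225),
                             always_apply=True),
    albumentations.Resize(224, 224, always_apply=True),
])
augmented = pipeline(image=img)   # albumentations returns a dict
normalized = augmented["image"]   # HWC float array, ready for ToTensor/transpose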
Example #13
def run_training():
    # image_files
    image_files = glob.glob("../input/train_all_captchas/*.png")
    print(image_files[:4])

    # targets
    targets_orig = [i.split("/")[-1][:-4] for i in image_files]
    print(targets_orig[:5])

    # creating a list of list for the targets
    targets = [[j for j in i] for i in targets_orig]

    # flattening the lists
    targets_flat = [item for sublists in targets for item in sublists]
    # print(targets_flat)

    lbl_encoder = preprocessing.LabelEncoder()
    lbl_encoder.fit(targets_flat)
    enc_targets = [lbl_encoder.transform(x) for x in targets]

    # add 1 to every encoded label so that 0 stays reserved for unknown values
    enc_targets = np.array(enc_targets) + 1
    print(len(enc_targets))
    print(len(lbl_encoder.classes_))

    (
        train_imgs,
        test_imgs,
        train_targets_orig,
        test_target_orig,
        train_targets,
        test_targets,
    ) = model_selection.train_test_split(image_files,
                                         targets_orig,
                                         enc_targets,
                                         test_size=0.1,
                                         random_state=42)

    print(len(train_imgs), len(train_targets))
    print(len(test_imgs), len(test_targets))
    train_dataset = dataset.ClassificationDataset(
        image_paths=train_imgs,
        targets=train_targets,
        resize=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH),
    )

    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=config.BATCH_SIZE,
        num_workers=config.NUM_WORKERS,
        shuffle=True,
    )

    test_dataset = dataset.ClassificationDataset(
        image_paths=test_imgs,
        targets=test_targets,
        resize=(config.IMAGE_HEIGHT, config.IMAGE_WIDTH),
    )
    test_dataloader = torch.utils.data.DataLoader(
        test_dataset,
        batch_size=config.BATCH_SIZE,
        num_workers=config.NUM_WORKERS,
        shuffle=False,
    )

    model = CaptchaModel(num_chars=len(lbl_encoder.classes_))
    model.to(config.DEVICE)

    optimizer = torch.optim.Adam(model.parameters(), lr=3e-4)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           factor=0.8,
                                                           patience=5,
                                                           verbose=True)
    for epoch in range(config.EPOCHS):
        train_loss = engine.train(model, train_dataloader, optimizer)
        valid_preds, valid_loss = engine.eval(model, test_dataloader)
        valid_captcha_preds = []
        for vp in valid_preds:
            current_preds = decode_predictions(vp, lbl_encoder)
            valid_captcha_preds.extend(current_preds)
        combined = list(zip(test_target_orig, valid_captcha_preds))
        print(combined[:20])
        test_dup_rem = [remove_duplicates(c) for c in test_target_orig]
        accuracy = metrics.accuracy_score(test_dup_rem, valid_captcha_preds)
        print(
            f"Epoch={epoch}, Train Loss={train_loss}, Test Loss={valid_loss} Accuracy={accuracy}"
        )
        scheduler.step(valid_loss)
        joblib.dump(
            lbl_encoder,
            "../input/pickles/tan_pan_oltas_gst_epfo_rc_lbl_encoder.pkl")
        torch.save(model.state_dict(),
                   "../input/pickles/tan_pan_oltas_gst_epfo_rc_model.pth")