Example #1
def train():
    print("Loading data...")
    SRC, TGT, train, val, test = generate_dataloaders()

    devices = [0, 1, 2, 3]
    pad_idx = TGT.vocab.stoi["<blank>"]
    print("Making model...")
    model = make_model(len(SRC.vocab), len(TGT.vocab), N=6)
    model.cuda()
    criterion = LabelSmoothing(
        size=len(TGT.vocab), padding_idx=pad_idx, smoothing=0.1)
    criterion.cuda()
    BATCH_SIZE = 12000
    train_iter = BatchIterator(train, batch_size=BATCH_SIZE, device=torch.device(0),
                               repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                               batch_size_fn=batch_size_fn, train=True)
    valid_iter = BatchIterator(val, batch_size=BATCH_SIZE, device=torch.device(0),
                               repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
                               batch_size_fn=batch_size_fn, train=False)
    model_par = nn.DataParallel(model, device_ids=devices)
    model_opt = NoamOpt(model.src_embed[0].d_model, 1, 2000,
                        torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
    folder = get_unique_folder("./models/", "model")
    if not os.path.exists(folder):
        os.mkdir(folder)
    for epoch in tqdm(range(10)):
        model_par.train()
        run_epoch((rebatch(pad_idx, b) for b in train_iter),
                  model_par,
                  MultiGPULossCompute(model.generator, criterion,
                                      devices=devices, opt=model_opt))
        model_par.eval()
        loss = run_epoch((rebatch(pad_idx, b) for b in valid_iter),
                         model_par,
                         MultiGPULossCompute(model.generator, criterion,
                                             devices=devices, opt=None))
        torch.save(model.state_dict(), os.path.join(folder, "model.bin." + str(epoch)))
        print(loss)

    for batch in valid_iter:
        src = batch.src.transpose(0, 1)[:1]
        src_mask = (src != SRC.vocab.stoi["<blank>"]).unsqueeze(-2)
        out = greedy_decode(model, src, src_mask,
                            max_len=60, start_symbol=TGT.vocab.stoi["<s>"])
        print("Translation:", end="\t")
        for i in range(1, out.size(1)):
            sym = TGT.vocab.itos[out[0, i]]
            if sym == "</s>":
                break
            print(sym, end=" ")
        print()
        print("Target:", end="\t")
        for i in range(1, batch.trg.size(0)):
            sym = TGT.vocab.itos[batch.trg.data[i, 0]]
            if sym == "</s>":
                break
            print(sym, end=" ")
        print()
        break
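The helpers batch_size_fn and rebatch used above are defined elsewhere in this project, so BATCH_SIZE = 12000 is a token budget rather than a sentence count. A minimal sketch of what a token-count sizing function for this setup might look like (hypothetical name and globals, following the Annotated Transformer convention):

# Sketch only: lets the iterator grow a batch until the padded token count
# (number of examples times the longest sequence seen so far) reaches BATCH_SIZE.
max_src_in_batch, max_tgt_in_batch = 0, 0

def batch_size_fn_sketch(new, count, size_so_far):
    global max_src_in_batch, max_tgt_in_batch
    if count == 1:
        max_src_in_batch, max_tgt_in_batch = 0, 0
    max_src_in_batch = max(max_src_in_batch, len(new.src))
    max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)
    return max(count * max_src_in_batch, count * max_tgt_in_batch)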
Example #2
def run_prediction(model_name):
    tf.reset_default_graph()

    img_rows, img_cols = 64, 64

    test_data, test_id = read_and_normalize_test_data(img_rows,
                                                      img_cols,
                                                      color_type=1)
    logging.info('test_data shape: %s', test_data.shape)
    logging.info('test_id length: %s', len(test_id))

    nn = NaiveModel(img_rows, img_cols, 10, color_type=1)
    saver = tf.train.Saver()

    predictions = []
    test_data_iter = BatchIterator(test_data, batch_size=1024, mode='test')

    with tf.Session() as sess:
        saver.restore(sess, 'model/{}.ckpt'.format(model_name))
        with tqdm(total=test_data_iter.iters) as pbar:
            for _x in test_data_iter:
                pbar.update(1)
                predict = sess.run(nn.outputs,
                                   feed_dict={
                                       nn.images: _x,
                                       nn.prob: 1.0
                                   })
                predictions.append(predict)
            predictions = np.concatenate(predictions, axis=0)
    save_submission(test_id, predictions)
Example #3
def make_batch_iterator(options, dataset):
    sentences = dataset['sentences']
    extra = dataset['extra']
    word2idx = dataset['metadata']['word2idx']

    vocab_size = len(word2idx)

    batch_iterator = BatchIterator(sentences, extra=extra)

    return batch_iterator
Example #4
    def predict(self, sess, x, batch_size=1024):
        predicts = []
        for _x in BatchIterator(x, batch_size=batch_size, mode='test'):
            logits = sess.run(self.outputs,
                              feed_dict={
                                  self.images: _x,
                                  self.prob: 1.0
                              })
            predicts.append(logits)
        return np.concatenate(predicts, axis=0)
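The BatchIterator(x, batch_size=..., mode='test') class used in Examples #2 and #4 is not shown on this page. A minimal, hypothetical sketch of an iterator with that calling convention (the real class may also pad or reshape batches):

import numpy as np

class TestBatchIteratorSketch:
    """Yields consecutive slices of `data` in order, single pass, no shuffling."""

    def __init__(self, data, batch_size=1024, mode='test'):
        self.data = data
        self.batch_size = batch_size
        self.mode = mode
        # number of batches; Example #2 reads this as `.iters` for the tqdm total
        self.iters = int(np.ceil(len(data) / batch_size))

    def __iter__(self):
        for start in range(0, len(self.data), self.batch_size):
            yield self.data[start:start + self.batch_size]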
Example #5
def make_batch_iterator(options,
                        dset,
                        shuffle=True,
                        include_partial=False,
                        filter_length=0,
                        batch_size=None,
                        length_to_size=None,
                        contextual_target=False,
                        curriculum_start_length=False):
    sentences = dset['sentences']
    word2idx = dset['word2idx']
    extra = dset['extra']
    metadata = dset['metadata']

    n_etypes = metadata.get('n_etypes', None)
    etype2idx = metadata.get('etype2idx', None)

    cuda = options.cuda

    vocab_size = len(word2idx)

    batch_iterator = BatchIterator(
        sentences,
        extra=extra,
        shuffle=shuffle,
        include_partial=include_partial,
        filter_length=filter_length,
        batch_size=batch_size,
        cuda=cuda,
        size=options.hidden_dim,
        word2idx=word2idx,
        options_path=options.elmo_options_path,
        weights_path=options.elmo_weights_path,
    )

    # DIRTY HACK: Makes it easier to print examples later. Should really wrap this within the class.
    batch_iterator.word2idx = word2idx
    batch_iterator.n_etypes = n_etypes
    batch_iterator.etype2idx = etype2idx

    return batch_iterator
Example #6
    def fit(self, sess, x, y, batch_size=1024, nb_epoch=20):
        it = 0
        for _x, _y in BatchIterator((x, y),
                                    batch_size=batch_size,
                                    epoch=nb_epoch):
            it += 1
            loss, _ = sess.run([self.loss, self.optimizer],
                               feed_dict={
                                   self.images: _x,
                                   self.labels: _y,
                                   self.prob: 0.5
                               })
            if it % 100 == 0:
                print('Iter {} loss: {}'.format(it, loss))
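fit() above iterates BatchIterator((x, y), batch_size=..., epoch=nb_epoch) directly, so the iterator is expected to yield (x_batch, y_batch) pairs for `epoch` passes over the data. A hypothetical minimal version with that behavior:

import numpy as np

class TrainBatchIteratorSketch:
    """Shuffles x and y jointly each epoch and yields (x_batch, y_batch) pairs."""

    def __init__(self, data, batch_size=1024, epoch=1):
        self.x, self.y = data
        self.batch_size = batch_size
        self.epoch = epoch

    def __iter__(self):
        n = len(self.x)
        for _ in range(self.epoch):
            order = np.random.permutation(n)
            for start in range(0, n, self.batch_size):
                idx = order[start:start + self.batch_size]
                yield self.x[idx], self.y[idx]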
Example #7
def data_processing():
    train_dataset = pd.read_csv(
        'aclImdb/dataset_feat_clean/train_feat_clean.csv',
        usecols=[
            'clean_review', 'polarity', 'subjectivity', 'word_count',
            'UPPERCASE', 'DIGITS', 'PROPN', 'VERB', 'NOUN', 'PUNCT', 'ADJ',
            'label'
        ])

    train_dataset = train_dataset[[
        'clean_review', 'polarity', 'subjectivity', 'word_count', 'UPPERCASE',
        'DIGITS', 'PROPN', 'VERB', 'NOUN', 'PUNCT', 'ADJ', 'label'
    ]]

    train_scaler = StandardScaler()
    train_dataset.iloc[:, 3:11] = train_scaler.fit_transform(
        train_dataset.iloc[:, 3:11])
    val_dataset = pd.read_csv('aclImdb/dataset_feat_clean/val_feat_clean.csv',
                              usecols=[
                                  'clean_review', 'polarity', 'subjectivity',
                                  'word_count', 'UPPERCASE', 'DIGITS', 'PROPN',
                                  'VERB', 'NOUN', 'PUNCT', 'ADJ', 'label'
                              ])

    val_dataset = val_dataset[[
        'clean_review', 'polarity', 'subjectivity', 'word_count', 'UPPERCASE',
        'DIGITS', 'PROPN', 'VERB', 'NOUN', 'PUNCT', 'ADJ', 'label'
    ]]
    val_scaler = StandardScaler()
    val_dataset.iloc[:, 3:11] = val_scaler.fit_transform(
        val_dataset.iloc[:, 3:11])
    train_iterator = BatchIterator(
        train_dataset,
        batch_size=64,
        vocab_created=False,
        vocab=None,
        target_col=None,
        word2index=None,
        sos_token='<SOS>',
        eos_token='<EOS>',
        unk_token='<UNK>',
        pad_token='<PAD>',
        min_word_count=3,
        max_vocab_size=None,
        max_seq_len=0.9,
        use_pretrained_vectors=True,
        glove_path='glove/',
        glove_name='glove.6B.100d.txt',
        weights_file_name='glove/weights-biGRU-glove.npy')
    val_iterator = BatchIterator(val_dataset,
                                 batch_size=64,
                                 vocab_created=False,
                                 vocab=None,
                                 target_col=None,
                                 word2index=train_iterator.word2index,
                                 sos_token='<SOS>',
                                 eos_token='<EOS>',
                                 unk_token='<UNK>',
                                 pad_token='<PAD>',
                                 min_word_count=3,
                                 max_vocab_size=None,
                                 max_seq_len=0.9,
                                 use_pretrained_vectors=False,
                                 glove_path='glove/',
                                 glove_name='glove.6B.100d.txt',
                                 weights_file_name='glove/weights.npy')

    test_dataset = pd.read_csv(
        'aclImdb/dataset_feat_clean/test_feat_clean.csv',
        usecols=[
            'clean_review', 'polarity', 'subjectivity', 'word_count',
            'UPPERCASE', 'DIGITS', 'PROPN', 'VERB', 'NOUN', 'PUNCT', 'ADJ',
            'label'
        ])
    test_dataset = test_dataset[[
        'clean_review', 'polarity', 'subjectivity', 'word_count', 'UPPERCASE',
        'DIGITS', 'PROPN', 'VERB', 'NOUN', 'PUNCT', 'ADJ', 'label'
    ]]
    test_scaler = StandardScaler()
    test_dataset.iloc[:, 3:11] = test_scaler.fit_transform(
        test_dataset.iloc[:, 3:11])
    test_iterator = BatchIterator(test_dataset,
                                  batch_size=256,
                                  vocab_created=False,
                                  vocab=None,
                                  target_col=None,
                                  word2index=train_iterator.word2index,
                                  sos_token='<SOS>',
                                  eos_token='<EOS>',
                                  unk_token='<UNK>',
                                  pad_token='<PAD>',
                                  min_word_count=3,
                                  max_vocab_size=None,
                                  max_seq_len=0.9,
                                  use_pretrained_vectors=False,
                                  glove_path='glove/',
                                  glove_name='glove.6B.100d.txt',
                                  weights_file_name='glove/weights.npy')

    return train_iterator, val_iterator, test_iterator
    parser.add_argument("-l", "--linguistic_model", type=str, required=True)
    parser.add_argument("-a", "--acoustic_model", type=str, required=True)
    args = parser.parse_args()

    assert isfile(
        args.acoustic_model), "acoustic_model weights file does not exist"
    assert isfile(args.acoustic_model.replace(
        ".torch", ".json")), "acoustic_model config file does not exist"
    assert isfile(
        args.linguistic_model), "linguistic_model weights file does not exist"
    assert isfile(args.linguistic_model.replace(
        ".torch", ".json")), "linguistic_model config file does not exist"

    test_features_acoustic, test_labels_acoustic, val_features_acoustic, val_labels_acoustic, _, _ = load_spectrogram_dataset(
    )
    test_iterator_acoustic = BatchIterator(test_features_acoustic,
                                           test_labels_acoustic, 100)
    test_features_linguistic, test_labels_linguistic, val_features_linguistic, val_labels_linguistic, _, _ = load_linguistic_dataset(
    )
    test_iterator_linguistic = BatchIterator(test_features_linguistic,
                                             test_labels_linguistic, 100)
    val_iterator_acoustic = BatchIterator(val_features_acoustic,
                                          val_labels_acoustic, 100)
    val_iterator_linguistic = BatchIterator(val_features_linguistic,
                                            val_labels_linguistic, 100)

    assert np.array_equal(
        test_labels_acoustic, test_labels_linguistic
    ), "Labels for acoustic and linguistic datasets are not the same!"
    """Choosing hardware"""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if device == "cuda":
Example #9
                              Y_val,
                              DATA_DIR_PATH,
                              batch_size=batch_size,
                              image_size=IMAGE_SIZE,
                              patience=PATIENCE,
                              patience_increase=PATIENCE_INCREASE)

    # train
    for e in range(nb_epoch):
        print('-' * 40)
        print('Epoch', e)
        print('-' * 40)
        print("Training...")

        # train batch by batch
        batches = list(BatchIterator(X_train, Y_train, batch_size, IMAGE_SIZE))
        progbar = generic_utils.Progbar(len(X_train))

        for X_batch, Y_batch in batches:  # X_batch: filenames, Y_batch: annotations
            X_batch_image = []
            for image_path in X_batch:
                # load pre-processed train images from filenames
                processed_img_arr = cv2.imread(DATA_DIR_PATH + '/' +
                                               image_path)
                # perform online data augmentation
                if data_augmentation:
                    processed_img_arr = augment(processed_img_arr)
                #print(X_batch_image.shape)
                X_batch_image.append(
                    processed_img_arr.reshape(3, IMAGE_SIZE, IMAGE_SIZE))
Example #10
if __name__ == "__main__":
    BATCH_SIZE = 12000
    parser = argparse.ArgumentParser()
    parser.add_argument('model_name')
    parser.add_argument('log_name')

    args = parser.parse_args()
    model_file = open(args.model_name, 'rb')
    log_file = open(args.log_name, 'w')

    print("Loading data...")
    SRC, TGT, train, val, test = generate_dataloaders("./data_processed/")
    test_iter = BatchIterator(val,
                              batch_size=BATCH_SIZE,
                              device=torch.device(0),
                              repeat=False,
                              sort_key=lambda x: (len(x.src), len(x.trg)),
                              batch_size_fn=batch_size_fn,
                              train=False)
    print("Loading model...")
    model = torch.load(model_file)
    print("Generating test output...")
    log("Testing model stored at " + args.model_name + ".", log_file)
    for i, batch in tqdm(enumerate(test_iter)):
        src = batch.src.transpose(0, 1)[:1]
        src_mask = (src != SRC.vocab.stoi["<blank>"]).unsqueeze(-2)
        out = greedy_decode(model,
                            src,
                            src_mask,
                            max_len=60,
                            start_symbol=TGT.vocab.stoi["<s>"])
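greedy_decode is imported from elsewhere in this project. A sketch of what a greedy decoder for such an encoder-decoder model typically looks like, assuming (not shown here) that the model exposes encode, decode and a generator log-softmax head, and that a subsequent_mask helper builds the causal target mask:

import torch

def greedy_decode_sketch(model, src, src_mask, max_len, start_symbol):
    # Sketch only: encode once, then extend the target one token at a time,
    # always picking the highest-scoring next token.
    memory = model.encode(src, src_mask)                          # assumed API
    ys = torch.full((1, 1), start_symbol, dtype=src.dtype, device=src.device)
    for _ in range(max_len - 1):
        tgt_mask = subsequent_mask(ys.size(1)).type_as(src_mask)  # assumed helper
        out = model.decode(memory, src_mask, ys, tgt_mask)        # assumed API
        log_prob = model.generator(out[:, -1])                    # assumed head
        next_word = log_prob.argmax(dim=1)
        ys = torch.cat([ys, next_word.view(1, 1)], dim=1)
    return ys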
Example #11
def data_processing():
    train_dataset = pd.read_csv(
        'aclImdb/dataset_feat_clean/train_feat_clean.csv',
        usecols=['clean_review', 'label'])
    train_dataset = train_dataset[['clean_review', 'label']]
    val_dataset = pd.read_csv('aclImdb/dataset_feat_clean/val_feat_clean.csv',
                              usecols=['clean_review', 'label'])
    val_dataset = val_dataset[['clean_review', 'label']]
    train_iterator = BatchIterator(train_dataset,
                                   batch_size=256,
                                   vocab_created=False,
                                   vocab=None,
                                   target_col=None,
                                   word2index=None,
                                   sos_token='<SOS>',
                                   eos_token='<EOS>',
                                   unk_token='<UNK>',
                                   pad_token='<PAD>',
                                   min_word_count=3,
                                   max_vocab_size=None,
                                   max_seq_len=0.9,
                                   use_pretrained_vectors=False,
                                   glove_path='glove/',
                                   glove_name='glove.6B.100d.txt',
                                   weights_file_name='glove/weights.npy')
    val_iterator = BatchIterator(val_dataset,
                                 batch_size=256,
                                 vocab_created=False,
                                 vocab=None,
                                 target_col=None,
                                 word2index=train_iterator.word2index,
                                 sos_token='<SOS>',
                                 eos_token='<EOS>',
                                 unk_token='<UNK>',
                                 pad_token='<PAD>',
                                 min_word_count=3,
                                 max_vocab_size=None,
                                 max_seq_len=0.9,
                                 use_pretrained_vectors=False,
                                 glove_path='glove/',
                                 glove_name='glove.6B.100d.txt',
                                 weights_file_name='glove/weights.npy')
    test_dataset = pd.read_csv(
        'dataset/datasets_feat_clean/test_feat_clean.csv',
        usecols=['clean_review', 'label'])
    test_dataset = test_dataset[['clean_review', 'label']]
    test_iterator = BatchIterator(test_dataset,
                                  batch_size=256,
                                  vocab_created=False,
                                  vocab=None,
                                  target_col=None,
                                  word2index=train_iterator.word2index,
                                  sos_token='<SOS>',
                                  eos_token='<EOS>',
                                  unk_token='<UNK>',
                                  pad_token='<PAD>',
                                  min_word_count=3,
                                  max_vocab_size=None,
                                  max_seq_len=0.9,
                                  use_pretrained_vectors=False,
                                  glove_path='glove/',
                                  glove_name='glove.6B.100d.txt',
                                  weights_file_name='glove/weights.npy')
    return train_iterator, val_iterator, test_iterator
Example #12
#X_test = X_test.astype("float32")
#X_train /= 255
#X_test /= 255

if not data_augmentation:
    print("Not using data augmentation or normalization")
    #model.fit(X_train, Y_train, batch_size=batch_size, nb_epoch=nb_epoch)
    # Alternatively, let's say you have a MiniBatchGenerator that yields 32-64 samples at a time:
    for e in range(nb_epoch):
        print("epoch %d" % e)
        #DEBUG
        #X_train = X_train[:35]
        #y_train = y_train[:35]
        #X_train = X_train[:5000]
        #y_train = y_train[:5000]
        b = BatchIterator(X_train, y_train, batch_size)
        X_batch, Y_batch = b.next()
        #print(X_batch[0])
        #print(Y_batch[0])

        num=0
        progbar = generic_utils.Progbar(len(X_train))
        while X_batch is not None:# and Y_batch != None:
            X_batch, Y_batch = b.next()
            #print(Y_batch)

            try:
                X_batch = X_batch.astype("float32")
            except AttributeError as e:
                print(e)
                print(X_batch)
Example #13
def run_training(model, cfg, test_features, test_labels, train_data,
                 train_labels, val_data, val_labels):
    model_run_path = MODEL_PATH + "/" + strftime("%Y-%m-%d_%H:%M:%S", gmtime())
    model_weights_path = "{}/{}".format(model_run_path, cfg.model_weights_name)
    model_config_path = "{}/{}".format(model_run_path, cfg.model_config_name)
    result_path = "{}/result.txt".format(model_run_path)
    os.makedirs(model_run_path, exist_ok=True)
    """Choosing hardware"""
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    if device == "cuda":
        print(
            "Using GPU. Setting default tensor type to torch.cuda.FloatTensor")
        torch.set_default_tensor_type("torch.cuda.FloatTensor")
    else:
        print("Using CPU. Setting default tensor type to torch.FloatTensor")
        torch.set_default_tensor_type("torch.FloatTensor")

    with open(model_config_path, "w") as config_file:
        json.dump(cfg.to_json(), config_file)
    """Converting model to specified hardware and format"""
    model.float()
    model = model.to(device)
    """Defining loss and optimizer"""
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)
    criterion = torch.nn.CrossEntropyLoss()
    criterion = criterion.to(device)
    """Creating data generators"""
    test_iterator = BatchIterator(test_features, test_labels, 100)
    train_iterator = BatchIterator(train_data, train_labels, cfg.batch_size)
    validation_iterator = BatchIterator(val_data, val_labels, 100)

    train_loss = 999
    best_val_loss = 999
    train_acc = 0
    epochs_without_improvement = 0
    """Running training"""
    for epoch in range(cfg.n_epochs):
        train_iterator.shuffle()
        if epochs_without_improvement == cfg.patience:
            break

        val_loss, val_acc, val_weighted_acc, conf_mat = evaluate(
            model, validation_iterator, criterion)

        if val_loss < best_val_loss:
            torch.save(model.state_dict(), model_weights_path)
            best_val_loss = val_loss
            best_val_acc = val_acc
            best_val_weighted_acc = val_weighted_acc
            best_conf_mat = conf_mat
            epochs_without_improvement = 0
            log_success(
                " Epoch: {} | Val loss improved to {:.4f} | val acc: {:.3f} | weighted val acc: {:.3f} | train loss: {:.4f} | train acc: {:.3f} | saved model to {}."
                .format(epoch, best_val_loss, best_val_acc,
                        best_val_weighted_acc, train_loss, train_acc,
                        model_weights_path))

        train_loss, train_acc, train_weighted_acc, _ = train(
            model, train_iterator, optimizer, criterion, cfg.reg_ratio)

        epochs_without_improvement += 1

        if not epoch % 1:
            log(
                f'| Epoch: {epoch+1} | Val Loss: {val_loss:.3f} | Val Acc: {val_acc*100:.2f}% '
                f'| Train Loss: {train_loss:.4f} | Train Acc: {train_acc*100:.3f}%',
                cfg.verbose)

    model.load_state_dict(torch.load(model_weights_path))
    test_loss, test_acc, test_weighted_acc, conf_mat = evaluate(
        model, test_iterator, criterion)

    result = f'| Epoch: {epoch+1} | Test Loss: {test_loss:.3f} | Test Acc: {test_acc*100:.2f}% | Weighted Test Acc: {test_weighted_acc*100:.2f}%\n Confusion matrix:\n {conf_mat}'
    log_major("Train acc: {}".format(train_acc))
    log_major(result)
    log_major("Hyperparameters:{}".format(cfg.to_json()))
    with open(result_path, "w") as file:
        file.write(result)
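run_training above assumes a BatchIterator(features, labels, batch_size) object that can be re-iterated every epoch and exposes a shuffle() method. A minimal hypothetical version of that interface:

import numpy as np

class LabeledBatchIteratorSketch:
    """Yields (features, labels) batches and supports an explicit per-epoch shuffle()."""

    def __init__(self, features, labels, batch_size=100):
        self.features = np.asarray(features)
        self.labels = np.asarray(labels)
        self.batch_size = batch_size

    def shuffle(self):
        order = np.random.permutation(len(self.features))
        self.features = self.features[order]
        self.labels = self.labels[order]

    def __iter__(self):
        for start in range(0, len(self.features), self.batch_size):
            end = start + self.batch_size
            yield self.features[start:end], self.labels[start:end]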
Example #14
    def validate(self, epoch, model):
        '''
        Returns True when overfitting (early stopping).
        '''

        print("Validating...")
        self.prev_val_score = self.cur_val_score
        progbar = generic_utils.Progbar(self.X_val.shape[0])
        t = []

        val_batches = list(
            BatchIterator(self.X_val, self.Y_val, self.batch_size,
                          self.image_size))
        for X_batch, Y_batch in val_batches:  #zip(self.X_val, self.Y_val):
            X_batch_image = []
            for image_path in X_batch:
                # load pre-processed val images from filenames
                processed_img_arr = cv2.imread(self.data_dir_path + '/' +
                                               image_path)
                #print(processed_img_arr.shape)
                #print(self.image_size)
                X_batch_image.append(
                    processed_img_arr.reshape(3, self.image_size,
                                              self.image_size))
            # convert to ndarray
            X_batch_image = np.array(X_batch_image)
            X_batch_image = X_batch_image.astype("float32")
            X_batch_image /= 255

            score = model.test_on_batch(X_batch_image, Y_batch)
            valid_accuracy = model.test_on_batch(
                X_batch_image, Y_batch, accuracy=True)  # calc valid accuracy
            progbar.add(X_batch.shape[0],
                        values=[("val loss", score),
                                ("val accuracy", valid_accuracy[1])])
            t.append(score)

        # track the last validation score of the validation
        self.cur_val_score = mean(t)
        if self.first_val or self.cur_val_score < self.best_val_score:
            self.best_val_score = self.cur_val_score
            self.first_val = False
        print('cur_val_score: %f' % self.cur_val_score)
        print('best_val_score: %f' % self.best_val_score)
        print('prev_val_score: %f' % self.prev_val_score)

        # detect worsening and perform early stopping if needed
        if epoch > self.patience:
            if (not self.being_patient and self.cur_val_score > self.prev_val_score) \
                    or (self.being_patient and self.cur_val_score > self.tracking_score):
                if not self.being_patient:  # first time
                    self.being_patient = True
                    self.tracking_score = self.cur_val_score
                self.patience_increase_count += 1
                print('early stopping: being patient %d / %d' %
                      (self.patience_increase_count, self.patience_increase))
                if self.patience_increase_count >= self.patience_increase:
                    print('EARLY STOPPING')
                    return True
            elif self.being_patient and self.cur_val_score < self.tracking_score:
                self.being_patient = False
                self.patience_increase_count = 0
                print('patience_increase initialized')
        return False
Example #15
def run_training(model, cfg, test_features, test_labels, train_data,
                 train_labels, val_data, val_labels):
    tmp_run_path = MODEL_PATH + "/tmp_" + get_datetime()
    model_weights_path = "{}/{}".format(tmp_run_path, cfg.model_weights_name)
    model_config_path = "{}/{}".format(tmp_run_path, cfg.model_config_name)
    result_path = "{}/result.txt".format(tmp_run_path)
    os.makedirs(tmp_run_path, exist_ok=True)
    with open(model_config_path, "w") as config_file:
        json.dump(cfg.to_json(), config_file)
    """Defining loss and optimizer"""
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg.lr)
    criterion = torch.nn.CrossEntropyLoss()
    criterion = criterion.to(get_device())
    """Creating data generators"""
    test_iterator = BatchIterator(test_features, test_labels)
    train_iterator = BatchIterator(train_data, train_labels, cfg.batch_size)
    validation_iterator = BatchIterator(val_data, val_labels)

    train_loss = 999
    best_val_loss = 999
    train_acc = 0
    epochs_without_improvement = 0

    writer = SummaryWriter()
    """Running training"""
    for epoch in range(cfg.n_epochs):
        train_iterator.shuffle()
        if epochs_without_improvement == cfg.patience:
            break

        val_loss, val_cm = evaluate(model, validation_iterator, criterion)

        if val_loss < best_val_loss:
            torch.save(model.state_dict(), model_weights_path)
            best_val_loss = val_loss
            best_val_acc = val_cm.accuracy
            best_val_unweighted_acc = val_cm.unweighted_accuracy
            epochs_without_improvement = 0
            log_success(
                " Epoch: {} | Val loss improved to {:.4f} | val acc: {:.3f} | weighted val acc: {:.3f} | train loss: {:.4f} | train acc: {:.3f} | saved model to {}."
                .format(epoch, best_val_loss, best_val_acc,
                        best_val_unweighted_acc, train_loss, train_acc,
                        model_weights_path))

        train_loss, train_cm = train(model, train_iterator, optimizer,
                                     criterion, cfg.reg_ratio)
        train_acc = train_cm.accuracy

        writer.add_scalars('all/losses', {
            "val": val_loss,
            "train": train_loss
        }, epoch)
        writer.add_scalars('all/accuracy', {
            "val": val_cm.accuracy,
            "train": train_cm.accuracy
        }, epoch)
        writer.add_scalars(
            'all/unweighted_acc', {
                "val": val_cm.unweighted_accuracy,
                "train": train_cm.unweighted_accuracy
            }, epoch)
        writer.add_scalar('val/loss', val_loss, epoch)
        writer.add_scalar('val/val_acc', val_cm.accuracy, epoch)
        writer.add_scalar('val/val_unweighted_acc', val_cm.unweighted_accuracy,
                          epoch)
        writer.add_scalar('train/loss', train_loss, epoch)
        writer.add_scalar('train/train_acc', train_cm.accuracy, epoch)
        writer.add_scalar('train/train_unweighted_acc',
                          train_cm.unweighted_accuracy, epoch)

        epochs_without_improvement += 1

        if not epoch % 1:
            log(
                f'| Epoch: {epoch+1} | Val Loss: {val_loss:.3f} | Val Acc: {val_cm.accuracy*100:.2f}% '
                f'| Train Loss: {train_loss:.4f} | Train Acc: {train_acc*100:.3f}%',
                cfg.verbose)

    model.load_state_dict(torch.load(model_weights_path))
    test_loss, test_cm = evaluate(model, test_iterator, criterion)

    result = f'| Epoch: {epoch+1} | Test Loss: {test_loss:.3f} | Test Acc: {test_cm.accuracy*100:.2f}% | Unweighted Test Acc: {test_cm.unweighted_accuracy*100:.2f}%\n Confusion matrix:\n {test_cm}'
    log_major("Train acc: {}".format(train_acc))
    log_major(result)
    log_major("Hyperparameters:{}".format(cfg.to_json()))
    with open(result_path, "w") as file:
        file.write(result)

    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()

    output_path = "{}/{}_{:.3f}Acc_{:.3f}UAcc_{}".format(
        MODEL_PATH, cfg.model_name, test_cm.accuracy,
        test_cm.unweighted_accuracy, strftime("%Y-%m-%d_%H:%M:%S", gmtime()))
    os.rename(tmp_run_path, output_path)

    return test_loss