def train_model_all(path_name, C, kernel_type, model_path):
    """Train one binary SVM per label found in the directory's training data.

    Returns a dict mapping each label in the training set to its trained
    classifier (a callable that predicts on the feature matrix).
    """
    features, raw_labels, label_set = data_loader.load_vectors(path_name, 'tr_data.txt')

    log('C: ' + str(C))
    log('kernel type: ' + kernel_type + '\n')

    trained = {}
    for target in label_set:
        log('Training for: ' + target)
        # Binarize labels for this target — presumably +1 for the target
        # label and -1 otherwise; confirm in make_numeric_labels.
        numeric = make_numeric_labels(raw_labels, target)
        model_name = make_model_name(model_path, target, C)
        classifier = kernalized_svm.create_svm(
            features, np.array(numeric), C, kernel_type, model_name)
        trained[target] = classifier

        # Report performance on the training set itself.
        predictions = classifier(features)
        acc, precision, recall, f1 = calc_acc(numeric, predictions)
        log("Training accuracy: " + str(acc))
        log('Precision: ' + str(precision))
        log('Recall: ' + str(recall))
        log('F1 score: ' + str(f1))
        log('')
        log_csv([model_path, target, C, kernel_type, acc, precision, recall, f1])

    return trained
# Example #2
# 0
def train():
    """Run the training loop, resuming from checkpoint/history when present.

    Restores weights from `ckpt_dir` (if a checkpoint exists) and the best
    validation loss / next epoch from `history_file`, then trains the
    remaining epochs, appending per-epoch stats to the history CSV.
    """
    # NOTE(review): relies on module-level names (gpu_options, max_to_keep,
    # ckpt_dir, history_file, num_epochs, opt, start_time, train_one_epoch,
    # val_one_epoch) — confirm they are defined before this is called.
    # FIX: use the session as a context manager so it is always closed;
    # the original leaked the tf.Session.
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(max_to_keep=max_to_keep)
        utils.load_ckpt(sess, ckpt_dir, saver)

        best_loss = 1e6
        start_epoch = 0
        if history_file.exists():
            # Resume bookkeeping: best validation loss so far and the epoch
            # to continue from.
            df = pd.read_csv(history_file)
            best_loss = df['best_loss'].min()
            start_epoch = int(df.iloc[-1]['epoch']) + 1

        print('Training ...')
        for epoch in range(start_epoch, num_epochs):
            train_loss, train_lr = train_one_epoch(sess, epoch, saver)
            val_loss, best_loss = val_one_epoch(sess, epoch, saver, best_loss)
            csv_header = ['epoch', 'lr', 'train_loss', 'val_loss', 'best_loss']
            csv_values = [epoch, train_lr, train_loss, val_loss, best_loss]
            # Header only on the very first epoch of a fresh run; resumed
            # runs already have one in the file.
            utils.log_csv(history_file,
                          csv_values,
                          header=csv_header if epoch == 0 else None)
            print(
                f'[{opt.area}-{opt.mode}] Epoch {epoch} loss:{train_loss:.6f}, val loss:{val_loss:.6f},duration:{time.time() - start_time:.3f}s'
            )

        print('Training completed...')
# Example #3
# 0
    def train(self,
              train_dir,
              val_dir,
              patch_size,
              patch_stride,
              batch_size,
              train_refs,
              num_workers=0,
              epochs=30,
              resume=True):
        """Train the model on patches from `train_dir`, validating on `val_dir`.

        Optionally resumes from the last checkpoint and history CSV. After
        each epoch, appends stats to the history file and keeps a copy of
        the checkpoint with the lowest validation error as the best model.
        """
        self.logger.info('Loading data...')
        train_set = PatchSet(train_dir,
                             self.image_size,
                             patch_size,
                             patch_stride,
                             n_refs=train_refs)
        val_set = PatchSet(val_dir,
                           self.image_size,
                           patch_size,
                           n_refs=train_refs)
        train_loader = DataLoader(train_set,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  num_workers=num_workers,
                                  drop_last=True)
        val_loader = DataLoader(val_set,
                                batch_size=batch_size,
                                num_workers=num_workers)

        least_error = sys.maxsize
        start_epoch = 0
        if resume and self.checkpoint.exists():
            utils.load_checkpoint(self.checkpoint, self.model, self.optimizer)
            if self.history.exists():
                # Resume bookkeeping: best error so far and next epoch index.
                df = pd.read_csv(self.history)
                least_error = df['val_error'].min()
                start_epoch = int(df.iloc[-1]['epoch']) + 1

        self.logger.info('Training...')
        scheduler = ReduceLROnPlateau(self.optimizer,
                                      mode='min',
                                      factor=0.1,
                                      patience=5)
        for epoch in range(start_epoch, epochs + start_epoch):
            for param_group in self.optimizer.param_groups:
                self.logger.info(f"Current learning rate: {param_group['lr']}")

            train_loss, train_error = self.train_on_epoch(epoch, train_loader)
            val_loss, val_error = self.test_on_epoch(val_loader)
            csv_header = [
                'epoch', 'train_loss', 'train_error', 'val_loss', 'val_error'
            ]
            csv_values = [epoch, train_loss, train_error, val_loss, val_error]
            # FIX: write the CSV header only on the first epoch of a fresh
            # run — previously it was re-emitted on every epoch, duplicating
            # header rows in the history file.
            utils.log_csv(self.history,
                          csv_values,
                          header=csv_header if epoch == 0 else None)
            scheduler.step(val_loss)
            if val_error < least_error:
                # Assumes train_on_epoch saved self.checkpoint this epoch —
                # TODO confirm; otherwise "best" may lag by one save.
                shutil.copy(str(self.checkpoint), str(self.best))
                least_error = val_error
def test_model_all(models, path_name, text_file, test_or_validation, model_path, C, kernel_type):
    """Evaluate each per-label SVM in `models` on the given data file.

    Logs accuracy/precision/recall/F1 per label and appends a stats row to
    the CSV log. Labels present in the data but absent from `models` are
    silently skipped.
    """
    xTest, yTest, label_set = data_loader.load_vectors(path_name, text_file)
    for correct_label in label_set:
        if correct_label in models:
            # Binarize labels for this target — presumably +1 for the
            # target label and -1 otherwise; confirm in make_numeric_labels.
            test_numeric_labels = make_numeric_labels(yTest, correct_label)
            test_pred = models[correct_label](np.array(xTest))
            acc, precision, recall, f1 = calc_acc(test_numeric_labels, test_pred)
            log(test_or_validation + ' accuracy for ' + correct_label + ': ' + str(acc))
            log('Precision for ' + correct_label + ': ' + str(precision))
            log('Recall for ' + correct_label + ': ' + str(recall))
            log('F1 score for ' + correct_label + ': ' + str(f1))
            log('')
            stats_list = [model_path, path_name, correct_label, C, kernel_type, acc, precision, recall, f1]
            log_csv(stats_list)
def train_model(pos_path_name, neg_path_name, C, kernel_type, model_path):
    """Train a single binary SVM from separate positive/negative directories.

    Loads positives labeled +1 and negatives labeled -1, trains one
    classifier, logs training-set metrics, and returns the classifier.
    """
    xTr_pos, yTr_pos, pos_label = data_loader.load_vectors_of_class(pos_path_name, 'tr_data.txt', 1)
    xTr_neg, yTr_neg, neg_label = data_loader.load_vectors_of_class(neg_path_name, 'tr_data.txt', -1)
    xTr = np.concatenate((xTr_pos, xTr_neg))
    yTr = np.concatenate((yTr_pos, yTr_neg))

    log('C: ' + str(C))
    log('kernel type: ' + kernel_type + '\n')

    log('Training for: ' + pos_label)
    svm_classifier = kernalized_svm.create_svm(xTr, yTr, C, kernel_type, make_model_name(model_path, pos_label, C))

    # Report performance on the training set itself.
    preds = svm_classifier(xTr)
    training_acc, precision, recall, f1 = calc_acc(yTr, preds)
    log("Training accuracy: " + str(training_acc))
    log('Precision: ' + str(precision))
    log('Recall: ' + str(recall))
    log('F1 score: ' + str(f1))
    log('')
    stats_list = [model_path, pos_label, C, kernel_type, training_acc, precision, recall, f1]
    log_csv(stats_list)

    return svm_classifier