def evaluate(segmentation_model, images_dir, masks_dir, metrics, size=None):
    segmentation_dl = torch.utils.data.DataLoader(
        SegmentationDataset(images_dir, masks_dir, size=size, crop=False),
        batch_size=1, shuffle=False)

    eval_out = model_metrics(segmentation_model, segmentation_dl, stats=metrics)
    print('Segmentation model', eval_out)
    return eval_out
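
# Hedged usage sketch for evaluate(); the checkpoint path, data directories,
# and image size below are illustrative assumptions, not values taken from
# this repo (IoU and accuracy are the metric callables used further down):
#
#     seg_model = torch.load('segmentation_model.pth')
#     evaluate(seg_model, 'data/val/images', 'data/val/masks',
#              metrics=(IoU, accuracy), size=128)
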
def evaluate_gan_mask_generator(model, G, bg_direction, params,
                                mask_postprocessing, zs, z_noise, num_steps):
    mask_generator = MaskGenerator(
        G, bg_direction, params, [], mask_postprocessing,
        zs=zs, z_noise=z_noise).cuda().eval()
    def it():
        # Infinite stream of single-sample (image, mask) batches drawn from
        # the GAN-based mask generator; model_metrics below stops after
        # num_steps samples.
        while True:
            sample = mask_generator()
            for img, mask in zip(sample[0], sample[1]):
                yield img.unsqueeze(0), mask

    score = {
        DEFAULT_EVAL_KEY:
            model_metrics(SegmentationInference(model), it(), num_steps, (F_max,)),
        THR_EVAL_KEY:
            model_metrics(Threshold(model), it(), num_steps, (IoU, accuracy)),
    }

    return score
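
# For reference, a minimal sketch of the metric loop that model_metrics is
# assumed to implement (the real helper lives elsewhere in this repo; the
# signature here is inferred from the two call sites above and may differ):
#
#     def model_metrics(model, loader, n_steps=None, stats=(IoU,)):
#         totals = {stat.__name__: 0.0 for stat in stats}
#         n = 0
#         for img, mask in loader:
#             if n_steps is not None and n >= n_steps:
#                 break
#             pred = model(img.cuda())
#             for stat in stats:
#                 totals[stat.__name__] += stat(pred, mask.cuda())
#             n += 1
#         return {name: total / max(n, 1) for name, total in totals.items()}
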
def predict_cat(X_train, X_test, y_train, y_test=None):
    # X_train: description data for training
    # y_train: corresponding categories for training
    # X_test and y_test: description and category data for testing

    # Vectorize the train and test data with TF-IDF (Term Frequency-Inverse
    # Document Frequency): a term is weighted by its frequency within a
    # document and discounted by how many documents it appears in, i.e.
    # score(t, d) = tf(t, d) * log(N / df(t)), so corpus-wide filler words
    # score low while document-specific terms score high.
    train_tfIdf, test_tfIdf = vectorize(0, X_train, X_test)

    train_SVC(train_tfIdf, y_train)

    # Reload the persisted linear-kernel SVC and evaluate it.
    with open("linearkernelSVC.sav", "rb") as f:
        classifier = pickle.load(f)
    predictions = classifier.predict(test_tfIdf)  # predictions on unseen data
    train_score = classifier.score(train_tfIdf, y_train)
    print(f"\n\nTrain Accuracy: {train_score * 100} %\n\n")
    score = classifier.score(test_tfIdf, y_test)
    model_metrics(classifier, y_test, predictions, score)
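
# A minimal sketch of the two helpers predict_cat() relies on, under the
# assumption that they wrap scikit-learn's TfidfVectorizer and a linear-kernel
# SVC persisted with pickle; the real vectorize()/train_SVC() may differ:
#
#     from sklearn.feature_extraction.text import TfidfVectorizer
#     from sklearn.svm import SVC
#
#     def vectorize(mode, X_train, X_test):
#         # Fit the TF-IDF vocabulary on the training descriptions only, then
#         # reuse it on the test set so no test data leaks into the IDF stats.
#         tfidf = TfidfVectorizer()
#         return tfidf.fit_transform(X_train), tfidf.transform(X_test)
#
#     def train_SVC(train_tfIdf, y_train):
#         clf = SVC(kernel='linear')
#         clf.fit(train_tfIdf, y_train)
#         with open("linearkernelSVC.sav", "wb") as f:
#             pickle.dump(clf, f)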