Example #1
import os
import tempfile

import javabridge

import weka.core.serialization as serialization
from weka.classifiers import Classifier
from weka.core.converters import Loader
from weka.core.dataset import Instances

import helper  # local helper module shipped with the python-weka-wrapper examples


def main():
    """
    Just runs some example code.
    """

    # load a dataset
    iris_file = helper.get_data_dir() + os.sep + "iris.arff"
    helper.print_info("Loading dataset: " + iris_file)
    loader = Loader("weka.core.converters.ArffLoader")
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()

    # train classifier
    classifier = Classifier("weka.classifiers.trees.J48")
    classifier.build_classifier(iris_data)

    # save and read object
    helper.print_title("I/O: model (using serialization module)")
    outfile = tempfile.gettempdir() + os.sep + "j48.model"
    serialization.write(outfile, classifier)
    model = Classifier(jobject=serialization.read(outfile))
    print(model)

    # save classifier and dataset header (multiple objects)
    helper.print_title("I/O: model and header (using serialization module)")
    serialization.write_all(
        outfile,
        [classifier, Instances.template_instances(iris_data)])
    objects = serialization.read_all(outfile)
    for i, obj in enumerate(objects):
        helper.print_info("Object #" + str(i + 1) + ":")
        if javabridge.get_env().is_instance_of(
                obj,
                javabridge.get_env().find_class("weka/core/Instances")):
            obj = Instances(jobject=obj)
        elif javabridge.get_env().is_instance_of(
                obj,
                javabridge.get_env().find_class(
                    "weka/classifiers/Classifier")):
            obj = Classifier(jobject=obj)
        print(obj)

    # save and read object
    helper.print_title("I/O: just model (using Classifier class)")
    outfile = tempfile.gettempdir() + os.sep + "j48.model"
    classifier.serialize(outfile)
    model, _ = Classifier.deserialize(outfile)
    print(model)

    # save classifier and dataset header (multiple objects)
    helper.print_title("I/O: model and header (using Classifier class)")
    classifier.serialize(outfile, header=iris_data)
    model, header = Classifier.deserialize(outfile)
    print(model)
    if header is not None:
        print(header)
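
The snippet assumes a running JVM. A minimal harness, following the convention used throughout the python-weka-wrapper examples:

import traceback
import weka.core.jvm as jvm

if __name__ == "__main__":
    try:
        jvm.start()        # boot the JVM that backs the Weka classes
        main()
    except Exception:
        print(traceback.format_exc())
    finally:
        jvm.stop()         # always shut the JVM down, even on error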
Example #2
def load_classification_model(filename):
    # Classifier.deserialize() returns (classifier, header); the header is the
    # dataset structure (Instances) saved alongside the model, so the class
    # labels can be recovered from it
    classifier, header = Classifier.deserialize(get_file_destinantion(filename))

    return {
        "classes": header.class_attribute.values,
        "classifier": classifier,
    }
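
A hypothetical usage sketch (the filename is a placeholder, and `inst` stands for an Instance prepared elsewhere; it assumes the model was serialized together with its dataset header):

model_info = load_classification_model("j48.model")
print(model_info["classes"])  # class labels recovered from the saved header
# idx = model_info["classifier"].classify_instance(inst)
# label = model_info["classes"][int(idx)]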
Example #3
def load_model(model_cache_file_name):
    """ Loads the cached classifier model and stores it in the __predictors global variable
    :param model_cache_file_name: base name of the cached model file, resolved
        under caches/model with a '.cache' extension
    :return: N/A
    """
    global __predictors
    path = os.path.join('caches', 'model')
    path = os.path.join(path, model_cache_file_name + '.cache')
    # deserialize() returns (classifier, header); the header is not needed here
    __predictors["LMT"], _ = Classifier.deserialize(path)
Example #4
def createpatientinfo(request):
    if request.method == 'POST':
        try:
            form = PatientInfoForm(request.POST, request.FILES)
            newPatientInfo = form.save(commit=False)
            newPatientInfo.user = request.user
            image = request.FILES['Skin_image'].name
            print("*******************", image)

            newPatientInfo.image_name = image

            newPatientInfo.save()

            # ******************************************** ENCRYPTION CODE ************************************************
            key = Fernet.generate_key()
            key = key.decode('utf-8')  # store the key as text on the record

            newPatientInfo.image_key = key
            newPatientInfo.save(update_fields=['image_key'])

            input_file = "media/patient/images/" + image
            encrypted_file = "encryptedImages/" + image

            with open(input_file, 'rb') as f:
                data = f.read()

            fernet = Fernet(key)
            encrypted = fernet.encrypt(data)

            with open(encrypted_file, 'wb') as f:
                f.write(encrypted)

    # ***************************************************  DECRYPTION CODE ************************************************
            image = newPatientInfo.image_name
            input_file = encrypted_file
            decrypted_file = "decryptedImages/" + image
            key = newPatientInfo.image_key
            # print("************************************************",key)

            with open(input_file, 'rb') as f:
                data = f.read()

            fernet = Fernet(key)
            decrypted = fernet.decrypt(data)

            with open(decrypted_file, 'wb') as f:
                f.write(decrypted)


# -----------------------------------------------------  WEKA CODE  ---------------------------------------------------
            JVM.start(max_heap_size="4000m")

            clsfr, _ = Classifier.deserialize(
                r"patient\static\patient\Melanoma_Best_Performing_Weka3.8.model"
            )
            haarSize = 8
            dctMat = dct(np.eye(64), norm='ortho')
            haarMat = Hybrid.haar(haarSize)

            for i in range(haarSize):
                haarMat[i] = haarMat[i] / math.sqrt(abs(haarMat[i]).sum())

            hybridTransformMat = Hybrid.hybridTransform(
                haarMat, dctMat.transpose())

            fPath = "decryptedImages/"
            fName = image

            img = cv2.imread(fPath + fName)
            imgResize = cv2.resize(img, (512, 512),
                                   interpolation=cv2.INTER_AREA)

            bFeatures64, gFeatures64, rFeatures64, _, _, _, _, _, _ = Hybrid.hybridTransformation(
                imgResize, hybridTransformMat)

            bFeatures64 = bFeatures64.reshape((1, bFeatures64.shape[0]))
            gFeatures64 = gFeatures64.reshape((1, gFeatures64.shape[0]))
            rFeatures64 = rFeatures64.reshape((1, rFeatures64.shape[0]))
            diagnosisMat = np.full((1, 1), "NA")

            features64 = np.concatenate(
                (bFeatures64, gFeatures64, rFeatures64, diagnosisMat), axis=1)

            op_file_name = "arff_csv_files/HybridTransformFeatures64-Haar" + str(
                haarSize) + "DCT" + str(dctMat.shape[0]) + fName
            pd.DataFrame(features64).to_csv(op_file_name + ".csv",
                                            header=True,
                                            mode='a',
                                            index=False)

            csvLoader = Loader(classname="weka.core.converters.CSVLoader")
            data = csvLoader.load_file(op_file_name + ".csv")

            arffSaver = Saver(classname="weka.core.converters.ArffSaver")
            arffSaver.save_file(data, op_file_name + ".arff")

            arffLoader = Loader(classname="weka.core.converters.ArffLoader")
            arff_data = arffLoader.load_file(op_file_name + ".arff")
            arff_data.class_is_last()

            diagnosis = ""
            for index, inst in enumerate(arff_data):
                pred = clsfr.classify_instance(inst)
                print(pred)
                dist = clsfr.distribution_for_instance(inst)
                print(dist)

                if pred == 1.0:
                    diagnosis = "Malignant"
                else:
                    diagnosis = "Benign"

            print("Final Diagnosis:", diagnosis)
            JVM.stop()
            # -----------------------------------------------------  WEKA CODE END ---------------------------------------------------

            newPatientInfo.result = diagnosis
            newPatientInfo.save(update_fields=['result'])

            return redirect('currentinfo')
        except ValueError:
            return render(request, 'patient/createpatientinfo.html', {
                'form': PatientInfoForm(),
                "error": "Bad data passed in!"
            })
    else:
        return render(request, 'patient/createpatientinfo.html',
                      {'form': PatientInfoForm()})
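
One caveat: javabridge cannot restart a JVM within the same process, so the per-request JVM.start()/JVM.stop() pair above will fail on the second upload. A minimal sketch of the usual workaround, starting the JVM once at application startup (jvm.start and jvm.started are real python-weka-wrapper names; the PatientConfig class and "patient" app name are assumptions matching the templates above):

# apps.py
import weka.core.jvm as jvm
from django.apps import AppConfig

class PatientConfig(AppConfig):
    name = "patient"

    def ready(self):
        # start the JVM once; guarded so repeated ready() calls are harmless
        if not jvm.started:
            jvm.start(max_heap_size="4000m")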
Example #5
def train(training_dataset_path, model_cache_file_name, evaluation_is_on,
          summary_file_path):
    """Model Training function

    The function uses the WEKA machine learning library, implemented by
    python-weka-wrapper Python library. Divides the data into given
    folds, and do the training and evaluation. Trained model copied to __predictors global variable
    and also saved (together with training data set) to the model_cache_file_name file. Evaluation summary is being written to summary_file_path file.

    Args:
        :param training_dataset_path: the path of the input arff file.
        :param model_cache_file_name:
        :param evaluation_is_on: run evaluation after training (true / false)
        :param summary_file_path: the path of the model evaluation summary file.

    Returns:
        None
    """

    global __classifiers
    global __predictors

    training_data = converters.load_any_file(training_dataset_path)
    training_data.class_is_last()

    lines = []
    summaries = []
    summary_line = [
        'Model'.ljust(16), 'Precision'.ljust(12), 'Recall'.ljust(12),
        'F-measure'.ljust(12), 'Accuracy'.ljust(12), 'FPR'.ljust(12)
    ]
    summaries.append('\t'.join(summary_line))

    for classifier, option_str in __classifiers.items():
        # split the option string on spaces while keeping quoted chunks intact
        option_list = re.findall(r'"(?:[^"]+)"|(?:[^ ]+)', option_str)
        option_list = [s.replace('"', '') for s in option_list]

        classifier_name = classifier.split('.')[-1]
        info_str = "Using classifier: {classifier}, options: {options}".format(
            classifier=classifier_name, options=str(option_list))
        localizer_log.msg(info_str)
        lines.append(info_str)

        # Train
        cls = Classifier(classname=classifier, options=option_list)
        localizer_log.msg("Start building classifier")
        cls.build_classifier(training_data)
        localizer_log.msg("Completed building classifier")
        localizer_log.msg("Saving trained model to {model_cache_name}".format(
            model_cache_name=model_cache_file_name))

        path = os.path.join('caches', 'model')
        if not os.path.exists(path):
            os.makedirs(path, exist_ok=True)
        path = os.path.join(path, model_cache_file_name + '.cache')
        cls.serialize(path)
        localizer_log.msg("Trained model saved")

        # sanity check: re-read the model that was just written
        classifier2, _ = Classifier.deserialize(path)
        print(classifier2)

        __predictors[classifier_name] = cls

        if evaluation_is_on:

            # Model evaluation (10-fold cross-validation)
            localizer_log.msg("Initializing evaluation")
            evl = Evaluation(training_data)

            localizer_log.msg("Start cross-validating classifier")
            evl.crossvalidate_model(cls, training_data, 10, Random(1))
            localizer_log.msg("Complete cross-validating classifier")


            lines.append(evl.summary())
            lines.append(evl.class_details())

            summary_line = []
            summary_line.append(classifier_name.ljust(16))
            summary_line.append("{:.3f}".format(evl.weighted_precision *
                                                100).ljust(12))
            summary_line.append("{:.3f}".format(evl.weighted_recall *
                                                100).ljust(12))
            summary_line.append("{:.3f}".format(evl.weighted_f_measure *
                                                100).ljust(12))
            summary_line.append("{:.3f}".format(evl.percent_correct).ljust(12))
            summary_line.append("{:.3f}".format(
                evl.weighted_false_positive_rate * 100).ljust(12))
            summaries.append('\t'.join(summary_line))

            # Save the evaluation summary; rewritten on each iteration so
            # partial results survive an interrupted run
            with open(summary_file_path, 'w') as f:
                f.writelines('\n'.join(lines))
                f.writelines('\n' * 5)
                f.writelines('\n'.join(summaries))
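
The loop iterates over the module-level __classifiers mapping, which is not shown here. A hypothetical definition (the class names are real Weka classifiers; the option strings are illustrative):

__classifiers = {
    "weka.classifiers.trees.J48": '-C 0.25 -M 2',
    "weka.classifiers.trees.RandomForest": '-I 100',
    "weka.classifiers.functions.SMO": '',
}
__predictors = {}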