Example 1
def main():
    args = parse_inputs()

    # Get data loaders for training and build the model
    loaders = get_data_loaders(args.data_dir)
    train_set = loaders["train_set"]
    train, valid, test = loaders["loaders"]
    model = build_model(args.arch, None, args.hidden_units, None, None)

    # Train and test model
    train_model(model, train, valid, args.learn_rate, args.epochs, args.gpu)
    test_model(model, test, args.gpu)

    # Save the checkpoint
    model_options = ["vgg11", "vgg13", "vgg19"]
    checkpoint = {
        "fc1_input": 25088,
        "fc1_output": args.hidden_units,
        "fc2_output": 102,
        "dp_rate": 0.2,
        "epochs": args.epochs,
        "model_state": model.state_dict(),
        "class_to_idx": train_set.class_to_idx
    }

    if args.arch in model_options:
        checkpoint["arch"] = args.arch
        filename = "checkpoint_{}.pth".format(args.arch)
    else:
        checkpoint["arch"] = "vgg11"
        filename = "checkpoint_vgg11.pth"

    torch.save(checkpoint, filename)
    print("Model checkpoint saved!")
    print("You can now use the predict.py script to classify flower images")
Example 2
def scenario8():
    home = os.path.dirname(os.getcwd())
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

    # Load the pickled test set
    with open(os.path.join(home, "Attributes", "test_dataset.dat"), mode='rb') as fopen:
        test = pickle.load(fopen)

    # Load the trained CNN for scenario 8 and evaluate it
    model = keras.models.load_model(os.path.join(home, "Attributes", "CNN_scenario8.h5"))
    cl.test_model(test, model, 8)
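The `cl.test_model` helper is not shown. A minimal sketch of what such an evaluation function might do, assuming `test` unpickles to a `(features, labels)` pair and the model was compiled with an accuracy metric:

def test_model(test, model, scenario):
    # Hypothetical sketch: evaluate a Keras model on a (features, labels) pair.
    x_test, y_test = test
    loss, accuracy = model.evaluate(x_test, y_test, verbose=0)
    print("Scenario {}: loss={:.4f}, accuracy={:.4f}".format(scenario, loss, accuracy))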
Example 3
def main():

    # Instantiate the console arguments function
    args = arg_parser()

    print("GPU setting: {}".format(args.gpu))

    # Define normalization for transforms
    normalize = transforms.Normalize(
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225],
    )

    # Define transformations for training, validation and test sets
    data_transforms = create_transforms(30, 224, 256, normalize)

    # Load the datasets from the image folders
    datasets = image_datasets(data_transforms)

    # Define the dataloaders using the image datasets
    loaders = data_loaders(datasets, 32)

    # Instantiate a new model
    model = create_model(arch=args.arch)

    output_units = len(datasets['training'].classes)

    # Create new classifier
    model.classifier = create_classifier(model, args.hidden_layers,
                                         output_units, args.dropout)

    device = check_gpu(args.gpu)
    print(device)
    model.to(device)

    learning_rate = args.learning_rate
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    epochs = args.epochs
    print_every = args.print_every
    steps = 0
    trainloader = loaders['training']
    validloader = loaders['validation']

    trained_model = train(model, trainloader, validloader, device, criterion,
                          optimizer, epochs, print_every, steps)

    print("Training has completed")

    test_model(trained_model, loaders['testing'], device)

    initial_checkpoint(trained_model, args.checkpoint_dir,
                       datasets['training'])
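The `check_gpu` helper is not shown; a common pattern, sketched here as an assumption, maps the `--gpu` flag to a `torch.device` and falls back to the CPU when CUDA is unavailable:

import torch

def check_gpu(gpu_arg):
    # Hypothetical sketch: honour the flag only when CUDA is actually available.
    if gpu_arg and torch.cuda.is_available():
        return torch.device("cuda")
    return torch.device("cpu")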
Example 4
    def btn_test_clicked(self):

        expected, predicted = classifier.test_model(
            self.X, self.y, self.ui.selector_model.currentText())
        # "Стандартизация" is the UI label for standardization scaling
        if self.ui.preprocess_type_test.currentText() == "Стандартизация":
            preprocess_type = 2
        else:
            preprocess_type = 1
        classifier.generate_test_report(self.ui.path_to_test_data.text(),
                                        self.ui.selector_model.currentText(),
                                        preprocess_type)
        self.ui.label_20.setText(
            metrics.classification_report(expected, predicted,
                                          zero_division=0))
        self.load_test_predictions_matrix(
            metrics.confusion_matrix(expected, predicted))
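The `load_test_predictions_matrix` method is not shown. A minimal sketch, assuming a PyQt5 `QTableWidget` (both the toolkit and the `table_confusion` widget name are assumptions), could populate the matrix cell by cell:

from PyQt5 import QtWidgets

def load_test_predictions_matrix(self, matrix):
    # Hypothetical sketch: render a confusion matrix in a QTableWidget.
    table = self.ui.table_confusion  # assumed widget name
    table.setRowCount(len(matrix))
    table.setColumnCount(len(matrix))
    for i, row in enumerate(matrix):
        for j, value in enumerate(row):
            table.setItem(i, j, QtWidgets.QTableWidgetItem(str(value)))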
Example 5
def classify_websites(js_dir,
                      model,
                      dict_not_hash=True,
                      tolerance='false',
                      n=4,
                      threshold=0.29):
    """
        Test of a classification model to detect malicious web pages.
        A web page is defined as malicious if at least one of its JS snippet is malicious.
        Otherwise it is labeled as benign.

        -------
        Parameters:
        - js_dirs: str
            Directory containing directories representing the web pages to be analysed
            (the JS snippets of the page considered are stored in the corresponding subdirectory).
        - model: str
            Path to the model used to classify the new files.
         dict_not_hash: Boolean
            True if a dictionary is used to map n-grams to int, False if hashes are used.
            Default: True.
        - tolerance: str
            Indicates whether esprima should tolerate a few cases of syntax errors
            (corresponds to esprima's tolerant option). Default: 'false'.
            The values 'true' and 'false' shall be used to enable this tolerant mode.
        - n: Integer
            Stands for the size of the sliding-window which goes through the units contained in the
            files to be analysed. Default: 4.
        - threshold: int
            Threshold over which all samples are considered malicious. Default: 0.29.
    """

    len_malicious = 0
    len_benign = 0

    res_names = []
    res_predict = []

    for html in os.listdir(js_dir):
        html = os.path.join(js_dir, html)

        names, attributes, labels = static_analysis.main_analysis(
            js_dirs=[html], labels_dirs=None, js_files=None, labels_files=None,
            tolerance=tolerance, n=n, dict_not_hash=dict_not_hash)

        # Uncomment to save the analysis results in pickle objects.
        """
        utility.save_analysis_results(os.path.join(html, "Analysis-n" + str(n) + "-dict"
                                                   + str(dict_not_hash)), names, attributes, labels)
        """

        try:
            labels_predicted_test = classifier.test_model(names,
                                                          labels,
                                                          attributes,
                                                          model,
                                                          print_res=False,
                                                          print_score=False,
                                                          threshold=threshold)
            if 'malicious' in labels_predicted_test:
                len_malicious += len(names)
                res_names.append(html)
                res_predict.append('malicious')
            else:
                len_benign += len(names)
                res_names.append(html)
                res_predict.append('benign')

        except ValueError:
            logging.exception('No valid JS files could be found in ' + html)
            # shutil.rmtree(html)

    utility.get_classification_results(res_names, res_predict)

    print('Recognised as malicious: ' + str(res_predict.count('malicious')) +
          ' Total size: ' + str(len_malicious) + ' scripts')
    print('Recognised as benign: ' + str(res_predict.count('benign')) +
          ' Total size: ' + str(len_benign) + ' scripts')
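A call might look like the following; the directory and model paths are placeholders:

classify_websites('/data/webpages', '/data/models/classifier.pkl',
                  dict_not_hash=True, tolerance='false', n=4, threshold=0.29)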
Example 6
    "size":
    1451369,
    "cache_name":
    "day2.fft.dev9_only",
    "paths": [
        '/mnt/lebensraum/Datasets/Day2.After_FFT/Device_9/tx_7/converted_576floats.protobin',
        '/mnt/lebensraum/Datasets/Day2.After_FFT/Device_9/tx_9/converted_576floats.protobin',
        '/mnt/lebensraum/Datasets/Day2.After_FFT/Device_9/tx_5/converted_576floats.protobin',
        '/mnt/lebensraum/Datasets/Day2.After_FFT/Device_9/tx_3/converted_576floats.protobin',
    ],
}

encoder_weights_path = "/mnt/lebensraum/CSC275_Project/working_set/golden_weights/encoder.objective.wts.h5"
classifier_weights_path = "/mnt/lebensraum/CSC275_Project/working_set/golden_weights/classifier.objective.wts.h5"

the_encoder = auto_encoder.build_model()
the_encoder.load_weights(encoder_weights_path)

the_classifier = classifier.build_model()
the_classifier.load_weights(classifier_weights_path)

stack_input = keras.Input(shape=the_encoder.input.shape[1:])  # drop the batch dimension
stack_layers = the_encoder(stack_input)
stack_layers = the_classifier(stack_layers)

the_stack = tf.keras.Model(inputs=stack_input, outputs=stack_layers)
the_stack.summary()

classifier.test_model(the_stack, ds_config)

# classifier.test_model(the_classifier, classifier_test_paths)
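The same encoder-classifier composition could also be expressed with `tf.keras.Sequential`; a sketch, assuming both parts expose compatible input and output shapes:

the_stack = tf.keras.Sequential([the_encoder, the_classifier])
the_stack.build(input_shape=the_encoder.input.shape)
the_stack.summary()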