Example #1
def train_model():
    dataloader = load_dataset(train=True)
    model = cnn.Net()
    model.to(cnn.get_device())
    model.train()
    model, dictionary = cnn.train_model(model=model,
                                        dataloader=dataloader,
                                        num_epochs=10)
    print("Time spent: {:.2f}s".format(dictionary['exec_time']))
    torch.save(model.state_dict(), "./modelo/mnist.pt")
    save_stats(dictionary)
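load_dataset and save_stats are project-local helpers not shown on this page; purely as an illustration, a minimal hypothetical save_stats might persist the stats dict returned by cnn.train_model as JSON:

import json

def save_stats(stats, path="./modelo/stats.json"):
    # Hypothetical sketch: serialize the training-stats dict
    # (exec_time, losses, etc.) to disk for later inspection.
    with open(path, "w") as f:
        json.dump(stats, f, indent=2, default=str)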
Example #2
def train_cnn(rootpath, dataset, dataloader, device):
    """
    Trains the convolutional neural network.
    """
    num_classes = len(dataset.classes)
    model, input_size = cnn.initialize_model("resnet",
                                             num_classes,
                                             resume_from=None)
    criterion = cnn.get_loss()
    optimizer = cnn.make_optimizer(model)
    scheduler = cnn.make_scheduler(optimizer)
    num_epochs = 20
    save_dir = os.path.join(rootpath, "weights")
    trained_model, val_acc_history = cnn.train_model(model,
                                                     device,
                                                     dataloader,
                                                     criterion,
                                                     optimizer,
                                                     scheduler=scheduler,
                                                     save_dir=save_dir,
                                                     num_epochs=num_epochs)

    return trained_model, val_acc_history
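A hedged usage sketch for this helper, assuming a torchvision ImageFolder dataset (which provides the .classes attribute read above) and a standard DataLoader; the cnn module itself is project-local and not shown here:

import os
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

rootpath = "./experiment"  # hypothetical project root
dataset = datasets.ImageFolder(os.path.join(rootpath, "data"),
                               transform=transforms.ToTensor())
dataloader = DataLoader(dataset, batch_size=32, shuffle=True)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

trained_model, history = train_cnn(rootpath, dataset, dataloader, device)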
Example #3
File: __main__.py  Project: jah3xc/ACENet
def main():
    args = init()
    logger = logging.getLogger(__name__)

    ###########
    # Load the Data
    ###########
    data_path, gt_path = args["dataset"], args["groundTruth"]
    if not Path(data_path).absolute().is_file() or not Path(gt_path).absolute().is_file():
        logger.critical("Dataset or GT do not exist!")
        return  # bail out; load_mat would fail on the missing files
    data, ground_truth = load_mat(data_path, gt_path)
    logger.info("Loaded data...")

    ############
    # Extract Patches
    ############
    window_size = args["windowSize"]
    stride = args["stride"]
    maxPatches = args["maxPatches"] if "maxPatches" in args else None
    samples, labels = extract_patches(data, ground_truth, window_size, stride, maxPatches=maxPatches)
    logger.info("Extracted {} patches".format(len(samples)))

    ###########
    # ACE If necessary
    ###########
    ACE = args["ace"]
    cpu_count = args["cpu_count"] if "cpu_count" in args else os.cpu_count()
    if ACE:
        samples, labels = ace_transform_samples(samples, labels, data, ground_truth, cpu=cpu_count)
    

    ###########
    # Train the Network
    ###########
    trainParams = args["training"] if "training" in args else {}
    buildParams = args["building"] if "building" in args else {}
    model = train_model(samples, labels, window_size, buildParams, trainParams)
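Taken together, the keys read from args imply a configuration along these lines (hypothetical values; init() parses the real ones):

args = {
    "dataset": "data/scene.mat",        # hypothetical paths
    "groundTruth": "data/scene_gt.mat",
    "windowSize": 9,
    "stride": 1,
    "ace": True,
    # optional keys: "maxPatches", "cpu_count", "training", "building"
}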
Example #4
def train(model, case_type, number=1):
    average_accuracy = 0
    test_accuracy = list()
    if model == 'svm':
        import svm
        x_train, y_train, x_test, y_test = svm.loadText(case_type)
        for _ in range(int(number)):
            test_accuracy.append(svm.train(x_train, y_train, x_test, y_test))
            average_accuracy += test_accuracy[-1]
    elif model == 'cnn':
        import cnn
        train_data, test_data, train_label, test_label, vocab = cnn.get_data(case_type, mode='sequence')
        for _ in range(int(number)):
            test_accuracy.append(cnn.train_model(case_type, train_data, test_data, train_label, test_label, vocab))
            average_accuracy += test_accuracy[-1]
    elif model == 'lstm':
        import lstm
        train_data, test_data, train_label, test_label, vocab = lstm.get_data(case_type, mode='sequence')
        for _ in range(int(number)):
            test_accuracy.append(lstm.train_model(case_type, train_data, test_data, train_label, test_label, vocab))
            average_accuracy += test_accuracy[-1]
    elif model == 'keras_text_cnn':
        import keras_text_cnn as text_cnn
        train_data, test_data, train_label, test_label, vocab = text_cnn.get_data(case_type, mode='sequence')
        for _ in range(int(number)):
            test_accuracy.append(text_cnn.train_model(case_type, train_data, test_data, train_label, test_label, vocab))
            average_accuracy += test_accuracy[-1]
    average_accuracy = average_accuracy / int(number)
    print("average accuracy: " + str(average_accuracy))
    with open(file='D:/judgement_prediction/judgement_prediction/' + case_type + '/information.txt', mode="a", encoding='utf-8') as target_file:
        target_file.write(case_type)
        for i in range(int(number)):
            target_file.write(str(test_accuracy[i]) + ' ')
        target_file.write(',average:' + str(average_accuracy) + '\n')
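All four branches repeat the same train-and-accumulate loop; as a minimal refactoring sketch (helper name hypothetical), the repetition could be factored out:

def run_trials(train_fn, number, *data):
    # Hypothetical helper: run `number` training rounds with a module's
    # train function and return per-run accuracies plus their mean.
    accuracies = [train_fn(*data) for _ in range(int(number))]
    return accuracies, sum(accuracies) / len(accuracies)

Each branch would then reduce to a single call, e.g. test_accuracy, average_accuracy = run_trials(svm.train, number, x_train, y_train, x_test, y_test).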
Example #5
#import video
import vis
import cnn

import datetime
import shutil
import os

# NOTE: train_dir and test_dir are used below but are not defined in this
# excerpt; they presumably come from const.py, the configuration file copied below.

# The sketch dataset is licensed under a Creative Commons Attribution 4.0 International License.
# https://creativecommons.org/licenses/by/4.0/
# Copyright (C) 2012 Mathias Eitz, James Hays, and Marc Alexa. How Do Humans Sketch Objects? ACM Trans. Graph. (Proc. SIGGRAPH) 31, 4 (2012), 44:1--44:10.
# http://cybertron.cg.tu-berlin.de/eitz/projects/classifysketch/
# The data has been modified from the original by splitting the images into train and test sets, and into subsets.

if __name__ == '__main__':
    print("Hello and welcome to the CNN training visualizer.")
    start_time = datetime.datetime.now()
    output_dir = f"output/{start_time.strftime('%Y-%m-%d %H.%M.%S')} {train_dir.split('/')[-1]} {test_dir.split('/')[-1]}"
    print(f"Started {start_time}\n")
    os.makedirs(output_dir, exist_ok=True)  # create output/ and the run directory in one step
    shutil.copyfile("const.py", output_dir + "/parameters.py")

    m = cnn.ConvNet()
    cnn.train_model(m, output_dir)
    # classes, train_loader, losses, accuracies = cnn.train_model(m, output_dir)
    # vis.show_images(train_loader, classes)
    # vis.plot_results(losses, accuracies)
    # Load a model instead: m = torch.load(MODEL_STORE_PATH + 'conv_net_model.ckpt')
    # cnn.test_model(m)
    # video.make(output_dir)
Example #6
# output dimensions
num_classes = 10

################################################################################
# Callbacks for saving weights and TensorBoard logging;
# create a new directory for each run using a timestamp
folder = os.path.join(os.getcwd(), datetime.now().strftime("%d-%m-%Y_%H-%M-%S"), str(ACTIV_FN))
history_file = os.path.join(folder, "cnn_" + str(ACTIV_FN) + ".h5")
save_callback = ModelCheckpoint(filepath=history_file, verbose=1)
tb_callback = TensorBoard(log_dir=folder)

# Build, train, and test model
model = cnn.build_model(input_shape, activation_fn, LEARNING_RATE, DROP_PROB, NUM_NEURONS_IN_DENSE_1, num_classes)
train_accuracy, train_loss, valid_accuracy, valid_loss = cnn.train_model(
    model, train_images, train_labels, BATCH_SIZE, NUM_EPOCHS,
    valid_images, valid_labels, save_callback, tb_callback)
test_accuracy, test_loss, predictions = cnn.test_model(model, test_images, test_labels)

# save test set results to csv
predictions = np.round(predictions)
predictions = predictions.astype(int)
df = pd.DataFrame(predictions)
df.to_csv("mnist.csv", header=None, index=None)

################################################################################
# Visualization and Output
num_epochs_plot = range(1, len(train_accuracy) + 1)

# Loss curves
plt.figure(1)
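The excerpt is cut off here; Example #8 below contains the same visualization block in full, so the loss-curve plotting presumably continues along these lines:

# Assumed continuation, mirroring Example #8:
plt.plot(num_epochs_plot, train_loss, "b", label="Training Loss")
plt.plot(num_epochs_plot, valid_loss, "r", label="Validation Loss")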
Example #7
# HYPERPARAMETERS AND DESIGN CHOICES
num_neurons = 128
batch_size = 64
ACTIV_FN = "relu"
activation_fn = cnn.get_activ_fn(ACTIV_FN)
num_epochs = 5
max_count = 50
for count in range(0, max_count):
    learn_rate = 10**uniform(-2, -4)  # log-uniform sample in [1e-4, 1e-2]
    drop_prob = 10**uniform(-2, 0)  # log-uniform sample in [0.01, 1.0]

    # Callbacks for saving weights and TensorBoard logging;
    # create a new directory for each run using a timestamp
    folder = os.path.join(os.getcwd(),
                          datetime.now().strftime("%d-%m-%Y_%H-%M-%S"),
                          str(ACTIV_FN))
    tb_callback = TensorBoard(log_dir=folder)

    # Build, train, and test model
    model = cnn.build_model(input_shape, activation_fn, learn_rate, drop_prob,
                            num_neurons, num_classes)
    train_accuracy, train_loss, valid_accuracy, valid_loss = cnn.train_model(
        model, train_images, train_labels, batch_size, num_epochs,
        valid_images, valid_labels, tb_callback)
    print(
        'Step: {:d}/{:d}, learn: {:.6f}, dropout: {:.4f}, '
        'Train_loss: {:.4f}, Train_acc: {:.4f}, Val_loss: {:.4f}, Val_acc: {:.4f}'
        .format(count, max_count, learn_rate, drop_prob, train_loss[-1],
                train_accuracy[-1], valid_loss[-1], valid_accuracy[-1]))
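The 10**uniform(...) trick above draws the exponent uniformly, which makes the search log-uniform: every decade of learning rates is sampled with equal probability instead of crowding samples toward the top of the range. A standalone illustration:

from random import uniform

# The exponent is uniform on [-4, -2] (random.uniform accepts its bounds in
# either order), so the learning rate is log-uniform on [1e-4, 1e-2].
samples = sorted(10**uniform(-4, -2) for _ in range(5))
print(samples)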
Example #8
# Callbacks for saving weights and TensorBoard logging;
# create a new directory for each run using a timestamp
folder = os.path.join(os.getcwd(),
                      datetime.now().strftime("%d-%m-%Y_%H-%M-%S"))
history_file = os.path.join(folder, "cnn_nodatagen.h5")
save_callback = ModelCheckpoint(filepath=history_file, verbose=1)
tb_callback = TensorBoard(log_dir=folder)

# Build, train, and test model
model = cnn.build_model(input_shape=input_shape,
                        learn_rate=LEARNING_RATE,
                        drop_prob=DROP_PROB,
                        num_neurons=NUM_NEURONS_IN_DENSE_1)
train_accuracy, train_loss, valid_accuracy, valid_loss = cnn.train_model(
    model, x_train, y_train, BATCH_SIZE, NUM_EPOCHS, x_valid, y_valid,
    save_callback, tb_callback)
test_accuracy, test_loss, predictions = cnn.test_model(model, x_test, y_test)
# save test set results to csv
predictions = np.round(predictions)
predictions = predictions.astype(int)
df = pd.DataFrame(predictions)
df.to_csv("predictions_nodatagen.csv", header=None, index=None)

# Visualization and Output
num_epochs_plot = range(1, len(train_accuracy) + 1)  # x axis range
directory = os.getcwd()
# Loss curves
plt.figure()
plt.plot(num_epochs_plot, train_loss, "b", label="Training Loss")
plt.plot(num_epochs_plot, valid_loss, "r", label="Validation Loss")
Example #9
    x_train, y_train = utils.apply_undersampling(x_train, y_train, undersample_val=undersample_val)

    # initialize output bias
    neg, pos = np.bincount(y_train)
    output_bias = np.log(pos / neg)
    output_bias = keras.initializers.Constant(output_bias)
    print("Positive Class Counter:", pos)
    print("Negative Class Counter:", neg)

    model = cnn.build_model(input_shape=input_shape, layers=n_layers,
                            filters=n_filters, opt=opt, output_bias=output_bias)

    weightsFile = 'numSplit' + str(numSplit) + '_params' + str(parameterNum)

    history = cnn.train_model(model, x_train, y_train, x_val, y_val,
                              weightsDir, weightsFile,
                              patience_count=patience_count,
                              epochs=max_epochs,
                              batch_size=batch_size,
                              class_weights=class_weights)

    model.load_weights(weightsDir + weightsFile + '.h5')

    predictions = model.predict(x_val)
    cm = utils.calc_cm(y_val, predictions)
    precision, recall = utils.calc_binary_metrics(cm)
    auc = roc_auc_score(y_val, predictions)

    # save the metrics for the best epoch, or the last one
    if len(history.history['acc']) == max_epochs:
        iterations += max_epochs
        training_acc += history.history['acc'][max_epochs-1]
        training_loss += history.history['loss'][max_epochs-1]
Example #10
def main(train_spectrum_path=r"dataset/train_spectrum.npy",
         test_spectrum_path=r"dataset/test_spectrum.npy",
         train_labels_path=r"dataset/train_labels.npy",
         test_labels_path=r"dataset/test_labels.npy",
         batch_size=1,
         learning_rate=0.01,
         num_epochs=20,
         kernel_size=(1, 2),
         padding=(0, 0),
         dropout=True,
         drop_prob=0.2,
         batch_normalization=True,
         weight_decay=True,
         weight_decay_amount=0.01,
         data_width=2100,
         model_save_path=r"model.pth",
         fruits=("apple", "banana", "mix"),
         create_dataset_now=False,
         root_dir="YOMIRAN",
         num_channels_layer1=3,
         num_channels_layer2=6,
         sample_time="after 5",
         sample_location="anal",
         sample_type="pos",
         tolerance=5,
         number_of_samples_to_alter=100,
         size_of_dataset=60000,
         train_data_percentage=0.8,
         train_now=False,
         show_statistics=True,
         predict_now=False,
         file_to_predict=r"apple neg.txt",
         confidence_threshold=0.7,
         validate_hierarchy=True,
         validate_filename_format=True,
         validate_empty_file=True,
         create_dataset_progress_bar_intvar=None,
         fc1_amount_output_nodes=1000,
         fc2_amount_output_nodes=500,
         fc3_amount_output_node=100,
         stretch_data=False,
         knn=False,
         cross_validation_iterations=1,
         n_components=2,
         k="auto"):

    # create data set
    if create_dataset_now:
        valid_files, _ = get_valid_and_invalid_files(
            root_dir=root_dir,
            validate_empty_file=validate_empty_file,
            validate_filename_format=validate_filename_format,
            validate_hierarchy=validate_hierarchy)
        create_dataset(data_files=valid_files,
                       fruits=fruits,
                       size_of_dataset=size_of_dataset,
                       train_data_percentage=train_data_percentage,
                       tolerance=tolerance,
                       number_of_samples_to_alter=number_of_samples_to_alter,
                       train_spectrum_path=Path(train_spectrum_path),
                       train_labels_path=Path(train_labels_path),
                       test_spectrum_path=Path(test_spectrum_path),
                       test_labels_path=Path(test_labels_path),
                       data_width=data_width,
                       sample_time=sample_time,
                       sample_location=sample_location,
                       create_dataset_progress_bar_intvar=create_dataset_progress_bar_intvar,
                       stretch_data=stretch_data,
                       sample_type=sample_type,
                       n_components=n_components)

    # transformation of dataset
    transform = compose(transforms.ToTensor(), minmax_scale)
    # get the labels enum
    if fruits:
        fruit_label_enum = create_fruit_labels(fruits=fruits)
    # transform = transforms.ToTensor()

    if train_now:
        # Get the dataset
        train_data_loader = DataLoader("train",
                                       train_spectrum_path=train_spectrum_path,
                                       train_labels_path=train_labels_path,
                                       batch_size=batch_size,
                                       transform=transform)
        test_data_loader = DataLoader("test",
                                      test_spectrum_path=test_spectrum_path,
                                      test_labels_path=test_labels_path,
                                      batch_size=batch_size,
                                      transform=transform)

        if knn:
            train_data_loader_size_calculator = copy.deepcopy(
                train_data_loader)
            amount_train_data = 0
            for spectrum, labels in train_data_loader_size_calculator.load_data():
                amount_train_data += spectrum.shape[0]

            if k == "auto":
                k = math.ceil(math.sqrt(amount_train_data))

            cross_validation_accuracies = []
            cross_validation_true_labels = []
            cross_validation_predictions = []
            for i in range(cross_validation_iterations):
                print("Cross validation iteration: {}/{}".format(
                    i + 1, cross_validation_iterations))
                # Get the dataset
                train_data_loader = DataLoader(
                    "train",
                    train_spectrum_path=train_spectrum_path,
                    train_labels_path=train_labels_path,
                    batch_size=1,
                    transform=transform)
                test_data_loader = DataLoader(
                    "test",
                    test_spectrum_path=test_spectrum_path,
                    test_labels_path=test_labels_path,
                    batch_size=1,
                    transform=transform)
                model = KNN(k=k,
                            train_data_loader=train_data_loader,
                            test_data_loader=test_data_loader)
                accuracy, true_labels, predictions = model.train()
                cross_validation_accuracies.append(accuracy * 100)
                cross_validation_true_labels.extend(true_labels)
                cross_validation_predictions.extend(predictions)
                print("k={}\tAccuracy: {:.3f}%".format(k, accuracy * 100))

                shuffle_data_for_cross_validation(
                    train_data_loader=train_data_loader,
                    test_data_loader=test_data_loader,
                    train_spectrum_path=train_spectrum_path,
                    train_labels_path=train_labels_path,
                    test_spectrum_path=test_spectrum_path,
                    test_labels_path=test_labels_path)

            accuracies_mean = stat.mean(cross_validation_accuracies)
            accuracies_std = stat.stdev(cross_validation_accuracies)
            print("Test accuracies mean: {}".format(accuracies_mean))
            print("Test accuracies standard deviation: {}".format(
                accuracies_std))
            plot_data.plot_box_plot(
                test_accuracies=cross_validation_accuracies)
            plot_data.plot_confusion_matrix(
                true_labels=cross_validation_true_labels,
                predictions=cross_validation_predictions,
                fruits=fruits,
                show_null_values=True,
                show_plot=True)
            # for k in range(1, amount_train_data):
            #     model = KNN(k=k, train_data_loader=train_data_loader, test_data_loader=test_data_loader)
            #     accuracy = model.train()
            #     print("k={}\tAccuracy: {:.3f}%".format(k, accuracy * 100))

        else:
            train_data_loader_size_calculator = copy.deepcopy(
                train_data_loader)
            amount_train_data = 0
            fruits_from_dataset = []
            for spectrum, labels in train_data_loader_size_calculator.load_data():
                amount_train_data += spectrum.shape[0]

                # deprecated
                if fruits is None:
                    for label in labels:
                        if label not in fruits_from_dataset:
                            fruits_from_dataset.append(label)

            # deprecated
            if fruits is None:
                fruit_label_enum = create_fruit_labels(
                    fruits=fruits_from_dataset)
                fruits = fruits_from_dataset

            cross_validation_losses = []
            cross_validation_accuracies_train = []
            cross_validation_accuracies_test = []
            cross_validation_true_labels = []
            cross_validation_predictions_of_last_epoch = []
            statistics_of_all_iterations = []
            for i in range(cross_validation_iterations):
                print("Cross validation iteration: {}/{}".format(
                    i + 1, cross_validation_iterations))
                # Get the dataset
                train_data_loader = DataLoader(
                    "train",
                    train_spectrum_path=train_spectrum_path,
                    train_labels_path=train_labels_path,
                    batch_size=batch_size,
                    transform=transform)
                test_data_loader = DataLoader(
                    "test",
                    test_spectrum_path=test_spectrum_path,
                    test_labels_path=test_labels_path,
                    batch_size=batch_size,
                    transform=transform)

                # initialize the neural net
                model = CNN(amount_of_labels=len(fruit_label_enum),
                            batch_normalization=batch_normalization,
                            dropout=dropout,
                            drop_prob=drop_prob,
                            kernel_size=kernel_size,
                            padding=padding,
                            data_width=data_width,
                            data_height=1,
                            num_channels_layer1=num_channels_layer1,
                            num_channels_layer2=num_channels_layer2,
                            fc1_amount_output_nodes=fc1_amount_output_nodes,
                            fc2_amount_output_nodes=fc2_amount_output_nodes,
                            fc3_amount_output_node=fc3_amount_output_node,
                            n_components=n_components)

                # train the model
                statistics = train_model(
                    model=model,
                    fruit_label_enum=fruit_label_enum,
                    train_data_loader=train_data_loader,
                    test_data_loader=test_data_loader,
                    num_epochs=num_epochs,
                    learning_rate=learning_rate,
                    batch_size=batch_size,
                    weight_decay=weight_decay,
                    weight_decay_amount=weight_decay_amount,
                    model_save_path=model_save_path,
                    train_dataset_size=amount_train_data)

                statistics_of_all_iterations.append(statistics)
                losses, accuracies_train, accuracies_test, true_labels, predictions_of_last_epoch = statistics
                cross_validation_losses.extend([losses[-1]])
                cross_validation_accuracies_train.extend(
                    [accuracies_train[-1]])
                cross_validation_accuracies_test.extend([accuracies_test[-1]])
                cross_validation_true_labels.extend(true_labels)
                cross_validation_predictions_of_last_epoch.extend(
                    list(predictions_of_last_epoch))

                shuffle_data_for_cross_validation(
                    train_data_loader=train_data_loader,
                    test_data_loader=test_data_loader,
                    train_spectrum_path=train_spectrum_path,
                    train_labels_path=train_labels_path,
                    test_spectrum_path=test_spectrum_path,
                    test_labels_path=test_labels_path)

            accuracies_test_mean = stat.mean(cross_validation_accuracies_test)
            accuracies_test_std = stat.stdev(cross_validation_accuracies_test)
            print("Test accuracies mean: {}".format(accuracies_test_mean))
            print("Test accuracies standard deviation: {}".format(
                accuracies_test_std))
            # plot_data.plot_box_plot(test_accuracies=cross_validation_accuracies_test, show_plot=True)

            # plot the statistics
            if show_statistics:
                # plot_data.plot_train_statistics(x_values=range(len(losses)), y_values=losses, x_label="Epoch",
                #                                 y_label="Loss")
                # plot_data.plot_train_statistics(x_values=range(len(accuracies_train)), y_values=accuracies_train,
                #                                 x_label="Epoch", y_label="Train accuracy")
                # plot_data.plot_train_statistics(x_values=range(len(accuracies_test)), y_values=accuracies_test,
                #                                 x_label="Epoch", y_label="Test accuracy")

                # max_test_accuracy = max(cross_validation_accuracies_test)
                # statistics = statistics_of_all_iterations[cross_validation_accuracies_test.index(max_test_accuracy)]
                # losses, accuracies_train, accuracies_test, true_labels, predictions_of_last_epoch = statistics
                # plot_data.plot_train_statistics1(losses=losses, train_accuracy=accuracies_train,
                #                                  test_accuracy=accuracies_test)

                plot_data.plot_box_plot(
                    test_accuracies=cross_validation_accuracies_test)
                plot_data.plot_confusion_matrix(
                    true_labels=cross_validation_true_labels,
                    predictions=cross_validation_predictions_of_last_epoch,
                    fruits=fruits,
                    show_null_values=True)
                plot_data.plot_classification_report(
                    true_labels=cross_validation_true_labels,
                    predictions=cross_validation_predictions_of_last_epoch,
                    show_plot=True)

                max_test_accuracy = max(cross_validation_accuracies_test)
                statistics = statistics_of_all_iterations[
                    cross_validation_accuracies_test.index(max_test_accuracy)]
                losses, accuracies_train, accuracies_test, true_labels, predictions_of_last_epoch = statistics
                plot_data.plot_train_statistics1(
                    losses=losses,
                    train_accuracy=accuracies_train,
                    test_accuracy=accuracies_test,
                    show_plot=True)

    if predict_now:

        # fit the pca
        valid_files, _ = get_valid_and_invalid_files(
            root_dir=root_dir,
            validate_empty_file=validate_empty_file,
            validate_filename_format=validate_filename_format,
            validate_hierarchy=validate_hierarchy)

        # Get existing data
        existing_data, _ = get_existing_data(fruits=fruits,
                                             data_files=valid_files,
                                             sample_time=sample_time,
                                             sample_location=sample_location,
                                             data_width=data_width,
                                             sample_type=sample_type)

        # fit pca
        pca = PCA(n_components=n_components)
        pca = pca.fit(existing_data)

        model = load_model(model_save_path,
                           amount_of_labels=len(fruit_label_enum),
                           batch_normalization=batch_normalization,
                           dropout=dropout,
                           drop_prob=drop_prob,
                           kernel_size=kernel_size,
                           padding=padding,
                           data_width=data_width,
                           data_height=1,
                           num_channels_layer1=num_channels_layer1,
                           num_channels_layer2=num_channels_layer2,
                           fc1_amount_output_nodes=fc1_amount_output_nodes,
                           fc2_amount_output_nodes=fc2_amount_output_nodes,
                           fc3_amount_output_node=fc3_amount_output_node,
                           n_components=n_components)

        confidence, prediction = predict(
            model=model,
            data_file=file_to_predict,
            pca=pca,
            transform=transform,
            fruit_label_enum=fruit_label_enum,
            data_width=data_width,
            confidence_threshold=confidence_threshold)

        return confidence, prediction
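For reference, a hedged sketch of invoking this entry point; the flag names are the keyword parameters defined above, the paths are the defaults shown in the signature, and the prediction call assumes a trained model has already been saved to model.pth:

# Train on the prepared .npy dataset and show the statistics plots.
main(train_now=True, show_statistics=True)

# Classify a single measurement file with the previously saved model.
confidence, prediction = main(predict_now=True, file_to_predict=r"apple neg.txt")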