Example #1
def validate_field_data(request, dataset_field_id):
    """
    Validate the new field value.
    """
    import validate

    # Look the field up by primary key and run the project's validator on it.
    dataset_field = DatasetField.objects.get(pk=dataset_field_id)
    result = validate.validation(dataset_field)

    return result
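Example #1 depends on a project-local validate module that is not shown. A minimal hypothetical stand-in (the value attribute is an assumption, not part of the original project) lets the snippet run in isolation:

# validate.py -- hypothetical stub; the real module is not part of this example
def validation(dataset_field):
    # Report whether the field currently holds a usable value.
    return dataset_field.value is not None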
Example #2
def train_network(model):
    # learning_rate, gpu, epochs, trainloader, validationloader,
    # training_dataset and validation_dataset are assumed to be defined
    # at module level.
    criterion = nn.NLLLoss()
    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)
    if gpu:
        model.to('cuda')
    for e in range(epochs):
        curr_epoch = e + 1
        training_loss = 0
        for images, labels in trainloader:
            if gpu:
                images = images.to('cuda')
                labels = labels.to('cuda')
            optimizer.zero_grad()
            output = model(images)
            loss = criterion(output, labels)
            # Accumulate the scalar loss; accumulating the tensor itself would
            # keep every batch's computation graph alive in memory.
            training_loss += loss.item()
            loss.backward()
            optimizer.step()
        validation_loss, num_correct = validation(model, validationloader, criterion, gpu)
        print("epoch: {}\n"
              "total training loss: {:.4f}\n"
              "average training loss: {:.4f}\n"
              "total validation loss: {:.4f}\n"
              "average validation loss: {:.4f}\n"
              "validation accuracy: {:.2f}%".format(
                  curr_epoch,
                  training_loss,
                  training_loss / len(training_dataset),
                  validation_loss,
                  validation_loss / len(validation_dataset),
                  int(num_correct) * 100 / len(validation_dataset)))
Example #3
# This script calls the validation script, which then runs the scope parser.

import sys

from colorama import Fore, Style

from validate import validation

# Call the validator, which then calls the scope parser.
# On failure: stop everything (crawls), raise an error,
# and ask the user to try again.

if len(sys.argv) == 1:
    raise Exception(Fore.RED + "no file arguments passed" + Style.RESET_ALL)
else:
    validation(sys.argv[1])  # pass the file path into the validation function
Example #4
                        # Train the AdaBoost classifier on this train/test split.
                        model = abc.fit(X_train, y_train)

                        # Predict the response for the test dataset.
                        y_pred = model.predict(X_test)

                        pred.append(y_pred)
                        test.append(y_test)

                    # Transpose so each row collects one label position across splits.
                    predT = np.array(pred).T.tolist()
                    testT = np.array(test).T.tolist()
                    validation_result = validation(predT, testT)

                    end = time.process_time()
                    learning_time = str(end - start)
                    print(validation_result + "|" + learning_time)
                    new_file.write(validation_result + "|" + learning_time + "\n")

                new_file.close()

Example #5
File: bp.py Project: anggapur/TA_BPMLL
        for ix, h in enumerate(scores['predicted']):
            hasild1[ix].append(h)

        for ix, h in enumerate(scores['actual']):
            actuald1[ix].append(h)

        learningtimed1.append(scores['learning_time_keep'])

    # Sum the learning times across runs for each configuration.
    learning_time_transpose = transposing(learningtimed1)
    final_learning_time = [sum(d) for d in learning_time_transpose]

    for iz, data_z in enumerate(hasild1):
        # Validate transposed predictions against transposed ground truth.
        validation_result = validation(transposing(hasild1[iz]),
                                       transposing(actuald1[iz]))
        print(str(validation_result) + "|" + str(final_learning_time[iz]))
        new_file.write(
            str(validation_result) + "|" + str(final_learning_time[iz]) + "\n")

    new_file.close()
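Examples #4 and #5 both lean on a transposing helper that is not shown; Example #4's np.array(pred).T.tolist() suggests it is a plain list transpose. A one-liner under that assumption:

def transposing(rows):
    # Hypothetical reconstruction: transpose a list of equal-length rows,
    # e.g. [[1, 2], [3, 4]] -> [[1, 3], [2, 4]].
    return [list(col) for col in zip(*rows)]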
Example #6
def train(dataLoader,
          validate_after=5,
          resume=False,
          perform_training=True,
          save_best=False,
          model_='cnn'):
    """
    Perform training and validation of model.
    Args:
        dataLoader : DataLoader object
        validate_after : Number of epochs after which validation is performed.
                         The model is also saved after this.
        resume : If True, a previously saved model file is loaded.
        perform_training : If False, the training step is skipped and only
                           the final evaluation is run.
        save_best : If True, save session for epoch with minimum validation loss.
        model_ : String denoting the neural network model to use ('rnn' or 'cnn').
    """

    model = None
    if model_ == 'cnn':
        model = models.cnn_model()
    elif model_ == 'rnn':
        model = models.rnn_model()

    sess = tf.Session()
    saver = tf.train.Saver()

    sess.run(tf.global_variables_initializer())

    if resume:
        try:
            prev_session = config.resume_ckpt
            saver.restore(sess, prev_session)
            print("Using previous session: {}".format(prev_session))
        except Exception as exp:
            print(exp)
            print("Creating a new session.")

    if save_best:
        MIN_VAL_LOSS = float('inf')  # best (lowest) validation loss seen so far

    if perform_training:
        config.init()

        train_writer = tf.summary.FileWriter(
            os.path.join(config.logdir, "train"), sess.graph)
        valid_writer = tf.summary.FileWriter(
            os.path.join(config.logdir, "validation"), sess.graph)

        for e in range(config.EPOCHS):
            epoch_loss = 0.0

            for sensor, label in dataLoader.next_train():
                # Run the graph.
                loss, _, tb = sess.run(
                    [model['loss'], model['train'], model['summary']],
                    feed_dict={
                        model['sensor_data']: sensor,
                        model['label']: label,
                        model['training']: True
                    })
                epoch_loss += loss

            avg_loss = epoch_loss / dataLoader.train_batches
            print("Average loss for epoch {} = {}".format(e, avg_loss))

            if e % validate_after == 0:

                val_loss = validation(sess, model, dataLoader, valid_writer, e)

                if save_best:
                    if val_loss < MIN_VAL_LOSS:
                        path = saver.save(sess, config.ckpt, global_step=e)
                        print("Saved model to {}".format(path))
                        MIN_VAL_LOSS = val_loss
                else:
                    path = saver.save(sess, config.ckpt, global_step=e)
                    print("Saved model to {}".format(path))

                train_writer.add_summary(tb, e)

    print("===========================================")
    print("Calculating validation accuracy...")

    accuracies = []
    positives = negatives = 0
    true_positives = true_negatives = false_positives = false_negatives = 0

    for sensor, label in dataLoader.next_validation():
        # Run the graph.
        pred = sess.run(model['prediction'],
                        feed_dict={
                            model['sensor_data']: sensor,
                            model['label']: label,
                            model['training']: False
                        })

        label = np.argmax(label, axis=1)

        positives += np.count_nonzero(label == 1)
        negatives += np.count_nonzero(label == 0)

        # True positive: the condition is present and is detected.
        true_positives += np.count_nonzero(pred + label == 2)

        # True negative: the condition is absent and is not reported.
        true_negatives += np.count_nonzero(pred + label == 0)

        # False positive: the condition is absent but is reported as present.
        false_positives += np.count_nonzero(pred > label)

        # False negative: the condition is present but is reported as absent.
        false_negatives += np.count_nonzero(pred < label)

        accuracies.append(
            np.count_nonzero(pred == label) / pred.shape[0] * 100)

    accuracies = np.array(accuracies)

    print("Sensitivity: {}".format(true_positives / positives))
    print("Specificity: {}".format(true_negatives / negatives))
    print("Precision: {}".format(true_positives /
                                 (true_positives + false_positives)))

    print("Min Validation set accuracy: {} %".format(accuracies.min()))
    print("Max Validation set accuracy: {} %".format(accuracies.max()))
    print("Average Validation set accuracy: {} %".format(accuracies.mean()))

    sess.close()
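The four counters above work because pred and label are both 0/1 vectors, so elementwise arithmetic classifies every sample: a sum of 2 is a true positive, a sum of 0 a true negative, and the strict comparisons pick out the two error types. A small self-contained check of that trick:

import numpy as np

pred  = np.array([1, 0, 1, 0, 1])
label = np.array([1, 0, 0, 1, 1])

assert np.count_nonzero(pred + label == 2) == 2  # true positives
assert np.count_nonzero(pred + label == 0) == 1  # true negatives
assert np.count_nonzero(pred > label) == 1       # false positives
assert np.count_nonzero(pred < label) == 1       # false negatives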
Example #7
def train(name, model, optim, schedule, lossp, early, dataloader, device, logger,
          epoch, iteration, save, validate, depoch, diteration, norm, binar,
          time_weight, layer_weight, debug=False):
    """
    Training the model
    
    name := name of the model
    model := initialized network model
    optim := initialized optimizer
    schedule := initialized lr scheduler
    lossp := name of loss to use <mae|mse|bce|bcel>
    early := initialized early stopping
    dataloader := initialized dataloader
    device := GPU or CPU
    logger := initialized tensorboard logger
    epoch := epochs to run
    iteration := iterations to run per epoch
    save := True if model should be saved, False otherwise
    validate := validate performance after n epochs
    depoch := number of epochs already performed (done epochs)
    diteration := number of iterations already performed (done iterations)
    norm := True if normalization is required, False otherwise
    binar := True if binarization is required, False otherwise
    time_weight := initialized list of time weights
    layer_weight := initialized list of layer weights
    debug := debug
    """
    if debug:
        print('[DEBUG] Start training.')

    it = 0

    # run through the epochs, omit some if pre-trained
    for i in range(depoch, epoch):
        # set model in training mode
        model.train()
        logger.set_mode('training')
        bloss = 0.0
        
        # run through all batches in the dataloader
        for batch_id, data in enumerate(dataloader[0]):
            # get sequence and target
            x = data.to(device).float().permute(1,0,2,3,4)
            if norm or binar:
                x = normalize(x)
            if binar:
                x = binarize(x)

            # clear optimizer
            optim.zero_grad()

            # forward pass & compute loss
            if name == 'prednet':
                output = model(x)
                loss = Loss(time_weight, layer_weight, len(time_weight), output)
            else:
                output = model(x[:-1])
                loss = LOSS(output, x[-1], lossp)
            
            bloss += loss.item()  # accumulate a Python float, not a graph-holding tensor

            # backpropagation
            loss.backward()

            # update weights
            optim.step()

            it += 1

            # log scalar values
            logger.plot_loss('error', loss, it)

            if batch_id >= iteration and iteration > 0:
                break

        # validation every n epochs
        if (i+1) % validate == 0:
            if len(dataloader) > 1:
                with torch.no_grad():
                    val_loss = validation(name, model, lossp, dataloader[1], logger, device,
                                          normalize, binarize)
                print('Epoch: {} Validation loss: {:.6f}'.format(i+1, val_loss))
                
                # early stopping
                if early.step(val_loss):
                    return

        if i > 0:
            # perform scheduler update
            schedule.step()
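The LOSS helper used for the non-prednet branch is not shown; its arguments (output, x[-1], lossp) and the documented <mae|mse|bce|bcel> names suggest a small dispatcher over torch.nn.functional. A hypothetical sketch:

import torch.nn.functional as F

def LOSS(output, target, lossp):
    # Hypothetical reconstruction: map the loss-name string onto the
    # matching functional loss.
    if lossp == 'mae':
        return F.l1_loss(output, target)
    if lossp == 'mse':
        return F.mse_loss(output, target)
    if lossp == 'bce':
        return F.binary_cross_entropy(output, target)
    if lossp == 'bcel':
        return F.binary_cross_entropy_with_logits(output, target)
    raise ValueError('unknown loss: {}'.format(lossp))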