    # C (input dimension), nb_classes, best_model_path and batch_size are assumed
    # to be defined earlier in the full example.
    model.add(Dense(512, batch_input_shape=(None, C)))
    model.add(Dropout(0.02))
    model.add(Dense(nb_classes, activation='softmax'))
    # load weights
    model.load_weights(best_model_path)
    # Compile model (required to make predictions)
    model.compile(loss='categorical_crossentropy',
                  optimizer='Adadelta',
                  metrics=['accuracy'])
    dev_pred = model.predict_classes([dev_x], batch_size=batch_size, verbose=1)
    test_pred = model.predict_classes([test_x],
                                      batch_size=batch_size,
                                      verbose=1)
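    # Note (assumption, not in the original): Sequential.predict_classes was
    # removed in recent Keras/TensorFlow releases; an equivalent call is
    # np.argmax(model.predict(dev_x, batch_size=batch_size), axis=1).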

    ###### Dev set evaluation
    accu, P, R, F1, wAUC, AUC, report = performance.performance_measure_tf(
        dev_y, dev_pred, [], dev_le, labels, dev_file)
    wauc = wAUC * 100
    auc = AUC * 100
    precision = P * 100
    recall = R * 100
    f1_score = F1 * 100
    print("\t".join("{0:.2f}".format(v)
                    for v in (auc, wauc, precision, recall, f1_score)) + "\n")
    print(report)

    ###### Test set evaluation
    accu, P, R, F1, wAUC, AUC, report = performance.performance_measure_tf(
        test_y, test_pred, [], le, labels, test_file)
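The performance.performance_measure_tf helper is not shown in these examples. The sketch below only illustrates the interface the calls above appear to assume (true labels, predicted labels, optional probabilities, a label encoder, label names, an output file; returning accuracy, P, R, F1, weighted AUC, AUC and a text report). The function name, metric averaging and output format here are assumptions, not the project's actual implementation.

import numpy as np
from sklearn.metrics import (accuracy_score, classification_report,
                             precision_recall_fscore_support, roc_auc_score)

def performance_measure_sketch(y_true, y_pred, y_prob, label_encoder, label_names, out_file):
    # Accuracy and weighted precision/recall/F1 from hard predictions.
    accu = accuracy_score(y_true, y_pred)
    P, R, F1, _ = precision_recall_fscore_support(y_true, y_pred, average='weighted')
    # AUC needs class probabilities; fall back to 0.0 when only a dummy list is passed.
    y_prob = np.asarray(y_prob)
    if y_prob.ndim == 2:
        wAUC = roc_auc_score(y_true, y_prob, multi_class='ovr', average='weighted')
        AUC = roc_auc_score(y_true, y_prob, multi_class='ovr', average='macro')
    else:
        wAUC = AUC = 0.0
    report = classification_report(y_true, y_pred, target_names=label_names)
    # Write the decoded predictions, one per line.
    with open(out_file, 'w') as out:
        for label in label_encoder.inverse_transform(y_pred):
            out.write(str(label) + "\n")
    return accu, P, R, F1, wAUC, AUC, report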
Example #2
best_valid = 0  # best dev F1 seen so far (assumed starting value; not shown in the original)
best_valid_epoch = 0
best_weights = None
iter_cnt = 0  # iteration counter, incremented inside the loop
train_history = {}
train_history['f1'] = list()
train_history['epoch'] = list()

for epoch in range(num_epochs):
    #m.step_train(max_iter = 100, iter_graph = 10, iter_inst = 10, iter_label = 10) # perform a training step
    m.step_train_minibatch(iter_label=10, epoch=epoch)
    tpy = m.predict(dev_x)  # predict the dev set
    accu = comp_accu(tpy, dev_y)  # compute the accuracy on the dev set
    iter_cnt += 1
    accu, P, R, F1, wAUC, AUC, report = performance.performance_measure_tf(
        dev_y, tpy, dev_le, dev_labels)
    wauc = wAUC * 100
    auc = AUC * 100
    precision = P * 100
    recall = R * 100
    f1_score = F1 * 100
    train_history['f1'].append(f1_score)
    train_history['epoch'].append(epoch)
    #    print (epoch)
    if (epoch % 10 == 0):
        current_valid = train_history['f1'][-1]
        print("f1-cur " + str(current_valid))
        current_epoch = train_history['epoch'][-1]
        if current_valid > best_valid:
            print("f1-best " + str(best_valid))
            best_valid = current_valid
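comp_accu is not defined in this snippet either; a minimal sketch, assuming both tpy (predicted class probabilities) and dev_y (one-hot targets) are 2-D arrays:

import numpy as np

def comp_accu(tpy, ty):
    # Fraction of rows where the predicted argmax matches the target argmax.
    return (np.argmax(tpy, axis=1) == np.argmax(ty, axis=1)).mean()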
Example #3
            domain_tr_y = y.astype('int32')
            train_err += train2(inputs, domain_tr_y)
            #            print ("error2 "+str(train_err))

            train_batches += 1
        print("Epoch {} of {} took {:.3f}s".format(epoch + 1, num_epochs,
                                                   time.time() - start_time))
        print("  training loss:\t\t{:.6f}".format(train_err / train_batches))

        dev_pred = pred_fn(dev_x)
        dev_pred = np.argmax(dev_pred, axis=1)
        # print (dev_pred)
        # accu,P,R,F1,wAUC,AUC,report =performance.performance_measure_tf(dev_y,pred,dev_le,dev_labels)
        y_prob = [0]  # dummy placeholder passed in place of class probabilities
        devoutFile = "dev.txt"
        accu, P, R, F1, wAUC, AUC, report = performance.performance_measure_tf(
            dev_y, dev_pred, y_prob, dev_le, dev_labels, devoutFile)
        wauc = wAUC * 100
        auc = AUC * 100
        precision = P * 100
        recall = R * 100
        f1_score = F1 * 100
        result = "\t".join("{0:.2f}".format(v)
                           for v in (auc, wauc, precision, recall, f1_score)) + "\n"
        print(result)
        # print (report)
        train_history['valid_loss'].append(f1_score)
        train_history['epoch'].append(epoch)
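Both training loops append the per-epoch dev F1 to train_history but are cut off before the best-model bookkeeping. A sketch of one way to finish that pattern for the Lasagne-style loop above, assuming a network output layer named network (that variable is not in the original):

import lasagne

current_valid = train_history['valid_loss'][-1]
current_epoch = train_history['epoch'][-1]
if current_valid > best_valid:
    best_valid = current_valid
    best_valid_epoch = current_epoch
    # Snapshot the parameters of the best epoch so they can be restored later.
    best_weights = lasagne.layers.get_all_param_values(network)

# After training, restore the best snapshot before evaluating on the test set.
if best_weights is not None:
    lasagne.layers.set_all_param_values(network, best_weights)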