Example #1
# Classification head on top of the merged feature branches.
model = Dense(32, activation='relu')(merge)
model = Dropout(0.5)(model)
model = Dense(1, activation='sigmoid')(model)

model = Model(inputs=[energy_input, maximum_amplitude_input, radial_strain_input, tangential_strain_input],
              outputs=model)
model.compile(loss='binary_crossentropy', optimizer="rmsprop", metrics=['accuracy'])
model.summary()  # summary() prints itself; wrapping it in print() would also emit "None"

print "Training....................................................."
model.fit([energies, maximum_amplitudes, radial_strains, tangential_strains], train_labels, epochs=epochs, verbose=1,
          batch_size=batch_size, validation_split=0.2)

print("--- train %s seconds ---" % (time.time() - start_time))

original_classified = model.predict([energies, maximum_amplitudes, radial_strains, tangential_strains],
                                    batch_size=batch_size, verbose=0)
best_threshold = find_best_threshold(train_labels, original_classified)
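
# For reference, a minimal sketch of what a find_best_threshold helper might
# look like (the helper is not shown in this example, so this is an
# assumption; numpy is assumed to be imported as np): sweep candidate cutoffs
# and keep the one with the highest training accuracy.
def find_best_threshold(labels, probabilities):
    labels = np.asarray(labels).ravel()
    probabilities = np.asarray(probabilities).ravel()
    candidates = np.arange(0.01, 1.0, 0.01)
    accuracies = [np.mean((probabilities > t).astype(int) == labels)
                  for t in candidates]
    return candidates[int(np.argmax(accuracies))]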

if run_test:
    print('test')
    for test in test_sets:
        print(test)
        test_reader = pd.read_csv(get_diff_path(test))
        energy_test, maximum_amplitude_test, radial_strain_test, tangential_strain_test, test_labels = read_data(
            test_reader)

        original_classified = model.predict(
            [energy_test, maximum_amplitude_test, radial_strain_test, tangential_strain_test], batch_size=batch_size,
            verbose=0)

        print "0.5 threshold"
Example #2
        #    plt.ylabel('accuracy')
        #    plt.show()

        # Final evaluation of the model
        score_val = model.evaluate(x_valid, y_valid, verbose=0)
        print("Accuracy on Validation data:  %.2f%%" % (score_val[1] * 100))
        score_train = model.evaluate(x_train, y_train, verbose=0)
        print("Accuracy on Train data:  %.2f%%" % (score_train[1] * 100))

    test_files = []
    predictions = []
    correct = []
    print('Predicting test data ...')

    for file_names, spectrograms, target in test_data_processing():
        predicts = model.predict(spectrograms)
        predicts = np.argmax(predicts, axis=1)
        predicts = [class_labels[p] for p in predicts]
        test_files.extend(file_names)
        predictions.extend(predicts)
        correct.extend(target)

    number_of_corrects = 0
    for x in range(len(predictions)):
        if np.array_equal(correct[x], predictions[x]):
            number_of_corrects += 1
    score_test = number_of_corrects / len(predictions)
    print("Accuracy on Test data:  %.2f%%" % (score_test * 100))

    print('Saving predictions to csv ...')
    df = pd.DataFrame(columns=['file_name', 'predicted_label'])
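    # A possible continuation (not part of the original snippet; the output
    # file name 'predictions.csv' is an assumption): fill the frame from the
    # collected lists and write it to disk.
    df['file_name'] = test_files
    df['predicted_label'] = predictions
    df.to_csv('predictions.csv', index=False)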
Example #3

# ### Uncomment the next cell for training

# In[ ]:


#m.fit(X_train, Y_train, epochs = 350,validation_data=(X_valid,Y_valid),callbacks=[checkpoint,rlStop],
#          batch_size=batch_size, verbose = 2)


# In[28]:


#m.load_weights('/output/CNN_stacked_w2v.hdfs')
pred = m.predict(X_test, batch_size=batch_size)
acc_test = accuracy_score(Y_test.argmax(axis=1), pred.argmax(axis=1))
print('Model2 accuracy: ' + str(acc_test))


# In[38]:


#Model7
batch_size = 64
n_bilstm_1 = 8
n_bilstm_2 = 16
drop_bilstm = 0.25
embed_dim = 512
input_layer = Input(shape=(max_tweet_length,))
embedding_layer = Embedding(max_features, embed_dim)(input_layer)
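
# The snippet breaks off after the embedding layer; a plausible continuation
# (an assumption, inferred from the otherwise unused n_bilstm_1 / n_bilstm_2 /
# drop_bilstm hyperparameters above, with Bidirectional, LSTM, Dense and
# Model assumed to be imported from keras) stacks two bidirectional LSTMs:
bilstm_1 = Bidirectional(LSTM(n_bilstm_1, return_sequences=True,
                              dropout=drop_bilstm))(embedding_layer)
bilstm_2 = Bidirectional(LSTM(n_bilstm_2, dropout=drop_bilstm))(bilstm_1)
output_layer = Dense(Y_train.shape[1], activation='softmax')(bilstm_2)
m7 = Model(inputs=input_layer, outputs=output_layer)
m7.compile(loss='categorical_crossentropy', optimizer='adam',
           metrics=['accuracy'])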
# Disabled branch: load a saved model that uses the custom RoI layer and run
# a single prediction on one image/ROI pair.
if False:
    model3 = load_model(os.path.join(root_folder,
                                     r"cnn_models/roi_test.h5"),
                        custom_objects={"RoI": RoI})

    image = np.array(input_image[0])
    image = np.expand_dims(image, axis=-1)  # add a channel dimension
    image = np.expand_dims(image, axis=0)   # add a batch dimension
    roi_in = np.array(input_roi[0])
    roi_in = np.expand_dims(roi_in, axis=0)
    predictions = model3.predict([image, roi_in])
    print(predictions)
# Disabled branch: run the convolutional model on each image and display the
# input next to each of its 32 predicted feature maps with OpenCV.
if False:
    conv = load_model(os.path.join(root_folder,
                                   r"cnn_models/conv_test.h5"))
    for i in range(len(input_image)):
        images = conv.predict(
            np.expand_dims(np.expand_dims(input_image[i], axis=0),
                           axis=-1))
        images = np.reshape(images, (1, 32, 300, 200))
        for image in images[0]:
            cv2.imshow('image', input_image[i])
            cv2.waitKey(0)
            cv2.destroyAllWindows()
            cv2.imshow('image', image)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

# print(total_prec / len(image_locs))
# print(total_recall / len(image_locs))