Example #1
 def run(self):
     from ml_statistics import BaseStatistics
     
     X = self.model.get_model_X_rows()
     Y = self.model.get_model_Y_rows()
     self.folds.get_n_splits(X, Y)
     
     for train_index, test_index in self.folds.split(X, Y):
         
         # TRAIN instances
         X_train_inputs, y_train = self.model.get_model_data(train_index)
         
         # TEST instances
         X_test_inputs, y_test = self.model.get_model_data(test_index)            
         
         
         self.model.model.compile(loss=self.error_metric, optimizer=self.optimizer, metrics=['accuracy'])
         print(self.model.model.summary())
         
         history = self.model.model.fit(
             X_train_inputs, y_train,
             validation_split=self.validation_split,
             epochs=self.epochs,
             batch_size=self.batch_size,
             callbacks=self.callbacks,
             class_weight=self.class_weigths,
             verbose=0)
         print(history.history.keys())
         
         self.print_history(history)
         
         Y_pred = self.model.model.predict(X_test_inputs)
         
         stats = BaseStatistics(y_test, Y_pred)
         print(stats)
         print('='*30)
Example #2
def test(model, data):
    from ml_statistics import BaseStatistics
    x_test, y_test = data
    Y = np.zeros(y_test.shape)
    y_pred = model.predict(x=x_test, batch_size=8)
    stats = BaseStatistics(y_test, y_pred)
    return stats, y_pred
Example #3
 def gen_synthetic_stats(num_samples=100, similarity=.8):
     from ml_statistics import BaseStatistics
     import numpy as np

     y_true = np.random.randint(2, size=num_samples)
     y_pred = np.array(
         [x if np.random.random() < similarity else 1 - x for x in y_true])
     statistics = BaseStatistics(y_true, y_pred)
     print(statistics)
     return statistics
Example #4
def test(model, data, args):
    from ml_statistics import BaseStatistics
    import numpy as np
    import matplotlib.pyplot as plt
    from PIL import Image
    x_test, y_test = data
    y_pred, x_recon = model.predict(x_test, batch_size=100)
    print('-'*30 + 'Begin: test' + '-'*30)
    print('Test acc:', np.sum(np.argmax(y_pred, 1) == np.argmax(y_test, 1))/y_test.shape[0])

    # combine_images is a helper from the surrounding project (not shown here)
    img = combine_images(np.concatenate([x_test[:50], x_recon[:50]]))
    image = img * 255
    Image.fromarray(image.astype(np.uint8)).save(args.save_dir + "/real_and_recon.png")
    print()
    print('Reconstructed images are saved to %s/real_and_recon.png' % args.save_dir)
    print('-' * 30 + 'End: test' + '-' * 30)
    plt.imshow(plt.imread(args.save_dir + "/real_and_recon.png"))
    plt.show()
    
    stats = BaseStatistics(y_test, y_pred)
    print(stats)
Example #5
 def test_model(self):
     from ml_statistics import BaseStatistics
     Y = np.zeros(self.y_test.shape)
     y_pred = self.model.predict(x=self.x_test, batch_size=8)
     stats = BaseStatistics(self.y_test, y_pred)
     return stats, y_pred
Example #6
    model = get_mergerd_model(X_train, X2_train)
    opt = get_options()
    
    loss_function = ['mse', 'binary_crossentropy']
    
    model.compile(loss=loss_function[1], optimizer=opt, metrics=['accuracy'])
    tensorboard = TensorBoard(log_dir="logs/{}".format(time()))
    if cnt==0:
        print(model.summary())
        
#    calls = [tensorboard, earlyStopping, reduceLR]
    calls = [tensorboard, earlyStopping]
    weights = {0: 0.3, 1: 0.7}
    history = model.fit([X_train, X2_train], y_train,
                        validation_split=0.1,
                        epochs=250,
                        batch_size=32,
                        callbacks=calls,
                        class_weight=weights,
                        verbose=0)
    
    Y_pred = model.predict([X_test, X2_test])
    
    stats = BaseStatistics(y_test, Y_pred)
    print(stats)
    
    
    scores = model.evaluate([X_test, X2_test], y_test, verbose=0)
    print("Accuracy: %.2f%%" % (scores[1]*100))
    cvscores.append(scores[1] * 100)
    print(scores)
    print('='*30)
    cnt+=1

print("%.2f%% (+/- %.2f%%)" % (numpy.mean(cvscores), numpy.std(cvscores)))
print('<<< END >>>')
Example #7
def run(npath, ppath):
    from sklearn.model_selection import StratifiedKFold
    from sklearn.model_selection import cross_val_score
    from keras.wrappers.scikit_learn import KerasClassifier
    from keras.utils import plot_model
    from keras.callbacks import EarlyStopping
    from ml_statistics import BaseStatistics
    from keras import optimizers
    import numpy as np

    # Params
    weightTrue = 0.8
    class_weight = {0: (1 - weightTrue), 1: weightTrue}
    earlyStopping = EarlyStopping(monitor='val_loss',
                                  patience=10,
                                  verbose=1,
                                  mode='auto')
    opt = optimizers.Nadam(lr=0.001,
                           beta_1=0.9,
                           beta_2=0.999,
                           epsilon=1e-08,
                           schedule_decay=0.004)
    #    opt = optimizers.RMSprop(lr=0.001, decay=0.0)

    # Setup all data for inputs
    setup_data(npath, ppath)

    # Retrieve data for model
    X, Y = get_model_data_full(data)

    X = X * 100000

    print(X)
    print(Y.shape)

    #    plot_model(create_model(), show_shapes=True)

    # evaluate using 10-fold cross validation
    kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=17)
    #    results = cross_val_score(model, X, Y, cv=kfold)
    #    print('='*30)
    #    print(results.mean())

    kfold.get_n_splits(X, Y)

    for train_index, test_index in kfold.split(X, Y):
        # TRAIN DATA
        X_train, y_train = X[train_index, :], Y[train_index]
        # TEST DATA
        X_test, y_test = X[test_index, :], Y[test_index]

        # create model
        #        model = KerasClassifier(build_fn=create_model, epochs=50, batch_size=32, verbose=1)
        model = create_model()

        # Compile Model
        model.compile(loss='binary_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])
        print(model.summary())

        history = model.fit(X_train,
                            y_train,
                            validation_split=0.3,
                            epochs=100,
                            batch_size=128,
                            callbacks=[earlyStopping],
                            class_weight=class_weight,
                            verbose=0)
        print(history.history.keys())

        Y_pred = model.predict(X_test, verbose=0)

        stats = BaseStatistics(y_test, Y_pred)
        print(stats)
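
The commented-out lines in this example hint at an alternative scikit-learn wrapper route (KerasClassifier plus cross_val_score). The snippet below is only a sketch of that route, assuming create_model() returns a compiled Keras model and reusing the kfold object defined above; it is not part of the original script.

# Sketch only: the cross-validation path Example #7 leaves commented out.
# Assumes create_model() builds and compiles the Keras model shown above.
clf = KerasClassifier(build_fn=create_model, epochs=50, batch_size=32, verbose=0)
results = cross_val_score(clf, X, Y, cv=kfold)
print('Mean CV accuracy: %.4f (+/- %.4f)' % (results.mean(), results.std()))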
Example #8
 def eval(self, model, data):
     from ml_statistics import BaseStatistics
     x_test, y_test = data
     y_pred, aux = model.predict(x_test, batch_size=32)
     stats = BaseStatistics(y_test[0], y_pred)
     return stats
Example #9
def get_test_stats(model, X, y):
    from ml_statistics import BaseStatistics
    y_pred = model.predict(x=X, batch_size=8)
    stats = BaseStatistics(y, y_pred)
    return stats, y_pred
Example #10
 def eval(self, model, data):
     x_test, y_test = data
     y_pred = model.predict(x_test, batch_size=32)
     stats = BaseStatistics(y_test, y_pred)
     return stats
Example #11
def test(model, data):
    from ml_statistics import BaseStatistics
    x_test, y_test = data
    y_pred, x_recon = model.predict([x_test, y_test], batch_size=100)
    stats = BaseStatistics(y_test, y_pred)
    return stats
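
Every example above uses BaseStatistics the same way: construct it from (y_true, y_pred), then print the object or return it. The stand-in below is only a sketch inferred from that usage for the binary-label case; the class name, the 0.5 threshold, and the printed metrics are assumptions, not the real ml_statistics API.

import numpy as np

class BaseStatisticsSketch:
    # Hypothetical stand-in for ml_statistics.BaseStatistics (binary labels only).
    def __init__(self, y_true, y_pred, threshold=0.5):
        y_true = np.asarray(y_true).ravel().astype(int)
        y_hat = (np.asarray(y_pred).ravel() >= threshold).astype(int)
        self.tp = int(np.sum((y_true == 1) & (y_hat == 1)))
        self.tn = int(np.sum((y_true == 0) & (y_hat == 0)))
        self.fp = int(np.sum((y_true == 0) & (y_hat == 1)))
        self.fn = int(np.sum((y_true == 1) & (y_hat == 0)))

    def __str__(self):
        total = self.tp + self.tn + self.fp + self.fn
        acc = (self.tp + self.tn) / total if total else 0.0
        prec = self.tp / (self.tp + self.fp) if (self.tp + self.fp) else 0.0
        rec = self.tp / (self.tp + self.fn) if (self.tp + self.fn) else 0.0
        f1 = 2 * prec * rec / (prec + rec) if (prec + rec) else 0.0
        return 'acc=%.3f precision=%.3f recall=%.3f f1=%.3f' % (acc, prec, rec, f1)

For instance, print(BaseStatisticsSketch([1, 0, 1, 1], [0.9, 0.2, 0.4, 0.8])) gives acc=0.750 precision=1.000 recall=0.667 f1=0.800.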