Code example #1
0
m = model.predict(x_test)  # median over the trees when the ensemble has > 1

# Regression metrics on the held-out data.
mse = sklearn.metrics.mean_squared_error(y_test, m)
mabs = sklearn.metrics.mean_absolute_error(y_test, m)
exvar = sklearn.metrics.explained_variance_score(y_test, m)
for label, value in (('MSE', mse), ('M Abs Err', mabs), ('Expl. Var.', exvar)):
    print(label, value)

# Release the resources held by the trained model.
model.close_model()

print("Reload model and continue training for 20 epochs")
# Restore the saved model; opening it with cPickle.load() also works.
model2 = djinn.load(model_name="reg_djinn_test")

# Resume training for 20 more epochs with the same learning rate etc. as before.
model2.continue_training(x_train, y_train, 20, learnrate, batchsize,
                         random_state=1)

# Predictions from the further-trained model.
m2 = model2.predict(x_test)

# Metrics after the extra training.
mse2 = sklearn.metrics.mean_squared_error(y_test, m2)
mabs2 = sklearn.metrics.mean_absolute_error(y_test, m2)
Code example #2
0
def model_train1():
    """Train a DJINN classifier, evaluate it, then resume training.

    Relies on module-level names being in scope: ``djinn``, ``sklearn``,
    and the data splits ``x_train``, ``y_train``, ``x_test``, ``y_test``.
    Trains a one-tree DJINN classifier with auto-tuned hyperparameters,
    reports its accuracy, then reloads the saved model, trains it for 20
    more epochs, and reports the updated accuracy.
    """
    print("djinn example")
    modelname = "class_djinn_test"  # name the model
    ntrees = 1  # number of trees = number of neural nets in ensemble
    maxdepth = 4  # max depth of tree -- optimize this for each data set
    dropout_keep = 1.0  # dropout typically 1.0 for non-Bayesian models
    model = djinn.DJINN_Classifier(ntrees, maxdepth, dropout_keep)

    # find optimal settings: this function returns dict with hyper-parameters
    # each djinn function accepts random seeds for reproducible behavior
    optimal = model.get_hyperparameters(x_train, y_train, random_state=1)
    batchsize = optimal['batch_size']
    learnrate = optimal['learn_rate']
    epochs = optimal['epochs']

    # train the model with hyperparameters determined above
    model.train(x_train,
                y_train,
                epochs=epochs,
                learn_rate=learnrate,
                batch_size=batchsize,
                display_step=1,
                save_files=True,
                file_name=modelname,
                save_model=True,
                model_name=modelname,
                random_state=1)

    # NOTE: model.fit(x_train, y_train, ...) wraps get_hyperparameters() and
    # train() so hyperparameters need not be passed manually. However,
    # get_hyperparameters() can be expensive, so run it once per dataset and
    # reuse those values in train() to save computational time.

    # make predictions (median across trees when the ensemble has > 1 tree)
    m = model.predict(x_test)

    # reload model; can also open it using cPickle.load()
    model2 = djinn.load(model_name="class_djinn_test")

    # evaluate results of the initial training run
    acc = sklearn.metrics.accuracy_score(y_test, m.flatten())
    print('Accuracy', acc)

    # continue training for 20 epochs using same learning rate, etc as before
    model2.continue_training(x_train,
                             y_train,
                             20,
                             learnrate,
                             batchsize,
                             random_state=1)

    # close the original model
    model.close_model()

    print("Reload model and continue training")

    # make updated predictions with the further-trained model
    m2 = model2.predict(x_test)

    # evaluate updated results
    # BUG FIX: the accuracy was previously recomputed from the stale
    # pre-continuation predictions `m`; use the updated predictions `m2`.
    acc = sklearn.metrics.accuracy_score(y_test, m2.flatten())
    print('Ye model ka Accuracy', acc)
Code example #3
0
File: gaussian_test.py  Project: klmentzer/DJINN
# plt.show()

X = X.reshape(-1, 1)  # single-feature matrix for sklearn / djinn

x_train, x_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1)

print("djinn example")
modelname = "gaussian_djinn_test"  # name the model
ntrees = 1          # number of trees = number of neural nets in ensemble
maxdepth = 4        # max depth of tree -- optimize this for each data set
dropout_keep = 1.0  # dropout typically set to 1 for non-Bayesian models

load_model = True

if not load_model:
    # initialize the model
    model = djinn.DJINN_Regressor(ntrees, maxdepth, dropout_keep)

    # find optimal settings: returns a dict of hyper-parameters; every
    # djinn function accepts random seeds for reproducible behavior
    optimal = model.get_hyperparameters(x_train, y_train, random_state=1)
    batchsize = optimal['batch_size']
    learnrate = optimal['learn_rate']
    epochs = optimal['epochs']

    # train the model with the hyperparameters determined above
    model.train(x_train, y_train,
                epochs=epochs,
                learn_rate=learnrate,
                batch_size=batchsize,
                display_step=1,
                save_files=True,
                file_name=modelname,
                save_model=True,
                model_name=modelname,
                random_state=1)
else:
    # restore a previously saved model instead of training a new one
    model = djinn.load(model_name=modelname)
Code example #4
0
            save_files=True,
            file_name=modelname,
            save_model=True,
            model_name=modelname,
            random_state=1)

# *note there is a function model.fit(x_train,y_train, ... ) that wraps
# get_hyperparameters() and train(), so that you do not have to manually
# pass hyperparameters to train(). However, get_hyperparameters() can
# be expensive, so I recommend running it once per dataset and using those
# hyperparameter values in train() to save computational time

# make predictions

m = model.predict(x_test)  #returns the median prediction if more than one tree
model2 = djinn.load(model_name="class_djinn_test")

# evaluate results
# BUG FIX: the previous version submitted model2.continue_training to a
# concurrent.futures.ProcessPoolExecutor. The child process trains a pickled
# *copy* of the model, so the parent's model2 was never updated -- and the
# serial continue_training call below then trained a second time. Compute the
# accuracy directly and continue training exactly once, serially.
acc = sklearn.metrics.accuracy_score(y_test, m.flatten())

# continue training for 20 epochs using same learning rate, etc as before
model2.continue_training(x_train,
                         y_train,
                         20,
                         learnrate,
                         batchsize,
                         random_state=1)
print('Accuracy', acc)