Example #1
0
def predictions(hemi="N", start_time=dt.datetime(2019, 1, 1), timedelta=365):
    """Predict daily sea-ice extent for a window of consecutive days.

    Parameters
    ----------
    hemi : str
        Hemisphere code ("N" or "S") used to pick the CSV and model files.
    start_time : datetime.datetime
        First day of the prediction window.
    timedelta : int
        Number of consecutive days to predict.

    Returns
    -------
    None
        Results are only printed; the anomaly computation was removed as
        dead (commented-out) code.
    """
    f = get_data("Data_Proc/%s_seaice_extent_daily_v4.0.csv" % hemi)
    # .to_numpy() replaces DataFrame.as_matrix(), which was removed in pandas 1.0.
    X = f[["yr", "month", "doy", "day"]].to_numpy()

    # Build one feature row (yr, month, doy, day) per day in the window.
    dn = [start_time + dt.timedelta(x) for x in np.arange(timedelta)]
    u = pd.DataFrame()
    u["yr"] = [x.year - 1978 for x in dn]  # years since 1978 (dataset epoch) -- TODO confirm
    u["month"] = [x.month for x in dn]
    u["day"] = [x.day for x in dn]
    u["doy"] = [(x - dt.datetime(x.year, 1, 1)).days + 1 for x in dn]
    Xp = u[["yr", "month", "doy", "day"]].to_numpy()
    print(Xp.shape)  # Py3 print() -- the original used Python 2 print statements

    # Fit the scaler on historical + future rows so both share one scale.
    Xn = np.concatenate((X, Xp))
    print(Xn.shape, X.shape)
    scaler = MinMaxScaler()
    scaler.fit(Xn)
    x = scaler.transform(Xp)

    # Wrapper hyper-parameters are placeholders: the trained network is
    # loaded from disk and assigned directly, so build_fn is never fit.
    m = KerasRegressor(build_fn=deep_model, epochs=50, batch_size=100, verbose=0)
    m.model = load_model("Data_Proc/%s_model.h5" % hemi)
    y = m.predict(x)
    print(y, x)
    return
Example #2
0
def get_distribution():
    """Flask endpoint: split a headcount across eight business functions.

    Reads ``employee_number`` and ``modelled_market_share`` from the JSON
    request body, runs the persisted scaler+Keras pipeline on the market
    share, multiplies the eight predicted fractions by the headcount, and
    returns the per-function counts as integers in a JSON response.
    """
    import keras.backend.tensorflow_backend as tb
    # Workaround: Keras graphs break when invoked from Flask worker threads
    # unless the symbolic scope flag is forced on.
    tb._SYMBOLIC_SCOPE.value = True

    # Parse the request body once instead of once per field.
    payload = request.get_json()
    employee_number = float(payload['employee_number'])
    modelled_market_share = float(payload['modelled_market_share'])

    # Template architecture only -- trained weights are loaded from disk below,
    # so this build_fn is never actually fitted.
    def model():
        model = Sequential()
        model.add(
            Dense(600,
                  input_dim=1,
                  kernel_initializer='normal',
                  activation='sigmoid'))
        model.add(Dense(8, activation='sigmoid', kernel_initializer='normal'))
        model.compile(loss='mse', optimizer='adam')
        return model

    model = KerasRegressor(build_fn=model, epochs=10, batch_size=10, verbose=1)

    # Load scaler
    scaler = joblib.load('../model_keras_scaler.pkl')

    # Load the model
    model.model = load_model('../model_keras.h5')

    # Set up pipeline
    pipe = Pipeline([('scale', scaler), ('clf', model)])

    test_df = pd.DataFrame(data={'x_test': [modelled_market_share]})

    results = pipe.predict(test_df)

    # One row per input (here exactly one); columns are the eight functions.
    results = pd.DataFrame(
        {
            'pred Brand & Content': results[:, 0],
            'pred Marketing Operations': results[:, 1],
            'pred Digital Marketing': results[:, 2],
            'pred Customer Care': results[:, 3],
            'pred Indirect Retail / General Trade': results[:, 4],
            'pred Direct Retail': results[:, 5],
            'pred Direct Sales': results[:, 6],
            'pred Sales Systems Support': results[:, 7]
        }) * employee_number

    print(results)

    # .iloc replaces the deprecated positional chained indexing results.loc[0][0].
    row = results.iloc[0]
    return jsonify({
        'pred_BC': int(row.iloc[0]),
        'pred_MO': int(row.iloc[1]),
        'pred_DM': int(row.iloc[2]),
        'pred_CC': int(row.iloc[3]),
        'pred_IR': int(row.iloc[4]),
        'pred_DR': int(row.iloc[5]),
        'pred_DS': int(row.iloc[6]),
        'pred_SS': int(row.iloc[7])
    })
Example #3
0
def yieldPred(request):
    """Django view: predict crop yield for the crop/location in a YieldForm.

    On a valid POST, loads the per-crop Keras model, predicts the yield for
    the crop data point at the given location, and renders the result page.
    Otherwise renders an empty form.
    """
    # Crop name -> saved model file; Sugarcane is the fallback for any
    # crop not listed (preserves the original if/elif/else behavior).
    model_dir = ('D:\\Projects\\Clone 16 Nov Capstone\\FarmAlert\\farm'
                 '\\static\\Kerasmodels\\')
    model_files = {
        "Barley": "Barleymodelkeras.h5",
        "Wheat": "Wheatmodelkeras.h5",
        "Maize": "Maizemodelkeras.h5",
        "Rice": "Ricemodelkeras.h5",
    }

    if request.method == 'POST':
        form = YieldForm(request.POST)
        if form.is_valid():
            Crop = form.cleaned_data.get('Crop')
            Location = form.cleaned_data.get('Location')
            Input = getCropDataPoint(Location, Crop)

            # Wrapper hyper-parameters are placeholders; the trained model
            # is assigned directly below, so no fitting happens here.
            model2 = KerasRegressor(build_fn=build_regressor,
                                    epochs=10,
                                    batch_size=10,
                                    verbose=1)
            model2.model = load_model(
                model_dir + model_files.get(Crop, "Sugarcanemodelkeras.h5"))

            pred = model2.predict(Input)[0]
            return render(request, 'services/yieldResult.html', {'data': pred})
        # NOTE(review): an invalid POST falls through to a fresh, unbound
        # form, discarding validation errors -- confirm this is intended.

    form = YieldForm()
    return render(request, 'services/yieldPred.html', {'form': form})
    def _load_model(self):
        """Restore the persisted Keras model and store it on the instance.

        Wraps the model loaded from ``<model_dir><model_name>.hdf5`` in a
        scikit-learn ``KerasRegressor`` so it can be used in sklearn APIs.
        """

        def dummy():
            # Placeholder build_fn: never invoked, because the trained model
            # is assigned to the wrapper directly below.
            return

        wrapper = KerasRegressor(build_fn=dummy,
                                 epochs=1,
                                 batch_size=10,
                                 verbose=1)
        wrapper.model = ld_mdl(self.model_dir + self.model_name + '.hdf5')

        self.model = wrapper
Example #5
0
def predict_list(x):
    """Predict with the saved model and return values in original y units."""
    root_dir = os.path.dirname(os.path.realpath(__file__))
    output_dir = os.path.join(root_dir, "output")

    # Wrapper hyper-parameters are irrelevant here: the trained network is
    # loaded from disk and assigned directly, so no fitting takes place.
    regressor = KerasRegressor(build_fn=build_by_loading,
                               nb_epoch=5000,
                               batch_size=50,
                               verbose=1)
    regressor.model = load_model(os.path.join(output_dir, "model.please"))

    print("Fed to estimator")
    print(x)
    scaled_pred = regressor.predict(x)

    # Undo the target scaling that was applied during training.
    y_scaler = joblib.load(os.path.join(output_dir, "y_scaler.please"))
    return y_scaler.inverse_transform(scaled_pred)
Example #6
0
File: models.py  Project: bjur27r/dash_iot
def get_caudal(gra, p_h, pot):
    """Predict flow rate ("caudal") from the three inputs, rounded to 2 dp."""
    # Wrapper hyper-parameters are placeholders; the trained model is
    # assigned directly below, so build_fn is never fitted.
    regressor = KerasRegressor(build_fn=larger_model,
                               epochs=10,
                               batch_size=10,
                               verbose=1)
    regressor.model = load_model('modelBV1.h5')

    # Rescale gra before building the feature row -- presumably a unit
    # conversion; TODO confirm against the training pipeline.
    gra = gra / 1000
    features = np.array([[float(gra), float(p_h), float(pot)]])

    prediction = regressor.predict(features)
    print(prediction)
    return round(float(prediction), 2)
Example #7
0
def predict_list(x):
    """Predict with the saved model, using hyper-parameters from config.

    Returns predictions transformed back to the original target units via
    the persisted y-scaler.
    """
    root_dir = os.path.dirname(os.path.realpath(__file__))
    output_dir = os.path.join(root_dir, "output")

    # Hyper-parameters come from the module-level config mapping, though the
    # wrapper is never fitted here (the trained model is assigned directly).
    hp = config["hyperparameters"]
    regressor = KerasRegressor(build_fn=build_by_loading,
                               nb_epoch=int(hp["epochs"]),
                               batch_size=int(hp["batch_size"]),
                               verbose=1)
    regressor.model = load_model(os.path.join(output_dir, "model.please"))

    print("Fed to estimator")
    print(x)
    scaled_pred = regressor.predict(x)

    # Undo the target scaling that was applied during training.
    y_scaler = joblib.load(os.path.join(output_dir, "y_scaler.please"))
    return y_scaler.inverse_transform(scaled_pred)
Example #8
0
def predict_single_outcome(date, home_team, away_team, city, country):
    """Predict the score of a single World Cup fixture and print it.

    Parameters
    ----------
    date : str
        Fixture date in ``YYYY-MM-DD`` format.
    home_team, away_team : str
        Country names, looked up in ``data/countries.csv``.
    city, country : str
        Venue, looked up in ``data/cities.csv``.

    The prediction is printed (inverse-transformed via the saved y-scaler);
    nothing is returned.
    """
    #get home path
    root_dir = os.path.dirname(os.path.realpath(__file__))

    print("Running prediction!")

    en = joblib.load(os.path.join(root_dir, "output", "label_encoder.please"))
    tournament = ["FIFA World Cup"]
    tournament = en.transform(tournament)[0]

    # Wrapper hyper-parameters are placeholders; the trained model is
    # assigned directly below.
    estimator = KerasRegressor(build_fn=build_by_loading,
                               nb_epoch=5000,
                               batch_size=50,
                               verbose=1)
    estimator.model = load_model(
        os.path.join(root_dir, "output", "model.please"))

    #cities from csv file
    cities = pd.read_csv(os.path.join(root_dir, "data", "cities.csv"))
    cities = cities.set_index(['city', 'country'])

    # Countries from csv. BUGFIX: the original assigned the indexed frame to
    # a misspelled name ("coutries"), so the .loc lookups below ran against
    # the un-indexed DataFrame.
    countries = pd.read_csv(os.path.join(root_dir, "data", "countries.csv"))
    countries = countries.set_index(['country'])

    scaler = joblib.load(os.path.join(root_dir, "output", "y_scaler.please"))
    sc_X = joblib.load(os.path.join(root_dir, "output", "x_scaler.please"))

    # Days elapsed since a fixed epoch, matching the training feature.
    dt_date = dt.strptime(date, '%Y-%m-%d')
    hard_date = dt.strptime('1800-11-01', '%Y-%m-%d')
    date_int = (dt_date - hard_date).days

    #lookup lat/long of fixture
    row = cities.loc[city, country]
    lat = row.lat[0]
    lng = row.lng[0]

    #lookup lat/long of home
    home = countries.loc[home_team]
    lat_home = home.lat
    lng_home = home.lng
    pop_home = home[2]

    #lookup lat/long of away
    away = countries.loc[away_team]
    lat_away = away.lat
    lng_away = away.lng
    pop_away = away[2]
    try:
        # BUGFIX: pass (lat, lng) tuples -- the original passed sets, whose
        # iteration order is undefined, so coordinates could be swapped.
        travel = geodesic((lat, lng), (lat_away, lng_away)).kilometers
        home_local = geodesic((lat, lng), (lat_home, lng_home)).kilometers
    except Exception:
        # Best-effort: fall back to zero distance on any lookup/geodesy error.
        travel = 0
        home_local = 0

    data_frame = pd.DataFrame(columns=[
        'date_int', 'tournament', 'travel', 'home_local', 'pop_home',
        'pop_away'
    ])
    data_frame.loc[0] = [
        date_int, tournament, travel, home_local, pop_home, pop_away
    ]
    print("Fed to x-scaler")
    print(data_frame)

    x = sc_X.transform(data_frame)

    print("Fed to estimator")
    print(x)

    prediction = estimator.predict(x)

    prediction = prediction.reshape(1, -1)
    print("{0} vs {1}".format(home_team, away_team))
    print(scaler.inverse_transform(prediction))

def baseline_model():
    """Build and compile a single-hidden-layer regression network.

    Architecture: input_size -> Dense(30, relu) -> Dense(1, relu), trained
    with MSE/Adam. ``input_size`` is a module-level global.
    """
    net = Sequential()
    net.add(Dense(30, input_dim=input_size, kernel_initializer='normal', activation='relu'))
    net.add(Dense(1, kernel_initializer='normal', activation="relu"))
    net.compile(loss='mean_squared_error', optimizer='adam')
    return net

# Build a scaler + regressor pipeline from persisted artifacts.
estimators = []
with open('models/pss2_regressor_std_scaler_all.pickle', 'rb') as handle:
    std_scaler = pickle.load(handle)
    estimators.append(('standardize', std_scaler))

# Wrapper hyper-parameters are placeholders; the trained network is loaded
# from disk and assigned directly, so no fitting happens here.
estimator = KerasRegressor(build_fn=baseline_model, epochs=100, batch_size=10, verbose=1)
estimator.model = load_model("models/model_pss2_regressor_all.h5")

estimators.append(('mlp', estimator))
pipeline = Pipeline(estimators)


# load dataset
pandas.set_option('display.max_colwidth', -1)
df = pandas.read_csv("data/pss2_ranking_global.tsv", delimiter='\t', header=0)

# Columns 3:194 are features; column 0 is the raw target -- TODO confirm schema.
X = df.iloc[:, 3:194]
Y_tmp = df.iloc[:, 0]
Y = []

total_sents = len(Y_tmp)
# NOTE: the body of this loop is truncated in the source scrape.
for i in range(0,total_sents):
Example #10
0
import numpy as np
from keras.models import model_from_json
from joblib import load
from ft_engineering import add_extra_features
from model_utils import bl_nn
from keras.wrappers.scikit_learn import KerasRegressor

app = Flask(__name__)

# load the model and standard scaler
# NOTE(review): build_fn=bl_nn() passes the *result* of bl_nn, not the
# callable; harmless here only because .model is assigned below -- confirm.
sk_reg = KerasRegressor(build_fn=bl_nn(), epochs=100, batch_size=5, verbose=1)

scaler = load("static/bin/final_scaler.bin")
# Rebuild the architecture from JSON, then restore the trained weights.
with open("static/json/final_model_reg.json", "r") as json_file:
    loaded_model_json = json_file.read()
sk_reg.model = model_from_json(loaded_model_json)
sk_reg.model.load_weights("static/h5/final_model_reg.h5")


# define a predict function as an endpoint
@app.route("/predict", methods=["GET", "POST"])
def predict():
    data = {"success": False}
    params = request.json
    if params is None:
        params = request.args

    # if parameters are found, return a prediction
    if params is not None:
        params = params.to_dict()
        print(params)
Example #11
0
File: monte.py  Project: Ornamus/VillAInous
def get_model(model_type, train=False):
    """Return a Connect-4 value estimator, optionally training it first.

    Parameters
    ----------
    model_type : int
        0 -> flat dense network on the 42-cell board vector;
        1 -> convolutional network on the board reshaped to (6, 7, 1).
    train : bool
        When True, load 'game.txt', fit the chosen model, and save it;
        otherwise load the previously saved model from disk.

    Returns
    -------
    KerasRegressor or None
        None when model_type is neither 0 nor 1.
    """
    if train:
        # Each row: 42 board cells followed by the 8 target outputs.
        dataset = loadtxt('game.txt', delimiter=',')
        print(f"len: {len(dataset[0])}")
        X = dataset[:, 0:42]  #42
        Y = dataset[:, 42:]
        print(f"Top Y: {Y[0]}")

    def flat_model():
        # Dense 42 -> 42 -> 8 network over the flat board vector.
        model = Sequential()
        model.add(
            Dense(42,
                  input_dim=42,
                  kernel_initializer='normal',
                  activation='relu'))
        model.add(Dense(8, kernel_initializer='normal'))
        model.compile(loss='mean_squared_error', optimizer='adam')
        return model

    def baseline_model():
        # Small conv net over the 6x7x1 board image.
        model = models.Sequential()
        model.add(
            layers.Conv2D(32, (3, 3), activation='relu',
                          input_shape=(6, 7, 1)))
        model.add(layers.MaxPooling2D((2, 2)))
        #model.add(layers.Conv2D(64, (3, 3), activation='relu'))
        #model.add(layers.MaxPooling2D((2, 2)))
        #model.add(layers.Conv2D(32, (3, 3), activation='relu'))
        model.add(layers.Flatten())
        model.add(layers.Dense(64, activation='relu'))
        model.add(layers.Dense(8))

        model.compile(
            'adam',
            loss='mean_squared_error',
        )
        model.summary()
        return model

    if train and model_type == 1:
        # Reshape each flat 42-cell row into a 6x7 board for the conv net.
        boards = []
        for data in X:
            #print("=========Loading Board========")
            board = []
            prev = 0
            row_sum = 0
            for i in range(6):
                row = data[prev:(i + 1) * 7]
                board.append(row)
                prev = (i + 1) * 7
                #print(row)
                row_sum += len(row)
            boards.append(board)

        boards = np.asarray(boards)
        boards = boards.reshape(len(boards), 6, 7, 1)
        #print(boards)

    if model_type == 0:
        estimator = KerasRegressor(build_fn=flat_model,
                                   epochs=5000,
                                   batch_size=100,
                                   verbose=2)
        if train:
            estimator.fit(X, Y)
            estimator.model.save("Con4_Flat_Recent.h5")
        else:
            # Loading assigns the trained network directly; build_fn is unused.
            estimator.model = load_model(
                'Con4_Flat_Recent.h5')  #load_model('Con4_Flat_Best.h5')
        return estimator
    elif model_type == 1:
        estimator = KerasRegressor(build_fn=baseline_model,
                                   epochs=300,
                                   batch_size=100,
                                   verbose=2)
        if train:
            estimator.fit(boards, Y)
            estimator.model.save("Con4_Conv_Recent.h5")
        else:
            estimator.model = load_model('Con4_Conv_Recent.h5')
        return estimator
    return None
# Notebook-derived script: train, save, reload, and prepare test data.
# NOTE: 'estimator', 'X_train', etc. are defined in cells not shown here.
estimator.fit(X_train, y_train)
prediction = estimator.predict(X_test)
mean_absolute_percentage_error(y_test, prediction) #54.58289499388174

# Save the entire model as a SavedModel.
# NOTE: '!mkdir' is IPython shell magic -- this only runs in a notebook.
!mkdir -p saved_model
estimator.model.save('saved_model/my_model')

from keras.models import load_model

# Instantiate the model as you please (we are not going to use this)
model2 = KerasRegressor(build_fn=baseline_model, epochs=10, batch_size=10, verbose=1)

# This is where you load the actual saved model into new variable.
model2.model = load_model('saved_model/my_model')

# Now you can use this to predict on new data (without fitting model2, because it uses the older saved model)
model2.model.summary()

"""# Predicting on Test"""

test = pd.read_csv('test_FewQE9B.csv')
print(test.shape)

print(test.isnull().sum())
# Fill missing categorical values with the mode of the column.
test['Outlet_Size'].fillna(test['Outlet_Size'].mode()[0], inplace=True)
# filling missing values of continuous variables with mean
test['Item_Weight'].fillna(test['Item_Weight'].mean(), inplace=True)
cat_cols = test.select_dtypes(include=['object']).columns.tolist()
# print(cat_cols)
Example #13
0
def load(model_path):
    """Wrap the Keras model stored at `model_path` in a KerasRegressor."""
    wrapper = KerasRegressor(build_fn=baseline_model)
    wrapper.model = load_model(model_path)
    return wrapper
Example #14
0
# Smoke-test script: run the trained pipeline, persist the Keras model,
# then reload it and predict again.
# NOTE(review): the first assignment is dead -- gra is overwritten below.
gra = 62.346
gra = 0.021
p_h = 340.000
pot = 4.00
# NOTE(review): Xnew hard-codes 300.000 while p_h above is 340.000 -- confirm.
Xnew = np.array([[0.021, 300.000, 4.00]])

result = pipeline.predict(Xnew)
print(result)

filename = '/home/bjur/PycharmProjects/invter_app/venv/data/finalized_modelB.sav'
#pickle.dump(pipeline, open(filename, 'wb'))
pipeline.model.save('modelBV1.h5')

from keras.models import load_model

# Instantiate the model as you please (we are not going to use this)
model2 = KerasRegressor(build_fn=larger_model,
                        epochs=10,
                        batch_size=10,
                        verbose=1)

model2.model = load_model('modelBV1.h5')

# Now you can use this to predict on new data (without fitting model2, because it uses the older saved model)
print(model2.predict(Xnew))

#import joblib
#joblib.dump(pipeline, 'model_bomb.sav')
#print('Saved %s pipeline to file' % pipe_dict[best_clf])
Example #15
0
pandas.set_option('display.max_colwidth', -1)
# load dataset
df = pandas.read_csv("data/pss2_features_pairs_align.tsv",
                     delimiter='\t',
                     header=0)

# Build a scaler + classifier pipeline from persisted artifacts.
estimators = []
with open('models/pss2_std_scaler_class.pickle', 'rb') as handle:
    std_scaler = pickle.load(handle)
    estimators.append(('standardize', std_scaler))

# Wrapper hyper-parameters are placeholders; the trained network is loaded
# from disk and assigned directly, so no fitting happens here.
estimator = KerasRegressor(build_fn=baseline_model,
                           epochs=100,
                           batch_size=10,
                           verbose=1)
estimator.model = load_model("models/model_pss2_class.h5")

estimators.append(('mlp', estimator))
pipeline = Pipeline(estimators)

# Build 'from'/'to' records per row: text plus its feature vector.
# Column layout (text at 4/194, features 5:194 / 195:384) -- TODO confirm.
new_list = []
for i in range(0, len(df)):
    line_from = {
        'level': '',
        'text': df.iloc[i, 4],
        'feats': numpy.asanyarray(df.iloc[i, 5:194])
    }
    # NOTE: this dict literal is truncated in the source scrape.
    line_to = {
        'level': '',
        'text': df.iloc[i, 194],
        'feats': numpy.asanyarray(df.iloc[i, 195:384])
Example #16
0
from keras.models import load_model
from keras.wrappers.scikit_learn import KerasRegressor
import numpy as np

# Project-local module providing the model builder and example loader.
import Network as ntw



# Wrapper hyper-parameters are placeholders; the trained network is loaded
# from disk and assigned directly, so no fitting happens here.
model = KerasRegressor(build_fn=ntw.create_model, epochs=70, batch_size=5, verbose=0)
model.model = load_model('saved_model.h5')

# Load one example and add a leading batch dimension before predicting.
ex = ntw.get_example('Codebook_cell8x8\\0person01118-30-30.npy')

ex = np.array([ex])
print(model.predict(ex))