Code example #1
def load_model():
    # Resolve the model file location from the application config
    app_settings = read_config()
    model_path = app_settings['model_path']
    model_file_name = app_settings['model_file']
    model_file_path = path.join(model_path, model_file_name)
    # Use a context manager so the file handle is closed after loading
    with open(model_file_path, 'rb') as f:
        model = pickle.load(f)
    return model
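The snippets on this page assume imports that are not shown (`from os import path`, `import pickle`, `import pandas as pd`) plus a shared `read_config` helper from the project's `app_settings` module (its import is visible in example #5). A minimal sketch of what that helper might look like, assuming the settings live in a JSON file; the `settings.json` name and its location are assumptions:

# Hypothetical sketch of read_config; the settings.json file name and its
# location next to this module are assumptions, not shown in the project.
import json
from os import path

def read_config():
    config_path = path.join(path.dirname(__file__), 'settings.json')
    with open(config_path) as f:
        return json.load(f)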
Code example #2
def load_data(filename, skus, country, category):
    app_settings = read_config()
    data_path = app_settings['data_path']
    data_file_path = path.join(data_path, filename)

    df = pd.read_csv(data_file_path)

    df.rename(columns={
        'Sku': 'sku',
        'Sales': 'actualVolume',
        'Week': 'forecastWeek',
        'Retailer': 'accountPlanningGroupCode',
        'Market': 'market',
        'Category': 'category'
    }, inplace=True)

    cols = [
        'sku', 'actualVolume', 'forecastWeek', 'accountPlanningGroupCode',
        'market', 'category'
    ]

    # Keep only the requested market, category and SKUs
    df = df[df['market'] == country]
    df = df[df['category'] == category]
    df = df[df['sku'].isin(skus)]

    df = df[cols]

    df_sku_sales = df.groupby(['sku', 'forecastWeek'],
                              as_index=False)['actualVolume'].sum()
    df_sku_sales['category'] = category
    df_sku_sales['market'] = country

    return df_sku_sales, df
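A hypothetical call to `load_data`, assuming a CSV with the original Sku/Sales/Week/Retailer/Market/Category columns; the file name, SKU codes and filter values below are made up for illustration:

# Illustrative only: the file name, SKU list and filter values are assumptions.
df_sku_sales, df_filtered = load_data(
    'weekly_sales.csv',
    skus=['SKU001', 'SKU002'],
    country='UK',
    category='Snacks')
print(df_sku_sales.head())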
Code example #3
def train():
    """
        Trains ARIMA model as per Region, State $ sku 
        & selects the best model and saves it
    """
    app_settings = read_config()
    data_path = app_settings['data_path']
    file = app_settings['file']
    file_path = path.join(data_path, file)
    print(file_path)
    # Load the data set
    df = load_data(file_path)

    # Extract the unique codes for filtering the overall data
    sku_group = df.groupby('Product_SKU', as_index=False)
    sku_list = sku_group.groups.keys()

    region_group = df.groupby('Region', as_index=False)
    region_list = region_group.groups.keys()

    state_group = df.groupby('State', as_index=False)
    state_list = state_group.groups.keys()

    # Loop over each condition, fit a model, and save it

    for Region in region_list:
        for state in state_list:
            for sku in sku_list:
                print('Current filtering condition :', Region, state, sku)

                # load the conditional data for each condition

                conditional_df = load_conditional_data(Region, state, sku, df)

                data = conditional_df.Sales.reset_index(drop=True)
                # Renamed to avoid shadowing the enclosing train() function
                train_data, test_data = data[:330], data[330:365]

                # Fit model with some validation (cv) samples
                arima = evaluate_models(train_data)

                # Save model to disk
                result_path = app_settings['result_path']
                model_file_name = Region + state + sku + ".Pickle"
                model_file_path = path.join(result_path, model_file_name)
                save_model_to_disk(arima, model_file_path)



    print(
        '____________________________________________________________________________________________'
    )
    print('Training completed')
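Example #3 relies on `load_conditional_data` and `save_model_to_disk`, which are not shown on this page. Plausible sketches follow; the bodies are assumptions inferred from the call sites (note that example #4 calls `save_model_to_disk` with its arguments in the opposite order, so the two snippets likely come from different revisions):

import pickle

def load_conditional_data(region, state, sku, df):
    # Assumed behaviour: reduce the frame to one Region/State/SKU slice
    return df[(df['Region'] == region)
              & (df['State'] == state)
              & (df['Product_SKU'] == sku)]

def save_model_to_disk(model, model_file_path):
    # Assumed behaviour: pickle the fitted model to the given path
    with open(model_file_path, 'wb') as f:
        pickle.dump(model, f)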
Code example #4
File: samplee.py Project: Ashokswarna/Forecast
def train(filename):
    """
        Trains ARIMA model post least MSE per sku & selects the best model and saves it
    :return: None
    """

    app_settings = read_config()
    data_path = app_settings['data_path']
    file_path = path.join(data_path, filename)
    print(file_path)

    # Indices into each (start, end) period tuple
    begin = 0
    end = 1

    df = pd.read_excel(file_path)

    # Columns: Sku, Week, Sales

    sku_group = df.groupby('Product SKU', as_index=False)
    sku_list = sku_group.groups.keys()

    region_group = df.groupby('Region', as_index=False)
    region_list = region_group.groups.keys()

    state_group = df.groupby('State', as_index=False)
    state_list = state_group.groups.keys()

    sku_best_model = []
    for Region in region_list:
        for state in state_list:
            for sku in sku_list:
                print()
                print(sku)

                # Select SKU to train & validate model
                df_sku = df[df['Product SKU'].isin([sku])
                            & df['State'].isin([state])
                            & df['Region'].isin([Region])]
                period_index = 0
                best_period_models = []

                for tp in train_period:
                    print()

                    # Select SKU data from beginning to end of train period
                    df_train_period = df_sku[
                        (df_sku['Order_date'] >= tp[begin])
                        & (df_sku['Order_date'] <= tp[end])]

                    # Select SKU data for the in-time validation period
                    df_validation_period = df_sku[
                        (df_sku['Order_date'] >= validation_period[period_index][begin])
                        & (df_sku['Order_date'] <= validation_period[period_index][end])]

                    # Select SKU data for the period used to score MSE
                    df_mse_period = df_sku[
                        (df_sku['Order_date'] >= mse_period[period_index][begin])
                        & (df_sku['Order_date'] <= mse_period[period_index][end])]

                    print('%d train samples for %d period.' %
                          (len(df_train_period), (period_index + 1)))
                    print('%d validation samples for %d period.' %
                          (len(df_validation_period), (period_index + 1)))
                    print('%d mse samples for %d period.' %
                          (len(df_mse_period), (period_index + 1)))

                    # Select sales data for training & validation
                    train_sales = df_train_period['Sales'].reset_index(
                        drop=True)
                    validation_sales = df_validation_period[
                        'Sales'].reset_index(drop=True)
                    mse_sales = df_mse_period['Sales'].reset_index(drop=True)

                    train_valid_set = (train_sales, validation_sales,
                                       mse_sales)

                    # Evaluate best model of selected train period
                    print('Reference:', p_range, d_range, q_range)
                    (best_score, best_cfg, best_params, best_residuals,
                     best_p, best_q, best_k_exog, best_k_trend,
                     best_intercept, y_predict_log) = evaluate_models(
                         train_valid_set, p_range, d_range, q_range)

                    best_period_model = {
                        'best_cfg': best_cfg,
                        'mse': best_score,
                        'sku': sku,
                        'week': (period_index + 1),
                        'residuals': best_residuals,
                        'p': best_p,
                        'q': best_q,
                        'k_exog': best_k_exog,
                        'k_trend': best_k_trend,
                        'State': state,
                        'Region': Region,
                        'params': best_params,
                        'intercept': best_intercept
                    }
                    best_period_models.append(best_period_model)
                    period_index += 1

                # Select best model in entire period
                best_model = find_best_model(best_period_models)

                # Add to best models list
                sku_best_model.append(best_model)
    print(
        '____________________________________________________________________________________________'
    )

    # Save model to disk
    model_path = app_settings['model_path']

    model_file_name = 'demand_forecast.pickle'

    model_file_path = path.join(model_path, model_file_name)
    save_model_to_disk(model_file_path, sku_best_model)

    print('Training completed')
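`find_best_model` is also not shown on this page. Since every entry in `best_period_models` carries an 'mse' key, a plausible sketch is a minimum-MSE selection; the body is an assumption:

def find_best_model(period_models):
    # Assumed behaviour: return the period model with the lowest MSE;
    # each dict in period_models carries an 'mse' key (see above).
    return min(period_models, key=lambda m: m['mse'])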
Code example #5
"""
Created on 3 Mar 2018
Project: forecastlibrary
File: forecast_service
Author: prasenjit.giri
Copyright: Accenture AI
"""

from flask import Flask, request, jsonify
from flask_cors import CORS, cross_origin
from forecast import forecast_oot
from app_settings import read_config
from forecast_service_helper import parse_json

app = Flask(__name__)
CORS(app)


@app.route('/df/v1/forecast', methods=['POST'])
@cross_origin()
def forecast():
    json_string = request.get_json()
    req_dict = parse_json(json_string)
    result = forecast_oot(req_dict)
    return jsonify(result)


if __name__ == '__main__':
    app_settings = read_config()
    port = int(app_settings['port'])
    app.run(port=port, debug=True)
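A hypothetical client call against the service above, assuming it is running locally on the configured port (5000 here) and that `parse_json` accepts a flat JSON body; the payload fields are made up for illustration:

# Illustrative client; the payload schema expected by parse_json is not
# shown on this page, so these fields are assumptions.
import requests

payload = {'sku': 'SKU001', 'market': 'UK', 'category': 'Snacks'}
response = requests.post('http://localhost:5000/df/v1/forecast', json=payload)
print(response.json())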