def demand_var(info_rv):
    """Return the variance of the demand induced by an information RV.

    Each Dirac atom (a, f) of ``info_rv`` contributes a Poisson(a)
    component with mixture weight f.  The mixed piecewise pdf is rebuilt
    as a pacal DiscreteDistr, whose variance is returned.
    """
    # Accumulate the weighted Poisson pdfs atom by atom (same as sum()).
    mixture = 0
    for atom in info_rv.get_piecewise_pdf().getDiracs():
        mixture = mixture + atom.f * pacal.PoissonDistr(atom.a).get_piecewise_pdf()
    atoms = mixture.getDiracs()
    values = [atom.a for atom in atoms]
    weights = [atom.f for atom in atoms]
    return pacal.DiscreteDistr(values, weights).var()
def test_case_var(info_states):
    """Return the demand variance when every info state is equally likely.

    Mixes Poisson(s) for each s in ``info_states`` with uniform weight
    1/len(info_states), rebuilds the mixture as a DiscreteDistr and
    returns its variance.
    """
    uniform_weight = 1 / len(info_states)
    mixed_pdf = sum(
        uniform_weight * pacal.PoissonDistr(state).get_piecewise_pdf()
        for state in info_states
    )
    atoms = mixed_pdf.getDiracs()
    demand_rv = pacal.DiscreteDistr([a.a for a in atoms], [a.f for a in atoms])
    return demand_rv.var()
# Example #3
# 0
def geometric_rv(p):
    """Return a truncated geometric distribution with success prob ``p``.

    The support 0..limit is chosen so that the dropped tail mass
    (1-p)**(limit+1) falls below 1e-3; the remaining probabilities are
    renormalized to sum to 1.
    """
    tail_eps = 1e-3
    # Grow the support until the remaining tail is below the threshold.
    limit = 0
    while (1 - p) ** (limit + 1) > tail_eps:
        limit += 1
    limit += 1
    support = list(range(limit))
    # Unnormalized geometric pmf over the truncated support.
    raw = [p * (1 - p) ** k for k in support]
    scale = 1 / sum(raw)
    normalized = [mass * scale for mass in raw]
    return pacal.DiscreteDistr(support, normalized)
# Example #4
# 0
from scm_optimization.model import ModelConfig, run_configs, PoissonUsageModel, BinomUsageModel, DeterministUsageModel
import pacal
from decimal import *


# Two-point information-state RVs: rv_a_b puts probability 0.5 on a
# and 0.5 on b (degenerate RVs put all mass on one point).
# Mean-5 family, increasing spread:
rv_0 = pacal.ConstDistr(0)  # degenerate: always 0 (no information)
rv_5_5 = pacal.DiscreteDistr([5], [1])  # degenerate: always 5
rv_3_7 = pacal.DiscreteDistr([3, 7], [0.5, 0.5])
rv_1_9 = pacal.DiscreteDistr([1, 9], [0.5, 0.5])
rv_0_10 = pacal.DiscreteDistr([0, 10], [0.5, 0.5])

# Mean-10 family, increasing spread:
rv_10_10 = pacal.DiscreteDistr([10], [1])  # degenerate: always 10
rv_8_12 = pacal.DiscreteDistr([8, 12], [0.5, 0.5])
rv_7_13 = pacal.DiscreteDistr([7, 13], [0.5, 0.5])
rv_6_14 = pacal.DiscreteDistr([6, 14], [0.5, 0.5])
rv_5_15 = pacal.DiscreteDistr([5, 15], [0.5, 0.5])
rv_4_16 = pacal.DiscreteDistr([4, 16], [0.5, 0.5])
rv_2_18 = pacal.DiscreteDistr([2, 18], [0.5, 0.5])
rv_0_20 = pacal.DiscreteDistr([0, 20], [0.5, 0.5])
rv_8_16 = pacal.DiscreteDistr([8, 16], [0.5, 0.5])  # mean 12 — outside the mean-10 family


# Small two-point RVs: 0 or k with equal probability.
rv_0_1 = pacal.DiscreteDistr([0, 1], [0.5, 0.5])
rv_0_2 = pacal.DiscreteDistr([0, 2], [0.5, 0.5])
rv_0_3 = pacal.DiscreteDistr([0, 3], [0.5, 0.5])


def geometric_rv(p):
    trunk = 1e-3
    limit = 0
    while (1-p)**(limit+1) > trunk:
# Example #5
# 0
from scm_optimization.model import StationaryOptModel
import plotly
import pacal
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot


# Parameters for a small StationaryOptModel example.
gamma = 0.9  # single-period discount factor
lead_time = 0  # periods until an order arrives
horizon = 2  # information horizon
info_state_rvs = [pacal.ConstDistr(0),
                  pacal.ConstDistr(0),
                  pacal.DiscreteDistr([1, 2, 3, 4], [0.25, 0.25, 0.25, 0.25])]

# NOTE(review): this immediately overwrites the list above — the first
# info_state_rvs assignment is dead code. Confirm which ordering
# (information at the start vs. end of the horizon) was intended.
info_state_rvs = [pacal.DiscreteDistr([1, 2, 3, 4], [0.25, 0.25, 0.25, 0.25]),
                  pacal.ConstDistr(0),
                  pacal.ConstDistr(0)]

# Cost parameters.
holding_cost = 1
backlogging_cost = 10
setup_cost = 5
unit_price = 0  # zero per-unit price: ordering cost is the setup cost only

model = StationaryOptModel(gamma,
                           lead_time,
                           horizon,
                           info_state_rvs,
                           holding_cost,
                           backlogging_cost,
                           setup_cost,
                           unit_price)
# Example #6
# 0
    def __init__(self,
                 gamma,
                 lead_time,
                 info_state_rvs,
                 holding_cost,
                 backlogging_cost,
                 setup_cost,
                 unit_price,
                 usage_model=None,
                 increments=1,
                 detailed=False):
        """Initialize the model, its caches, and the pre-mixed demand RVs.

        Args:
            gamma: single-period discount factor.
            lead_time: lead time for items to arrive, >= 0.
            info_state_rvs: list of pacal RVs describing the transition of
                the advanced-information state (M_{t, s} in the notation).
            holding_cost: per-unit holding cost (h).
            backlogging_cost: per-unit backlogging cost (b).
            setup_cost: fixed ordering cost (k).
            unit_price: per-unit purchase price (c).
            usage_model: maps an info value to a usage RV; defaults to
                PoissonUsageModel(scale=1) when None.
            increments: order-quantity granularity.
            detailed: enables detailed bookkeeping when True.
        """
        # parameters in order:
        # single period discount factor
        # lead time for items to arrive, >= 0
        # information horizon N >= 0, N = 0 for no advanced information
        # vector of random variables, transition of the state of advanced information, M_{t, s} in notation
        self.detailed = detailed
        self.gamma = gamma
        self.lead_time = lead_time
        self.info_state_rvs = info_state_rvs
        self.increments = increments

        # usage_model = lambda o: pacal.BinomialDistr(o, p=0.5)
        # usage_model = lambda o: pacal.ConstDistr(o)
        # usage_model = usage_model=pacal.PoissonDistr
        default_usage_model = PoissonUsageModel(scale=1)
        self.usage_model = usage_model if usage_model else default_usage_model

        # Cost parameters (h/b/k/c naming follows the model notation).
        self.h = holding_cost
        self.b = backlogging_cost
        self.k = setup_cost
        self.c = unit_price

        # static list of possible info states
        self.info_states_cache = None
        self.info_states_prob_cache = {}

        # all caches
        self.value_function_j = {}
        self.j_h = {}
        self.j_b = {}
        self.j_k = {}
        self.j_p = {}

        self.value_function_v = {}
        self.v_h = {}
        self.v_b = {}
        self.v_k = {}
        self.v_p = {}

        self.value_function_v_argmin = {}
        self.base_stock_level_cache = {}
        self.current_demand_cache = {}

        self.reward_funcion_g_cache = {}
        self.g_h = {}
        self.g_b = {}
        self.g_p = {}

        ### Apppend Const(0) to info_state_rvs if leadtime > info_horizon
        if len(self.info_state_rvs) < self.lead_time + 1:
            diff = self.lead_time - len(self.info_state_rvs) + 1
            self.info_state_rvs = self.info_state_rvs + diff * [
                pacal.ConstDistr(0)
            ]

        # Total information still unknown over the lead-time window:
        # info_state_rvs[i] is summed once per period j <= lead_time with i <= j.
        unknown_lt_info = sum(self.info_state_rvs[i]
                              for j in range(self.lead_time + 1)
                              for i in range(j + 1))
        # Degenerate case: the summed info RV collapses to a single atom.
        if len(unknown_lt_info.get_piecewise_pdf().getDiracs()) == 1:
            unknown_lt_info = unknown_lt_info.get_piecewise_pdf().getDiracs(
            )[0].a
            if unknown_lt_info:
                self.unknown_lt_demand_rv = self.usage_model.usage(
                    unknown_lt_info)
            else:
                # Zero information implies zero lead-time demand.
                self.unknown_lt_demand_rv = 0
        else:
            # Mixture of usage RVs weighted by the info RV's atom masses.
            unknown_lt_demand_pdf = sum([
                dirac.f * self.usage_model.usage(dirac.a).get_piecewise_pdf()
                for dirac in unknown_lt_info.get_piecewise_pdf().getDiracs()
            ])
            self.unknown_lt_demand_rv = pacal.DiscreteDistr(
                [dirac.a for dirac in unknown_lt_demand_pdf.getDiracs()],
                [dirac.f for dirac in unknown_lt_demand_pdf.getDiracs()])

        # Same construction for a single period's unknown information.
        unknown_info = self.info_state_rvs[0]
        if len(unknown_info.get_piecewise_pdf().getDiracs()) == 1:
            val = self.info_state_rvs[0].get_piecewise_pdf().getDiracs()[0].a
            if val:
                self.unknown_demand_rv = self.usage_model.usage(val)
            else:
                self.unknown_demand_rv = 0
        else:
            unknown_demand_pdf = sum([
                dirac.f * self.usage_model.usage(dirac.a).get_piecewise_pdf()
                for dirac in unknown_info.get_piecewise_pdf().getDiracs()
            ])
            self.unknown_demand_rv = pacal.DiscreteDistr(
                [dirac.a for dirac in unknown_demand_pdf.getDiracs()],
                [dirac.f for dirac in unknown_demand_pdf.getDiracs()])
        # NOTE(review): presumably warms info_states_cache — confirm in class body.
        self.info_states()
def boostrap_info_process(item_id="38242"):
    """Build and pickle elective/emergency information-state RVs for an item.

    NOTE(review): the name likely means ``bootstrap_info_process``; kept
    as-is for compatibility with existing callers.

    Pipeline (Cardiac Surgery case service):
      1. Fit a binomial model of weekday elective surgeries-per-day and a
         Poisson mean for emergency surgeries-per-day from empirical data.
      2. Build a procedure-count RV and procedure-mix frequencies, then
         generate 1000 synthetic surgeries from them.
      3. Score empirical and synthetic surgeries with the item's usage
         regression (``regression_results/<item_id>``) to get a per-surgery
         "expected usage" info value.
      4. Resample per-day elective and emergency info RVs from those
         per-surgery values, truncate their tails at ``eps_trunk``, pickle
         them under scm_implementation/ns_info_state_rvs/, and emit Plotly
         histograms.

    Returns:
        (emergency_trace, weekday_elective_trace): the two go.Histogram
        traces for the per-day info RVs.
    """
    case_service = "Cardiac Surgery"
    #item_id = "3824ns_info_state_rvs2"
    info_granularity = 1  # bin width used when rounding info samples
    eps_trunk = 1e-3  # tail mass below which the info RVs are truncated

    elective_outdir = "scm_implementation/ns_info_state_rvs/elective"
    emergency_outdir = "scm_implementation/ns_info_state_rvs/emergency"

    analytics = ScmAnalytics.ScmAnalytics(lhs_config)

    # Filter definitions reused across the queries below.
    filters = [{
        "dim": "case_service",
        "op": "eq",
        "val": case_service
    }, {
        "dim": "urgent_elective",
        "op": "eq",
        "val": "Elective"
    }]
    elective_filter = [{
        "dim": "urgent_elective",
        "op": "eq",
        "val": "Elective"
    }]
    emergency_filter = [{
        "dim": "urgent_elective",
        "op": "eq",
        "val": "Urgent"
    }]
    case_service_filter = [{
        "dim": "case_service",
        "op": "eq",
        "val": case_service
    }]

    # --- Elective surgeries/day: fit a Binomial(n, p) on weekday counts.
    surgery_df = pre_process_columns(analytics.surgery_df)
    surgery_df = surgery_df[surgery_df["start_date"].notna()]
    surgery_df = surgery_df[
        surgery_df["start_date"] > datetime.date(2016, 1, 1)]
    surgery_df = Analytics.process_filters(surgery_df,
                                           filters=elective_filter +
                                           case_service_filter)
    dist_df = surgeries_per_day_distribution(surgery_df,
                                             day_group_by="is_weekday",
                                             filters=[])
    data = dist_df.set_index("is_weekday").loc[True]["data"]
    bins = range(1 + int(max(data)))
    binom_x = [x + 0.5 for x in bins]
    n = int(max(data))  # binomial n: the largest observed daily count
    p = np.mean(data) / n  # binomial p matched to the empirical mean

    # --- Emergency surgeries/day: only the mean is needed (Poisson rate).
    surgery_df = pre_process_columns(analytics.surgery_df)
    surgery_df = surgery_df[surgery_df["start_date"].notna()]
    surgery_df = surgery_df[
        surgery_df["start_date"] > datetime.date(2016, 1, 1)]
    surgery_df = Analytics.process_filters(surgery_df,
                                           filters=emergency_filter +
                                           case_service_filter)
    dist_df = surgeries_per_day_distribution(surgery_df, filters=[])
    emergency_surgeries_mean = np.mean(dist_df)

    # --- Procedure count RV from the empirical distribution of
    # procedures-per-surgery (the count-6 outlier row is dropped).
    surgery_df = Analytics.process_filters(analytics.surgery_df,
                                           filters=case_service_filter)
    surgery_df["procedure_count"] = surgery_df["procedures"].apply(
        lambda x: len(x))
    procedure_count_df = surgery_df.groupby("procedure_count").agg({
        "event_id":
        "count"
    }).reset_index()
    procedure_count_df = procedure_count_df[
        procedure_count_df["procedure_count"] != 6]
    procedure_count_df["p"] = procedure_count_df["procedure_count"] / sum(
        procedure_count_df["procedure_count"])
    procedure_count_rv = pacal.DiscreteDistr(
        procedure_count_df["procedure_count"], procedure_count_df["p"])
    """
    Procedure weights
    """
    usage_events = set(analytics.usage_df["event_id"])
    surgery_df = analytics.surgery_df[analytics.surgery_df["event_id"].isin(
        usage_events)]
    surgery_df = Analytics.process_filters(surgery_df,
                                           filters=case_service_filter)
    surgery_df["procedures"] = surgery_df["procedures"].apply(
        lambda x: set(e.replace(" ", "_") for e in x))
    procedures = surgery_df["procedures"].apply(lambda x: list(x)).to_list()
    procedures = pd \
        .DataFrame({"procedure": [val for sublist in procedures for val in sublist],
                    "count": [1 for sublist in procedures for val in sublist]}) \
        .groupby("procedure") \
        .agg({"count": "count"}) \
        .reset_index()

    procedures["p"] = procedures["count"] / sum(procedures["count"])

    # Draw `size` distinct procedures weighted by empirical frequency.
    def procedure_pick_rv(size):
        return np.random.choice(procedures["procedure"],
                                p=procedures["p"],
                                replace=False,
                                size=size)

    # --- Generate 1000 synthetic surgeries and one-hot encode procedures.
    synthetic_surgeries = pd.DataFrame({"event_id": list(range(1000))})
    synthetic_surgeries["procedure_count"] = procedure_count_rv.rand(1000)
    synthetic_surgeries["procedures"] = synthetic_surgeries[
        "procedure_count"].apply(lambda x: procedure_pick_rv(x))

    synthetic_procedure_df = pd.concat(
        [pd.Series(row['event_id'], row['procedures']) for _, row in synthetic_surgeries.iterrows()]) \
        .reset_index() \
        .rename(columns={"index": "procedure",
                         0: "event_id"}
                )
    synthetic_procedure_df["flag"] = 1
    synthetic_surgeries_df = synthetic_procedure_df \
        .pivot(index="event_id", columns="procedure", values="flag") \
        .fillna(0) \
        .reset_index()

    # --- Load the item's regression features; ensure every featured
    # procedure column exists in the synthetic one-hot frame.
    feature_df = pd.read_csv(os.path.join("regression_results", item_id))
    features = feature_df["feature"]
    featured_procedures = list(
        filter(lambda x: "." not in x, feature_df["feature"]))
    if "other" in featured_procedures:
        featured_procedures.remove("other")
    for fp in featured_procedures:
        if fp not in synthetic_surgeries_df:
            print(procedures.set_index("procedure").loc[fp])
            synthetic_surgeries_df[fp] = 0

    all_procedures = set.union(*surgery_df["procedures"])

    # Interaction features are encoded as dot-joined procedure names.
    interactions = list(filter(lambda x: "." in x, feature_df["feature"]))
    interactions = list(Interaction(i.split(".")) for i in interactions)
    data, _ = SURegressionModel.extract_features_data(synthetic_surgeries_df,
                                                      featured_procedures, [],
                                                      interactions,
                                                      other=True)

    for f in feature_df["feature"]:
        if f not in data:
            print(f)
            data[f] = 0
    # Expected usage = exp(feature_vector . coefficients) per surgery.
    synthetic_surgeries_df["feature_vector"] = data[features].values.tolist()
    coeff = np.array(feature_df["estimate"])
    synthetic_surgeries_df["expected_usage"] = synthetic_surgeries_df["feature_vector"] \
        .apply(lambda x: np.exp(np.dot(x, coeff)))
    """
    Information rv for empirical surgeries
    """
    surgery_df = surgery_df.drop_duplicates("event_id", keep="last")
    empirical_procedure_df = pd.concat(
        [pd.Series(row['event_id'], row['procedures']) for _, row in surgery_df.iterrows()]) \
        .reset_index() \
        .rename(columns={"index": "procedure",
                         0: "event_id"}
                )
    empirical_procedure_df["flag"] = 1
    empirical_surgeries_df = empirical_procedure_df \
        .pivot(index="event_id", columns="procedure", values="flag") \
        .fillna(0) \
        .reset_index()
    data, _ = SURegressionModel.extract_features_data(empirical_surgeries_df,
                                                      featured_procedures, [],
                                                      interactions,
                                                      other=True)
    empirical_surgeries_df["feature_vector"] = data[features].values.tolist()
    empirical_surgeries_df["expected_usage"] = empirical_surgeries_df["feature_vector"] \
        .apply(lambda x: np.exp(np.dot(x, coeff)))
    """
    Plotly histogram for per surgery info rv, empirical surgeries and synthetic using regression results 
    """
    s = 0
    e = int(
        max(max(empirical_surgeries_df["expected_usage"]),
            max(synthetic_surgeries_df["expected_usage"])) + 1)
    empirical_trace = go.Histogram(
        x=empirical_surgeries_df["expected_usage"],
        name='Empirical Surgery Info RV (mean={:0.2f})'.format(
            np.mean(empirical_surgeries_df["expected_usage"])),
        xbins=dict(start=s, end=e, size=info_granularity),
        histnorm='probability density',
        opacity=0.75)
    synthetic_trace = go.Histogram(
        x=synthetic_surgeries_df["expected_usage"],
        name='Synthetic Surgery Info RV (mean={:0.2f})'.format(
            np.mean(synthetic_surgeries_df["expected_usage"])),
        xbins=dict(start=s, end=e, size=info_granularity),
        histnorm='probability density',
        opacity=0.75)
    layout = go.Layout(title="Per Surgery Info R.V Item: {0}".format(item_id),
                       xaxis={'title': 'Info [Expected Usage]'},
                       yaxis={'title': 'Probability Density'})
    figure = go.Figure(data=[empirical_trace, synthetic_trace], layout=layout)
    plot(figure, filename="{0}_Per_Surgery_Info_Rv.html".format(item_id))
    """
    Plotly histogram for per weekday elective surgery RV
    """
    empirical_rv_df = empirical_surgeries_df.groupby(["expected_usage"]) \
        .agg({"event_id": "count"}) \
        .rename(columns={"event_id": "count"}) \
        .reset_index()
    empirical_rv_df["p"] = empirical_rv_df["count"] / sum(
        empirical_rv_df["count"])
    emp_surgery_rv = pacal.DiscreteDistr(empirical_rv_df["expected_usage"],
                                         empirical_rv_df["p"])
    # NOTE(review): surgery_demand_rv is never used below — confirm intent.
    surgery_demand_rv = pacal.BinomialDistr(n, p)
    # Monte-Carlo: per-day info = sum of per-surgery info values, with the
    # surgeries-per-day count drawn Binomial(n, p) for electives.
    days = 100000
    elective_samples = [
        sum(emp_surgery_rv.rand(x)) for x in np.random.binomial(n, p, days)
    ]
    elective_samples = [
        round(sample / info_granularity) * info_granularity
        for sample in elective_samples
    ]
    weekday_elective_trace = go.Histogram(
        x=elective_samples,
        name='{} Elective Info RV (mean={:0.2f})'.format(
            item_id, np.mean(elective_samples)),
        xbins=dict(start=0, end=max(elective_samples), size=info_granularity),
        histnorm='probability',
        opacity=0.75)
    """
    Plotly histogram for per day emergency surgery RV
    """
    # Emergency surgeries-per-day drawn Poisson(emergency_surgeries_mean).
    emergency_samples = [
        sum(emp_surgery_rv.rand(x))
        for x in np.random.poisson(emergency_surgeries_mean, days)
    ]
    emergency_samples = [
        round(sample / info_granularity) * info_granularity
        for sample in emergency_samples
    ]
    emergency_trace = go.Histogram(
        x=emergency_samples,
        name='{} Emergency Info RV (mean={:0.2f})'.format(
            item_id, np.mean(emergency_samples)),
        xbins=dict(start=0, end=max(emergency_samples), size=info_granularity),
        histnorm='probability',
        opacity=0.75)
    layout = go.Layout(
        title="Weekday Elective Info R.V Item: {0}".format(item_id),
        xaxis={'title': 'Info State (Poisson Usage)]'},
        yaxis={'title': 'Probability'})
    figure = go.Figure(data=[weekday_elective_trace, emergency_trace],
                       layout=layout)
    plot(figure, filename="{0}_Weekday_Elective_Info_Rv.html".format(item_id))

    # --- Convert the rounded samples into discrete info RVs.
    elective_info_df = pd.DataFrame({"info": elective_samples, "count": [1] * len(elective_samples)}) \
        .groupby(["info"]) \
        .agg({"count": "count"}) \
        .reset_index()
    elective_info_df["p"] = elective_info_df["count"] / sum(
        elective_info_df["count"])
    elective_info_rv = pacal.DiscreteDistr(elective_info_df["info"],
                                           elective_info_df["p"])

    emergency_info_df = pd.DataFrame({"info": emergency_samples, "count": [1] * len(emergency_samples)}) \
        .groupby(["info"]) \
        .agg({"count": "count"}) \
        .reset_index()
    emergency_info_df["p"] = emergency_info_df["count"] / sum(
        emergency_info_df["count"])
    emergency_info_rv = pacal.DiscreteDistr(emergency_info_df["info"],
                                            emergency_info_df["p"])

    # --- Truncate each RV at the first atom whose upper tail < eps_trunk,
    # condition on the truncated support, and drop zero-mass atoms.
    max_v = 999
    for d in elective_info_rv.get_piecewise_pdf().getDiracs():
        if 1 - elective_info_rv.cdf(d.a) < eps_trunk:
            max_v = d.a
            break
    diracs = (pacal.CondLtDistr(elective_info_rv, max_v)) \
        .get_piecewise_pdf().getDiracs()
    diracs = list(filter(lambda d: d.f > 0, diracs))
    elective_info_rv = pacal.DiscreteDistr([d.a for d in diracs],
                                           [d.f for d in diracs])

    max_v = 999
    for d in emergency_info_rv.get_piecewise_pdf().getDiracs():
        if 1 - emergency_info_rv.cdf(d.a) < eps_trunk:
            max_v = d.a
            break
    diracs = (pacal.CondLtDistr(emergency_info_rv, max_v)) \
        .get_piecewise_pdf().getDiracs()
    diracs = list(filter(lambda d: d.f > 0, diracs))
    emergency_info_rv = pacal.DiscreteDistr([d.a for d in diracs],
                                            [d.f for d in diracs])

    # --- Persist both RVs for downstream model runs.
    with open(os.path.join(elective_outdir, "{0}.pickle".format(item_id)),
              "wb") as f:
        pickle.dump(elective_info_rv, f)

    with open(os.path.join(emergency_outdir, "{0}.pickle".format(item_id)),
              "wb") as f:
        pickle.dump(emergency_info_rv, f)

    return emergency_trace, weekday_elective_trace
from scm_optimization.model import StationaryOptModel
import plotly
import pacal
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import itertools
import pandas as pd

# Sweep limits for the experiment grid.
inv_pos_max = 30  # largest inventory position evaluated
t_max = 15  # number of time periods
n_max = 5  # maximum information horizon

# Two-point info-state RVs (mean 5, increasing spread) plus a
# uniform RV on {0..9}.
rv_0 = pacal.ConstDistr(0)
rv_5_5 = pacal.DiscreteDistr([5], [1])
rv_3_7 = pacal.DiscreteDistr([3, 7], [0.5, 0.5])
rv_1_9 = pacal.DiscreteDistr([1, 9], [0.5, 0.5])
rv_0_10 = pacal.DiscreteDistr([0, 10], [0.5, 0.5])
rv_uni_0_9 = pacal.DiscreteDistr([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [1 / 10] * 10)

# Named test cases keyed by a human-readable description of E[Demand].
test_cases = {
    "E[Demand] 5": rv_5_5,
    "E[Demand] 3, 7": rv_3_7,
    "E[Demand] 1, 9": rv_1_9,
    "E[Demand] 0, 10": rv_0_10
}

# Shared cost / discount parameters for the test cases.
gamma = 0.9  # single-period discount factor
lead_time = 0
holding_cost = 1
backlogging_cost = 10
setup_cost = 5
from scm_optimization.model import ModelConfig, run_configs, BinomUsageModel, DeterministUsageModel, get_model
import pacal
from numpy import mean
from decimal import *
import copy

# Two-point 0/1 information-state RV used as the single test RV.
rv_0_1 = pacal.DiscreteDistr([0, 1], [0.5, 0.5])
rvs = [rv_0_1]

configs = []  # NOTE(review): presumably filled by code below this chunk
i = 0  # running config counter

# Scalar defaults for the cost / usage / information parameters.
h = 50  # holding cost
b = 50  # backlogging cost
usage_n = 1
usage_p = 0.6
info_n = 1
info_p = 0.5

# Starting parameter point for the neighbourhood search (see
# get_neighbours below); note usage_n here differs from the scalar above.
params = {"h": 50,
          "b": 50,
          "usage_n": 3,
          "usage_p": 0.6}


def get_neighbours(params):
    int_params = {"h", "b", "usage_n"}
    prob_params = {"usage_p"}
    neighbours = []
    for param in params:
        neighbour = copy.copy(params)