Code Example #1
import plotly.express as px
import streamlit as st

def main():
    df = load_data('src/data/data.csv')  # project helper; sketch follows this example
    with st.beta_container():
        st.subheader('Gender Distribution')
        # px.sunburst takes `path`/`values`; the `parent`/`value` keyword
        # arguments in the original do not exist in the Plotly Express API.
        figure = px.sunburst(df,
                             path=['Membership Type', 'Gender'],
                             values='Lifetime Days')
        st.plotly_chart(figure_or_data=figure)
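This and the other Streamlit dashboards below (Code Examples #2 and #5) call a project-local load_data helper that the listing does not include. A minimal sketch, assuming it simply wraps pandas.read_csv behind Streamlit's cache:

import pandas as pd
import streamlit as st

@st.cache  # st.cache_data in newer Streamlit releases
def load_data(path: str) -> pd.DataFrame:
    # Sketch only; the real helper may also clean or type-convert columns.
    return pd.read_csv(path)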
Code Example #2
File: data.py, Project: felixodette/DataAnalyst
import streamlit as st
from PIL import Image

def main():
    img = Image.open('assets/logo.png')
    st.image(img)
    st.title('Data Snapshot')
    load_state = st.text('Loading data...')
    try:
        df = load_data('src/data/data.csv')  # project helper; see Code Example #1
    except FileNotFoundError:
        # Retrying in a loop cannot succeed while the file is missing; fail fast.
        st.error('File not found: src/data/data.csv')
        st.stop()
    load_state.text('Loading data... done!')

    # if st.checkbox('Member population distribution map', value=True):
    #     with st.beta_container():
    #         with st.spinner('Rendering chart...'):
    #             world_map = folium.Map(tiles="OpenStreetMap", zoom_start=2)
    #             marker_cluster = MarkerCluster().add_to(world_map)
    #             # for each coordinate, create a CircleMarker for the member
    #             data = df[['Latitude', 'Longitude', 'Country', 'Gender']].dropna()
    #             for i in range(len(data)):
    #                 lat = data.iloc[i]['Latitude']
    #                 long = data.iloc[i]['Longitude']
    #                 radius = 5
    #                 popup_text = (f"Country : {data.iloc[i]['Country']}<br>"
    #                               f"Gender : {data.iloc[i]['Gender']}<br>")
    #                 folium.CircleMarker(location=[lat, long], radius=radius,
    #                                     popup=popup_text, fill=True).add_to(marker_cluster)
    #
    #             # render the Folium map in Streamlit
    #             folium_static(world_map)
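    # Re-enabling the block above also needs these imports at module level
    # (folium_static comes from the streamlit-folium package):
    #   import folium
    #   from folium.plugins import MarkerCluster
    #   from streamlit_folium import folium_static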
    if st.checkbox('Show raw data', value=True):
        st.subheader('Raw data')
        st.write(df)
    st.subheader('Quick facts')
    st.markdown(f'* The Room has members across {len(df["Continent"].unique())} continents, {len(df["Country"].unique())} countries, and {len(df["City"].unique())} cities around the world.')
    st.markdown(f'* There are {len(df["Customer Segment"].unique())} customer segments, {len(df["Membership Type"].unique())} membership types, and {len(df["Membership Level"].unique())} membership levels.')
    st.markdown(f'* Membership consists of {len(df[df["Gender"] == "male"])} males and {len(df[df["Gender"] == "female"])} females.')
Code Example #3
import plotly.io as pio
import streamlit as st
from datetime import datetime, timedelta

def main():
    st.title("Dashboard")
    pio.templates.default = "plotly_dark"
    # Daily reports lag behind the current date, so walk back one day at a
    # time until a published file is found.
    date = datetime.today()
    df = None
    while df is None:
        try:
            df = load_data(fetch_url(date))
        except Exception:
            date = date - timedelta(days=1)
    df = df[df["Country_Region"] == "US"]
    state = st.sidebar.selectbox(
        "Choose state", sorted(df["Province_State"].unique().tolist()))
    st.subheader(state)
    viz = st.selectbox("Choose visualization", ["Summary", "Top Cities"])
    if viz == "Summary":
        st.plotly_chart(state_summary(df, state))
    elif viz == "Top Cities":
        st.plotly_chart(plot_state(df, state))
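fetch_url is another project helper not shown here. The Country_Region and Province_State columns match the JHU CSSE COVID-19 daily reports, so a plausible sketch (the repository URL and file naming are assumptions) is:

from datetime import datetime

def fetch_url(date: datetime) -> str:
    # Assumed layout: daily reports named MM-DD-YYYY.csv in the CSSE repo.
    base = ('https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/'
            'csse_covid_19_data/csse_covid_19_daily_reports/')
    return base + date.strftime('%m-%d-%Y') + '.csv'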
Code Example #4
import time

import numpy as np
import torch

# get_reparam_multi_linear_model, load_data, generate_reference, Runner and
# the torch `device` are project-local names assumed to be in scope.
def main():
    # Settings
    state_dim = 140
    action_dim = 40
    state_order = 5
    action_order = 5
    alpha = 0.001  # workset smoothness penalty
    time_limit = 5  # seconds allowed per optimization step
    train_data_path = [
        'docs/new_data/expert/data_1.csv', 'docs/new_data/expert/data_2.csv'
    ]

    m = get_reparam_multi_linear_model(state_dim, action_dim, state_order,
                                       action_order)
    model_filename = 'model/Multistep_linear/model_reparam_55.pt'
    m.load_state_dict(torch.load(model_filename, map_location=device))
    m.eval()

    train_states, train_actions, info = load_data(paths=train_data_path,
                                                  scaling=True,
                                                  preprocess=True,
                                                  history_x=state_order,
                                                  history_u=action_order)
    scaler = (info['scale_min'].item(), info['scale_max'].item())

    print('Min. state scaler: {}, Max. state scaler: {}'.format(
        scaler[0], scaler[1]))
    print('Min. action scaler: {}, Max. action scaler: {}'.format(
        scaler[0], scaler[1]))

    optimizer_mode = 'LBFGS'

    initial_temp = 150.0
    heatup_times = [545]
    anneal_times = [363]
    target_temp = [375.0]
    target = generate_reference(initial_temp=initial_temp,
                                heatup_times=heatup_times,
                                anneal_times=anneal_times,
                                target_temps=target_temp)
    # [908 x 140]: one reference column per glass thermocouple
    target = torch.reshape(torch.tensor(target).to(device), shape=(-1, 1))
    target = target.repeat(repeats=(1, state_dim))
    target = (target - scaler[0]) / (scaler[1] - scaler[0])  # rescale to 0-1

    H = 50
    T = target.shape[0]

    # Placeholder histories; replace with real measurements.
    history_tc = np.ones((state_order, state_dim)) * 150  # 0-139: glass TC
    history_ws = np.ones((action_order - 1, action_dim)) * 150  # 0-39: workset
    runner = Runner(m=m,
                    optimizer_mode=optimizer_mode,
                    state_scaler=scaler,
                    action_scaler=scaler,
                    alpha=alpha,
                    timeout=time_limit)
    """x0 = torch.zeros((state_order, state_dim)).to(device)
    u0 = torch.zeros((action_order-1, action_dim)).to(device)
    us = torch.zeros((H, action_dim)).to(device)
    sample_predicted = m.multi_setp_prediction(x0, u0, us)
    print(sample_predicted.shape)"""

    for t in range(T - H):
        print("Now time [{}] / [{}]".format(t, T - H))
        start = time.time()
        # [1 x 40] torch.Tensor; apply this workset to the furnace
        workset = runner.solve(history_tc, history_ws, target[t:t + H, :])
        end = time.time()
        print('Time computation : {}'.format(end - start))
        with torch.no_grad():
            x0 = torch.from_numpy(history_tc).float().to(device)
            u0 = torch.from_numpy(history_ws).float().to(device)
            x0 = (x0 - scaler[0]) / (scaler[1] - scaler[0])
            u0 = (u0 - scaler[0]) / (scaler[1] - scaler[0])
            observed_tc = m.multi_step_prediction(x0, u0, workset)
            observed_tc = observed_tc.cpu().detach().numpy()  # [1 x 140]
            observed_tc = observed_tc * (scaler[1] - scaler[0]) + scaler[0]  # unscale
        workset = workset * (scaler[1] - scaler[0]) + scaler[0]
        workset = workset.cpu().detach().numpy()  # [1 x 40] numpy.array
        now_target = target[t, 0] * (scaler[1] - scaler[0]) + scaler[0]
        print('Target tc is {}'.format(now_target))
        print('Average tc is {}'.format(observed_tc.mean()))
        print('Average ws is {}'.format(workset.mean()))
        history_tc = np.concatenate([history_tc[1:, :], observed_tc], axis=0)
        history_ws = np.concatenate([history_ws[1:, :], workset], axis=0)
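generate_reference builds the setpoint trajectory that the controller tracks. The [908 x 140] reference shape (545 heat-up steps + 363 anneal steps) is consistent with a ramp-then-hold profile, so a minimal sketch under that assumption:

import numpy as np

def generate_reference(initial_temp, heatup_times, anneal_times, target_temps):
    # Ramp linearly from the previous temperature to each target over its
    # heat-up window, then hold the target for the anneal window.
    ref, prev = [], initial_temp
    for heat, anneal, tgt in zip(heatup_times, anneal_times, target_temps):
        ref.extend(np.linspace(prev, tgt, heat).tolist())  # heat-up ramp
        ref.extend([tgt] * anneal)                         # anneal hold
        prev = tgt
    return np.array(ref)  # length == sum(heatup_times) + sum(anneal_times)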
Code Example #5
import folium
import pandas as pd
import plotly.express as px
import plotly.io as pio
import streamlit as st
from folium.plugins import MarkerCluster
from streamlit_folium import folium_static

def main():
    pio.templates.default = 'plotly_dark'
    try:
        df = load_data('src/data/data.csv')  # project helper; see Code Example #1
    except FileNotFoundError:
        st.error('File not found: src/data/data.csv')
        st.stop()
    granularity = st.sidebar.selectbox("Granularity",
                                       ["Worldwide", "Continent", "Country"])
    if granularity == "Worldwide":
        viz = [
            'Global Member Distribution', 'Gender Distribution',
            'Membership Type', 'Membership Level'
        ]
        choice = st.sidebar.selectbox('Choose Visualization', viz)
        st.title("Member Distribution")
        if choice == 'Global Member Distribution':
            conti = pd.DataFrame(
                df.groupby('Continent')['Gender'].value_counts()).rename(
                    columns={'Gender': 'Numbers'})
            conti = conti.unstack(level=0)
            conti.columns = conti.columns.droplevel([0])
            conti = conti.rename_axis([None], axis=1).reset_index()
            with st.beta_container():
                with st.spinner('Rendering chart...'):
                    world_map = folium.Map(tiles="OpenStreetMap",
                                           zoom_start=2)  # valid zooms are ~0-18
                    marker_cluster = MarkerCluster().add_to(world_map)
                    data = df[['Latitude', 'Longitude', 'Country',
                               'Gender']].dropna()
                    for i in range(len(data)):
                        lat = data.iloc[i]['Latitude']
                        long = data.iloc[i]['Longitude']
                        radius = 5
                        # popup_text = f"Country : {data.iloc[i]['Country']}<br>%of Users : {}<br>"
                        # popup_text = popup_text.format(data.iloc[i]['Country'], data.iloc[i]['Gender'])
                        folium.CircleMarker(location=[lat, long],
                                            radius=radius,
                                            fill=True).add_to(marker_cluster)

                    # call to render Folium map in Streamlit
                    folium_static(world_map)
                if st.checkbox('Show raw data'):
                    st.write(conti)
                expander = st.beta_expander('Analysis')
                expander.write(
                    f'Africa has the highest membership rate at {round(((669+708)/1707)*100, 2)}%, followed by North America at {round(((71+95)/1707)*100, 2)}%, Europe at {round(((669+708)/1707)*100, 2)}%, Asia at {round((20/1707)*100, 2)}%, and finally Oceania at {round((2/1707)*100, 2)}%.'
                )
        elif choice == 'Gender Distribution':
            with st.beta_container():
                st.subheader('Worldwide Member Gender Distribution')
                data = cat_numbers(df, 'Gender')
                pie_chart(data, 'Gender', 'Global Member Gender Distribution')
                expander = st.beta_expander('Analysis')
                expander.write(
                    f"The Room's membership consists of 52.1% males and 47.9% females."
                )
        elif choice == 'Membership Type':
            mem = pd.DataFrame(
                df.groupby('Gender')['Membership Type'].value_counts()).rename(
                    columns={'Membership Type': 'Number'})
            mem = mem.unstack(level=1)
            mem.columns = mem.columns.droplevel([0])
            mem = mem.rename_axis([None], axis=1).reset_index()
            mem = mem.drop(columns='NONE', errors='ignore')
            with st.beta_container():
                fig = px.bar(mem,
                             x='Gender',
                             y=[
                                 'Founding Member', 'Free Trial',
                                 'Premium Paying', 'Staff Membership'
                             ])
                fig.update_layout(barmode='group')
                st.plotly_chart(figure_or_data=fig)
                expander = st.beta_expander('Analysis')
                expander.write(
                    'A majority of members, 55.5%, hold Free Trial accounts (53.4% of '
                    'females and 57.3% of males). 11.6% are on the Premium Paying plan, '
                    '20.3% are Founding Members, and 12.1% are on Staff Membership.')
        elif choice == 'Membership Level':
            mlevel = pd.DataFrame(
                df.groupby('Membership Level')['Gender'].value_counts()
            ).rename(columns={'Gender': 'Number'})
            mlevel = mlevel.unstack(level=1)
            mlevel.columns = mlevel.columns.droplevel([0])
            mlevel = mlevel.rename_axis([None], axis=1).reset_index()
            mlevel['male%'] = round(
                mlevel['male'] / (mlevel['male'] + mlevel['female']) * 100, 2)
            mlevel['female%'] = round(
                mlevel['female'] / (mlevel['male'] + mlevel['female']) * 100,
                2)
            with st.beta_container():
                fig = px.bar(mlevel,
                             x='Membership Level',
                             y=['female%', 'male%'])
                fig.update_layout(
                    title_text="Membership Level Distribution",
                    barmode="stack",
                    uniformtext=dict(mode="hide", minsize=10),
                )
                st.plotly_chart(figure_or_data=fig)
                expander = st.beta_expander('Analysis')
                expander.write(
                    f'A majority of the members, {round(363 / (363 + 242 + 200) * 100, 2)}%, '
                    'are Mid-career Leaders. However, gender distribution across the '
                    'membership levels is relatively even, with males leading by a small '
                    'margin:\n'
                    '* Young Leader: 50.98% male and 49.02% female\n'
                    '* Mid-career Leader: 51.21% male and 48.79% female\n'
                    '* Senior Leader: 53.99% male and 46.01% female')
    if granularity == "Country":
        country = st.sidebar.selectbox('country', df['Country'].unique())
        st.title(country)
        graph_type = st.selectbox("Choose visualization", [
            'Gender', 'Membership Type Distribution',
            'Membership Level Distribution'
        ])
        if graph_type == "Gender":
            st.subheader("Gender Distribution")
            with st.beta_container():
                data = cat_numbers(df[df['Country'] == country], 'Gender')
                st.write(data)
                pie_chart(data, 'Gender', 'Gender Distribution')
                expander = st.beta_expander(f'{country} Analysis')
                expander.write(
                    f'{country} has {round((data.loc[0]["Number"] / (data.loc[0]["Number"] + data.loc[1]["Number"]))*100,1)}% {data.loc[0]["Gender"]} and {round((data.loc[1]["Number"] / (data.loc[0]["Number"] + data.loc[1]["Number"]))*100,1)}% {data.loc[1]["Gender"]} membership.'
                )
                # exp_data(f'{country}', data)
        elif graph_type == "Membership Type Distribution":
            st.subheader("Membership Type Distribution")
            with st.beta_container():
                data = cat_numbers(df[df['Country'] == country],
                                   'Membership Type')
                pie_chart(data, 'Membership Type',
                          'Membership Type Distribution')
        elif graph_type == "Membership Level Distribution":
            st.subheader("Membership Level Distribution")
            with st.beta_container():
                data = cat_numbers(df[df['Country'] == country],
                                   'Membership Level')
                pie_chart(data, 'Membership Level',
                          'Membership Level Distribution')
    elif granularity == "Continent":
        continent = st.sidebar.selectbox('continent',
                                         df['Continent'].dropna().unique())
        st.title(continent)
        graph_type = st.selectbox("Choose visualization", [
            'Gender', 'Membership Type Distribution',
            'Membership Level Distribution'
        ])
        if graph_type == "Gender":
            st.subheader("Gender Distribution")
            with st.beta_container():
                data = cat_numbers(df[df['Continent'] == continent], 'Gender')
                pie_chart(data, 'Gender', 'Gender Distribution')
        elif graph_type == "Membership Type Distribution":
            with st.beta_container():
                data = cat_numbers(df[df['Continent'] == continent],
                                   'Membership Type')
                pie_chart(data, 'Membership Type',
                          'Membership Type Distribution')
        elif graph_type == "Membership Level Distribution":
            with st.beta_container():
                data = cat_numbers(df[df['Continent'] == continent],
                                   'Membership Level')
                pie_chart(data, 'Membership Level',
                          'Membership Level Distribution')
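The dashboard above leans on two small project helpers, cat_numbers and pie_chart, that the listing omits. Sketches consistent with how they are called (the exact signatures are assumptions):

import pandas as pd
import plotly.express as px
import streamlit as st

def cat_numbers(df: pd.DataFrame, col: str) -> pd.DataFrame:
    # Count rows per category; yields the columns [col, 'Number'] that the
    # analysis expanders above index into.
    return df[col].value_counts().rename_axis(col).reset_index(name='Number')

def pie_chart(data: pd.DataFrame, col: str, title: str) -> None:
    fig = px.pie(data, names=col, values='Number', title=title)
    st.plotly_chart(figure_or_data=fig)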
Code Example #6
from os.path import join

import torch
import wandb
from torch.optim import lr_scheduler
from torch.utils.data import DataLoader, TensorDataset

# load_data, get_adj_matrix, get_xuy, MLP and HyperLinearSSM are project helpers.
def train(config):
    wandb.init(project='graph_ssm',
               entity='sentinel',
               group=config.wandb.group,
               config=config.to_dict())
    # save the experiment config alongside the run
    config.to_yaml(join(wandb.run.dir, "exp_config.yaml"))

    train_data_path = './docs/100_slm_data/data_1.csv'
    train_states, train_actions, info = load_data(train_data_path,
                                                  scaling=config.scaling,
                                                  action_ws=config.action_ws,
                                                  preprocess=config.preprocess)

    test_data_path = './docs/100_slm_data/data_2.csv'
    test_scaler = (info['scale_min'], info['scale_max'])
    test_states, test_actions, info = load_data(test_data_path,
                                                scaling=config.scaling,
                                                scaler=test_scaler,
                                                action_ws=config.action_ws,
                                                preprocess=config.preprocess)

    state_dim = train_states.shape[1]
    action_dim = train_actions.shape[1]

    adj_xx = torch.eye(state_dim)
    glass_tc_pos_path = './docs/location/1_glass_Tc_r1.csv'
    control_tc_pos_path = './docs/location/5_controlTC_r1.csv'
    threshold = config.adjmat.threshold
    weight = config.adjmat.weight

    adj_xu = get_adj_matrix(glass_tc_pos_path, control_tc_pos_path, threshold,
                            weight)
    a_mlp = MLP(state_dim,
                adj_xx.nonzero(as_tuple=True)[0].size(0), **config.nn.a_mlp)
    b_mlp = MLP(action_dim,
                adj_xu.nonzero(as_tuple=True)[0].size(0), **config.nn.b_mlp)
    m = HyperLinearSSM(state_dim, action_dim, a_mlp, b_mlp, adj_xx,
                       adj_xu).to(config.device)
    wandb.watch(m)
    criteria = torch.nn.SmoothL1Loss()  # Huberloss
    opt = torch.optim.Adam(m.parameters(), lr=config.lr)

    # set up the LR scheduler, if one is configured
    use_lr_schedule = config.lr_scheduler
    if use_lr_schedule:
        scheduler_name = config.lr_scheduler.pop('name')
        scheduler = getattr(lr_scheduler, scheduler_name)(
            opt, **config.lr_scheduler.to_dict())

    train_xs, train_us, train_ys = get_xuy(train_states, train_actions,
                                           config.train_time_window,
                                           config.device)

    test_xs, test_us, test_ys = get_xuy(test_states, test_actions,
                                        test_states.shape[0] - 1,
                                        config.device)

    ds = TensorDataset(train_xs, train_us, train_ys)
    train_loader = DataLoader(ds, batch_size=config.batch_size, shuffle=True)

    iters = len(train_loader)
    n_update = 0
    min_test_loss = float('inf')  # best test loss seen so far
    for epoch in range(config.epoch):
        print("Epoch [{}] / [{}]".format(epoch, config.epoch))
        for i, (x, u, y) in enumerate(train_loader):
            x = x + (torch.randn_like(x) * config.perturb_x_param).to(
                config.device)
            pred = m.rollout(x, u)
            pred_loss = criteria(pred, y)

            # Regularization
            loss = pred_loss

            # Update
            opt.zero_grad()
            loss.backward()
            opt.step()

            n_update += 1

            log_dict = dict()
            log_dict['train_loss'] = pred_loss

            if n_update % config.test_every == 0:
                m.eval()
                with torch.no_grad():  # evaluation does not need gradients
                    test_pred = m.rollout(test_xs, test_us)
                    test_pred_loss = criteria(test_pred, test_ys)
                log_dict['test_loss'] = test_pred_loss
                m.train()

                if test_pred_loss <= min_test_loss:
                    print("BEST model found")
                    torch.save(
                        m.state_dict(),
                        join(wandb.run.dir, '{}_model.pt'.format(n_update)))
                    min_test_loss = test_pred_loss

            if use_lr_schedule:
                scheduler.step(epoch + i / iters)
                log_dict['lr'] = opt.param_groups[0]['lr']

            wandb.log(log_dict)

    torch.save(m.state_dict(), join(wandb.run.dir, 'model.pt'))
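get_xuy (used here and again in Code Example #8) slices the recorded trajectories into rollout windows. From its call sites, which pass a training window and test_states.shape[0] - 1 for a single full-length test rollout, a minimal sketch, assuming the inputs are tensors:

import torch

def get_xuy(states, actions, window, device):
    # For every valid start index t: initial state x_t, the action window
    # u_{t:t+W}, and the target trajectory y_{t+1:t+W+1}.
    xs, us, ys = [], [], []
    for t in range(states.shape[0] - window):
        xs.append(states[t])
        us.append(actions[t:t + window])
        ys.append(states[t + 1:t + window + 1])
    return (torch.stack(xs).to(device), torch.stack(us).to(device),
            torch.stack(ys).to(device))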
Code Example #7
import time

import numpy as np
import torch

# get_reparam_multi_linear_model, load_data, generate_reference, Runner and
# the torch `device` are project-local names assumed to be in scope.
def main(file_src, horizon):
    # Settings
    state_dim = 140
    action_dim = 40
    state_order = 5
    action_order = 5
    alpha = 0.001  # workset smoothness penalty
    time_limit = 5  # seconds allowed per optimization step
    train_data_path = [
        'docs/new_data/expert/data_1.csv', 'docs/new_data/expert/data_2.csv'
    ]

    m = get_reparam_multi_linear_model(state_dim, action_dim, state_order,
                                       action_order)
    model_filename = 'model/Multistep_linear/model_reparam_55.pt'
    m.load_state_dict(torch.load(model_filename, map_location=device))
    m.eval()

    train_states, train_actions, info = load_data(paths=train_data_path,
                                                  scaling=True,
                                                  preprocess=True,
                                                  history_x=state_order,
                                                  history_u=action_order)
    scaler = (info['scale_min'].item(), info['scale_max'].item())
    print(train_states[0].shape)
    print(train_actions[0].shape)
    print('Min. state scaler: {}, Max. state scaler: {}'.format(
        scaler[0], scaler[1]))
    print('Min. action scaler: {}, Max. action scaler: {}'.format(
        scaler[0], scaler[1]))

    validate_states, validate_actions, _ = load_data(paths=file_src,
                                                     scaling=False,
                                                     preprocess=True,
                                                     history_x=state_order,
                                                     history_u=action_order,
                                                     device=device)
    validate_states = validate_states[0][:, :state_dim]
    validate_actions = validate_actions[0]
    print(validate_states.shape)
    print(validate_actions.shape)
    optimizer_mode = 'LBFGS'

    initial_temp = 150.0
    heatup_times = [545]
    anneal_times = [363]
    target_temp = [375.0]
    target = generate_reference(initial_temp=initial_temp,
                                heatup_times=heatup_times,
                                anneal_times=anneal_times,
                                target_temps=target_temp)
    # [908 x 140]: one reference column per glass thermocouple
    target = torch.reshape(torch.tensor(target).to(device), shape=(-1, 1))
    target = target.repeat(repeats=(1, state_dim))
    target = (target - scaler[0]) / (scaler[1] - scaler[0])  # rescale to 0-1

    H = horizon
    T = target.shape[0]

    # Placeholder histories (unused in this open-loop validation variant).
    history_tc = np.ones((state_order, state_dim)) * 150  # 0-139: glass TC
    history_ws = np.ones((action_order - 1, action_dim)) * 150  # 0-39: workset
    runner = Runner(m=m,
                    optimizer_mode=optimizer_mode,
                    state_scaler=scaler,
                    action_scaler=scaler,
                    alpha=alpha,
                    timeout=time_limit)
    """
    x0 = torch.zeros((state_order, state_dim)).to(device)
    u0 = torch.zeros((action_order-1, action_dim)).to(device)
    us = torch.zeros((H, action_dim)).to(device)
    sample_predicted = m.multi_setp_prediction(x0, u0, us)
    print(sample_predicted.shape)
    """
    optimized_workset = []
    for t in range(T - H):
        print("Now time [{}] / [{}]".format(t, T - H))
        start = time.time()
        # [1 x 40] torch.Tensor; apply this workset to the furnace
        workset = runner.solve(validate_states[t:t + state_order],
                               validate_actions[t:t + action_order - 1],
                               target[t:t + H, :])
        end = time.time()
        workset = workset * (scaler[1] - scaler[0]) + scaler[0]
        optimized_workset.append(workset)
        print('Time computation : {}'.format(end - start))
        print('Target tc is {}'.format(target[t:t + H, :].mean()))
        print('Average ws is {}'.format(workset.mean()))
    optimized_workset = torch.stack(optimized_workset)
    torch.save(optimized_workset,
               'multistep_linear_' + str(horizon) + '_WS.pt')
Code Example #8
from os.path import join

import torch
import wandb
from torch.utils.data import DataLoader, TensorDataset

# load_data, get_adj_matrix, get_xuy and LinearSSM are project helpers.
def train(config):
    wandb.init(project='graph_ssm', config=config.to_dict())
    config.to_yaml(join(wandb.run.dir, "exp_config.yaml"))  # save config

    train_data_path = './docs/100_slm_data/data_1.csv'
    train_states, train_actions, info = load_data(train_data_path,
                                                  scaling=config.scaling)

    test_data_path = './docs/100_slm_data/data_2.csv'
    test_scaler = (info['scale_min'], info['scale_max'])
    test_states, test_actions, info = load_data(test_data_path,
                                                scaling=config.scaling,
                                                scaler=test_scaler)

    state_dim = train_states.shape[1]
    action_dim = train_actions.shape[1]

    adj_xx = torch.eye(state_dim)
    glass_tc_pos_path = './docs/location/1_glass_Tc_r1.csv'
    control_tc_pos_path = './docs/location/5_controlTC_r1.csv'
    threshold = 1200
    weight = (1., 1., 10.0)

    adj_xu = get_adj_matrix(glass_tc_pos_path, control_tc_pos_path, threshold,
                            weight)

    m = LinearSSM(state_dim, action_dim, adj_xx, adj_xu).to(config.device)
    wandb.watch(m)
    criteria = torch.nn.SmoothL1Loss()  # Huberloss
    opt = torch.optim.Adam(m.parameters(), lr=2e-4)

    train_xs, train_us, train_ys = get_xuy(train_states, train_actions,
                                           config.train_time_window,
                                           config.device)

    test_xs, test_us, test_ys = get_xuy(test_states, test_actions,
                                        test_states.shape[0] - 1,
                                        config.device)

    ds = TensorDataset(train_xs, train_us, train_ys)
    train_loader = DataLoader(ds, batch_size=config.batch_size)

    n_update = 0
    for ep in range(config.epoch):
        print("Epoch [{}] / [{}]".format(ep, config.epoch))
        for x, u, y in train_loader:
            x = x + (torch.randn_like(x) * config.perturb_x_param).to(
                config.device)
            pred = m.rollout(x, u)
            pred_loss = criteria(pred, y)

            # Regularization: the L1 term encourages a sparse B; note the A
            # term is subtracted, so a larger ||A||_2 lowers the loss (its
            # entries are clipped to [0, 1] after each step).
            A_2_norm = torch.norm(m.A.weight, p=2)
            B_1_norm = torch.norm(m.B.weight, p=1)
            loss = pred_loss + B_1_norm * config.lambda_B - A_2_norm * config.lambda_A

            # Update
            opt.zero_grad()
            loss.backward()
            opt.step()

            n_update += 1

            # Clip params
            m.A.clip_params(min=0.0, max=1.0)
            m.B.clip_params(min=0.0, max=1.0)

            log_dict = dict()
            log_dict['train_loss'] = pred_loss
            log_dict['A_2_norm'] = A_2_norm
            log_dict['B_1_norm'] = B_1_norm

            if n_update % config.test_every == 0:
                with torch.no_grad():  # evaluation does not need gradients
                    test_pred = m.rollout(test_xs, test_us)
                    test_pred_loss = criteria(test_pred, test_ys)
                log_dict['test_loss'] = test_pred_loss

            wandb.log(log_dict)
    torch.save(m.state_dict(), join(wandb.run.dir, 'model.pt'))
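clip_params, called on m.A and m.B after each optimizer step, is not a stock torch.nn method. A minimal sketch of a linear layer that provides it (an assumption about the project code):

import torch

class ClippedLinear(torch.nn.Linear):
    """Linear layer whose weight entries can be clamped in place."""

    @torch.no_grad()
    def clip_params(self, min: float, max: float) -> None:
        self.weight.clamp_(min=min, max=max)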