Example #1

def load_model(pickled_model, args_dict, test_data):

    lstm_settings_dict = {  # works because variables such as num_layers are set in the enclosing scope from json_dict
        'no_subnets': args_dict['no_subnets'],
        'hidden_dims': {
            'master': args_dict['hidden_nodes_master'],
            'acous': args_dict['hidden_nodes_acous'],
            'visual': args_dict['hidden_nodes_visual'],
        },
        'uses_master_time_rate': {},
        'time_step_size': {},
        'is_irregular': {},
        'layers': num_layers,
        'dropout': args_dict['dropout_dict'],
        'freeze_glove': args_dict['freeze_glove_embeddings']
    }
    lstm_settings_dict = test_data.get_lstm_settings_dict(
        lstm_settings_dict)  # add some extra items to the lstm settings related to the dataset

    model = LSTMPredictor(lstm_settings_dict=lstm_settings_dict, feature_size_dict=test_data.get_feature_size_dict(),
                          batch_size=train_batch_size, seq_length=args_dict['sequence_length'],
                          prediction_length=prediction_length, embedding_info=test_data.get_embedding_info())
    with open(pickled_model, "rb") as model_file:
        if torch.cuda.is_available():
            model.load_state_dict(torch.load(model_file))
        else:
            model.load_state_dict(torch.load(model_file, map_location=torch.device('cpu')))

    return model
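
# A minimal usage sketch for load_model (illustrative only: the file names and
# the dataset class below are assumptions, not part of the original script):
# args_dict = json.load(open('settings.json'))
# test_data = SomeDataset(...)  # must expose get_lstm_settings_dict, get_feature_size_dict, get_embedding_info
# model = load_model('model.pkl', args_dict, test_data)
# model.eval()
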
Example #2

def model_fn(model_dir):
    print('Loading model.')

    # First, load the parameters used to create the model.
    model_info = {}
    model_info_path = os.path.join(model_dir, 'model_info.pth')
    with open(model_info_path, 'rb') as f:
        model_info = torch.load(f)

    print('model_info: {}'.format(model_info))

    # Determine the device and construct the model.
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    model = LSTMPredictor(
        model_info['input_dim'],
        model_info['hidden_dim'],
        model_info['output_dim'],
        model_info['n_layers']
    )

    # Load the stored model parameters.
    model_path = os.path.join(model_dir, 'model.pth')
    with open(model_path, 'rb') as f:
        model.load_state_dict(torch.load(f, map_location=device))

    model.to(device).eval()

    print('Done loading model.')
    return model
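
# Counterpart sketch (an assumption, not shown in the original): writing
# model_info.pth at training time with exactly the keys model_fn reads back.
import os
import torch

def save_model_info(model_dir, args):
    with open(os.path.join(model_dir, 'model_info.pth'), 'wb') as f:
        torch.save({'input_dim': args.input_dim,
                    'hidden_dim': args.hidden_dim,
                    'output_dim': args.output_dim,
                    'n_layers': args.n_layers}, f)
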
Example #3

#    if not os.path.exists("./{0}".format(file_name)):
#        os.makedirs("./{0}".format(file_name))
#    df_MJ.to_excel(outpath)
#    print(df_MJ_result)
    df_MJ_result.to_excel(outpath)
    # majority baseline:
    # f1_score(true_vals,np.zeros([len(true_vals),]).tolist(),average='weighted')
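    # Spelled out, the baseline above scores an all-zeros (majority-class)
    # prediction with sklearn's weighted F1:
    # majority_preds = np.zeros(len(true_vals)).tolist()
    # f1_score(true_vals, majority_preds, average='weighted')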


# %% Init model
# model = LSTMPredictor(feature_size_dict, hidden_nodes, num_layers,train_batch_size,sequence_length,prediction_length,train_dataset.get_embedding_info(),dropout=dropout)
embedding_info = train_dataset.get_embedding_info()
#print(embedding_info)
model = LSTMPredictor(lstm_settings_dict=lstm_settings_dict,
                      feature_size_dict=feature_size_dict,
                      batch_size=train_batch_size,
                      seq_length=sequence_length,
                      prediction_length=prediction_length,
                      embedding_info=embedding_info)

model.weights_init(init_std)
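
# weights_init is a method of LSTMPredictor and is not shown here; as an
# assumption, the init_std argument suggests an implementation along these
# lines (hypothetical sketch, not the model's actual code):
# def weights_init(self, init_std):
#     for p in self.parameters():
#         torch.nn.init.normal_(p, mean=0.0, std=init_std)
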

optimizer_list = []

optimizer_list.append(
    optim.Adam(model.out.parameters(),
               lr=learning_rate,
               weight_decay=l2_dict['out']))
for embed_inf in embedding_info.keys():
    if embedding_info[embed_inf]:
        for embedder in embedding_info[embed_inf]:
            if embedder['embedding_use_func'] or (embedder['use_glove'] and not lstm_settings_dict['freeze_glove']):
                optimizer_list.append(
                    optim.Adam(model.embedding_func.parameters(), lr=learning_rate, weight_decay=l2_dict['emb']))
Example #4

    results_save['test_losses'].append(loss_weighted_mean)
    results_save['test_losses_l1'].append(loss_weighted_mean_l1)
    #    results_save['test_losses_mse'].append(loss_weighted_mean_mse)

    indiv_perf = {'bar_chart_labels': bar_chart_labels,
                  'bar_chart_vals': bar_chart_vals}
    results_save['indiv_perf'].append(indiv_perf)
    # majority baseline:
    # f1_score(true_vals,np.zeros([len(true_vals),]).tolist(),average='weighted')


# %% Init model
embedding_info = train_dataset.get_embedding_info()

model = LSTMPredictor(lstm_settings_dict=lstm_settings_dict, feature_size_dict=feature_size_dict,
                      batch_size=train_batch_size, seq_length=sequence_length, prediction_length=prediction_length,
                      embedding_info=embedding_info)

model.weights_init(init_std)

optimizer_list = []

optimizer_list.append(
    optim.Adam(model.out.parameters(), lr=learning_rate, weight_decay=l2_dict['out']))
for embed_inf in embedding_info.keys():
    if embedding_info[embed_inf]:
        for embedder in embedding_info[embed_inf]:
            if embedder['embedding_use_func'] or (embedder['use_glove'] and not lstm_settings_dict['freeze_glove']):
                optimizer_list.append(
                    optim.Adam(model.embedding_func.parameters(), lr=learning_rate, weight_decay=l2_dict['emb']))
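
# Training-step sketch (an assumption, not from the original script): with
# several optimizers over disjoint parameter groups, zero them all, backprop
# once, then step each so every group is updated exactly once.
def multi_optimizer_step(model, optimizer_list, loss_func, inputs, targets):
    for opt in optimizer_list:
        opt.zero_grad()
    loss = loss_func(model(inputs), targets)
    loss.backward()
    for opt in optimizer_list:
        opt.step()
    return loss.item()
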
Example #5

    print('Using device {}.'.format(device))

    torch.manual_seed(args.seed)

    # Load the training data.
    train_loader = _get_data_loader(
        args.batch_size, os.path.join(args.data_dir, 'train.zip')
    )
    val_loader = _get_data_loader(
        args.batch_size, os.path.join(args.data_dir, 'val.zip')
    )

    # Build the model.
    model = LSTMPredictor(
        args.input_dim,
        args.hidden_dim,
        args.output_dim,
        args.n_layers
    ).to(device)

    print(
        'Model loaded with input_dim {}, hidden_dim {}, output_dim {}, '
        'n_layers {}.'.format(
            args.input_dim, args.hidden_dim, args.output_dim, args.n_layers))

    # Train the model.
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    loss_fn = RSMELoss()  # custom loss class defined elsewhere in the script (presumably root-mean-square error)

    train(
        model,
        train_loader,
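
# RSMELoss is defined elsewhere in this script; assuming it computes a
# root-mean-square error, a minimal stand-in could look like this:
import torch
import torch.nn as nn

class RSMELoss(nn.Module):
    def __init__(self):
        super().__init__()
        self.mse = nn.MSELoss()

    def forward(self, pred, target):
        # square root of the mean squared error
        return torch.sqrt(self.mse(pred, target))
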
Example #6

                  'bar_chart_vals': bar_chart_vals}
    results_save['indiv_perf'].append(indiv_perf)
    # majority baseline:
    # f1_score(true_vals,np.zeros([len(true_vals),]).tolist(),average='weighted')


# %% Init model
# model = LSTMPredictor(feature_size_dict, hidden_nodes, num_layers,train_batch_size,sequence_length,prediction_length,train_dataset.get_embedding_info(),dropout=dropout)
embedding_info = train_dataset.get_embedding_info()
#import glob
fileexists = os.path.isfile('./model_save/checkpoint_60.pkl')

optimizer_list = []
if fileexists:
    model = LSTMPredictor(lstm_settings_dict=lstm_settings_dict, feature_size_dict=feature_size_dict,
                          batch_size=train_batch_size, seq_length=sequence_length, prediction_length=prediction_length,
                          embedding_info=embedding_info)
    
    PATH = "./model_save/checkpoint.pkl"
    checkpoint = torch.load(PATH)
#    model.load_state_dict(checkpoint['model_state_dict'])
    # =============================================================================
    #     Load hidden state
    # =============================================================================
    # Copy in place: assigning into the dict returned by state_dict() would not update the module.
    with torch.no_grad():
        state = model.state_dict()
        ckpt_state = checkpoint['model_state_dict']
        state['lstm_acous.bias_ih_l0'].copy_(ckpt_state['lstm_master.bias_ih_l0'])
        state['lstm_acous.bias_hh_l0'].copy_(ckpt_state['lstm_master.bias_hh_l0'])
        state['lstm_acous.weight_ih_l0'].copy_(ckpt_state['lstm_master.weight_ih_l0'])
        state['lstm_acous.weight_hh_l0'].copy_(ckpt_state['lstm_master.weight_hh_l0'])
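    # Equivalent alternative (an assumption): remap the checkpoint keys and load
    # them via load_state_dict, which also validates tensor shapes; strict=False
    # leaves parameters the remapped dict does not cover untouched.
    # remapped = {k.replace('lstm_master.', 'lstm_acous.'): v
    #             for k, v in checkpoint['model_state_dict'].items()
    #             if k.startswith('lstm_master.')}
    # model.load_state_dict(remapped, strict=False)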
    # =============================================================================
    #     Load out state
    # =============================================================================
Example #7

    results_save['test_losses'].append(loss_weighted_mean)
    results_save['test_losses_l1'].append(loss_weighted_mean_l1)
    indiv_perf = {
        'bar_chart_labels': bar_chart_labels,
        'bar_chart_vals': bar_chart_vals
    }
    results_save['indiv_perf'].append(indiv_perf)


# %% Init model
embedding_info = train_dataset.get_embedding_info()
model = LSTMPredictor(lstm_settings_dict=lstm_settings_dict,
                      feature_size_dict=feature_size_dict,
                      batch_size=train_batch_size,
                      seq_length=sequence_length,
                      prediction_length=prediction_length,
                      embedding_info=embedding_info,
                      dropout_acous_p=dropout_acous_p,
                      dropout_visual_p=dropout_visual_p)

model.weights_init(init_std)

optimizer_list = []
optimizer_list.append(
    optim.Adam(model.out.parameters(),
               lr=learning_rate,
               weight_decay=l2_dict['out']))
for embed_inf in embedding_info.keys():
    if embedding_info[embed_inf]:
        for embedder in embedding_info[embed_inf]:
            if embedder['embedding_use_func']: