('tech_mask_tensor', 'techniques_mask'),
    ]
    tensor_names, tensor_cols = zip(*tensors_to_load)

    # Materialise every recipe tensor on DEVICE; all columns are loaded
    # as LongTensors (index-valued data).
    memory_tensors = load_recipe_tensors(
        df_r, DEVICE,
        cols=tensor_cols,
        types=[torch.LongTensor for _ in tensors_to_load],
    )
    # Keyed access to the loaded tensors by their logical name.
    memory_tensor_map = {name: t for name, t in zip(tensor_names, memory_tensors)}
    print('{} - Tensors loaded in memory.'.format(datetime.now() - start))

    # Batch samplers over (user, item) index pairs.  Only the training
    # sampler shuffles (random=True); validation/test order stays fixed.
    train_data = DataFrameDataset(train_df, ['u', 'i'])
    train_sampler = BatchSampler(train_data, batch_size, random=True)
    valid_data = DataFrameDataset(valid_df, ['u', 'i'])
    valid_sampler = BatchSampler(valid_data, batch_size)
    test_data = DataFrameDataset(test_df, ['u', 'i'])
    test_sampler = BatchSampler(test_data, batch_size)
    # NOTE(review): test_data/test_sampler above are rebuilt later from the
    # (optionally subsampled) test set before any use — this first build is
    # redundant; consider removing it.
    # NOTE(review): a duplicated, stale block was removed here (copy-paste /
    # cell-merge artifact):
    #   * an earlier create_model(...) call whose result was immediately
    #     discarded — `model` is unconditionally rebound by the full
    #     create_model(...) call below before any use;
    #   * a byte-identical repeat of the memory_tensor_map construction and
    #     the "Tensors loaded in memory" print that already ran above.

    # Append one all-PAD row to name_tensor so the extra trailing index
    # maps to a fully padded name.
    pad_name_row = torch.LongTensor([[PAD_INDEX] * MAX_NAME]).to(DEVICE)
    memory_tensor_map['name_tensor'] = torch.cat(
        [memory_tensor_map['name_tensor'], pad_name_row]
    )

    # Subsample the test set for generation unless we are only computing
    # perplexity, or the requested sample is no smaller than the data.
    if ppx_only or n_samples >= len(test_df):
        sampled_test = test_df
    else:
        sampled_test = test_df.sample(n=n_samples)
    test_data = DataFrameDataset(sampled_test, ['u', 'i'])
    test_sampler = BatchSampler(test_data, batch_size)

    # --- Create model ---
    # Builds the network and restores trained weights from model_path.
    # dropout=0.0 — presumably because this model is used for evaluation
    # only (TODO confirm).
    model = create_model(
        vocab_emb_dim=vocab_emb_dim,
        calorie_emb_dim=calorie_emb_dim,
        item_emb_dim=item_emb_dim,
        n_items_w_pad=NUM_ITEM_EMBEDDING,
        hidden_size=hidden_size,
        n_layers=n_layers,
        dropout=0.0,
        max_ingr=MAX_INGR,
        max_ingr_tok=MAX_INGR_TOK,
        use_cuda=USE_CUDA,
        state_dict_path=model_path,
        ingr_gru=ingr_gru,
        decode_name=decode_name,
        ingr_emb=ingr_emb,
        num_ingr=N_INGREDIENTS,
        ingr_emb_dim=ingr_emb_dim,
        shared_projection=shared_proj,
        item_emb=item_emb,
    )

    # Model id = checkpoint filename without its extension.  splitext is
    # used instead of the fragile [:-3] slice, which silently assumed a
    # 3-character suffix (e.g. ".pt") and corrupted ids for anything else.
    model_id = os.path.splitext(os.path.basename(model_path))[0]