# Example #1
# 0
# Construct RBM
# -----------------------------------------------------------------------------
# Persistent Contrastive Divergence keeps the Markov chains alive between
# parameter updates; k is the number of Gibbs steps per update.
sampler = PersistentContrastiveDivergence(k=k,
                                          hidden_activations=True,
                                          continuous_output=True)
optimizer = Adam(learning_rate)
rbm = RBM(n_visible=vis,
          n_hidden=hidd,
          sampler=sampler,
          optimizer=optimizer,
          device=device,
          vbias=vbias,
          verbose=verbose)
# Resume from an existing checkpoint, if present.
# NOTE(review): despite the name, `model_dir` is used as a file path here.
pre_trained = os.path.isfile(model_dir)
if pre_trained:
    # map_location remaps tensors to the device this run actually uses, so a
    # checkpoint saved on GPU still loads on a CPU-only machine (and vice
    # versa) instead of raising a device error.
    rbm.load_state_dict(torch.load(model_dir, map_location=device))

# -----------------------------------------------------------------------------
# Training
# -----------------------------------------------------------------------------
if not pre_trained:
    validation = data[:10000]
    for _ in range(epochs):
        train_loader = torch.utils.data.DataLoader(data,
                                                   batch_size=batch_size,
                                                   shuffle=True)
        rbm.train(train_loader)
        # A good measure of well-fitting is the free energy difference
        # between some known and unknown instances. It is related to the
        # log-likelihood difference, but it does not depend on the
        # partition function. It should be around 0, and if it grows, it