Example #1
import os
from torch import optim

from etm import ETM  # ETM model class (import path assumed from the reference script)

# The surrounding script also defines `args`, `vocab_size`, `embeddings` and `device`.
ckpt = os.path.join(
    args.save_path,
    'etm_{}_K_{}_Htheta_{}_Optim_{}_Clip_{}_ThetaAct_{}_Lr_{}_Bsz_{}_RhoSize_{}_trainEmbeddings_{}'
    .format(args.dataset, args.num_topics, args.t_hidden_size,
            args.optimizer, args.clip, args.theta_act, args.lr,
            args.batch_size, args.rho_size, args.train_embeddings))
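
# For illustration only (hypothetical values, not defaults from the script): with
# dataset='20ng', num_topics=50, t_hidden_size=800, optimizer='adam', clip=0.0,
# theta_act='relu', lr=0.005, batch_size=1000, rho_size=300, train_embeddings=0,
# the format string above resolves to the checkpoint name
#   etm_20ng_K_50_Htheta_800_Optim_adam_Clip_0.0_ThetaAct_relu_Lr_0.005_Bsz_1000_RhoSize_300_trainEmbeddings_0
# joined onto args.save_path.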

## define model and optimizer
model = ETM(args.num_topics, vocab_size, args.t_hidden_size, args.rho_size,
            args.emb_size, args.theta_act, embeddings, args.train_embeddings,
            args.enc_drop).to(device)

print('model: {}'.format(model))

if args.optimizer == 'adam':
    optimizer = optim.Adam(model.parameters(),
                           lr=args.lr,
                           weight_decay=args.wdecay)
elif args.optimizer == 'adagrad':
    optimizer = optim.Adagrad(model.parameters(),
                              lr=args.lr,
                              weight_decay=args.wdecay)
elif args.optimizer == 'adadelta':
    optimizer = optim.Adadelta(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.wdecay)
elif args.optimizer == 'rmsprop':
    optimizer = optim.RMSprop(model.parameters(),
                              lr=args.lr,
                              weight_decay=args.wdecay)
elif args.optimizer == 'asgd':
    optimizer = optim.ASGD(model.parameters(),
                           lr=args.lr,
                           t0=0,
                           lambd=0.,
                           weight_decay=args.wdecay)
else:
    print('Defaulting to vanilla SGD')
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
Example #2
    ckpt = os.path.join(
        args.save_path,
        'Dec17_etm_{}_K_{}_Htheta_{}_Optim_{}_Clip_{}_ThetaAct_{}_Lr_{}_Bsz_{}_RhoSize_{}_trainEmbeddings_{}'
        .format(args.dataset, args.num_topics, args.t_hidden_size,
                args.optimizer, args.clip, args.theta_act, args.lr,
                args.batch_size, args.rho_size, args.train_embeddings))

    for num_topics in [10, 15, 20, 25, 30, 35, 40, 45, 50]:
        args.num_topics = num_topics
        ## define model and optimizer
        model = ETM(args.num_topics, vocab_size, args.t_hidden_size,
                    args.rho_size, args.emb_size, args.theta_act, embeddings,
                    args.train_embeddings, args.enc_drop).to(device)
        # print('model: {}'.format(model))
        if args.optimizer == 'adam':
            optimizer = optim.Adam(model.parameters(),
                                   lr=args.lr,
                                   weight_decay=args.wdecay)

        def train(epoch):
            model.train()
            acc_loss = 0            # accumulates the reconstruction term of the ELBO
            acc_kl_theta_loss = 0   # accumulates the KL term for theta
            cnt = 0
            # shuffle the training documents, then split into minibatches of indices
            indices = torch.randperm(args.num_docs_train)
            indices = torch.split(indices, args.batch_size)
            for idx, ind in enumerate(indices):
                optimizer.zero_grad()
                model.zero_grad()
                # dense bag-of-words batch for the documents in this minibatch
                data_batch = data.get_batch(train_tokens, train_counts, ind,
                                            args.vocab_size, device)
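
The excerpt above stops at the call to data.get_batch. For context, a minimal sketch of what such a helper could look like, inferred only from the call site (the body below is an assumption, not the repository's implementation): it assembles a dense bag-of-words batch for the selected document indices.

import torch

def get_batch(tokens, counts, ind, vocab_size, device):
    """Hypothetical sketch: build a dense (len(ind) x vocab_size) bag-of-words batch."""
    data_batch = torch.zeros(len(ind), vocab_size)
    for row, doc in enumerate(ind.tolist()):
        # tokens[doc] and counts[doc] are assumed to hold the per-document
        # word ids and their counts, as the names train_tokens/train_counts suggest.
        for tok, cnt in zip(tokens[doc], counts[doc]):
            data_batch[row, int(tok)] = float(cnt)
    return data_batch.to(device)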
Example #3
if args.mode == 'eval':
    ckpt = args.load_from
else:
    ckpt = os.path.join(
        args.save_path,
        'etm_{}_K_{}_Htheta_{}_Optim_{}_Clip_{}_ThetaAct_{}_Lr_{}_Bsz_{}_RhoSize_{}_trainEmbeddings_{}'
        .format(args.dataset, args.num_topics, args.t_hidden_size,
                args.optimizer, args.clip, args.theta_act, args.lr,
                args.batch_size, args.rho_size, args.train_embeddings))

## define model and optimizer
model = ETM(args.num_topics, vocab_size, args.t_hidden_size, args.rho_size, args.emb_size,
            args.theta_act, embeddings, args.train_embeddings, args.enc_drop).to(device)

print('model: {}'.format(model))

if args.optimizer == 'adam':
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'adagrad':
    optimizer = optim.Adagrad(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'adadelta':
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'rmsprop':
    optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wdecay)
elif args.optimizer == 'asgd':
    optimizer = optim.ASGD(model.parameters(), lr=args.lr, t0=0, lambd=0., weight_decay=args.wdecay)
else:
    print('Defaulting to vanilla SGD')
    optimizer = optim.SGD(model.parameters(), lr=args.lr)
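
# Not part of the original script: the if/elif chain above can equivalently be
# expressed as a lookup table. A minimal sketch, where `build_optimizer` is a
# hypothetical helper name, not something defined in the ETM code:
def build_optimizer(model, args):
    factories = {
        'adam': lambda p: optim.Adam(p, lr=args.lr, weight_decay=args.wdecay),
        'adagrad': lambda p: optim.Adagrad(p, lr=args.lr, weight_decay=args.wdecay),
        'adadelta': lambda p: optim.Adadelta(p, lr=args.lr, weight_decay=args.wdecay),
        'rmsprop': lambda p: optim.RMSprop(p, lr=args.lr, weight_decay=args.wdecay),
        'asgd': lambda p: optim.ASGD(p, lr=args.lr, t0=0, lambd=0.,
                                     weight_decay=args.wdecay),
    }
    make = factories.get(args.optimizer)
    if make is None:
        print('Defaulting to vanilla SGD')
        return optim.SGD(model.parameters(), lr=args.lr)
    return make(model.parameters())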


def train(epoch):
    model.train()