Example #1
# hyperparameters; gamma and alpha are taken from the command-line arguments
gamma = args.gamma
alpha = args.alpha
learning_rate = 0.001

# number of training epochs
num_of_epoch = 8
# batch size for gradient descent during training
batch_sz = 64

# set up the criterion
criterion = nn.CrossEntropyLoss().to(device)
# set up models
clstm = CNN_LSTM(INPUT_DIM, EMBEDDING_DIM, KER_SIZE, N_FILTERS,
                 HIDDEN_DIM).to(device)
print(clstm)
policy_s = Policy_S(HIDDEN_DIM, HIDDEN_DIM, OUTPUT_DIM).to(device)
policy_n = Policy_N(HIDDEN_DIM, HIDDEN_DIM, MAX_K).to(device)
policy_c = Policy_C(HIDDEN_DIM, HIDDEN_DIM, LABEL_DIM).to(device)
value_net = ValueNetwork(HIDDEN_DIM, HIDDEN_DIM, OUTPUT_DIM).to(device)

# set up the optimisers
params_pg = list(policy_s.parameters()) + list(policy_c.parameters()) + list(
    policy_n.parameters())
optim_loss = optim.Adam(clstm.parameters(), lr=learning_rate)
optim_policy = optim.Adam(params_pg, lr=learning_rate)
optim_value = optim.Adam(value_net.parameters(), lr=learning_rate)

# add pretrained embeddings
pretrained_embeddings = TEXT.vocab.vectors
clstm.embedding.weight.data.copy_(pretrained_embeddings)
clstm.embedding.weight.requires_grad = True  # allow the pretrained embeddings to be fine-tuned during training
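
Below is a minimal sketch, not part of this example, of how a TEXT field with pretrained vectors like the one referenced above is typically built with the legacy torchtext API; the dataset, vector name, and vocabulary size are illustrative assumptions that happen to match EMBEDDING_DIM = 100 and a 25,002-word vocabulary.

import torch
from torchtext.legacy import data, datasets

# hypothetical field/dataset setup; the actual repository may differ
TEXT = data.Field(batch_first=True)
LABEL = data.LabelField(dtype=torch.float)
train_data, test_data = datasets.IMDB.splits(TEXT, LABEL)

# the 25,000 most frequent words plus <unk> and <pad> give a vocabulary of 25,002,
# and glove.6B.100d yields 100-dimensional vectors
TEXT.build_vocab(train_data, max_size=25_000, vectors='glove.6B.100d')
LABEL.build_vocab(train_data)

# this is the tensor copied into clstm.embedding above
pretrained_embeddings = TEXT.vocab.vectors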
Example #2
        # tail of a policy network's forward pass: a final linear projection followed by softmax
        out = self.fc_n_output(out)
        return self.softmax(out)


INPUT_DIM = 25002
EMBEDDING_DIM = 100
KER_SIZE = 5
HIDDEN_DIM = 128
OUTPUT_DIM = 1
CHUNCK_SIZE = 20
TEXT_LEN = 400
MAX_K = 3
N_FILTERS = 128
BATCH_SIZE = 1

cnn_model = CNN(INPUT_DIM, EMBEDDING_DIM, KER_SIZE, N_FILTERS, HIDDEN_DIM)
test_policy_s = Policy_S(HIDDEN_DIM, OUTPUT_DIM).train()
test_policy_n = Policy_N(HIDDEN_DIM, MAX_K).train()
test_policy_c = Policy_C(HIDDEN_DIM, OUTPUT_DIM).train()

# dummy token-index input of shape (1, 20); randint's upper bound is exclusive, so every entry is 1
input_size = torch.randint(1, 2, (1, 20))
cnn_cost = print_model_parm_flops(cnn_model, input_size)
# dummy hidden-state input for the three policy networks
p = torch.rand(1, 128)
s_cost = print_model_parm_flops(test_policy_s, p)
c_cost = print_model_parm_flops(test_policy_c, p)
n_cost = print_model_parm_flops(test_policy_n, p)
print('cnn_cost', cnn_cost)
print('s_cost', s_cost)
print('c_cost', c_cost)
print('n_cost', n_cost)
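
If print_model_parm_flops is unavailable, a comparable cost estimate can be obtained with the third-party thop package; this is an alternative sketch, not the helper used above, and the dummy input shapes simply mirror the tensors from this example.

import torch
from thop import profile  # pip install thop

# count multiply-accumulate operations and parameters for each network
dummy_tokens = torch.randint(0, INPUT_DIM, (1, 20))
dummy_hidden = torch.rand(1, HIDDEN_DIM)

cnn_macs, cnn_params = profile(cnn_model, inputs=(dummy_tokens,))
s_macs, s_params = profile(test_policy_s, inputs=(dummy_hidden,))
print('cnn MACs', cnn_macs, 'params', cnn_params)
print('policy_s MACs', s_macs, 'params', s_params)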