Example #1
# Fragment of the MlpCritic constructor (see Example #2 below for the full class);
# it relies on torch.nn as nn, torch.nn.functional as F and a project-local MLP module.
class MlpCritic(nn.Module):
	def __init__(self, state_dim, action_dim,
				 non_linearity=F.relu, hidden_layers=1, hidden_dim=20, output_non_linearity=F.sigmoid):
		super(MlpCritic, self).__init__()

		input_dim = state_dim + action_dim

		# Q1 FA
		self._Q1 = MLP(input_dim=input_dim, output_dim=1, output_non_linearity=output_non_linearity,
				 hidden_dim=hidden_dim, hidden_non_linearity=non_linearity, hidden_layers=hidden_layers)

		# Q2 FA
		self._Q2 = MLP(input_dim=input_dim, output_dim=1, output_non_linearity=output_non_linearity,
				 hidden_dim=hidden_dim, hidden_non_linearity=non_linearity, hidden_layers=hidden_layers)
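The examples on this page rely on a project-local MLP module that is not included. Below is a hedged sketch, not the original code, of an implementation compatible with the keyword arguments used above (input_dim, output_dim, hidden_dim, hidden_layers, hidden_non_linearity, output_non_linearity) and with the reset_parameters() call in Example #2.

import torch
import torch.nn as nn
import torch.nn.functional as F


class MLP(nn.Module):
    # Simple fully connected network: hidden_layers hidden blocks of size
    # hidden_dim, followed by a linear output layer with an optional
    # output non-linearity.
    def __init__(self, input_dim, output_dim, hidden_dim=20, hidden_layers=1,
                 hidden_non_linearity=F.relu, output_non_linearity=F.sigmoid):
        super().__init__()
        dims = [input_dim] + [hidden_dim] * hidden_layers + [output_dim]
        self._layers = nn.ModuleList(
            nn.Linear(dims[i], dims[i + 1]) for i in range(len(dims) - 1))
        self._hidden_act = hidden_non_linearity
        self._output_act = output_non_linearity

    def forward(self, x):
        for layer in self._layers[:-1]:
            x = self._hidden_act(layer(x))
        x = self._layers[-1](x)
        return self._output_act(x) if self._output_act is not None else x

    def reset_parameters(self):
        # re-initialize every linear layer in place
        for layer in self._layers:
            layer.reset_parameters()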
Example #2
class MlpCritic(nn.Module):
	def __init__(self, state_dim, action_dim,
				 non_linearity=F.relu, hidden_layers=1, hidden_dim=20, output_non_linearity=F.sigmoid):
		super(MlpCritic, self).__init__()

		input_dim = state_dim + action_dim

		# Q1 FA
		self._Q1 = MLP(input_dim=input_dim, output_dim=1, output_non_linearity=output_non_linearity,
				 hidden_dim=hidden_dim, hidden_non_linearity=non_linearity, hidden_layers=hidden_layers)

		# Q2 FA
		self._Q2 = MLP(input_dim=input_dim, output_dim=1, output_non_linearity=output_non_linearity,
				 hidden_dim=hidden_dim, hidden_non_linearity=non_linearity, hidden_layers=hidden_layers)

	def forward(self, s, a):
		s = tt(s)
		a = tt(a)

		# concatenate state and action; handle both single samples and batches
		if len(s.shape) == 1:
			x = torch.cat((s, a))
		else:
			x = torch.cat((s, a), dim=1)

		q1 = self._Q1(x)
		q2 = self._Q2(x)

		return q1, q2

	def Q1(self, s, a):
		# return only the first Q estimate (e.g. for the policy/actor update)
		s = tt(s)
		a = tt(a)

		if len(s.shape) == 1:
			x = torch.cat((s, a))
		else:
			x = torch.cat((s, a), dim=1)

		q1 = self._Q1(x)

		return q1

	def reset_parameters(self):
		self._Q1.reset_parameters()
		self._Q2.reset_parameters()
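A minimal usage sketch for the twin-Q critic above; it is not part of the original example. It assumes MlpCritic, the project-local MLP and the tt() helper are importable and that tt() accepts torch tensors; the dimensions and batch size are made up for illustration.

import torch

state_dim, action_dim = 8, 2
critic = MlpCritic(state_dim, action_dim, hidden_layers=2, hidden_dim=64)

s = torch.randn(32, state_dim)   # batch of states (illustrative shape)
a = torch.randn(32, action_dim)  # batch of actions

q1, q2 = critic(s, a)            # both Q estimates (e.g. for a TD3-style target)
q1_only = critic.Q1(s, a)        # first head only (e.g. for the actor update)
print(q1.shape, q2.shape)        # torch.Size([32, 1]) twice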
Example #3
    # apply speaker normalization
    for snt_id in fea_dev.keys():
        spk_id = snt_id.split('_')[0]
        fea_pase_dev[snt_id] = (fea_pase_dev[snt_id] - mean_spk_dev[spk_id])  # /std_spk_dev[spk_id]

# Label file reading
with open(lab_file, 'rb') as handle:
    lab = pickle.load(handle)

with open(lab_file_dev, 'rb') as handle:
    lab_dev = pickle.load(handle)
    

# Network initialization
nnet = MLP(options, inp_dim)

nnet.to(device)

cost = nn.NLLLoss()

# Optimizer initialization
optimizer = optim.SGD(nnet.parameters(), lr=lr, momentum=0.0)

# Seeds initialization
np.random.seed(seed)
torch.manual_seed(seed)

# Batch creation (train)
fea_lst = []
lab_lst = []
Example #4
        # apply speaker normalization
        for snt_id in fea_dev.keys():
            spk_id = snt_id.split('_')[0]
            # /std_spk_dev[spk_id]
            fea_pase_dev[snt_id] = (fea_pase_dev[snt_id] -
                                    mean_spk_dev[spk_id])

    # Label file reading
    with open(lab_file, 'rb') as handle:
        lab = pickle.load(handle)

    with open(lab_file_dev, 'rb') as handle:
        lab_dev = pickle.load(handle)

    # Network initialization
    nnet = MLP(options, inp_dim)

    nnet.to(device)

    cost = nn.NLLLoss()

    # Optimizer initialization
    optimizer = optim.SGD(nnet.parameters(), lr=lr, momentum=0.0)

    # Seeds initialization
    np.random.seed(seed)
    torch.manual_seed(seed)

    # Batch creation (train)
    fea_lst = []
    lab_lst = []
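A hedged sketch, not in the original example, of the minibatch loop that would follow the initialisation above. It assumes fea_lst and lab_lst end up holding NumPy arrays of context-windowed features and frame labels, and that MLP ends in log-softmax, since nn.NLLLoss expects log-probabilities.

for fea_batch, lab_batch in zip(fea_lst, lab_lst):
    inp = torch.from_numpy(fea_batch).to(device).float()
    ref = torch.from_numpy(lab_batch).to(device).long()

    pout = nnet(inp)          # [N, n_classes] log-probabilities
    loss = cost(pout, ref)    # frame-level cross-entropy via NLLLoss

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()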
Example #5
# Computing pase features for training
fea_pase = {}
for snt_id in fea.keys():
    fea_pase[snt_id] = pase(fea[snt_id])
    fea_pase[snt_id] = fea_pase[snt_id].view(
        fea_pase[snt_id].shape[1], fea_pase[snt_id].shape[2]).transpose(0, 1)

inp_dim = fea_pase[snt_id].shape[1] * (left + right + 1)

# Computing pase features for test
fea_pase_dev = {}
for snt_id in fea_dev.keys():
    fea_pase_dev[snt_id] = pase(fea_dev[snt_id]).detach()
    fea_pase_dev[snt_id] = fea_pase_dev[snt_id].view(
        fea_pase_dev[snt_id].shape[1],
        fea_pase_dev[snt_id].shape[2]).transpose(0, 1)

# Network initialization
nnet = MLP(options, inp_dim)
nnet.to(device)
cost = nn.NLLLoss()

# Optimizer initialization
optimizer = optim.SGD(list(nnet.parameters()) + list(pase.parameters()),
                      lr=lr,
                      momentum=0.0)

# Seeds initialization
np.random.seed(seed)
torch.manual_seed(seed)

# Batch creation (train)
fea_lst = []
lab_lst = []
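A hedged sketch, not in the original example, of a single joint fine-tuning step with the objects set up above: the pase() forward pass is kept inside the step so gradients reach both the PASE encoder and the MLP. It assumes left = right = 0 (no context window), that lab[snt_id] is a LongTensor of frame labels on the same device, and that MLP ends in log-softmax as nn.NLLLoss requires.

pase.train()
nnet.train()

feats = pase(fea[snt_id])                 # [1, dim, T], kept in the graph
feats = feats.view(feats.shape[1], feats.shape[2]).transpose(0, 1)  # [T, dim]

pout = nnet(feats)                        # per-frame log-probabilities
loss = cost(pout, lab[snt_id])            # frame labels as class indices

optimizer.zero_grad()
loss.backward()
optimizer.step()                          # updates both PASE and the MLP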
Example #6
            mean_spk_dev[spk_id] = torch.mean(torch.stack(mean_spk_dev[spk_id]),
                                              dim=0)
            std_spk_dev[spk_id] = torch.mean(torch.stack(std_spk_dev[spk_id]),
                                             dim=0)

        # apply speaker normalization
        for snt_id in fea_dev.keys():
            spk_id = snt_id.split('_')[0]
            fea_pase_dev[snt_id] = (fea_pase_dev[snt_id] - mean_spk_dev[spk_id]
                                    )  #/std_spk_dev[spk_id]
            fea_pase_dev[snt_id] = context_window(fea_pase_dev[snt_id], left,
                                                  right)

    # Network initialization
    inp_dim = fea_pase_dev[snt_id].shape[1] * (left + right + 1)
    nnet = MLP(options, inp_dim)
    nnet.to(device)

    nnet.load_state_dict(torch.load(model_file))
    nnet.eval()

    post_file = open_or_fd(ark_file, output_folder, 'wb')

    for snt_id in fea_dev.keys():
        pout = nnet(torch.from_numpy(fea_pase_dev[snt_id]).to(device).float())
        # TODO: subtract the log-counts from the network output before writing
        # pout = pout - log_counts
        write_mat(output_folder, post_file, pout.data.cpu().numpy(), snt_id)

# Run decoding
print('Decoding...')
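The decoding path above relies on a context_window() helper that is not shown. Below is a hedged sketch of a compatible implementation, under the assumption that it concatenates each frame with its left/right neighbours so a [T, D] matrix becomes [T, D * (left + right + 1)], matching the inp_dim computation, and that it returns NumPy (torch.from_numpy is applied to its output above).

import numpy as np

def context_window(fea, left, right):
    # accept either a torch tensor or a NumPy array
    if hasattr(fea, 'detach'):
        fea = fea.detach().cpu().numpy()
    T, D = fea.shape
    # repeat the edge frames so every frame gets a full context
    padded = np.pad(fea, ((left, right), (0, 0)), mode='edge')
    out = np.zeros((T, D * (left + right + 1)), dtype=fea.dtype)
    for offset in range(left + right + 1):
        out[:, offset * D:(offset + 1) * D] = padded[offset:offset + T, :]
    return out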