Ejemplo n.º 1
0
    def __init__(self, input_size, output_size, num_layers, num_parallels, rnn_size, att_size, bu_size, dropout):
        """Construct the LSTM cores, the two attention banks (regular and
        bottom-up) and the per-layer output projections."""
        super(LSTM_DOUBLE_ATT_STACK_PARALLEL_MUL_OUT_ATT_WITH_BU, self).__init__()

        # configuration
        self.input_size = input_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.num_parallels = num_parallels
        self.rnn_size = rnn_size
        self.att_size = att_size
        self.bu_size = bu_size
        self.dropout = dropout

        # one LSTM core per (layer, parallel) combination
        self.cores = nn.ModuleList(
            [CORE.lstm_core_with_att_bu(self.input_size, self.rnn_size)
             for _ in range(self.num_layers * self.num_parallels)])

        # two attention modules per layer
        self.attens = nn.ModuleList(
            [ATT.lstm_att_with_x_att_h(self.rnn_size, self.att_size)
             for _ in range(self.num_layers * 2)])

        # two bottom-up attention modules per layer
        self.bu_attens = nn.ModuleList(
            [ATT.lstm_att_with_att_h(self.rnn_size, self.bu_size)
             for _ in range(self.num_layers * 2)])

        # one output projection per layer
        self.projs = nn.ModuleList(
            [nn.Linear(self.rnn_size, self.output_size)
             for _ in range(self.num_layers)])
Ejemplo n.º 2
0
    def __init__(self, input_size, output_size, num_layers, num_parallels, rnn_size, att_size, dropout):
        """Construct the LSTM cores, per-layer attention, and the two output
        projections (value and FC weight), with Xavier-initialized weights."""
        super(LSTM_SOFT_ATT_STACK_PARALLEL_WITH_FC_WEIGHT, self).__init__()

        # configuration
        self.input_size = input_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.num_parallels = num_parallels
        self.rnn_size = rnn_size
        self.att_size = att_size
        self.dropout = dropout

        # core: one LSTM core per (layer, parallel) combination
        self.cores = nn.ModuleList()
        for i in range(self.num_layers * self.num_parallels):
            core = CORE.lstm_core_with_att(self.input_size, self.rnn_size)
            self.cores.append(core)

        # attention: one module per layer
        self.attens = nn.ModuleList()
        for i in range(self.num_layers):
            att = ATT.lstm_att_with_att_h(self.rnn_size, self.att_size)
            self.attens.append(att)

        # proj
        self.proj = nn.Linear(self.rnn_size, self.output_size)

        # proj weight
        self.proj_weight = nn.Linear(self.rnn_size, self.output_size)

        # NOTE: init.xavier_normal (no trailing underscore) is the deprecated
        # alias; the in-place xavier_normal_ is behaviorally identical.
        init.xavier_normal_(self.proj.weight)
        init.xavier_normal_(self.proj_weight.weight)
Ejemplo n.º 3
0
    def __init__(self, input_size, output_size, num_layers, num_parallels,
                 rnn_size, att_size, bu_size, bu_num, dropout):
        """Construct the two LSTM cores, the bottom-up attention module, and
        the Xavier-initialized output projection for the top-down model."""
        super(LSTM_WITH_TOP_DOWN_ATTEN, self).__init__()

        # configuration (some fields are presumably read by forward();
        # not all are used in this constructor)
        self.input_size = input_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.num_parallels = num_parallels
        self.rnn_size = rnn_size
        self.att_size = att_size
        self.bu_size = bu_size
        self.bu_num = bu_num
        self.dropout = dropout

        # core
        self.core1 = CORE.lstm_core(self.input_size, self.rnn_size)

        self.core2 = CORE.lstm_core_with_att(self.input_size, self.rnn_size)

        # bu attention
        self.att = ATT.lstm_att_with_att_h(self.rnn_size, self.bu_size)

        # proj
        self.proj = nn.Linear(self.rnn_size, self.output_size)

        # NOTE: init.xavier_normal (no trailing underscore) is the deprecated
        # alias; the in-place xavier_normal_ is behaviorally identical.
        init.xavier_normal_(self.proj.weight)
Ejemplo n.º 4
0
    def __init__(self, input_size, output_size, num_layers, num_parallels, rnn_size, att_size, num_hop, dropout):
        """Construct the LSTM cores, per-layer attention, and the memory
        module (DMN) that maps rnn_size to output_size over num_hop hops."""
        super(LSTM_SOFT_ATT_STACK_PARALLEL_MEMORY, self).__init__()

        # configuration
        self.input_size = input_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.num_parallels = num_parallels
        self.rnn_size = rnn_size
        self.att_size = att_size
        self.dropout = dropout
        self.num_hop = num_hop

        # one LSTM core per (layer, parallel) combination
        self.cores = nn.ModuleList(
            [CORE.lstm_core_with_att(self.input_size, self.rnn_size)
             for _ in range(self.num_layers * self.num_parallels)])

        # one attention module per layer
        self.attens = nn.ModuleList(
            [ATT.lstm_att_with_att_h(self.rnn_size, self.att_size)
             for _ in range(self.num_layers)])

        # memory module produces the output (no separate linear projection)
        self.memory = DMN.DMNCPlus(self.rnn_size, self.output_size, self.num_hop)
Ejemplo n.º 5
0
    def __init__(self, input_size, output_size, num_layers, num_parallels,
                 rnn_size, att_size, pool_size, spp_num, dropout):
        """Construct the LSTM cores, per-layer SPP attention modules, and the
        output projection."""
        super(LSTM_SOFT_ATT_STACK_PARALLEL_SPP, self).__init__()

        # configuration
        self.input_size = input_size
        self.output_size = output_size
        self.num_layers = num_layers
        self.num_parallels = num_parallels
        self.rnn_size = rnn_size
        self.att_size = att_size
        self.pool_size = pool_size
        self.spp_num = spp_num
        self.dropout = dropout

        # one LSTM core per (layer, parallel) combination
        self.cores = nn.ModuleList(
            [CORE.lstm_core_with_att(self.input_size, self.rnn_size)
             for _ in range(self.num_layers * self.num_parallels)])

        # one SPP attention module per layer
        self.attens = nn.ModuleList(
            [ATT.lstm_att_with_att_h_spp(self.rnn_size, self.att_size,
                                         self.pool_size, self.spp_num)
             for _ in range(self.num_layers)])

        # output projection
        self.proj = nn.Linear(self.rnn_size, self.output_size)
Ejemplo n.º 6
0
def temps(motor):
	"""Return a report of response times for the given speech engine.

	Runs the engine's response_time over one English and one Spanish audio
	sample and joins the results into a newline-separated string. Returns
	None (implicitly) for an unrecognized motor, matching prior behavior.
	"""
	audios = ["audio1sE.wav", "audio1s.wav"]
	# each audio file is paired with its language code, in order
	languages = ["en-US", "es-US"]

	def _collect(header, response_time):
		# Build: header + '\n' + result1 + '\n' + result2
		parts = [header]
		for lan, audio in zip(languages, audios):
			parts.append(str(response_time(lan, audio)))
		return '\n'.join(parts)

	# The engine module is only referenced inside the matching branch, so an
	# unknown motor never touches any engine API.
	if motor == 'Google':
		return _collect('Google\n', GoogleAPI.response_time)
	elif motor == 'ATT':
		return _collect('\n\nAT&T\n', ATT.response_time)
	elif motor == 'Witai':
		return _collect('\n\nWit.ai\n', witai.response_time)
	# unknown motor: fall through and return None (original behavior)