Example #1
    def __init__(self, memory_size=4):
        """Initialize the dialog manager: load the training data and train the classifiers.

        Args:
            memory_size: Number of past conversation turns to keep as the
                conversation history.
        """
        self.conversation_count = 0

        # Initialize a queue with the required memory of past conversations
        self.history = deque("", memory_size)

        # Load the training sets for both states
        self.training_set_state1 = rdr.trainer_reader("training_data.csv").load()
        self.training_set_state2 = rdr.trainer_reader("training_data_2.csv").load()

        self.training_map_1 = {0: 1}
        self.training_map_2 = {0: 1}

        self.training_set_state1 = self._map_multiple_response(self.training_set_state1, self.training_map_1)
        self.training_set_state2 = self._map_multiple_response(self.training_set_state2, self.training_map_2)

        lambda_pi_1 = [
            0.2395105181493061,
            0.0006173868035890566,
            0.03237275574954535,
            0.0006901882640402509,
            0.44723785605466126,
            0.2795712949788579,
        ]
        lambda_pi_2 = [
            0.0015018046263117243,
            0.001374250681872707,
            0.12433409846344812,
            0.002426809340382256,
            0.663739481446308,
            0.06767643891963208,
        ]

        # Use the training sets to train two classifiers
        self.trn_1 = tr.trainer(0.002166020551556791, lambda_pi_1, self.training_set_state1)
        self.trn_2 = tr.trainer(0.006249742180327811, lambda_pi_2, self.training_set_state2)
        self.trn_1.train()
        self.trn_2.train()

        # Set the thresholds for dialogue generation
        self.threshold_1 = -0.000428095769801
        self.threshold_2 = -0.00154795755273
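
Example #1 calls self._map_multiple_response, which is not shown in this snippet. Judging from the module-level map_multiple_response used in Examples #4-#7, the helper presumably looks like the sketch below; the method form is assumed, since only the standalone function appears on this page.

    def _map_multiple_response(self, training_set, tmap):
        # Sketch adapted from the module-level map_multiple_response in
        # Examples #4-#7; the actual method body is not shown in Example #1.
        training_map = dict()
        for (ques, ans) in training_set:
            training_map[ques] = training_map.get(ques, []) + [ans]
        mtrs = []
        for ques, answers in training_map.iteritems():
            tmap[answers[0]] = answers
            mtrs.append((ques, answers[0]))
        return mtrs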
Example #2
	def __init__(self, memory_size=4):
		self.conversation_count = 0

		# Initialize a queue with the required memory of past conversations
		self.history = deque('', memory_size)

		# Load the training sets for both states
		self.training_set_state1 = rdr.trainer_reader('Data/training_ML.csv').load()
		
		self.training_map_1 = {0: 1}
		
		self.training_set_state1 = self._map_multiple_response(self.training_set_state1, self.training_map_1)
	

		#lambda_pi_1 = [0.022637184564677337, 0.0006614527936524828, 0.8151936922826796, 0.009768802605860376, 0.9977721108143799, 0.9676503420836119]
		#self.lambda_pi_1 = [0.5089104109775326, 0.5432925251421259, 0.02218191679541291, 0.8877708494421628, 0.8206554620659708, 0.6351178059195106]
		self.lambda_pi_1 = [0.2517703455341973, 0.024354405309287175, 0.3108059389694587, 0.6223442089112752, 0.28378859405365237, 0.41789353929407375]
		# Normalize the weights so they sum to 1 (compute the sum once, keep a plain list)
		total = sum(self.lambda_pi_1)
		self.lambda_pi_1 = [x / total for x in self.lambda_pi_1]
		#0.5340087189383883 #0.7204150300022019
	
		# Use the training sets to train two classifiers
		#self.trn_1 = tr.trainer(0.007672586062919887, lambda_pi_1 , self.training_set_state1)
		self.trn_1 = tr.trainer(self.lambda_pi_1 , self.training_set_state1)
		self.trn_1.train()

		# Set threshold for dialogue generation
		self.threshold_1 = 0.06  # -0.0015
Example #3
    def __init__(self, memory_size=4):
        self.conversation_count = 0

        # Initialize a queue with the required memory of past conversations
        self.history = deque('', memory_size)

        # Load the training sets for both states
        self.training_set_state1 = rdr.trainer_reader(
            'Data/training_ML.csv').load()

        self.training_map_1 = {0: 1}

        self.training_set_state1 = self._map_multiple_response(
            self.training_set_state1, self.training_map_1)

        #lambda_pi_1 = [0.022637184564677337, 0.0006614527936524828, 0.8151936922826796, 0.009768802605860376, 0.9977721108143799, 0.9676503420836119]
        #self.lambda_pi_1 = [0.5089104109775326, 0.5432925251421259, 0.02218191679541291, 0.8877708494421628, 0.8206554620659708, 0.6351178059195106]
        self.lambda_pi_1 = [
            0.2517703455341973, 0.024354405309287175, 0.3108059389694587,
            0.6223442089112752, 0.28378859405365237, 0.41789353929407375
        ]
        # Normalize the weights so they sum to 1 (compute the sum once, keep a plain list)
        total = sum(self.lambda_pi_1)
        self.lambda_pi_1 = [x / total for x in self.lambda_pi_1]
        #0.5340087189383883 #0.7204150300022019

        # Use the training sets to train two classifiers
        #self.trn_1 = tr.trainer(0.007672586062919887, lambda_pi_1 , self.training_set_state1)
        self.trn_1 = tr.trainer(self.lambda_pi_1, self.training_set_state1)
        self.trn_1.train()

        # Set threshold for dialogue generation
        self.threshold_1 = 0.06  # -0.0015
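
The normalization above only rescales the six lambda weights so that they sum to 1. A minimal standalone sketch of the same arithmetic, using toy numbers rather than the tuned values from the example:

weights = [0.25, 0.02, 0.31, 0.62, 0.28, 0.42]   # toy values, not the tuned ones
total = sum(weights)                             # 1.90
normalized = [w / total for w in weights]
print sum(normalized)                            # 1.0, up to floating-point rounding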
Example #4
import trainer_main as tr
import utility as ut
import numpy as np
import ACOR
import trainer_reader as rdr

def map_multiple_response(training_set, tmap):
	training_map = dict()
	for (ques, ans) in training_set:
		training_map[ques] = training_map.get(ques, []) + [ans]
	mtrs = []
	for ques, answers in training_map.iteritems():
		tmap[answers[0]] = answers
		mtrs.append((ques, answers[0]))
	return mtrs

tmap = dict()
reader = rdr.trainer_reader('Data/training_ML.csv')
training_set = reader.load()

training_set = map_multiple_response(training_set, tmap)

print np.sort(training_set), '\n'

# Tune the six lambda weights by minimizing the trainer's cost function with ACOR
CM = tr.OptimizeLambda(training_set)
Optimization = ACOR.ACO(cFunc=CM.costFunction, nDim=6, nAnt=50, m=20, q=2.14, e=3.5, limit=CM.getLimits())

Optimization.optimizeMin(500)

Example #5
import trainer_main as tr
import utility as ut
import numpy as np
import ACOR
import trainer_reader as rdr

def map_multiple_response(training_set, tmap):
	training_map = dict()
	for (ques, ans) in training_set:
		training_map[ques] = training_map.get(ques, []) + [ans]
	mtrs = []
	for ques, answers in training_map.iteritems():
		tmap[answers[0]] = answers
		mtrs.append((ques, answers[0]))
	return mtrs

tmap = dict()
reader = rdr.trainer_reader('training_ML.csv')
training_set = reader.load()

training_set = map_multiple_response(training_set, tmap)

print np.sort(training_set), '\n'

# Tune the six lambda weights by minimizing the trainer's cost function with ACOR
CM = tr.OptimizeLambda(training_set)
Optimization = ACOR.ACO(cFunc=CM.costFunction, nDim=6, nAnt=50, m=20, q=2.14, e=3.5, limit=CM.getLimits())

Optimization.optimizeMin(500)

Example #6
import numpy as np
import trainer_main as tr
import ACOR
import trainer_reader as rdr


def map_multiple_response(training_set, tmap):
    # Collapse duplicate questions: return one (question, representative answer)
    # pair per question, and record every answer seen for that question in tmap,
    # keyed by the representative answer.
    training_map = dict()
    for (ques, ans) in training_set:
        training_map[ques] = training_map.get(ques, []) + [ans]
    mtrs = []
    for ques, answers in training_map.iteritems():
        tmap[answers[0]] = answers
        mtrs.append((ques, answers[0]))
    return mtrs


tmap = dict()
reader = rdr.trainer_reader('Data/training_ML.csv')
training_set = reader.load()

training_set = map_multiple_response(training_set, tmap)

print np.sort(training_set), '\n'

# Tune the six lambda weights by minimizing the trainer's cost function with ACOR
CM = tr.OptimizeLambda(training_set)
Optimization = ACOR.ACO(cFunc=CM.costFunction,
                        nDim=6,
                        nAnt=50,
                        m=20,
                        q=2.14,
                        e=3.5,
                        limit=CM.getLimits())
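
To make the effect of map_multiple_response concrete, here is a small check on hypothetical data (the questions and answers are made up; dict iteration order, and therefore the order of the returned pairs, may vary):

toy_map = dict()
toy_set = [('hi', 'hello'), ('hi', 'hey there'), ('bye', 'goodbye')]
collapsed = map_multiple_response(toy_set, toy_map)
print collapsed   # [('hi', 'hello'), ('bye', 'goodbye')]
print toy_map     # {'hello': ['hello', 'hey there'], 'goodbye': ['goodbye']}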
Example #7
import numpy as np
import trainer_main as tr
import ACOR
import trainer_reader as rdr


def map_multiple_response(training_set, tmap):
	"""Collapse duplicate questions in the training set.

	Returns a list with one (question, representative answer) pair per
	question; tmap maps each representative answer to the full list of
	answers recorded for that question.
	"""
	training_map = dict()
	for (ques, ans) in training_set:
		training_map[ques] = training_map.get(ques, []) + [ans]
	mtrs = []
	for ques, answers in training_map.iteritems():
		tmap[answers[0]] = answers
		mtrs.append((ques, answers[0]))
	return mtrs

tmap = dict()
reader = rdr.trainer_reader('training_data_2.csv')
training_set = reader.load()

training_set = map_multiple_response(training_set, tmap)

print np.sort(training_set)

# Tune the lambda weights by minimizing the trainer's cost function with ACOR
CM = tr.OptimizeLambda(training_set)
Optimization = ACOR.ACO(cFunc=CM.costFunction, nDim=7, nAnt=50, m=20, q=2.14, e=3.5, limit=CM.getLimits())

Optimization.optimizeMin(500)
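
None of the optimization scripts above show how the tuned lambda vector is handed back to a trainer. A hedged sketch, mirroring Example #2's two-argument trainer call; the assumption that optimizeMin returns the best point is mine, and the real ACOR API may expose the result differently:

best = Optimization.optimizeMin(500)       # assumption: returns the best lambda vector
total = sum(best)
lambda_pi = [x / total for x in best]      # normalize, as in Example #2
trn = tr.trainer(lambda_pi, training_set)
trn.train()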