def __init__(self, isYarpRunning=False, delta=False, context=2, n_mixtures=25, gmm_atts=None,
             inputImagePort="/visionDriver/image:o", openPorts=True):
    """
    Initialise the speech driver.

    :param isYarpRunning: open yarp ports when True. Forced to False when
                          yarp was not importable at module load time.
    :param delta: delta-feature flag stored for later feature extraction.
    :param context: context window size stored on the instance.
    :param n_mixtures: number of GMM mixtures stored on the instance.
    :param gmm_atts: pre-computed GMM attributes; an empty dict is used
                     when None is given.
    :param inputImagePort: yarp port name to read from.
    :param openPorts: also create and open the yarp ports when yarp runs.
    """
    # If yarp could not be found at import time, override the caller's flag.
    if not isYarpRunningGlobal:
        isYarpRunning = False

    # Call parent class init
    SAMDriver.__init__(self, isYarpRunning)

    # Extra state needed by this specific driver.
    self.inputImagePort = inputImagePort
    self.delta = delta
    self.context = context
    self.n_mixtures = n_mixtures
    self.file_suffix = ".wav"
    self.verbose_L = 0

    # FIX: identity check `is not None` instead of `not gmm_atts == None`.
    if gmm_atts is not None:
        self.gmm_data = gmm_atts
    else:
        self.gmm_data = {}

    self.participant_index = None

    # FIX: idiomatic truthiness instead of `== True` comparisons.
    if isYarpRunning and openPorts:
        yarp.Network.init()
        self.createPorts()
        self.openPorts()
        self.createImageArrays()
def __init__(self):
    """
    Construct the driver: run SAMDriver.__init__ then declare all
    driver-specific state that is filled in while reading data and
    training; additionalParametersList names the extra attributes
    preserved between training and interaction.
    """
    SAMDriver.__init__(self)

    # Raw data containers and log bookkeeping.
    self.data = dict()
    self.jointsList = []
    self.objectsList = []
    self.dataLogList = []
    self.labelsLogList = []
    # self.combinationList = []
    # self.combinationKeys = []

    # Feature bookkeeping, populated later.
    self.humanStaticLabels = None
    self.featureInds = None
    self.featureSections = None
    self.featureValues = None

    self.numJoints = 9
    self.labelToRemove = []
    self.indsToRemove = []
    self.actionsAllowed = None
    self.contactThreshold = None
    self.seqConf = None
    self.seqPerc = None

    # Field indices into a parsed log entry.
    self.joint = 0
    self.action = 1
    self.dataset = 2
    self.start = 3
    self.end = 4

    self.additionalParametersList = ['Yall', 'Lall', 'Ytest', 'Ltest',
                                     'numJoints', 'Ymean', 'Ystd',
                                     'Y_normalized', 'ignoreStationary',
                                     'humanStaticLabels', 'featureSections',
                                     'featureValues', 'dataLogList',
                                     'labelsLogList', 'segTrainConf',
                                     'segTrainPerc', 'segTestConf',
                                     'segTestPerc', 'seqConf', 'seqPerc']
def __init__(self):
    """
    Initialise via the SAMDriver base constructor and set up every
    attribute this driver uses; the names in additionalParametersList
    are the extra parameters kept between training and interaction.
    """
    SAMDriver.__init__(self)

    self.data = dict()

    # Lists describing the incoming data and its logs.
    self.jointsList = []
    self.objectsList = []
    self.dataLogList = []
    self.labelsLogList = []
    # self.combinationList = []
    # self.combinationKeys = []

    # Placeholders assigned during feature extraction.
    self.humanStaticLabels = None
    self.featureInds = None
    self.featureSections = None
    self.featureValues = None
    self.actionsAllowed = None
    self.contactThreshold = None
    self.seqConf = None
    self.seqPerc = None

    self.numJoints = 9
    self.labelToRemove = []
    self.indsToRemove = []

    # Positional indices of the fields of a log entry.
    self.joint = 0
    self.action = 1
    self.dataset = 2
    self.start = 3
    self.end = 4

    self.additionalParametersList = [
        'Yall', 'Lall', 'Ytest', 'Ltest', 'numJoints',
        'Ymean', 'Ystd', 'Y_normalized', 'ignoreStationary',
        'humanStaticLabels', 'featureSections', 'featureValues',
        'dataLogList', 'labelsLogList', 'segTrainConf', 'segTrainPerc',
        'segTestConf', 'segTestPerc', 'seqConf', 'seqPerc'
    ]
def __init__(self):
    """
    Constructor.

    Calls SAMDriver.__init__ and then registers, via
    additionalParametersList, the image/pose parameters that must be
    preserved between training and interaction.
    """
    SAMDriver.__init__(self)
    self.additionalParametersList = [
        'imgH', 'imgW',
        'imgHNew', 'imgWNew',
        'image_suffix',
        'pose_index', 'pose_selection'
    ]
def __init__(self):
    """
    Run the SAMDriver base constructor, then declare the driver-specific
    attributes; additionalParametersList lists the extra attributes
    preserved between training and interaction.
    """
    SAMDriver.__init__(self)

    self.data = dict()
    self.numJoints = 9

    # Log-file bookkeeping.
    self.dataLogList = []
    self.labelsLogList = []

    # Placeholders populated while reading and processing data.
    self.itemsPerJoint = None
    self.featureSequence = None
    self.handsCombined = None
    self.data2Labels = None
    self.dataVec = None
    self.allDataDict = None
    self.listOfVectorsToClassify = None
    self.seqTestConf = None
    self.seqTestPerc = None

    self.additionalParametersList = ['listOfVectorsToClassify', 'handsCombined',
                                     'featureSequence', 'itemsPerJoint',
                                     'segTrainConf', 'segTrainPerc',
                                     'segTestConf', 'segTestPerc',
                                     'seqTestConf', 'seqTestPerc']
def prepareData(self, model='mrd', Ntr=50, pose_selection=0):
    """
    Flatten the 4-D face data into 2-D matrices, then delegate to
    SAMDriver.prepareData.

    Y is assumed to have 4 dimensions -- TODO confirm against readData:
        1. Pixels  2. Images  3. Person  4. Movement (static, up/down,
    left/right) -- and L mirrors that layout for the labels.

    From the original developer notes: different LFM scenarios can be
    used -- 'gp' regresses from images to labels, 'bgplvm' perceives
    images only (no inputs, no labels), 'mrd' has no inputs and treats
    images and labels as two views of the output space. The store module
    of the LFM inspects the structure of the data and decides on the LFM
    backbone to use.

    NOTE: self.Y and self.L are overwritten in place -- each turns from
    the multi-dimensional matrix of all modalities into a 2-D
    (instances x features) training matrix.

    :param model: LFM scenario, passed through to SAMDriver.prepareData.
    :param Ntr: number of training points, passed through.
    :param pose_selection: index of the pose to keep; -1 keeps all poses.
    """
    # Take all poses if pose selection == -1, else slice out one pose.
    if pose_selection == -1:
        ttt = numpy.transpose(self.Y, (0, 1, 3, 2))
        # Merge the image and pose axes into a single axis.
        ttt = ttt.reshape((ttt.shape[0], ttt.shape[1]*ttt.shape[2], ttt.shape[3]))
    else:
        ttt = self.Y[:, :, :, pose_selection]
    ttt = numpy.transpose(ttt, (0, 2, 1))
    # Collapse to 2-D then transpose so that rows are instances.
    self.Y = ttt.reshape(ttt.shape[0], ttt.shape[2]*ttt.shape[1])
    self.Y = self.Y.T
    # N=self.Y.shape[0]

    # Identical flattening applied to the label tensor L.
    if pose_selection == -1:
        ttt = numpy.transpose(self.L, (0, 1, 3, 2))
        ttt = ttt.reshape((ttt.shape[0], ttt.shape[1]*ttt.shape[2], ttt.shape[3]))
    else:
        ttt = self.L[:, :, :, pose_selection]
    ttt = numpy.transpose(ttt, (0, 2, 1))
    self.L = ttt.reshape(ttt.shape[0], ttt.shape[2]*ttt.shape[1])
    self.L = self.L.T
    # Keep only the first label column.
    self.L = self.L[:, :1]

    SAMDriver.prepareData(self, model, Ntr)
def __init__(self, isYarpRunning=False, imgH=200, imgW=200, imgHNew=200, imgWNew=200,
             inputImagePort="/visionDriver/image:o"):
    """
    Initialise the face driver.

    :param isYarpRunning: open yarp ports when True.
    :param imgH: height of the incoming image.
    :param imgW: width of the incoming image.
    :param imgHNew: height images are resized to.
    :param imgWNew: width images are resized to.
    :param inputImagePort: yarp port name to read images from.
    """
    # Call parent class init
    SAMDriver.__init__(self, isYarpRunning)

    # Extra state needed by this specific driver.
    self.inputImagePort = inputImagePort
    self.imgHeight = imgH
    self.imgWidth = imgW
    self.imgHeightNew = imgHNew
    self.imgWidthNew = imgWNew
    self.image_suffix = ".ppm"
    self.participant_index = None

    # FIX: idiomatic truthiness instead of `if( isYarpRunning == True ):`.
    if isYarpRunning:
        yarp.Network.init()
        self.createPorts()
        self.openPorts()
        self.createImageArrays()
def __init__(self):
    """
    Base-class initialisation followed by declaration of every attribute
    this driver relies on. additionalParametersList enumerates the extra
    attributes carried over between training and interaction.
    """
    SAMDriver.__init__(self)

    self.data = dict()

    # Placeholders assigned later, during data reading and testing.
    self.itemsPerJoint = None
    self.featureSequence = None
    self.handsCombined = None
    self.data2Labels = None
    self.dataVec = None
    self.allDataDict = None
    self.listOfVectorsToClassify = None
    self.seqTestConf = None
    self.seqTestPerc = None

    self.numJoints = 9
    self.dataLogList = []
    self.labelsLogList = []

    self.additionalParametersList = [
        'listOfVectorsToClassify', 'handsCombined', 'featureSequence',
        'itemsPerJoint', 'segTrainConf', 'segTrainPerc', 'segTestConf',
        'segTestPerc', 'seqTestConf', 'seqTestPerc'
    ]
def __init__(self):
    """
    Initialise the class with SAMDriver.__init__ and augment it with the
    custom attributes below; additionalParametersList is the list of
    extra parameters to preserve between training and interaction.
    """
    SAMDriver.__init__(self)

    # Containers and counters.
    self.data = dict()
    self.numJoints = 9
    self.dataLogList = []
    self.labelsLogList = []

    # Attributes filled in during data preparation and testing.
    self.itemsPerJoint = None
    self.featureSequence = None
    self.handsCombined = None
    self.data2Labels = None
    self.dataVec = None
    self.allDataDict = None
    self.listOfVectorsToClassify = None
    self.seqTestConf = None
    self.seqTestPerc = None

    self.additionalParametersList = ['listOfVectorsToClassify',
                                     'handsCombined',
                                     'featureSequence',
                                     'itemsPerJoint',
                                     'segTrainConf', 'segTrainPerc',
                                     'segTestConf', 'segTestPerc',
                                     'seqTestConf', 'seqTestPerc']
def __init__(self, isYarpRunning=False, imgH=200, imgW=200, imgHNew=200, imgWNew=200,
             inputImagePort="/visionDriver/image:o", openPorts=True):
    """
    Initialise the face driver.

    :param isYarpRunning: open yarp ports when True. Forced to False when
                          yarp was not importable at module load time.
    :param imgH: height of the incoming image.
    :param imgW: width of the incoming image.
    :param imgHNew: height images are resized to.
    :param imgWNew: width images are resized to.
    :param inputImagePort: yarp port name to read images from.
    :param openPorts: also create and open the yarp ports when yarp runs.
    """
    # If yarp could not be found at import time, override the caller's flag.
    if not isYarpRunningGlobal:
        isYarpRunning = False

    # Call parent class init
    SAMDriver.__init__(self, isYarpRunning)

    # Extra state needed by this specific driver.
    self.inputImagePort = inputImagePort
    self.imgHeight = imgH
    self.imgWidth = imgW
    self.imgHeightNew = imgHNew
    self.imgWidthNew = imgWNew
    self.image_suffix = ".ppm"
    self.participant_index = None

    # FIX: idiomatic truthiness instead of `== True` comparisons.
    if isYarpRunning and openPorts:
        yarp.Network.init()
        self.createPorts()
        self.openPorts()
        self.createImageArrays()
def __init__(self, isYarpRunning=False, delta=False, context=2, n_mixtures=25, gmm_atts=None,
             inputImagePort="/visionDriver/image:o", openPorts=True):
    """
    Initialise the speech driver.

    :param isYarpRunning: open yarp ports when True. Forced to False when
                          yarp was not importable at module load time.
    :param delta: delta-feature flag stored for later feature extraction.
    :param context: context window size stored on the instance.
    :param n_mixtures: number of GMM mixtures stored on the instance.
    :param gmm_atts: pre-computed GMM attributes; an empty dict is used
                     when None is given.
    :param inputImagePort: yarp port name to read from.
    :param openPorts: also create and open the yarp ports when yarp runs.
    """
    # If yarp could not be found at import time, override the caller's flag.
    if not isYarpRunningGlobal:
        isYarpRunning = False

    # Call parent class init
    SAMDriver.__init__(self, isYarpRunning)

    # Extra state needed by this specific driver.
    self.inputImagePort = inputImagePort
    self.delta = delta
    self.context = context
    self.n_mixtures = n_mixtures
    self.file_suffix = ".wav"
    self.verbose_L = 0

    # FIX: identity check `is not None` instead of `not gmm_atts == None`.
    if gmm_atts is not None:
        self.gmm_data = gmm_atts
    else:
        self.gmm_data = {}

    self.participant_index = None

    # FIX: idiomatic truthiness instead of `== True` comparisons.
    if isYarpRunning and openPorts:
        yarp.Network.init()
        self.createPorts()
        self.openPorts()
        self.createImageArrays()
def prepareData(self, model='mrd', Ntr=50, pose_selection=0, randSeed=0):
    """
    Flatten the 4-D face data into 2-D matrices, then delegate to
    SAMDriver.prepareData and return its result.

    Y is assumed to have 4 dimensions -- TODO confirm against readData:
        1. Pixels  2. Images  3. Person  4. Movement (static, up/down,
    left/right) -- and L mirrors that layout for the labels.

    From the original developer notes: different LFM scenarios can be
    used -- 'gp' regresses from images to labels, 'bgplvm' perceives
    images only (no inputs, no labels), 'mrd' has no inputs and treats
    images and labels as two views of the output space. The store module
    of the LFM inspects the structure of the data and decides on the LFM
    backbone to use.

    NOTE: self.Y and self.L are overwritten in place -- each turns from
    the multi-dimensional matrix of all modalities into a 2-D
    (instances x features) training matrix.

    :param model: LFM scenario, passed through to SAMDriver.prepareData.
    :param Ntr: number of training points, passed through.
    :param pose_selection: index of the pose to keep; -1 keeps all poses.
    :param randSeed: random seed forwarded to SAMDriver.prepareData.
    :return: whatever SAMDriver.prepareData returns.
    """
    # -------------------------------------Change------------------------------------------#
    # Take all poses if pose selection == -1, else slice out one pose.
    if pose_selection == -1:
        ttt = numpy.transpose(self.Y, (0, 1, 3, 2))
        # Merge the image and pose axes into a single axis.
        ttt = ttt.reshape(
            (ttt.shape[0], ttt.shape[1] * ttt.shape[2], ttt.shape[3]))
    else:
        ttt = self.Y[:, :, :, pose_selection]
    ttt = numpy.transpose(ttt, (0, 2, 1))
    # Collapse to 2-D then transpose so that rows are instances.
    self.Y = ttt.reshape(ttt.shape[0], ttt.shape[2] * ttt.shape[1])
    self.Y = self.Y.T
    # N=self.Y.shape[0]

    # Identical flattening applied to the label tensor L.
    if pose_selection == -1:
        ttt = numpy.transpose(self.L, (0, 1, 3, 2))
        ttt = ttt.reshape(
            (ttt.shape[0], ttt.shape[1] * ttt.shape[2], ttt.shape[3]))
    else:
        ttt = self.L[:, :, :, pose_selection]
    ttt = numpy.transpose(ttt, (0, 2, 1))
    self.L = ttt.reshape(ttt.shape[0], ttt.shape[2] * ttt.shape[1])
    self.L = self.L.T
    # Keep only the first label column.
    self.L = self.L[:, :1]

    ret = SAMDriver.prepareData(self, model, Ntr, randSeed=randSeed)
    return ret
def initialiseModels(argv, update, initMode='training'): # argv[1] = dataPath # argv[2] = modelPath # argv[3] = driverName # update = 'update' or 'new' from SAM.SAM_Core import SAMDriver as Driver dataPath = argv[0] modelPath = argv[1] driverName = argv[2] logging.info(argv) stringCommand = 'from SAM.SAM_Drivers import ' + driverName + ' as Driver' logging.info(stringCommand) exec stringCommand mySAMpy = Driver() mode = update trainName = dataPath.split('/')[-1] # participantList is extracted from number of subdirectories of dataPath participantList = [ f for f in listdir(dataPath) if isdir(join(dataPath, f)) ] off = 17 logging.info('-------------------') logging.info('Training Settings:') logging.info('') logging.info('Init mode: '.ljust(off) + str(initMode)) logging.info('Data Path: '.ljust(off) + str(dataPath)) logging.info('Model Path: '.ljust(off) + str(modelPath)) logging.info('Participants: '.ljust(off) + str(participantList)) logging.info('Model Root Name: '.ljust(off) + str(trainName)) logging.info('Training Mode:'.ljust(off) + str(mode)) logging.info('Driver:'.ljust(off) + str(driverName)) logging.info('-------------------') logging.info('Loading Parameters...') logging.info('') temporalFlag = False modeConfig = '' found = '' try: parser = SafeConfigParser() found = parser.read(dataPath + "/config.ini") if parser.has_option(trainName, 'update_mode'): modeConfig = parser.get(trainName, 'update_mode') else: modeConfig = 'update' logging.info(modeConfig) except IOError: pass defaultParamsList = [ 'experiment_number', 'model_type', 'model_num_inducing', 'model_num_iterations', 'model_init_iterations', 'verbose', 'Quser', 'kernelString', 'ratioData', 'update_mode', 'model_mode', 'temporalModelWindowSize', 'optimiseRecall', 'classificationDict', 'useMaxDistance', 'calibrateUnknown' ] mySAMpy.experiment_number = None mySAMpy.model_type = None mySAMpy.kernelString = None mySAMpy.fname = None mySAMpy.ratioData = None if initMode == 'training' and (mode == 'new' or 
modeConfig == 'new' or 'exp' not in modelPath): logging.info('Loading training parameters from:' + str(dataPath) + "/config.ini") try: default = False parser = SafeConfigParser() parser.optionxform = str found = parser.read(dataPath + "/config.ini") mySAMpy.experiment_number = 'exp' # load parameters from config file # if parser.has_option(trainName, 'experiment_number'): # mySAMpy.experiment_number = int(parser.get(trainName, 'experiment_number')) # elif '.pickle' in modelPath: # mySAMpy.experiment_number = int(modelPath.split('__')[-2].replace('exp', '')) + 1 # else: if parser.has_option(trainName, 'model_type'): mySAMpy.model_type = parser.get(trainName, 'model_type') else: default = True mySAMpy.model_type = 'mrd' if parser.has_option(trainName, 'model_num_inducing'): mySAMpy.model_num_inducing = int( parser.get(trainName, 'model_num_inducing')) else: default = True mySAMpy.model_num_inducing = 30 if parser.has_option(trainName, 'model_num_iterations'): mySAMpy.model_num_iterations = int( parser.get(trainName, 'model_num_iterations')) else: default = True mySAMpy.model_num_iterations = 700 if parser.has_option(trainName, 'model_init_iterations'): mySAMpy.model_init_iterations = int( parser.get(trainName, 'model_init_iterations')) else: default = True mySAMpy.model_init_iterations = 2000 if parser.has_option(trainName, 'verbose'): mySAMpy.verbose = parser.get(trainName, 'verbose') == 'True' else: default = True mySAMpy.verbose = False if parser.has_option(trainName, 'optimiseRecall'): mySAMpy.optimiseRecall = int( parser.get(trainName, 'optimiseRecall')) else: default = True mySAMpy.optimiseRecall = 200 if parser.has_option(trainName, 'useMaxDistance'): mySAMpy.useMaxDistance = parser.get(trainName, 'useMaxDistance') == 'True' else: mySAMpy.useMaxDistance = False if parser.has_option(trainName, 'calibrateUnknown'): mySAMpy.calibrateUnknown = parser.get( trainName, 'calibrateUnknown') == 'True' else: mySAMpy.calibrateUnknown = False if 
parser.has_option(trainName, 'model_mode'): mySAMpy.model_mode = parser.get(trainName, 'model_mode') if mySAMpy.model_mode == 'temporal' and parser.has_option( trainName, 'temporalModelWindowSize'): mySAMpy.temporalWindowSize = int( parser.get(trainName, 'temporalModelWindowSize')) else: temporalFlag = True else: default = True mySAMpy.model_mode = 'single' if parser.has_option(trainName, 'Quser'): mySAMpy.Quser = int(parser.get(trainName, 'Quser')) else: default = True mySAMpy.Quser = 2 if parser.has_option(trainName, 'kernelString'): mySAMpy.kernelString = parser.get(trainName, 'kernelString') else: default = True mySAMpy.kernelString = "GPy.kern.RBF(Q, ARD=False) + GPy.kern.Bias(Q) + GPy.kern.White(Q)" if parser.has_option(trainName, 'ratioData'): mySAMpy.ratioData = int(parser.get(trainName, 'ratioData')) else: default = True mySAMpy.ratioData = 50 if default: logging.info('Default settings applied') mySAMpy.paramsDict = dict() mySAMpy.loadParameters(parser, trainName) except IOError: logging.warning('IO Exception reading ', found) pass else: logging.info('Loading parameters from: \n \t' + str(modelPath)) try: parser = SafeConfigParser() parser.optionxform = str found = parser.read(dataPath + "/config.ini") # load parameters from config file mySAMpy.experiment_number = modelPath.split('__')[-1] modelPickle = pickle.load(open(modelPath + '.pickle', 'rb')) mySAMpy.paramsDict = dict() for j in parser.options(trainName): if j not in defaultParamsList: logging.info(str(j)) mySAMpy.paramsDict[j] = modelPickle[j] mySAMpy.ratioData = modelPickle['ratioData'] mySAMpy.model_type = modelPickle['model_type'] mySAMpy.model_mode = modelPickle['model_mode'] if mySAMpy.model_mode == 'temporal': mySAMpy.temporalModelWindowSize = modelPickle[ 'temporalModelWindowSize'] mySAMpy.model_type = 'mrd' mySAMpy.model_num_inducing = modelPickle['model_num_inducing'] mySAMpy.model_num_iterations = modelPickle['model_num_iterations'] mySAMpy.model_init_iterations = modelPickle[ 
'model_init_iterations'] mySAMpy.verbose = modelPickle['verbose'] mySAMpy.Quser = modelPickle['Quser'] mySAMpy.optimiseRecall = modelPickle['optimiseRecall'] mySAMpy.kernelString = modelPickle['kernelString'] mySAMpy.calibrated = modelPickle['calibrated'] # try loading classification parameters for multiple model implementation try: mySAMpy.useMaxDistance = modelPickle['useMaxDistance'] except: logging.warning( 'Failed to load useMaxDistace. Possible reasons: ' 'Not saved or multiple model implementation') mySAMpy.calibrateUnknown = modelPickle['calibrateUnknown'] if mySAMpy.calibrateUnknown: mySAMpy.classificationDict = modelPickle['classificationDict'] # try: # mySAMpy.listOfModels = modelPickle['listOfModels'] # mySAMpy.classifiers = modelPickle['classifiers'] # mySAMpy.classif_thresh = modelPickle['classif_thresh'] # mulClassLoadFail = False # logging.info('Successfully loaded multiple model classifiers') # except: # mulClassLoadFail = True # logging.info('Failed to load multiple model classifiers') # pass # # # try loading classification parameters for single model implementation # try: # mySAMpy.varianceDirection = modelPickle['varianceDirection'] # mySAMpy.varianceThreshold = modelPickle['varianceThreshold'] # mySAMpy.bestDistanceIDX = modelPickle['bestDistanceIDX'] # logging.info('Successfully loaded single model classifiers') # singClassLoadFail = False # except: # singClassLoadFail = True # logging.info('Failed to load single model classifiers') # pass # if mulClassLoadFail and singClassLoadFail: # raise ValueError('Failed to load model classifiers') except IOError: logging.warning('IO Exception reading ', found) pass if 'exp' in modelPath or 'best' in modelPath or 'backup' in modelPath: fnameProto = '/'.join(modelPath.split('/')[:-1]) + '/' + dataPath.split('/')[-1] + '__' + driverName + \ '__' + mySAMpy.model_type + '__' + str(mySAMpy.experiment_number) else: fnameProto = modelPath + dataPath.split('/')[-1] + '__' + driverName + '__' + 
mySAMpy.model_type + \ '__' + str(mySAMpy.experiment_number) logging.info('Full model name: ' + str(fnameProto)) logging.info('-------------------') logging.info('') mySAMpy.save_model = False mySAMpy.economy_save = True mySAMpy.visualise_output = False # test_mode = True mySAMpy.readData(dataPath, participantList) # at this point, all the data that will be eventually used for training is contained in mySAMpy.Y # and mySAMpy.L contains all labels if any (depending on mrd model or bgplvm model) # mySAMpy.L is a list of labels while mySAMpy.Y is a numpy array of data # mySAMpy.Y should have 2 dimensions, length of dimension 0 = number of instances # length of dimension 1 = length of feature vector if mySAMpy.model_mode != 'temporal': # get list of labels mySAMpy.textLabels = list(set(mySAMpy.L)) # convert L from list of strings to array of indices mySAMpy.L = np.asarray( [mySAMpy.textLabels.index(i) for i in mySAMpy.L])[:, None] mySAMpy.textLabels = mySAMpy.textLabels else: mySAMpy.X, mySAMpy.Y = transformTimeSeriesToSeq( mySAMpy.Y1, mySAMpy.temporalModelWindowSize) mySAMpy.L, mySAMpy.tmp = transformTimeSeriesToSeq( mySAMpy.U1, mySAMpy.temporalModelWindowSize) mm = [mySAMpy] # mm.append(mySAMpy) # mm[0] contains root model # this is the only model in the case of a single model # or contains all info for the rest of the models in case of multiple models # if mySAMpy.model_mode == 'single' or mySAMpy.model_mode == 'temporal': mm[0].participantList = ['all'] else: mm[0].participantList = ['root'] + mySAMpy.textLabels for k in range(len(mm[0].participantList)): if mm[0].participantList[k] == 'all': normaliseData = True minData = len(mm[k].L) mm[0].fname = fnameProto mm[0].model_type = mySAMpy.model_type Ntr = int(mySAMpy.ratioData * minData / 100) else: if k > 0: mm.append(Driver()) # extract subset of data corresponding to this model inds = [ i for i in range(len(mm[0].Y['L'])) if mm[0].Y['L'][i] == k - 1 ] mm[k].Y = mm[0].Y['Y'][inds] mm[k].L = mm[0].Y['L'][inds] 
mm[k].Quser = mm[0].Quser mm[k].verbose = mm[0].verbose logging.info('Object class: ' + str(mm[0].participantList[k])) minData = len(inds) mm[k].fname = fnameProto + '__L' + str(k - 1) mm[0].listOfModels.append(mm[k].fname) mm[k].model_type = 'bgplvm' Ntr = int(mySAMpy.ratioData * minData / 100) normaliseData = True else: normaliseData = False mm[0].listOfModels = [] mm[0].fname = fnameProto mm[0].SAMObject.kernelString = '' minData = len(mm[0].L) Ntr = int(mySAMpy.ratioData * minData / 100) mm[k].modelLabel = mm[0].participantList[k] if mm[0].model_mode != 'temporal': [Yall, Lall, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr, randSeed=0, normalise=normaliseData) mm[k].Yall = Yall mm[k].Lall = Lall mm[k].YtestAll = YtestAll mm[k].LtestAll = LtestAll elif mm[0].model_mode == 'temporal': [Xall, Yall, Lall, XtestAll, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr, randSeed=0, normalise=normaliseData) mm[k].Xall = Xall mm[k].Yall = Yall mm[k].Lall = Lall mm[k].XtestAll = XtestAll mm[k].YtestAll = YtestAll mm[k].LtestAll = LtestAll logging.info('minData = ' + str(minData)) logging.info('ratioData = ' + str(mySAMpy.ratioData)) logging.info( '-------------------------------------------------------------------------------------------------' ) if initMode == 'training': samOptimiser.deleteModel(modelPath, 'exp') for k in range(len(mm[0].participantList)): # for k = 0 check if multiple model or not if mm[0].participantList[k] != 'root': logging.info("Training with " + str(mm[0].model_num_inducing) + ' inducing points for ' + str(mm[0].model_init_iterations) + '|' + str(mm[0].model_num_iterations)) logging.info("Fname:" + str(mm[k].fname)) mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations, mm[0].model_init_iterations, mm[k].fname, mm[0].save_model, mm[0].economy_save, keepIfPresent=False, kernelStr=mm[0].kernelString) if mm[0].visualise_output: ax = mm[k].SAMObject.visualise() visualiseInfo = dict() visualiseInfo['ax'] = ax 
else: visualiseInfo = None else: for k in range(len(mm[0].participantList)): # for k = 0 check if multiple model or not if mm[0].participantList[k] != 'root': logging.info("Training with " + str(mm[0].model_num_inducing) + ' inducing points for ' + str(mm[0].model_init_iterations) + '|' + str(mm[0].model_num_iterations)) mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations, mm[0].model_init_iterations, mm[k].fname, mm[0].save_model, mm[0].economy_save, keepIfPresent=True, kernelStr=mm[0].kernelString) return mm
def saveParameters(self):
    """
    Persist the default parameter set by delegating to
    SAMDriver.saveParameters; this driver adds no extra parameters here.
    """
    SAMDriver.saveParameters(self)
def __init__(self):
    """
    Call the SAMDriver constructor, then register the image and pose
    parameters that are kept between training and interaction.
    """
    SAMDriver.__init__(self)
    self.additionalParametersList = ['imgH', 'imgW',
                                     'imgHNew', 'imgWNew',
                                     'image_suffix',
                                     'pose_index',
                                     'pose_selection']
def initialiseModels(argv, update, initMode='training'): # argv[1] = dataPath # argv[2] = modelPath # argv[3] = driverName # update = 'update' or 'new' from SAM.SAM_Core import SAMDriver as Driver dataPath = argv[0] modelPath = argv[1] driverName = argv[2] logging.info(argv) stringCommand = 'from SAM.SAM_Drivers import ' + driverName + ' as Driver' logging.info(stringCommand) exec stringCommand mySAMpy = Driver() mode = update trainName = dataPath.split('/')[-1] # participantList is extracted from number of subdirectories of dataPath participantList = [f for f in listdir(dataPath) if isdir(join(dataPath, f))] off = 17 logging.info('-------------------') logging.info('Training Settings:') logging.info('') logging.info('Init mode: '.ljust(off) + str(initMode)) logging.info('Data Path: '.ljust(off) + str(dataPath)) logging.info('Model Path: '.ljust(off) + str(modelPath)) logging.info('Participants: '.ljust(off) + str(participantList)) logging.info('Model Root Name: '.ljust(off) + str(trainName)) logging.info('Training Mode:'.ljust(off) + str(mode)) logging.info('Driver:'.ljust(off) + str(driverName)) logging.info('-------------------') logging.info('Loading Parameters...') logging.info('') temporalFlag = False modeConfig = '' found = '' try: parser = SafeConfigParser() found = parser.read(dataPath + "/config.ini") if parser.has_option(trainName, 'update_mode'): modeConfig = parser.get(trainName, 'update_mode') else: modeConfig = 'update' logging.info(modeConfig) except IOError: pass defaultParamsList = ['experiment_number', 'model_type', 'model_num_inducing', 'model_num_iterations', 'model_init_iterations', 'verbose', 'Quser', 'kernelString', 'ratioData', 'update_mode', 'model_mode', 'temporalModelWindowSize', 'optimiseRecall', 'classificationDict', 'useMaxDistance', 'calibrateUnknown'] mySAMpy.experiment_number = None mySAMpy.model_type = None mySAMpy.kernelString = None mySAMpy.fname = None mySAMpy.ratioData = None if initMode == 'training' and (mode == 'new' or 
modeConfig == 'new' or 'exp' not in modelPath): logging.info('Loading training parameters from:' + str(dataPath) + "/config.ini") try: default = False parser = SafeConfigParser() parser.optionxform = str found = parser.read(dataPath + "/config.ini") mySAMpy.experiment_number = 'exp' # load parameters from config file # if parser.has_option(trainName, 'experiment_number'): # mySAMpy.experiment_number = int(parser.get(trainName, 'experiment_number')) # elif '.pickle' in modelPath: # mySAMpy.experiment_number = int(modelPath.split('__')[-2].replace('exp', '')) + 1 # else: if parser.has_option(trainName, 'model_type'): mySAMpy.model_type = parser.get(trainName, 'model_type') else: default = True mySAMpy.model_type = 'mrd' if parser.has_option(trainName, 'model_num_inducing'): mySAMpy.model_num_inducing = int(parser.get(trainName, 'model_num_inducing')) else: default = True mySAMpy.model_num_inducing = 30 if parser.has_option(trainName, 'model_num_iterations'): mySAMpy.model_num_iterations = int(parser.get(trainName, 'model_num_iterations')) else: default = True mySAMpy.model_num_iterations = 700 if parser.has_option(trainName, 'model_init_iterations'): mySAMpy.model_init_iterations = int(parser.get(trainName, 'model_init_iterations')) else: default = True mySAMpy.model_init_iterations = 2000 if parser.has_option(trainName, 'verbose'): mySAMpy.verbose = parser.get(trainName, 'verbose') == 'True' else: default = True mySAMpy.verbose = False if parser.has_option(trainName, 'optimiseRecall'): mySAMpy.optimiseRecall = int(parser.get(trainName, 'optimiseRecall')) else: default = True mySAMpy.optimiseRecall = 200 if parser.has_option(trainName, 'useMaxDistance'): mySAMpy.useMaxDistance = parser.get(trainName, 'useMaxDistance') == 'True' else: mySAMpy.useMaxDistance = False if parser.has_option(trainName, 'calibrateUnknown'): mySAMpy.calibrateUnknown = parser.get(trainName, 'calibrateUnknown') == 'True' else: mySAMpy.calibrateUnknown = False if parser.has_option(trainName, 
'model_mode'): mySAMpy.model_mode = parser.get(trainName, 'model_mode') if mySAMpy.model_mode == 'temporal' and parser.has_option(trainName, 'temporalModelWindowSize'): mySAMpy.temporalWindowSize = int(parser.get(trainName, 'temporalModelWindowSize')) else: temporalFlag = True else: default = True mySAMpy.model_mode = 'single' if parser.has_option(trainName, 'Quser'): mySAMpy.Quser = int(parser.get(trainName, 'Quser')) else: default = True mySAMpy.Quser = 2 if parser.has_option(trainName, 'kernelString'): mySAMpy.kernelString = parser.get(trainName, 'kernelString') else: default = True mySAMpy.kernelString = "GPy.kern.RBF(Q, ARD=False) + GPy.kern.Bias(Q) + GPy.kern.White(Q)" if parser.has_option(trainName, 'ratioData'): mySAMpy.ratioData = int(parser.get(trainName, 'ratioData')) else: default = True mySAMpy.ratioData = 50 if default: logging.info('Default settings applied') mySAMpy.paramsDict = dict() mySAMpy.loadParameters(parser, trainName) except IOError: logging.warning('IO Exception reading ', found) pass else: logging.info('Loading parameters from: \n \t' + str(modelPath)) try: parser = SafeConfigParser() parser.optionxform = str found = parser.read(dataPath + "/config.ini") # load parameters from config file mySAMpy.experiment_number = modelPath.split('__')[-1] modelPickle = pickle.load(open(modelPath+'.pickle', 'rb')) mySAMpy.paramsDict = dict() for j in parser.options(trainName): if j not in defaultParamsList: logging.info(str(j)) mySAMpy.paramsDict[j] = modelPickle[j] mySAMpy.ratioData = modelPickle['ratioData'] mySAMpy.model_type = modelPickle['model_type'] mySAMpy.model_mode = modelPickle['model_mode'] if mySAMpy.model_mode == 'temporal': mySAMpy.temporalModelWindowSize = modelPickle['temporalModelWindowSize'] mySAMpy.model_type = 'mrd' mySAMpy.model_num_inducing = modelPickle['model_num_inducing'] mySAMpy.model_num_iterations = modelPickle['model_num_iterations'] mySAMpy.model_init_iterations = modelPickle['model_init_iterations'] mySAMpy.verbose = 
modelPickle['verbose'] mySAMpy.Quser = modelPickle['Quser'] mySAMpy.optimiseRecall = modelPickle['optimiseRecall'] mySAMpy.kernelString = modelPickle['kernelString'] mySAMpy.calibrated = modelPickle['calibrated'] # try loading classification parameters for multiple model implementation try: mySAMpy.useMaxDistance = modelPickle['useMaxDistance'] except: logging.warning('Failed to load useMaxDistace. Possible reasons: ' 'Not saved or multiple model implementation') mySAMpy.calibrateUnknown = modelPickle['calibrateUnknown'] if mySAMpy.calibrateUnknown: mySAMpy.classificationDict = modelPickle['classificationDict'] # try: # mySAMpy.listOfModels = modelPickle['listOfModels'] # mySAMpy.classifiers = modelPickle['classifiers'] # mySAMpy.classif_thresh = modelPickle['classif_thresh'] # mulClassLoadFail = False # logging.info('Successfully loaded multiple model classifiers') # except: # mulClassLoadFail = True # logging.info('Failed to load multiple model classifiers') # pass # # # try loading classification parameters for single model implementation # try: # mySAMpy.varianceDirection = modelPickle['varianceDirection'] # mySAMpy.varianceThreshold = modelPickle['varianceThreshold'] # mySAMpy.bestDistanceIDX = modelPickle['bestDistanceIDX'] # logging.info('Successfully loaded single model classifiers') # singClassLoadFail = False # except: # singClassLoadFail = True # logging.info('Failed to load single model classifiers') # pass # if mulClassLoadFail and singClassLoadFail: # raise ValueError('Failed to load model classifiers') except IOError: logging.warning('IO Exception reading ', found) pass if 'exp' in modelPath or 'best' in modelPath or 'backup' in modelPath: fnameProto = '/'.join(modelPath.split('/')[:-1]) + '/' + dataPath.split('/')[-1] + '__' + driverName + \ '__' + mySAMpy.model_type + '__' + str(mySAMpy.experiment_number) else: fnameProto = modelPath + dataPath.split('/')[-1] + '__' + driverName + '__' + mySAMpy.model_type + \ '__' + str(mySAMpy.experiment_number) 
logging.info('Full model name: ' + str(fnameProto)) logging.info('-------------------') logging.info('') mySAMpy.save_model = False mySAMpy.economy_save = True mySAMpy.visualise_output = False # test_mode = True mySAMpy.readData(dataPath, participantList) # at this point, all the data that will be eventually used for training is contained in mySAMpy.Y # and mySAMpy.L contains all labels if any (depending on mrd model or bgplvm model) # mySAMpy.L is a list of labels while mySAMpy.Y is a numpy array of data # mySAMpy.Y should have 2 dimensions, length of dimension 0 = number of instances # length of dimension 1 = length of feature vector if mySAMpy.model_mode != 'temporal': # get list of labels mySAMpy.textLabels = list(set(mySAMpy.L)) # convert L from list of strings to array of indices mySAMpy.L = np.asarray([mySAMpy.textLabels.index(i) for i in mySAMpy.L])[:, None] mySAMpy.textLabels = mySAMpy.textLabels else: mySAMpy.X, mySAMpy.Y = transformTimeSeriesToSeq(mySAMpy.Y1, mySAMpy.temporalModelWindowSize) mySAMpy.L, mySAMpy.tmp = transformTimeSeriesToSeq(mySAMpy.U1, mySAMpy.temporalModelWindowSize) mm = [mySAMpy] # mm.append(mySAMpy) # mm[0] contains root model # this is the only model in the case of a single model # or contains all info for the rest of the models in case of multiple models # if mySAMpy.model_mode == 'single' or mySAMpy.model_mode == 'temporal': mm[0].participantList = ['all'] else: mm[0].participantList = ['root'] + mySAMpy.textLabels for k in range(len(mm[0].participantList)): if mm[0].participantList[k] == 'all': normaliseData = True minData = len(mm[k].L) mm[0].fname = fnameProto mm[0].model_type = mySAMpy.model_type Ntr = int(mySAMpy.ratioData * minData / 100) else: if k > 0: mm.append(Driver()) # extract subset of data corresponding to this model inds = [i for i in range(len(mm[0].Y['L'])) if mm[0].Y['L'][i] == k - 1] mm[k].Y = mm[0].Y['Y'][inds] mm[k].L = mm[0].Y['L'][inds] mm[k].Quser = mm[0].Quser mm[k].verbose = mm[0].verbose 
logging.info('Object class: ' + str(mm[0].participantList[k])) minData = len(inds) mm[k].fname = fnameProto + '__L' + str(k - 1) mm[0].listOfModels.append(mm[k].fname) mm[k].model_type = 'bgplvm' Ntr = int(mySAMpy.ratioData * minData / 100) normaliseData = True else: normaliseData = False mm[0].listOfModels = [] mm[0].fname = fnameProto mm[0].SAMObject.kernelString = '' minData = len(mm[0].L) Ntr = int(mySAMpy.ratioData * minData / 100) mm[k].modelLabel = mm[0].participantList[k] if mm[0].model_mode != 'temporal': [Yall, Lall, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr, randSeed=0, normalise=normaliseData) mm[k].Yall = Yall mm[k].Lall = Lall mm[k].YtestAll = YtestAll mm[k].LtestAll = LtestAll elif mm[0].model_mode == 'temporal': [Xall, Yall, Lall, XtestAll, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr, randSeed=0, normalise=normaliseData) mm[k].Xall = Xall mm[k].Yall = Yall mm[k].Lall = Lall mm[k].XtestAll = XtestAll mm[k].YtestAll = YtestAll mm[k].LtestAll = LtestAll logging.info('minData = ' + str(minData)) logging.info('ratioData = ' + str(mySAMpy.ratioData)) logging.info('-------------------------------------------------------------------------------------------------') if initMode == 'training': samOptimiser.deleteModel(modelPath, 'exp') for k in range(len(mm[0].participantList)): # for k = 0 check if multiple model or not if mm[0].participantList[k] != 'root': logging.info("Training with " + str(mm[0].model_num_inducing) + ' inducing points for ' + str(mm[0].model_init_iterations) + '|' + str(mm[0].model_num_iterations)) logging.info("Fname:" + str(mm[k].fname)) mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations, mm[0].model_init_iterations, mm[k].fname, mm[0].save_model, mm[0].economy_save, keepIfPresent=False, kernelStr=mm[0].kernelString) if mm[0].visualise_output: ax = mm[k].SAMObject.visualise() visualiseInfo = dict() visualiseInfo['ax'] = ax else: visualiseInfo = None else: for k in 
range(len(mm[0].participantList)): # for k = 0 check if multiple model or not if mm[0].participantList[k] != 'root': logging.info("Training with " + str(mm[0].model_num_inducing) + ' inducing points for ' + str(mm[0].model_init_iterations) + '|' + str(mm[0].model_num_iterations)) mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations, mm[0].model_init_iterations, mm[k].fname, mm[0].save_model, mm[0].economy_save, keepIfPresent=True, kernelStr=mm[0].kernelString) return mm
def initialiseModels(argv, update, initMode='training'):
    """Initialise a SAM model and its training data (legacy print-based variant).

    Args:
        argv: Sequence where argv[0] is the data path, argv[1] is the model
            path and argv[2] is the name of the driver class to import from
            SAM.SAM_Drivers.
        update: 'update' to reuse the parameters of an existing model, or
            'new' to read training parameters from config.ini in the data path.
        initMode: 'training' honours the value of `update`; any other value
            loads parameters directly from the existing model.

    Returns:
        A list of driver instances: a single entry for a 'single'/'temporal'
        model, or a root model followed by one model per label otherwise.
    """
    # argv[0] = dataPath
    # argv[1] = modelPath
    # argv[2] = driverName
    # update = 'update' or 'new'
    from SAM.SAM_Core import SAMDriver as Driver
    dataPath = argv[0]
    modelPath = argv[1]
    driverName = argv[2]

    print argv
    # dynamically import the requested driver; rebinds the local name `Driver`
    stringCommand = 'from SAM.SAM_Drivers import ' + driverName + ' as Driver'
    print stringCommand
    exec stringCommand

    mySAMpy = Driver()
    mode = update
    trainName = dataPath.split('/')[-1]

    # participantList is extracted from number of subdirectories of dataPath
    participantList = [f for f in listdir(dataPath) if isdir(join(dataPath, f))]

    off = 17  # column width used to align the settings printout
    print '-------------------'
    print 'Training Settings:'
    print
    print 'Data Path: '.ljust(off), dataPath
    print 'Model Path: '.ljust(off), modelPath
    print 'Participants: '.ljust(off), participantList
    print 'Model Root Name: '.ljust(off), trainName
    print 'Training Mode:'.ljust(off), mode
    print 'Driver:'.ljust(off), driverName
    print '-------------------'
    print 'Loading Parameters...'
    print
    temporalFlag = False
    modeConfig = ''
    found = ''
    # read the preferred update mode from config.ini; fall back to 'update'
    try:
        parser = SafeConfigParser()
        found = parser.read(dataPath + "/config.ini")
        if parser.has_option(trainName, 'update_mode'):
            modeConfig = parser.get(trainName, 'update_mode')
        else:
            modeConfig = 'update'
        print modeConfig
    except IOError:
        pass

    # options handled explicitly below; everything else goes into paramsDict
    defaultParamsList = ['experiment_number', 'model_type', 'model_num_inducing',
                         'model_num_iterations', 'model_init_iterations', 'verbose',
                         'Quser', 'kernelString', 'ratioData', 'update_mode',
                         'model_mode', 'windowSize']

    mySAMpy.experiment_number = None
    mySAMpy.model_type = None
    mySAMpy.kernelString = None
    mySAMpy.fname = None
    mySAMpy.ratioData = None

    if initMode == 'training' and (mode == 'new' or modeConfig == 'new' or 'exp' not in modelPath):
        # train a fresh model: parameters come from config.ini (with defaults)
        print 'Loading training parameters from: \n ', '\t' + dataPath + "/config.ini"
        try:
            default = False
            parser = SafeConfigParser()
            parser.optionxform = str  # keep option names case sensitive
            found = parser.read(dataPath + "/config.ini")

            # load parameters from config file
            if parser.has_option(trainName, 'experiment_number'):
                mySAMpy.experiment_number = int(parser.get(trainName, 'experiment_number'))
            elif '.pickle' in modelPath:
                # derive the next experiment number from the existing model name
                mySAMpy.experiment_number = int(modelPath.split('__')[-2].replace('exp', '')) + 1
            else:
                fail = True  # NOTE(review): assigned but never read afterwards
                print 'No experiment_number found'

            if parser.has_option(trainName, 'model_type'):
                mySAMpy.model_type = parser.get(trainName, 'model_type')
            else:
                default = True
                mySAMpy.model_type = 'mrd'

            if parser.has_option(trainName, 'model_num_inducing'):
                mySAMpy.model_num_inducing = int(parser.get(trainName, 'model_num_inducing'))
            else:
                default = True
                mySAMpy.model_num_inducing = 30

            if parser.has_option(trainName, 'model_num_iterations'):
                mySAMpy.model_num_iterations = int(parser.get(trainName, 'model_num_iterations'))
            else:
                default = True
                mySAMpy.model_num_iterations = 700

            if parser.has_option(trainName, 'model_init_iterations'):
                mySAMpy.model_init_iterations = int(parser.get(trainName, 'model_init_iterations'))
            else:
                default = True
                mySAMpy.model_init_iterations = 2000

            if parser.has_option(trainName, 'verbose'):
                mySAMpy.verbose = parser.get(trainName, 'verbose') == 'True'
            else:
                default = True
                mySAMpy.verbose = False

            if parser.has_option(trainName, 'model_mode'):
                mySAMpy.model_mode = parser.get(trainName, 'model_mode')
                # temporal models additionally need a window size
                if mySAMpy.model_mode == 'temporal' and parser.has_option(trainName, 'windowSize'):
                    mySAMpy.windowSize = int(parser.get(trainName, 'windowSize'))
                else:
                    temporalFlag = True
            else:
                default = True
                mySAMpy.model_mode = 'single'

            if parser.has_option(trainName, 'Quser'):
                mySAMpy.Quser = int(parser.get(trainName, 'Quser'))
            else:
                default = True
                mySAMpy.Quser = 2

            if parser.has_option(trainName, 'kernelString'):
                mySAMpy.kernelString = parser.get(trainName, 'kernelString')
            else:
                default = True
                mySAMpy.kernelString = "GPy.kern.RBF(Q, ARD=False) + GPy.kern.Bias(Q) + GPy.kern.White(Q)"

            if parser.has_option(trainName, 'ratioData'):
                mySAMpy.ratioData = int(parser.get(trainName, 'ratioData'))
            else:
                default = True
                mySAMpy.ratioData = 50

            if default:
                print 'Default settings applied'

            mySAMpy.paramsDict = dict()
            mySAMpy.loadParameters(parser, trainName)
        except IOError:
            print 'IO Exception reading ', found
            pass
    else:
        # reuse an existing model: parameters come from its pickle file
        print 'Loading parameters from: \n ', '\t' + modelPath
        try:
            parser = SafeConfigParser()
            parser.optionxform = str
            found = parser.read(dataPath + "/config.ini")

            # load parameters from config file
            if parser.has_option(trainName, 'experiment_number'):
                mySAMpy.experiment_number = int(parser.get(trainName, 'experiment_number'))
            else:
                mySAMpy.experiment_number = int(modelPath.split('__')[-2].replace('exp', ''))

            modelPickle = pickle.load(open(modelPath + '.pickle', 'rb'))
            mySAMpy.paramsDict = dict()
            # driver-specific options are restored from the pickled model
            for j in parser.options(trainName):
                if j not in defaultParamsList:
                    print j
                    mySAMpy.paramsDict[j] = modelPickle[j]

            mySAMpy.ratioData = modelPickle['ratioData']
            mySAMpy.model_type = modelPickle['model_type']
            mySAMpy.model_mode = modelPickle['model_mode']
            if mySAMpy.model_mode == 'temporal':
                mySAMpy.windowSize = modelPickle['windowSize']
                mySAMpy.model_type = 'mrd'
            mySAMpy.model_num_inducing = modelPickle['model_num_inducing']
            mySAMpy.model_num_iterations = modelPickle['model_num_iterations']
            mySAMpy.model_init_iterations = modelPickle['model_init_iterations']
            mySAMpy.verbose = modelPickle['verbose']
            mySAMpy.Quser = modelPickle['Quser']
            mySAMpy.kernelString = modelPickle['kernelString']
            # classification data only exists for multiple-model setups
            try:
                mySAMpy.listOfModels = modelPickle['listOfModels']
                mySAMpy.classifiers = modelPickle['classifiers']
                mySAMpy.classif_thresh = modelPickle['classif_thresh']
            except:
                pass
        except IOError:
            print 'IO Exception reading ', found
            pass

    # build the full model file-name prototype from its components
    if 'exp' in modelPath:
        fnameProto = '/'.join(modelPath.split('/')[:-1]) + '/' + dataPath.split('/')[-1] + '__' + driverName + \
                     '__' + mySAMpy.model_type + '__exp' + str(mySAMpy.experiment_number)
    else:
        fnameProto = modelPath + dataPath.split('/')[-1] + '__' + driverName + '__' + mySAMpy.model_type + \
                     '__exp' + str(mySAMpy.experiment_number)

    print 'Full model name: \n', '\t' + fnameProto
    print '-------------------'
    print

    mySAMpy.save_model = False
    mySAMpy.economy_save = True
    mySAMpy.visualise_output = False
    # test_mode = True

    mySAMpy.readData(dataPath, participantList)
    # at this point, all the data that will be eventually used for training is contained in mySAMpy.Y
    # and mySAMpy.L contains all labels if any (depending on mrd model or bgplvm model)
    # mySAMpy.L is a list of labels while mySAMpy.Y is a numpy array of data
    # mySAMpy.Y should have 2 dimensions, length of dimension 0 = number of instances
    # length of dimension 1 = length of feature vector

    if mySAMpy.model_mode != 'temporal':
        # get list of labels
        mySAMpy.textLabels = list(set(mySAMpy.L))
        # convert L from list of strings to array of indices
        mySAMpy.L = np.asarray([mySAMpy.textLabels.index(i) for i in mySAMpy.L])[:, None]
        mySAMpy.textLabels = mySAMpy.textLabels  # NOTE(review): self-assignment, no effect
    else:
        # temporal models train on windowed time series instead of labels
        mySAMpy.X, mySAMpy.Y = transformTimeSeriesToSeq(mySAMpy.Y1, mySAMpy.windowSize)
        mySAMpy.L, mySAMpy.tmp = transformTimeSeriesToSeq(mySAMpy.U1, mySAMpy.windowSize)

    mm = [mySAMpy]
    # mm.append(mySAMpy)
    # mm[0] contains root model
    # this is the only model in the case of a single model
    # or contains all info for the rest of the models in case of multiple models
    #
    if mySAMpy.model_mode == 'single' or mySAMpy.model_mode == 'temporal':
        mm[0].participantList = ['all']
    else:
        mm[0].participantList = ['root'] + mySAMpy.textLabels

    for k in range(len(mm[0].participantList)):
        if mm[0].participantList[k] == 'all':
            # single/temporal case: one model trained on everything
            minData = len(mm[k].L)
            mm[0].fname = fnameProto
            mm[0].model_type = mySAMpy.model_type
            Ntr = int(mySAMpy.ratioData * minData / 100)
        else:
            if k > 0:
                mm.append(Driver())
                # extract subset of data corresponding to this model
                inds = [i for i in range(len(mm[0].Y['L'])) if mm[0].Y['L'][i] == k - 1]
                mm[k].Y = mm[0].Y['Y'][inds]
                mm[k].L = mm[0].Y['L'][inds]
                mm[k].Quser = mm[0].Quser
                mm[k].verbose = mm[0].verbose
                print 'Object class: ', mm[0].participantList[k]
                minData = len(inds)
                mm[k].fname = fnameProto + '__L' + str(k - 1)
                mm[0].listOfModels.append(mm[k].fname)
                mm[k].model_type = 'bgplvm'
                Ntr = int(mySAMpy.ratioData * minData / 100)
            else:
                # k == 0: the root entry only records bookkeeping for the sub-models
                mm[0].listOfModels = []
                mm[0].fname = fnameProto
                mm[0].SAMObject.kernelString = ''
                minData = len(mm[0].L)
                Ntr = int(mySAMpy.ratioData * minData / 100)
        mm[k].modelLabel = mm[0].participantList[k]

        # split data into training and test sets (Ntr training instances)
        if mm[0].model_mode != 'temporal':
            [Yall, Lall, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr,
                                                                 randSeed=mm[0].experiment_number)
            mm[k].Yall = Yall
            mm[k].Lall = Lall
            mm[k].YtestAll = YtestAll
            mm[k].LtestAll = LtestAll
        elif mm[0].model_mode == 'temporal':
            [Xall, Yall, Lall, XtestAll, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr,
                                                                                 randSeed=mm[0].experiment_number)
            mm[k].Xall = Xall
            mm[k].Yall = Yall
            mm[k].Lall = Lall
            mm[k].XtestAll = XtestAll
            mm[k].YtestAll = YtestAll
            mm[k].LtestAll = LtestAll

        print 'minData = ' + str(minData)
        print 'ratioData = ' + str(mySAMpy.ratioData)

    if initMode == 'training':
        for k in range(len(mm[0].participantList)):
            # for k = 0 check if multiple model or not
            if mm[0].participantList[k] != 'root':
                print "Training with ", mm[0].model_num_inducing, 'inducing points for ', \
                    mm[0].model_init_iterations, '|', mm[0].model_num_iterations

                mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations,
                               mm[0].model_init_iterations, mm[k].fname, mm[0].save_model,
                               mm[0].economy_save, keepIfPresent=False, kernelStr=mm[0].kernelString)

                if mm[0].visualise_output:
                    ax = mm[k].SAMObject.visualise()
                    visualiseInfo = dict()
                    visualiseInfo['ax'] = ax
                else:
                    visualiseInfo = None
    else:
        # non-training mode: keep any model already present on disk
        for k in range(len(mm[0].participantList)):
            # for k = 0 check if multiple model or not
            if mm[0].participantList[k] != 'root':
                print "Training with ", mm[0].model_num_inducing, 'inducing points for ', \
                    mm[0].model_init_iterations, '|', mm[0].model_num_iterations

                mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations,
                               mm[0].model_init_iterations, mm[k].fname, mm[0].save_model,
                               mm[0].economy_save, keepIfPresent=True, kernelStr=mm[0].kernelString)

    return mm
def __init__(self):
    """Initialise the driver and register the image-related attributes that
    must be persisted together with the trained model."""
    SAMDriver.__init__(self)
    # attributes saved/restored in addition to the base SAMDriver set
    extraParams = []
    extraParams.extend(['imgH', 'imgW', 'imgHNew', 'imgWNew'])
    extraParams.extend(['image_suffix', 'pose_index', 'pose_selection'])
    self.additionalParametersList = extraParams
def saveParameters(self):
    """Persist this driver's parameters by delegating to SAMDriver.saveParameters."""
    SAMDriver.saveParameters(self)
def __init__(self):
    """Initialise the driver via the shared SAMDriver setup.

    This driver defines no extra attributes beyond the base set, so the
    list of additional parameters to persist is left empty.
    """
    SAMDriver.__init__(self)
    self.additionalParametersList = list()
def __init__(self, isYarpRunning = False):
    """Initialise the driver.

    Args:
        isYarpRunning: Boolean forwarded to SAMDriver.__init__; presumably
            enables YARP-dependent setup when True — confirm in SAMDriver.
    """
    SAMDriver.__init__(self, isYarpRunning)
def initialiseModels(argv, update, initMode='training'): """Initialise SAM Model data structure, training parameters and user parameters. This method starts by initialising the required Driver from the driver name in argv[3] if it exists in SAM_Drivers folder. The standard model parameters and the specific user parameters are then initialised and the data is read in by the SAMDriver.readData method to complete the model data structure. This method then replicates the model data structure for training with multiple models if it is required in the config file loaded by the Driver. Args: argv_0: dataPath containing the data that is to be trained on. argv_1: modelPath containing the path of where the model is to be stored. argv_2: driverName containing the name of the driver class that is to be loaded from SAM_Drivers folder. update: String having either a value of 'update' or 'new'. 'new' will load the parameters as set in the config file of the driver being loaded present in the dataPath directory. This is used to train a new model from scratch. 'update' will check for an existing model in the modelPath directory and load the parameters from this model if it exists. This is used for retraining a model when new data becomes available. initMode: String having either a value of 'training' or 'interaction'. 'training' takes into consideration the value of update in loading the parameters. (Used by trainSAMModel.py) 'interaction' loads the parameters directly from the model if the model exists. (Used by interactionSAMModel.py) Returns: The output is a list of SAMDriver models. The list is of length 1 when the config file requests a single model or a list of length n+1 for a config file requesting multiple models where n is the number of requested models. The number of models either depends on the number of directories present in the dataPath or from the length of textLabels returned from the SAMDriver.readData method. 
""" from SAM.SAM_Core import SAMDriver as Driver dataPath = argv[0] modelPath = argv[1] driverName = argv[2] logging.info(argv) stringCommand = 'from SAM.SAM_Drivers import ' + driverName + ' as Driver' logging.info(stringCommand) exec stringCommand mySAMpy = Driver() mode = update trainName = dataPath.split('/')[-1] # participantList is extracted from number of subdirectories of dataPath participantList = [ f for f in listdir(dataPath) if isdir(join(dataPath, f)) ] off = 17 logging.info('-------------------') logging.info('Training Settings:') logging.info('') logging.info('Init mode: '.ljust(off) + str(initMode)) logging.info('Data Path: '.ljust(off) + str(dataPath)) logging.info('Model Path: '.ljust(off) + str(modelPath)) logging.info('Participants: '.ljust(off) + str(participantList)) logging.info('Model Root Name: '.ljust(off) + str(trainName)) logging.info('Training Mode:'.ljust(off) + str(mode)) logging.info('Driver:'.ljust(off) + str(driverName)) logging.info('-------------------') logging.info('Loading Parameters...') logging.info('') modeConfig = '' found = '' try: parser = SafeConfigParser() found = parser.read(dataPath + "/config.ini") if parser.has_option(trainName, 'update_mode'): modeConfig = parser.get(trainName, 'update_mode') else: modeConfig = 'update' logging.info(modeConfig) except IOError: pass defaultParamsList = [ 'experiment_number', 'model_type', 'model_num_inducing', 'model_num_iterations', 'model_init_iterations', 'verbose', 'Quser', 'kernelString', 'ratioData', 'update_mode', 'model_mode', 'temporalModelWindowSize', 'optimiseRecall', 'classificationDict', 'useMaxDistance', 'calibrateUnknown' ] mySAMpy.experiment_number = None mySAMpy.model_type = None mySAMpy.kernelString = None mySAMpy.fname = None mySAMpy.ratioData = None if initMode == 'training' and (mode == 'new' or modeConfig == 'new' or 'exp' not in modelPath): logging.info('Loading training parameters from:' + str(dataPath) + "/config.ini") try: default = False parser = 
SafeConfigParser() parser.optionxform = str found = parser.read(dataPath + "/config.ini") mySAMpy.experiment_number = 'exp' if parser.has_option(trainName, 'model_type'): mySAMpy.model_type = parser.get(trainName, 'model_type') else: default = True mySAMpy.model_type = 'mrd' if parser.has_option(trainName, 'model_num_inducing'): mySAMpy.model_num_inducing = int( parser.get(trainName, 'model_num_inducing')) else: default = True mySAMpy.model_num_inducing = 30 if parser.has_option(trainName, 'model_num_iterations'): mySAMpy.model_num_iterations = int( parser.get(trainName, 'model_num_iterations')) else: default = True mySAMpy.model_num_iterations = 700 if parser.has_option(trainName, 'model_init_iterations'): mySAMpy.model_init_iterations = int( parser.get(trainName, 'model_init_iterations')) else: default = True mySAMpy.model_init_iterations = 2000 if parser.has_option(trainName, 'verbose'): mySAMpy.verbose = parser.get(trainName, 'verbose') == 'True' else: default = True mySAMpy.verbose = False if parser.has_option(trainName, 'optimiseRecall'): mySAMpy.optimiseRecall = int( parser.get(trainName, 'optimiseRecall')) else: default = True mySAMpy.optimiseRecall = 200 if parser.has_option(trainName, 'useMaxDistance'): mySAMpy.useMaxDistance = parser.get(trainName, 'useMaxDistance') == 'True' else: mySAMpy.useMaxDistance = False if parser.has_option(trainName, 'calibrateUnknown'): mySAMpy.calibrateUnknown = parser.get( trainName, 'calibrateUnknown') == 'True' else: mySAMpy.calibrateUnknown = False if parser.has_option(trainName, 'model_mode'): mySAMpy.model_mode = parser.get(trainName, 'model_mode') if mySAMpy.model_mode == 'temporal' and parser.has_option( trainName, 'temporalModelWindowSize'): mySAMpy.temporalWindowSize = int( parser.get(trainName, 'temporalModelWindowSize')) else: temporalFlag = True else: default = True mySAMpy.model_mode = 'single' if parser.has_option(trainName, 'Quser'): mySAMpy.Quser = int(parser.get(trainName, 'Quser')) else: default = True 
mySAMpy.Quser = 2 if parser.has_option(trainName, 'kernelString'): mySAMpy.kernelString = parser.get(trainName, 'kernelString') else: default = True mySAMpy.kernelString = "GPy.kern.RBF(Q, ARD=False) + GPy.kern.Bias(Q) + GPy.kern.White(Q)" if parser.has_option(trainName, 'ratioData'): mySAMpy.ratioData = int(parser.get(trainName, 'ratioData')) else: default = True mySAMpy.ratioData = 50 if default: logging.info('Default settings applied') mySAMpy.paramsDict = dict() mySAMpy.loadParameters(parser, trainName) except IOError: logging.warning('IO Exception reading ', found) pass else: logging.info('Loading parameters from: \n \t' + str(modelPath)) try: parser = SafeConfigParser() parser.optionxform = str found = parser.read(dataPath + "/config.ini") # load parameters from config file mySAMpy.experiment_number = modelPath.split('__')[-1] modelPickle = pickle.load(open(modelPath + '.pickle', 'rb')) mySAMpy.paramsDict = dict() for j in parser.options(trainName): if j not in defaultParamsList: logging.info(str(j)) mySAMpy.paramsDict[j] = modelPickle[j] mySAMpy.ratioData = modelPickle['ratioData'] mySAMpy.model_type = modelPickle['model_type'] mySAMpy.model_mode = modelPickle['model_mode'] if mySAMpy.model_mode == 'temporal': mySAMpy.temporalModelWindowSize = modelPickle[ 'temporalModelWindowSize'] mySAMpy.model_type = 'mrd' mySAMpy.model_num_inducing = modelPickle['model_num_inducing'] mySAMpy.model_num_iterations = modelPickle['model_num_iterations'] mySAMpy.model_init_iterations = modelPickle[ 'model_init_iterations'] mySAMpy.verbose = modelPickle['verbose'] mySAMpy.Quser = modelPickle['Quser'] mySAMpy.optimiseRecall = modelPickle['optimiseRecall'] mySAMpy.kernelString = modelPickle['kernelString'] mySAMpy.calibrated = modelPickle['calibrated'] # try loading classification parameters for multiple model implementation try: mySAMpy.useMaxDistance = modelPickle['useMaxDistance'] except: logging.warning( 'Failed to load useMaxDistace. 
Possible reasons: ' 'Not saved or multiple model implementation') mySAMpy.calibrateUnknown = modelPickle['calibrateUnknown'] if mySAMpy.calibrateUnknown: mySAMpy.classificationDict = modelPickle['classificationDict'] except IOError: logging.warning('IO Exception reading ', found) pass if 'exp' in modelPath or 'best' in modelPath or 'backup' in modelPath: fnameProto = '/'.join(modelPath.split('/')[:-1]) + '/' + dataPath.split('/')[-1] + '__' + driverName + \ '__' + mySAMpy.model_type + '__' + str(mySAMpy.experiment_number) else: fnameProto = modelPath + dataPath.split('/')[-1] + '__' + driverName + '__' + mySAMpy.model_type + \ '__' + str(mySAMpy.experiment_number) logging.info('Full model name: ' + str(fnameProto)) logging.info('-------------------') logging.info('') mySAMpy.save_model = False mySAMpy.economy_save = True mySAMpy.visualise_output = False # test_mode = True mySAMpy.readData(dataPath, participantList) if mySAMpy.model_mode != 'temporal': # get list of labels mySAMpy.textLabels = list(set(mySAMpy.L)) # convert L from list of strings to array of indices mySAMpy.L = np.asarray( [mySAMpy.textLabels.index(i) for i in mySAMpy.L])[:, None] mySAMpy.textLabels = mySAMpy.textLabels else: mySAMpy.X, mySAMpy.Y = transformTimeSeriesToSeq( mySAMpy.Y1, mySAMpy.temporalModelWindowSize) mySAMpy.L, mySAMpy.tmp = transformTimeSeriesToSeq( mySAMpy.U1, mySAMpy.temporalModelWindowSize) mm = [mySAMpy] # mm.append(mySAMpy) # mm[0] contains root model # this is the only model in the case of a single model # or contains all info for the rest of the models in case of multiple models # if mySAMpy.model_mode == 'single' or mySAMpy.model_mode == 'temporal': mm[0].participantList = ['all'] else: mm[0].participantList = ['root'] + mySAMpy.textLabels for k in range(len(mm[0].participantList)): if mm[0].participantList[k] == 'all': normaliseData = True minData = len(mm[k].L) mm[0].fname = fnameProto mm[0].model_type = mySAMpy.model_type Ntr = int(mySAMpy.ratioData * minData / 100) 
else: if k > 0: mm.append(Driver()) # extract subset of data corresponding to this model inds = [ i for i in range(len(mm[0].Y['L'])) if mm[0].Y['L'][i] == k - 1 ] mm[k].Y = mm[0].Y['Y'][inds] mm[k].L = mm[0].Y['L'][inds] mm[k].Quser = mm[0].Quser mm[k].verbose = mm[0].verbose logging.info('Object class: ' + str(mm[0].participantList[k])) minData = len(inds) mm[k].fname = fnameProto + '__L' + str(k - 1) mm[0].listOfModels.append(mm[k].fname) mm[k].model_type = 'bgplvm' Ntr = int(mySAMpy.ratioData * minData / 100) normaliseData = True else: normaliseData = False mm[0].listOfModels = [] mm[0].fname = fnameProto mm[0].SAMObject.kernelString = '' minData = len(mm[0].L) Ntr = int(mySAMpy.ratioData * minData / 100) mm[k].modelLabel = mm[0].participantList[k] if mm[0].model_mode != 'temporal': [Yall, Lall, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr, randSeed=0, normalise=normaliseData) mm[k].Yall = Yall mm[k].Lall = Lall mm[k].YtestAll = YtestAll mm[k].LtestAll = LtestAll elif mm[0].model_mode == 'temporal': [Xall, Yall, Lall, XtestAll, YtestAll, LtestAll] = mm[k].prepareData(mm[k].model_type, Ntr, randSeed=0, normalise=normaliseData) mm[k].Xall = Xall mm[k].Yall = Yall mm[k].Lall = Lall mm[k].XtestAll = XtestAll mm[k].YtestAll = YtestAll mm[k].LtestAll = LtestAll logging.info('minData = ' + str(minData)) logging.info('ratioData = ' + str(mySAMpy.ratioData)) logging.info( '-------------------------------------------------------------------------------------------------' ) if initMode == 'training': samOptimiser.deleteModel(modelPath, 'exp') for k in range(len(mm[0].participantList)): # for k = 0 check if multiple model or not if mm[0].participantList[k] != 'root': logging.info("Training with " + str(mm[0].model_num_inducing) + ' inducing points for ' + str(mm[0].model_init_iterations) + '|' + str(mm[0].model_num_iterations)) logging.info("Fname:" + str(mm[k].fname)) mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations, 
mm[0].model_init_iterations, mm[k].fname, mm[0].save_model, mm[0].economy_save, keepIfPresent=False, kernelStr=mm[0].kernelString) if mm[0].visualise_output: ax = mm[k].SAMObject.visualise() visualiseInfo = dict() visualiseInfo['ax'] = ax else: visualiseInfo = None else: for k in range(len(mm[0].participantList)): # for k = 0 check if multiple model or not if mm[0].participantList[k] != 'root': logging.info("Training with " + str(mm[0].model_num_inducing) + ' inducing points for ' + str(mm[0].model_init_iterations) + '|' + str(mm[0].model_num_iterations)) mm[k].training(mm[0].model_num_inducing, mm[0].model_num_iterations, mm[0].model_init_iterations, mm[k].fname, mm[0].save_model, mm[0].economy_save, keepIfPresent=True, kernelStr=mm[0].kernelString) return mm