# Visual separator in the console log.  Parenthesized print() behaves
# identically for a single argument on Python 2 and is required on Python 3.
print('-------------------')


# Creates and opens ports for interaction with speech module.
# NOTE(review): yarpRunning, dataPath and participantList are assumed to be
# defined earlier in the file (not shown in this extract).
if yarpRunning:
    yarp.Network.init()
    inputInteractionPort = yarp.Port()
    inputInteractionPort.open("/sam/face/rpc:i")

    inputBottle = yarp.Bottle()
    outputBottle = yarp.Bottle()

# Target image size after rescaling.
imgHNew = 200
imgWNew = 200

# Creates a SAMpy object (True: interactive/training mode; input images come
# from the CLM segmented-image port).
mySAMpy = SAMDriver_interaction(True, imgH=400, imgW=400, imgHNew=imgHNew,
                                imgWNew=imgWNew,
                                inputImagePort="/CLM/imageSeg/out")

# Specification of the experiment number
experiment_number = 1007  # 42

# Location of face data
root_data_dir = dataPath

# Image format
image_suffix = ".ppm"

# Array of participants to be recognised
participant_index = participantList

# Poses used during the data collection
pose_index = ['Seg']

# Use a subset of the data for training
# --- Ejemplo n.º 2 (code-sample separator from the extraction; not executable) ---
# Restore model and preprocessing parameters from the unpickled model dict
# (modelPickle is loaded earlier in the file, not shown in this extract).
ratioData = modelPickle['percentTestData']
image_suffix = modelPickle['image_suffix']
model_type = modelPickle['model_type']
model_num_inducing = modelPickle['num_inducing']
model_init_iterations = modelPickle['model_init_iterations']
model_num_iterations = modelPickle['model_num_iterations']
kernelString = modelPickle['kernelString']
Q = modelPickle['Q']
economy_save = True
pose_index = ['']
pose_selection = 0

# # Creates a SAMpy object
# False: non-interactive mode -- the model is loaded/evaluated, not collected live.
mySAMpy = SAMDriver_interaction(False,
                                imgH=imgH,
                                imgW=imgW,
                                imgHNew=imgHNew,
                                imgWNew=imgWNew)

# # Reading face data, preparation of data and training of the model
mySAMpy.readData(dataPath, participantList, pose_index)

# Split available images into training and test counts.
# NOTE(review): the pickle key is 'percentTestData' but the value is used as
# the TRAINING percentage here -- confirm against the training script.
minImages = mySAMpy.Y.shape[1]
Ntr = int(minImages * ratioData / 100)
Ntest = minImages - Ntr

# Keep full copies of the data/label arrays before per-participant slicing.
allPersonsY = mySAMpy.Y
allPersonsL = mySAMpy.L

for i in range(len(participantList)):
    #print participantList[i]
# --- Ejemplo n.º 3 (code-sample separator from the extraction; not executable) ---
# Report the configuration that was located.  Parenthesized print() behaves
# identically for a single argument on Python 2 and is required on Python 3.
print('-------------------')
print('Config file found: ' + pathFound[0])
print(dataPath)
print(modelPath)
print(participantList)
print('-------------------')


# Creates and opens ports for interaction with speech module
yarp.Network.init()
inputInteractionPort = yarp.BufferedPortBottle()
inputInteractionPort.open("/sam/face/interaction:i")
choice = yarp.Bottle()

# Creates a SAMpy object (True: interactive/training mode; live images come
# from the vision driver port).
mySAMpy = SAMDriver_interaction(True, imgH=400, imgW=400, imgHNew=200,
                                imgWNew=200,
                                inputImagePort="/visionDriver/image:o")

# Specification of the experiment number
experiment_number = 1007  # 42

# Location of face data
root_data_dir = dataPath

# Image format
image_suffix = ".ppm"

# Array of participants to be recognised
participant_index = participantList

# Poses used during the data collection
pose_index = ['Seg']

# Use a subset of the data for training
# --- Ejemplo n.º 4 (code-sample separator from the extraction; not executable) ---
# Restore model and preprocessing parameters from the unpickled model dict
# (modelPickle is loaded earlier in the file, not shown in this extract).
imgHNew = modelPickle['imgHNew']
imgWNew = modelPickle['imgWNew']
ratioData = modelPickle['percentTestData']
image_suffix = modelPickle['image_suffix']
model_type = modelPickle['model_type']
model_num_inducing = modelPickle['num_inducing']
model_init_iterations = modelPickle['model_init_iterations']
model_num_iterations = modelPickle['model_num_iterations']
kernelString = modelPickle['kernelString']
Q = modelPickle['Q']
economy_save = True
pose_index = ['']
pose_selection = 0

# # Creates a SAMpy object
# False: non-interactive mode -- the model is loaded/evaluated, not collected live.
mySAMpy = SAMDriver_interaction(False, imgH=imgH, imgW=imgW,
                                imgHNew=imgHNew, imgWNew=imgWNew)

# # Reading face data, preparation of data and training of the model
mySAMpy.readData(dataPath, participantList, pose_index)

# Split available images into training and test counts.
# NOTE(review): the pickle key is 'percentTestData' but the value is used as
# the TRAINING percentage here -- confirm against the training script.
minImages = mySAMpy.Y.shape[1]
Ntr = int(minImages * ratioData / 100)
Ntest = minImages - Ntr

# Keep full copies of the data/label arrays before per-participant slicing.
allPersonsY = mySAMpy.Y
allPersonsL = mySAMpy.L

for i in range(len(participantList)):
	#print participantList[i]
	mySAMpy.Y = allPersonsY[:,:,i,None]
	mySAMpy.L = allPersonsL[:,:,i,None]
# --- Ejemplo n.º 5 (code-sample separator from the extraction; not executable) ---
	modelPickle = pickle.load(open(modelPath ,'rb'))
	imgH = modelPickle['imgH']
	imgW = modelPickle['imgW']
	imgHNew = modelPickle['imgHNew']
	imgWNew = modelPickle['imgWNew']
	ratioData = modelPickle['percentTestData']
	image_suffix = modelPickle['image_suffix']
	model_type = modelPickle['model_type']
	model_num_inducing = modelPickle['num_inducing']
	model_init_iterations = modelPickle['model_init_iterations']
	model_num_iterations = modelPickle['model_num_iterations']
	kernelString = modelPickle['kernelString']

# Creates a SAMpy object; yarpRunning (defined earlier, not shown here)
# selects whether YARP ports are actually opened.
mySAMpy = SAMDriver_interaction(yarpRunning, imgH=imgH, imgW=imgW,
                                imgHNew=imgHNew, imgWNew=imgWNew)

# Location of face data
root_data_dir = dataPath

# Array of participants to be recognised
participant_index = participantList

# Poses used during the data collection
pose_index = ['']

# Pose selected for training
pose_selection = 0

# Parenthesized print() behaves identically for a single argument on
# Python 2 and is required on Python 3.
print('modelPath: ' + modelPath)
# --- Ejemplo n.º 6 (code-sample separator from the extraction; not executable) ---
# Report the configuration that was located.  Parenthesized print() behaves
# identically for a single argument on Python 2 and is required on Python 3.
print(dataPath)
print(modelPath)
print(participantList)
print('-------------------')

# Creates and opens ports for interaction with speech module
if yarpRunning:
    yarp.Network.init()
    inputInteractionPort = yarp.BufferedPortBottle()
    inputInteractionPort.open("/sam/face/interaction:i")
    choice = yarp.Bottle()

# Creates a SAMpy object (True: interactive/training mode; live images come
# from the vision driver port).
mySAMpy = SAMDriver_interaction(True,
                                imgH=400,
                                imgW=400,
                                imgHNew=200,
                                imgWNew=200,
                                inputImagePort="/visionDriver/image:o")

# Specification of the experiment number
experiment_number = 1007  # 42

# Location of face data
root_data_dir = dataPath

# Image format
image_suffix = ".ppm"

# Array of participants to be recognised
participant_index = participantList

# Poses used during the data collection
# --- Ejemplo n.º 7 (code-sample separator from the extraction; not executable) ---
# Creates and opens ports for interaction with speech module.
# NOTE(review): yarpRunning, dataPath and participantList are assumed to be
# defined earlier in the file (not shown in this extract).
if yarpRunning:
    yarp.Network.init()
    inputInteractionPort = yarp.Port()
    inputInteractionPort.open("/sam/face/rpc:i")

    inputBottle = yarp.Bottle()
    outputBottle = yarp.Bottle()

# Target image size after rescaling.
imgHNew = 200
imgWNew = 200
# Creates a SAMpy object (True: interactive/training mode; input images come
# from the CLM segmented-image port).
mySAMpy = SAMDriver_interaction(True,
                                imgH=400,
                                imgW=400,
                                imgHNew=imgHNew,
                                imgWNew=imgWNew,
                                inputImagePort="/CLM/imageSeg/out")

# Specification of the experiment number
experiment_number = 1007  #42

# Location of face data
root_data_dir = dataPath

# Image format
image_suffix = ".ppm"

# Array of participants to be recognised
participant_index = participantList

# Poses used during the data collection