Example #1
    def respond(self, command, reply):
        # this method responds to samSupervisor commands
        b = yarp.Bottle()
        reply.clear()
        action = command.get(0).asString()
        print(action + ' received')
        print('responding to ' + action + ' request')
        if action == "reload":
            # send a message to the interaction model to check version of currently loaded model
            # and compare it with that stored on disk. If model on disk is more recent reload model
            # interaction model to return "model reloaded correctly" or "loaded model already up to date"
            print "reloading model"
            try:
                self.mm = SAM_utils.initialiseModels(
                    [self.dataPath, self.modelPath, self.driverName], 'update',
                    'interaction')
                reply.addString('ack')
            except Exception:
                reply.addString('nack')
        # -------------------------------------------------
        elif action == "EXIT":
            reply.addString('ack')
            self.close()
        # -------------------------------------------------
        elif action in self.callSignList:
            if 'label' in action and self.dataInConnected:
                self.classifyInstance(reply)
            elif 'instance' in action and self.dataOutConnected:
                self.generateInstance(reply)
        # -------------------------------------------------
        else:
            reply.addString("nack")

        return True
Example #2
    def testTemporalPerformance(self, testModel, Xall, Yall, Lall, XtestAll, YtestAll, LtestAll, verbose):
        # Initial window to kick-off free simulation
        x_start = XtestAll[0, :][:, None].T

        # Free simulation
        ygp, varygp = utils.gp_narx(testModel[0].SAMObject.model, x_start, YtestAll.shape[0], LtestAll, self.windowSize)

        return 1000
Example #3
    def testTemporalPerformance(self, testModel, Xall, Yall, Lall, XtestAll,
                                YtestAll, LtestAll, verbose):
        # Initial window to kick-off free simulation
        x_start = XtestAll[0, :][:, None].T

        # Free simulation
        ygp, varygp = utils.gp_narx(testModel[0].SAMObject.model, x_start,
                                    YtestAll.shape[0], LtestAll,
                                    self.windowSize)

        return 1000
Example #4
    def testTemporalPerformance(self, testModel, Xall, Yall, Lall, XtestAll, YtestAll, LtestAll, verbose):
        # Initial window to kick-off free simulation
        x_start = XtestAll[0, :][:, None].T

        # Free simulation
        ygp, varygp = utils.gp_narx(testModel[0].SAMObject.model, x_start, YtestAll.shape[0], LtestAll, self.windowSize)
        pb.figure()
        pb.plot(YtestAll, 'x-')
        pb.plot(ygp, 'ro-')
        pb.legend(('True', 'Pred'))
        pb.title('NARX-with-exogenous')

        err = np.sum((YtestAll - ygp)**2)

        return err
Example #5
    def testTemporalPerformance(self, testModel, Xall, Yall, Lall, XtestAll,
                                YtestAll, LtestAll, verbose):
        # Initial window to kick-off free simulation
        x_start = XtestAll[0, :][:, None].T

        # Free simulation
        ygp, varygp = utils.gp_narx(testModel[0].SAMObject.model, x_start,
                                    YtestAll.shape[0], LtestAll,
                                    self.windowSize)
        pb.figure()
        pb.plot(YtestAll, 'x-')
        pb.plot(ygp, 'ro-')
        pb.legend(('True', 'Pred'))
        pb.title('NARX-with-exogenous')

        err = np.sum((YtestAll - ygp)**2)

        return err
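The variants above all delegate the heavy lifting to `utils.gp_narx`, which runs a free simulation: the model predicts one step ahead from an initial window, the window slides forward to absorb that prediction, and the process repeats for the length of the test series. Below is a minimal, self-contained sketch of that rollout pattern; `predict_step` is a hypothetical one-step predictor standing in for the trained GP model, and the window arithmetic is simplified to a scalar series.

import numpy as np

def free_simulation(predict_step, x_start, n_steps):
    # x_start: initial window of shape (1, window_size), as in the examples above
    window = x_start.copy()
    preds = []
    for _ in range(n_steps):
        y_next = predict_step(window)                  # one-step-ahead prediction
        preds.append(y_next)
        # slide the window: drop the oldest sample, append the new prediction
        window = np.hstack([window[:, 1:], [[y_next]]])
    return np.asarray(preds)

# toy predictor that decays towards zero, standing in for the GP posterior mean
toy_step = lambda w: 0.9 * w[0, -1]
ygp = free_simulation(toy_step, np.ones((1, 5)), n_steps=20)
err = np.sum((np.zeros(20) - ygp) ** 2)                # SSE, as in the example above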
Example #6
def singleRecall(thisModel,
                 testInstance,
                 verbose,
                 visualiseInfo=None,
                 optimise=100):
    """
        Method that performs classification for single model implementations.
    
        This method returns the classification label of a test instance by calculating the predictive mean and variance of the backwards mapping. It then decides whether the test instance is known or unknown and, if known, returns its most probable classification label.
        
        Args:
            thisModel: SAMObject model to recall from.
            testInstance: Novel feature vector to test.
            verbose: Enable or disable logging to stdout.
            visualiseInfo: None to disable plotting and plotObject to display plot of recall.
            optimise: Number of optimisation iterations to perform during recall.

        Returns:
            Classification label and variance if __calibrateUnknown__ is set to `False` in the config file. Otherwise returns classification label and normalised classification probability.
    """
    #
    # mm,vv,pp=self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)
    # if verbose:
    # logging.info('single model recall')
    textStringOut = ''
    # normalize incoming data
    testValue = testInstance - thisModel.Ymean
    testValue /= thisModel.Ystd

    try:
        ret = thisModel.SAMObject.pattern_completion(
            testValue, visualiseInfo=visualiseInfo, optimise=optimise)
    except IndexError:
        return ['unknown', 0]
    mm = ret[0]
    vv = list(ret[1][0])
    svv = sum(vv)
    mvv = svv / len(vv)
    vv.append(svv)
    vv.append(mvv)

    # find nearest neighbour of mm and SAMObject.model.X

    k = np.matlib.repmat(mm[0].values,
                         thisModel.SAMObject.model.X.mean.shape[0], 1)
    pow2 = np.power(thisModel.SAMObject.model.X.mean - k, 2)
    s = np.power(np.sum(pow2, 1), 0.5)
    nn = np.argmin(s)
    min_value = s[nn]

    if thisModel.SAMObject.type == 'mrd':
        classLabel = thisModel.textLabels[int(
            thisModel.SAMObject.model.bgplvms[1].Y[nn, :])]
    elif thisModel.SAMObject.type == 'bgplvm':
        classLabel = thisModel.textLabels[int(thisModel.L[nn, :])]

    known = True
    if thisModel.calibrated:
        if thisModel.useMaxDistance:
            known = utils.varianceClass(
                thisModel.classificationDict['varianceDirection'],
                vv[thisModel.classificationDict['bestDistanceIDX']],
                thisModel.classificationDict['varianceThreshold'])

            details = str(thisModel.classificationDict['varianceThreshold']) + ' ' + \
                      str(thisModel.classificationDict['varianceDirection'])

            probClass = vv[thisModel.classificationDict['bestDistanceIDX']]
        else:
            P_Known_given_X = utils.PfromHist(
                vv[:-2], thisModel.classificationDict['histKnown'],
                thisModel.classificationDict['binWidth'])
            P_Unknown_given_X = utils.PfromHist(
                vv[:-2], thisModel.classificationDict['histUnknown'],
                thisModel.classificationDict['binWidth'])

            if thisModel.classificationDict['method'] == 'mulProb':
                s1 = reduce(lambda x, y: x * y, P_Known_given_X)
                s2 = reduce(lambda x, y: x * y, P_Unknown_given_X)
                known = s1 > s2
            else:
                s1 = np.sum(P_Known_given_X)
                s2 = np.sum(P_Unknown_given_X)
                known = s1 > s2

            if known:
                probClass = s1
                details = s1, ' > ', s2
            else:
                probClass = s2
                details = s2, ' > ', s1

    if thisModel.calibrated:
        if known:
            textStringOut = classLabel
        else:
            textStringOut = 'unknown'
            runnerUp = classLabel
    else:
        textStringOut = classLabel

    if verbose:
        if thisModel.calibrated:
            if textStringOut == 'unknown':
                logging.info("With " + str(probClass) +
                             " prob. error the new instance is " +
                             str(runnerUp))
                logging.info('But ' + str(details) + ' than ' +
                             str(probClass) + ' so class as ' +
                             str(textStringOut))
            else:
                logging.info("With " + str(probClass) +
                             " prob. error the new instance is " +
                             str(textStringOut))
        else:
            logging.info("With " + str(vv) +
                         " prob. error the new instance is " +
                         str(textStringOut))

    if thisModel.calibrated:
        return [textStringOut, probClass / len(vv)]
    else:
        return [textStringOut, vv]
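Two pieces of `singleRecall` are worth isolating: the nearest-neighbour lookup over the latent means (`thisModel.SAMObject.model.X.mean`) and the variance-based known/unknown gate. The sketch below reproduces both on toy data; the latent means, labels, and threshold are illustrative stand-ins, not the SAMObject internals.

import numpy as np

# toy stand-ins for the trained latent means and their text labels
latent_means = np.array([[0.0, 0.0], [1.0, 1.0], [5.0, 5.0]])
text_labels = ['greet', 'wave', 'push']
variance_threshold = 0.5   # would come from calibration, as in the examples

def recall(latent_point, predictive_variance):
    # Euclidean distance from the test point to every training latent mean
    s = np.sqrt(np.sum(np.power(latent_means - latent_point, 2), axis=1))
    nn = np.argmin(s)
    # known/unknown gate: high predictive variance means the model is unsure
    if predictive_variance < variance_threshold:
        return text_labels[nn]
    return 'unknown'

print(recall(np.array([0.9, 1.1]), 0.2))   # -> wave
print(recall(np.array([0.9, 1.1]), 0.9))   # -> unknown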
Example #7
def calibrateSingleModelRecall(thisModel):
    """
    Perform calibration for single model implementations.

    This method either uses the Bhattacharyya distance between the variance distributions of known and unknown, or uses histograms of the known and unknown variances in the training data, to carry out the classification. It depends on the following parameters present in config.ini. \n

    1) __useMaxDistance__ : `False` or `True`. This enables the use of the Bhattacharyya distance method to recognise known and unknown. \n
    2) __calibrateUnknown__ : `True` or `False`. This turns on or off the calibration of the model for known and unknown inputs. \n
    3) __noBins__ : Integer number of bins to be used for the histogram method if __calibrateUnknown__ is `True` and __useMaxDistance__ is `False`. \n
    4) __method__ : String indicating the method used when histograms are used for calibration. When using histograms, the multi-dimensional probability of known and unknown are both calculated using the histogram. `sumProb` then performs a decision based on the largest sum after summing the probabilities of known and unknown independently. `mulProb` performs a decision based on the largest product after multiplying the probabilities of known and unknown independently.\n

    Args:
        thisModel: SAMObject model to calibrate.

    Returns:
        None
    """
    yCalib = formatDataFunc(thisModel[0].allDataDict['Y'])
    logging.info('entering segment testing')
    labelList, confMatrix, ret, variancesKnown, variancesUnknown = segmentTesting(
        thisModel,
        yCalib,
        thisModel[0].allDataDict['L'],
        thisModel[0].verbose,
        'calib',
        serialMode=False,
        optimise=thisModel[0].optimiseRecall,
        calibrate=True)
    thisModel[0].classificationDict = dict()

    if thisModel[0].useMaxDistance:
        [mk, vk, rk] = utils.meanVar_varianceDistribution(variancesKnown)
        [muk, vuk, ruk] = utils.meanVar_varianceDistribution(variancesUnknown)

        distance = []
        for j in range(len(mk)):
            distance.append(
                utils.bhattacharyya_distance(mk[j], muk[j], vk[j], vuk[j]))

        if distance:
            maxIdx = distance.index(max(distance))
        thisModel[0].classificationDict['bestDistanceIDX'] = maxIdx
        thisModel[0].classificationDict['bestDistance_props'] = {
            'KnownMean': mk[maxIdx],
            'UnknownMean': muk[maxIdx],
            'KnownVar': vk[maxIdx],
            'UnknownVar': vuk[maxIdx]
        }

        # if maxIdx < len(mk) - 2:
        #     thisModel[0].bestSegOperation = maxIdx
        # elif maxIdx == len(mk) - 2:
        #     thisModel[0].bestSegOperation = 'sum'
        # elif maxIdx == len(mk) - 1:
        #     thisModel[0].bestSegOperation = 'mean'

        intersection = utils.solve_intersections(mk[maxIdx], muk[maxIdx],
                                                 np.sqrt(vk[maxIdx]),
                                                 np.sqrt(vuk[maxIdx]))

        maxLim = max(rk[maxIdx][1], ruk[maxIdx][1])
        minLim = min(rk[maxIdx][0], ruk[maxIdx][0])

        delList = []
        for j in range(len(intersection)):
            if intersection[j] > maxLim or intersection[j] < minLim:
                delList.append(j)

        thisModel[0].classificationDict['segIntersections'] = np.delete(
            intersection, delList)
        thisModel[0].classificationDict['bhattaDistances'] = distance

        logging.info(
            'Num Intersections: ' +
            str(len(thisModel[0].classificationDict['segIntersections'])))

        [thisModel[0].classificationDict['varianceThreshold'],
         thisModel[0].classificationDict['varianceDirection']] = \
            calculateVarianceThreshold(thisModel[0].classificationDict['segIntersections'], mk[maxIdx], muk[maxIdx],
                                       vk[maxIdx], vuk[maxIdx])

        logging.info('varianceThreshold ' +
                     str(thisModel[0].classificationDict['varianceThreshold']))
        logging.info('varianceDirection ' +
                     str(thisModel[0].classificationDict['varianceDirection']))
    else:
        variancesKnownArray = np.asarray(variancesKnown)
        variancesUnknownArray = np.asarray(variancesUnknown)
        varianceAllArray = np.vstack(
            [variancesKnownArray, variancesUnknownArray])
        histKnown = [None] * (len(variancesKnownArray[0]) - 2)
        binEdges = [None] * (len(variancesKnownArray[0]) - 2)
        histUnknown = [None] * (len(variancesKnownArray[0]) - 2)

        thisModel[0].classificationDict['binWidth'] = thisModel[0].paramsDict[
            'binWidth']
        thisModel[0].classificationDict['method'] = thisModel[0].paramsDict[
            'method']

        numBins = np.ceil(
            np.max(varianceAllArray) /
            thisModel[0].classificationDict['binWidth'])

        bins = range(int(numBins))
        bins = np.multiply(bins, thisModel[0].classificationDict['binWidth'])

        for j in range(len(variancesKnown[0]) - 2):
            histKnown[j], binEdges[j] = np.histogram(variancesKnownArray[:, j],
                                                     bins=bins)
            histKnown[j] = 1.0 * histKnown[j] / np.sum(histKnown[j])

            histUnknown[j], _ = np.histogram(variancesUnknownArray[:, j],
                                             bins=bins)
            histUnknown[j] = 1.0 * histUnknown[j] / np.sum(histUnknown[j])

        thisModel[0].classificationDict['histKnown'] = histKnown
        thisModel[0].classificationDict['binEdgesKnown'] = binEdges
        thisModel[0].classificationDict['histUnknown'] = histUnknown

    thisModel[0].calibrated = True
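The histogram branch above is the basis for the `PfromHist` lookups performed later in `singleRecall`: it bins the predictive variances of known and unknown training instances into normalised histograms and classifies a new variance by comparing the probability mass read from each. A one-dimensional toy sketch of the idea, with `p_from_hist` as a simplified stand-in for `utils.PfromHist`:

import numpy as np

bin_width = 0.1
rng = np.random.RandomState(0)
var_known = rng.gamma(2.0, 0.05, 500)     # toy variances of known instances
var_unknown = rng.gamma(4.0, 0.10, 500)   # toy variances of unknown instances

num_bins = int(np.ceil(max(var_known.max(), var_unknown.max()) / bin_width))
bins = np.arange(num_bins + 1) * bin_width

hist_known, _ = np.histogram(var_known, bins=bins)
hist_known = 1.0 * hist_known / np.sum(hist_known)       # normalise, as above
hist_unknown, _ = np.histogram(var_unknown, bins=bins)
hist_unknown = 1.0 * hist_unknown / np.sum(hist_unknown)

def p_from_hist(value, hist):
    # probability mass of the bin containing value (stand-in for utils.PfromHist)
    idx = min(int(value / bin_width), len(hist) - 1)
    return hist[idx]

test_variance = 0.15
known = p_from_hist(test_variance, hist_known) > p_from_hist(test_variance, hist_unknown)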
Example #8
    def configure(self, rf):

        print(sys.argv)
        stringCommand = 'from SAM.SAM_Drivers import ' + sys.argv[
            4] + ' as Driver'
        print(stringCommand)
        exec(stringCommand)

        self.mm = [Driver()]
        self.dataPath = sys.argv[1]
        self.modelPath = sys.argv[2]
        self.driverName = sys.argv[4]
        self.configPath = sys.argv[3]

        off = 17
        print('-------------------')
        print('Interaction Settings:')
        print('')
        print('Data Path: '.ljust(off) + self.dataPath)
        print('Model Path: '.ljust(off) + self.modelPath)
        print('Config Path: '.ljust(off) + self.configPath)
        print('Driver:'.ljust(off) + self.driverName)
        print('-------------------')
        print('Configuring Interaction...')
        print('')

        self.mm = SAM_utils.initialiseModels(
            [self.dataPath, self.modelPath, self.driverName], 'update',
            'interaction')

        # parse settings from config file
        parser2 = SafeConfigParser()
        parser2.read(self.configPath)
        self.portNameList = parser2.items(self.dataPath.split('/')[-1])
        print(self.portNameList)
        self.portsList = []
        for j in range(len(self.portNameList)):
            if self.portNameList[j][0] == 'rpcbase':
                self.portsList.append(yarp.Port())
                self.portsList[j].open(self.portNameList[j][1] + ":i")
                self.svPort = j
                self.attach(self.portsList[j])
            elif self.portNameList[j][0] == 'callsign':
                # should check for repeated call signs by getting list from samSupervisor
                self.callSignList = self.portNameList[j][1].split(',')
            elif self.portNameList[j][0] == 'collectionmethod':
                self.collectionMethod = self.portNameList[j][1].split(' ')[0]
                try:
                    if self.mm[0].model_mode != 'temporal':
                        self.bufferSize = int(
                            self.portNameList[j][1].split(' ')[1])
                    elif self.mm[0].model_mode == 'temporal':
                        self.bufferSize = self.mm[0].windowSize
                except ValueError:
                    print('collectionMethod bufferSize is not an integer')
                    print('Should be e.g: collectionMethod = buffered 3')
                    return False

                if self.collectionMethod not in [
                        'buffered', 'continuous', 'future_buffered'
                ]:
                    print('collectionMethod should be set to buffered / continuous / future_buffered')
                    return False
            else:
                parts = self.portNameList[j][1].split(' ')
                print(parts)

                if parts[1].lower() == 'imagergb':
                    self.portsList.append(yarp.BufferedPortImageRgb())
                    self.portsList[j].open(parts[0])

                elif parts[1].lower() == 'imagemono':
                    self.portsList.append(yarp.BufferedPortImageMono())
                    self.portsList[j].open(parts[0])

                elif parts[1].lower() == 'bottle':
                    self.portsList.append(yarp.BufferedPortBottle())
                    self.portsList[j].open(parts[0])

                else:
                    print('Data type ' + parts[1] + ' for ' +
                          self.portNameList[j][0] + ' unsupported')
                    return False
                # mrd models with label/instance training will always have:
                # 1 an input data line which is used when a label is requested
                # 2 an output data line which is used when a generated instance is required
                if parts[0][-1] == 'i':
                    self.labelPort = j
                elif parts[0][-1] == 'o':
                    self.instancePort = j

        if self.svPort is None or self.labelPort is None or self.instancePort is None:
            print('Config file properties incorrect. Should look like this:')
            print('[Actions]')
            print('dataIn = /sam/actions/actionData:i Bottle')
            print('dataOut = /sam/actions/actionData:o Bottle')
            print('rpcBase = /sam/actions/rpc')
            print('callSign = ask_action_label, ask_action_instance')
            print('collectionMethod = buffered 3')
            return False

        # self.mm[0].configInteraction(self)
        self.inputType = self.portNameList[self.labelPort][1].split(
            ' ')[1].lower()
        self.outputType = self.portNameList[self.instancePort][1].split(
            ' ')[1].lower()
        self.dataList = []
        self.classificationList = []
        yarp.Network.init()

        self.test()

        return True
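The port wiring in `configure` is driven entirely by an INI section shaped like the error message above. The sketch below shows that parsing step in isolation with the standard library parser; the file name and section content mirror the printed example rather than a guaranteed schema, and note that ConfigParser lower-cases option names, which is why the code compares against 'rpcbase' and 'callsign'.

try:
    from configparser import ConfigParser                      # Python 3
except ImportError:
    from ConfigParser import SafeConfigParser as ConfigParser  # Python 2

config_text = ('[Actions]\n'
               'dataIn = /sam/actions/actionData:i Bottle\n'
               'dataOut = /sam/actions/actionData:o Bottle\n'
               'rpcBase = /sam/actions/rpc\n'
               'callSign = ask_action_label,ask_action_instance\n'
               'collectionMethod = buffered 3\n')

with open('interaction.ini', 'w') as f:
    f.write(config_text)

parser = ConfigParser()
parser.read('interaction.ini')

for name, value in parser.items('Actions'):    # option names arrive lower-cased
    if name == 'rpcbase':
        rpc_port_name = value + ':i'           # -> '/sam/actions/rpc:i'
    elif name == 'callsign':
        call_sign_list = value.split(',')
    elif name == 'collectionmethod':
        collection_method, buffer_size = value.split(' ')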
Example #9
def calibrateMultipleModelRecall(thisModel):
    """
        Perform calibration for multiple model implementations.

        In contrast with calibrateSingleModelRecall, this method calibrates known and unknown according to measures of familiarity between all model classes. The familiarity of each class with every other class and with itself is then used to perform a Bayesian decision depending on the resulting familiarity when testing a new instance.

        Args:
            thisModel: SAMObject model to calibrate.

        Returns:
            None
    """
    cmSize = len(thisModel[0].textLabels)
    confMatrix = np.zeros((cmSize, cmSize))

    # Create Validation set
    Y_valid = []
    Y_testing = []
    for i in range(len(thisModel)):
        if thisModel[i].SAMObject.model:
            # De-normalize from the model which stored this test data
            yy_test = thisModel[i].Ytestn.copy()
            yy_test *= thisModel[i].Ystd
            yy_test += thisModel[i].Ymean
            y_valid_tmp, y_test_tmp, _, _ = utils.random_data_split(
                yy_test, [0.5, 0.5])
            Y_valid.append(y_valid_tmp.copy())
            Y_testing.append(y_test_tmp.copy())

    # Compute familiarities in VALIDATION SET
    familiarities = [None] * (len(thisModel) - 1)
    for i in range(len(thisModel)):
        if thisModel[i].SAMObject.model:
            # N_test x N_labels matrix.
            familiarities[i - 1] = np.zeros(
                (Y_valid[i - 1].shape[0], (len(thisModel) - 1)))
            logging.info("## True label is " + thisModel[i].modelLabel)
            for k in range(Y_valid[i - 1].shape[0]):
                sstest = []
                logging.info('# k= ' + str(k))
                for j in range(len(thisModel)):
                    if thisModel[j].SAMObject.model:
                        yy_test = Y_valid[i - 1][k, :][None, :].copy()
                        # Normalize according to the model to predict
                        yy_test -= thisModel[j].Ymean
                        yy_test /= thisModel[j].Ystd
                        sstest.append(thisModel[j].SAMObject.familiarity(
                            yy_test, optimise=thisModel[0].optimiseRecall))
                        familiarities[i - 1][k, j - 1] = sstest[-1]
                msg = ''
                for j in range(len(sstest)):
                    if j == np.argmax(sstest):
                        msg = '   *'
                    else:
                        msg = '    '
                    logging.info(msg + '      Familiarity of model ' +
                                 thisModel[j + 1].modelLabel +
                                 ' given label: ' + thisModel[i].modelLabel +
                                 ' in valid: ' + str(sstest[j]))

                confMatrix[i - 1, np.argmax(sstest)] += 1
    calculateData(thisModel[0].textLabels, confMatrix)

    # At this point we have:
    # familiarities[i][k,j] -> familiarity for true label i, instance k
    #                          predicted by model trained in label j
    # ############# Train Familiarity classifier in VALIDATION SET
    #
    classifiers = []
    classif_thresh = []
    familiarity_predictions = []
    tmp = []
    for i in range(len(thisModel[0].textLabels)):
        X_train = familiarities[0][:, i][:, None]
        y_train = np.zeros((familiarities[0][:, i][:, None].shape[0], 1))
        for j in range(1, len(thisModel[0].textLabels)):
            X_train = np.vstack((X_train, familiarities[j][:, i][:, None]))
            y_train = np.vstack((y_train, j + np.zeros(
                (familiarities[j][:, i][:, None].shape[0], 1))))
        tmp.append(X_train)
        n_classes = len(np.unique(y_train))

        # Try GMMs using different types of covariances.
        classifiers.append(
            GMM(n_components=n_classes,
                covariance_type='full',
                init_params='wc',
                n_iter=2000))

        # Since we have class labels for the training data, we can
        # initialize the GMM parameters in a supervised manner.
        classifiers[-1].means_ = np.array([
            X_train[y_train == kk].mean(axis=0) for kk in xrange(n_classes)
        ])[:, None]
        classifiers[-1].fit(X_train)
        familiarity_predictions.append(classifiers[-1].predict(X_train))

        # Find threshold of confident classification of model i predicting label i
        tmp_i = classifiers[i].predict_proba(X_train[y_train == i][:, None])[:,
                                                                             i]
        tmp_s = 0.8
        # If in the test phase we get a predict_proba which falls in the threshold i, then
        # model i is confident for this prediction.
        classif_thresh.append([
            tmp_i.mean() - tmp_s * tmp_i.std(),
            tmp_i.mean() + tmp_s * tmp_i.std()
        ])

    thisModel[0].classificationDict['classifiers'] = classifiers
    thisModel[0].classificationDict['classif_thresh'] = classif_thresh
    thisModel[0].calibrated = True
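The GMM stage above uses the long-removed sklearn `GMM` class. The sketch below reproduces the same confidence-band idea with the current `GaussianMixture` API on toy familiarity scores: fit a mixture over scores pooled from two labels, then keep mean ± 0.8·std of the in-class `predict_proba` as the band inside which a test score counts as a confident prediction. The data and initial means are illustrative assumptions.

import numpy as np
from sklearn.mixture import GaussianMixture

rng = np.random.RandomState(0)
# toy familiarity scores: label 0 scores high under its own model, label 1 lower
X_train = np.vstack([rng.normal(2.0, 0.3, (100, 1)),
                     rng.normal(0.5, 0.3, (100, 1))])
y_train = np.hstack([np.zeros(100), np.ones(100)])

gmm = GaussianMixture(n_components=2, covariance_type='full',
                      means_init=np.array([[2.0], [0.5]]))
gmm.fit(X_train)

# probability that in-class samples belong to their own component
tmp_i = gmm.predict_proba(X_train[y_train == 0])[:, 0]
tmp_s = 0.8
classif_thresh = [tmp_i.mean() - tmp_s * tmp_i.std(),
                  tmp_i.mean() + tmp_s * tmp_i.std()]
# a test-time predict_proba falling inside classif_thresh is a confident match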
Example #10
    def readData(self, root_data_dir, participant_index, *args, **kw):
        self.rawData, labelsList = self.diskDataToLiveData(root_data_dir)
        data2, jointsList, objectsList = self.convertToDict(
            self.rawData, 'testing', verbose=self.verbose)
        logging.info('unique labels: ' + str(set(labelsList)))
        # extract a set of labels
        labels = list(set(labelsList))
        labels.sort()

        logging.info('')
        # convert text labels into numbers
        labelNumsList = None
        for n, k in enumerate(labelsList):
            res = [m for m, l in enumerate(labels) if l == k]
            if n == 0:
                labelNumsList = np.array(res)
            else:
                labelNumsList = np.vstack([labelNumsList, res])
        logging.info('shape of number labels:' + str(labelNumsList.shape))

        uu, tmp = utils.transformTimeSeriesToSeq(
            labelNumsList, self.paramsDict['windowSize'],
            self.paramsDict['windowOffset'], False, False)
        data2NumLabels = uu
        logging.info('windowed number labels shape:' +
                     str(data2NumLabels.shape))

        # now that labels are in windowed form it is time to
        # assign them a text label again that describes them
        # the rule here is if the window appertains to the same label,
        # that label is assigned otherwise it is labelled as transition
        data2Labels = []
        for j in data2NumLabels:
            numItems = list(set(j))
            if len(numItems) == 1:
                l = labels[int(numItems[0])]
                data2Labels.append(l)
            else:
                # Another way to do this would be to label it according to 75% majority
                # This would decrease the region size of the transition blocks
                # which are currently dependent on windowSize
                data2Labels.append('transition')
        logging.info('after transition unique set ' + str(set(data2Labels)))
        logging.info('windowed data labels compressed: ' +
                     str(len(data2Labels)))

        logging.info('')
        # create list of specific joints to be used

        jointsToUse = []
        objectDict = dict()
        handDict = dict()
        for j in self.paramsDict['includeParts']:
            if j == 'object':
                for k in objectsList:
                    if k != 'partner':
                        objectDict[k] = (len(jointsToUse))
                        jointsToUse.append(k)
            elif 'hand' in j:
                handDict[j] = (len(jointsToUse))
                jointsToUse.append(j)
            else:
                jointsToUse.append(j)

        combineObjects = len(objectDict) > 1

        combineHands = len(handDict) > 1

        logging.info(jointsToUse)
        logging.info(objectDict)
        logging.info(handDict)

        # concatenate data for all joints in a single vector
        logging.info('')
        dataVecAll = None
        for j in jointsToUse:
            if dataVecAll is None:
                dataVecAll = data2[j]
            else:
                dataVecAll = np.hstack([dataVecAll, data2[j]])
        itemsPerJoint = dataVecAll.shape[1] / len(jointsToUse)
        logging.info(dataVecAll.shape)
        logging.info(itemsPerJoint)
        self.itemsPerJoint = itemsPerJoint
        logging.info('')
        # ------------------------------------------------------------------
        # it is now time to combine objects if multiple exist
        #

        logging.info('')
        self.featureSequence = []
        combinedObjs = dict()
        if combineObjects and 'object' in self.paramsDict['includeParts']:
            self.featureSequence.append('object')
            logging.info('Combining Objects')
            for n in objectDict:
                idxBase = objectDict[n] * itemsPerJoint
                combinedObjs[n] = dataVecAll[:,
                                             idxBase:idxBase + itemsPerJoint]

                logging.info(combinedObjs[n].shape)

        logging.info(dataVecAll.shape)

        logging.info('')
        # it is now time to combine hands if multiple exist
        combinedHands = dict()
        if combineHands and self.paramsDict['combineHands'] and \
           len([s for s in self.paramsDict['includeParts'] if 'hand' in s]) > 0:
            logging.info('Combining hands')
            self.handsCombined = True
            self.featureSequence.append('hand')
            for n in handDict:
                idxBase = handDict[n] * itemsPerJoint
                combinedHands[n] = dataVecAll[:,
                                              idxBase:idxBase + itemsPerJoint]

                logging.info(combinedHands[n].shape)
            logging.info(dataVecAll.shape)
        else:
            self.handsCombined = False

        logging.info(jointsToUse)
        otherJoints = None
        for j, item in enumerate(jointsToUse):
            if self.handsCombined:
                if item not in handDict and item not in objectDict:
                    self.featureSequence.append(item)
                    idxBase = j * itemsPerJoint

                    if otherJoints is None:
                        otherJoints = dataVecAll[:, idxBase:idxBase +
                                                 itemsPerJoint]
                    else:
                        otherJoints = np.hstack([
                            otherJoints,
                            dataVecAll[:, idxBase:idxBase + itemsPerJoint]
                        ])
            else:
                if item not in objectDict:
                    self.featureSequence.append(item)
                    idxBase = j * itemsPerJoint

                    if otherJoints is None:
                        otherJoints = dataVecAll[:, idxBase:idxBase +
                                                 itemsPerJoint]
                    else:
                        otherJoints = np.hstack([
                            otherJoints,
                            dataVecAll[:, idxBase:idxBase + itemsPerJoint]
                        ])
        if otherJoints is not None:
            logging.info(otherJoints.shape)

        self.listOfVectorsToClassify = []
        for j in self.featureSequence:
            if j == 'object':
                for k in objectsList:
                    if k != 'partner':
                        self.listOfVectorsToClassify.append([k])

            elif 'hand' in j:
                if self.handsCombined:
                    a = copy.deepcopy(self.listOfVectorsToClassify)
                    b = copy.deepcopy(self.listOfVectorsToClassify)
                    if len(self.listOfVectorsToClassify) > 0:
                        for l, m in enumerate(self.listOfVectorsToClassify):
                            a[l].append('handLeft')
                            b[l].append('handRight')
                            self.listOfVectorsToClassify = a + b
                    else:
                        self.listOfVectorsToClassify.append(['handLeft'])
                        self.listOfVectorsToClassify.append(['handRight'])

                else:
                    for l, m in enumerate(self.listOfVectorsToClassify):
                        self.listOfVectorsToClassify[l].append(j)

            else:
                for l, m in enumerate(self.listOfVectorsToClassify):
                    self.listOfVectorsToClassify[l].append(j)
        logging.info('Vectors to Classify:')
        for j in self.listOfVectorsToClassify:
            logging.info("\t" + str(j))

        dataVecReq = None
        objSection = None
        if combinedObjs:
            objSection = None
            for j in self.listOfVectorsToClassify:
                logging.info(str(j[0]))
                if objSection is None:
                    objSection = combinedObjs[j[0]]
                else:
                    objSection = np.vstack([objSection, combinedObjs[j[0]]])
            dataVecReq = objSection
            logging.info(str(objSection.shape))

        handsSection = None
        if combinedHands:
            for j in self.listOfVectorsToClassify:
                for l in j:
                    if 'hand' in l:
                        if handsSection is None:
                            handsSection = combinedHands[l]
                        else:
                            handsSection = np.vstack(
                                [handsSection, combinedHands[l]])
            if dataVecReq is None:
                dataVecReq = handsSection
            else:
                dataVecReq = np.hstack([dataVecReq, handsSection])
            logging.info(str(handsSection.shape))

        othersSection = None
        if otherJoints is not None:
            for j in self.listOfVectorsToClassify:
                logging.info(str(j[:]))
                if othersSection is None:
                    othersSection = otherJoints
                else:
                    othersSection = np.vstack([othersSection, otherJoints])

            if dataVecReq is None:
                dataVecReq = othersSection
            else:
                dataVecReq = np.hstack([dataVecReq, othersSection])

        logging.info(str(dataVecReq.shape))
        del handsSection, othersSection, objSection, combinedHands, combinedObjs, otherJoints

        # Also augment the labels list
        data2LabelsAugment = []
        for j in self.listOfVectorsToClassify:
            data2LabelsAugment.append([])

        for j in data2Labels:
            splitLabel = j.split('_')
            action = '_'.join(splitLabel[:2])

            if len(splitLabel) > 2:
                obj = splitLabel[2]
                hand = splitLabel[4]

                if combineHands:
                    handSubList = [
                        k for k in self.listOfVectorsToClassify
                        if 'hand' + hand.capitalize() in k
                    ]
                    if combineObjects:
                        vec = [f for f in handSubList if obj in f][0]
                    else:
                        vec = handSubList[0]
                else:
                    vec = [
                        f for f in self.listOfVectorsToClassify if obj in f
                    ][0]
                # logging.info(data2Labels.index(j), vec)

                # each classification vector gets exactly one label per window:
                # the action if it matches this window's vector, 'idle' otherwise
                for n, k in enumerate(self.listOfVectorsToClassify):
                    if vec == k:
                        data2LabelsAugment[n].append(action)
                    else:
                        data2LabelsAugment[n].append('idle')
            else:
                for n, k in enumerate(self.listOfVectorsToClassify):
                    data2LabelsAugment[n].append(action)
        data2Labels = []
        for j in data2LabelsAugment:
            data2Labels += j
        # logging.info('after augment ' + str(set(data2Labels)))
        logging.info('labels ' + str(len(data2Labels)))
        logging.info('data ' + str(dataVecReq.shape))
        self.allDataDict = dict()
        self.allDataDict['Y'] = copy.deepcopy(dataVecReq)
        self.allDataDict['L'] = copy.deepcopy(data2Labels)

        # ---------------------------------------------------------------------------------

        data2ShortLabels = []
        for j in data2Labels:
            splitLabel = j.split('_')
            slabel = ('_'.join(splitLabel[:2]))

            if splitLabel[0] == 'push' or splitLabel[0] == 'pull':
                if splitLabel[-1] == 'no':
                    add = splitLabel[-2]
                else:
                    add = splitLabel[-1]

                if add == 'left' and self.paramsDict['flip']:
                    if splitLabel[0] == 'push':
                        splitLabel[0] = 'pull'
                    else:
                        splitLabel[0] = 'push'
                    slabel = ('_'.join(splitLabel[:2]))

                if self.paramsDict['sepRL']:
                    slabel += '_' + add

            data2ShortLabels.append(slabel)

        self.data2Labels = copy.deepcopy(data2ShortLabels)
        logging.info('unique short labels: ' + str(set(self.data2Labels)))

        if self.paramsDict['sepRL']:
            # rename the generic labels to their right-hand variants and
            # append the corresponding left-hand variants
            if 'pull_object' in self.paramsDict['actionsAllowedList']:
                self.paramsDict['actionsAllowedList'][
                    self.paramsDict['actionsAllowedList'].index(
                        'pull_object')] = 'pull_object_right'
                self.paramsDict['actionsAllowedList'].append(
                    'pull_object_left')

            if 'push_object' in self.paramsDict['actionsAllowedList']:
                self.paramsDict['actionsAllowedList'][
                    self.paramsDict['actionsAllowedList'].index(
                        'push_object')] = 'push_object_right'
                self.paramsDict['actionsAllowedList'].append(
                    'push_object_left')

        # remove labels which will not be trained
        logging.info('actions allowed: ' +
                     str(self.paramsDict['actionsAllowedList']))
        listToDelete = []
        for n in reversed(range(len(data2Labels))):
            if len([j for j in self.paramsDict['actionsAllowedList'] if j in data2Labels[n]]) == 0 or \
                            'no' in data2Labels[n]:
                listToDelete.append(n)

        dataVecReq = np.delete(dataVecReq, listToDelete, axis=0)
        npdata2ShortLabels = np.asarray(data2ShortLabels)
        npdata2ShortLabels = np.delete(npdata2ShortLabels,
                                       listToDelete,
                                       axis=0)
        # find left hand push and pull and label as pull and push respectively
        data2ShortLabels = np.ndarray.tolist(npdata2ShortLabels)

        self.Y = dataVecReq
        self.L = data2ShortLabels
        # logging.info('\n'.join(data2Labels))
        logging.info(self.Y.shape)
        logging.info(len(self.L))
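readData's label handling hinges on `utils.transformTimeSeriesToSeq`: per-frame numeric labels are cut into overlapping windows, and any window that mixes labels is renamed 'transition'. A self-contained sketch of that windowing-and-relabelling rule, with `window_series` as a simplified stand-in for the real helper:

import numpy as np

def window_series(x, window_size, offset):
    # stack overlapping windows of a 1-D series as rows (simplified stand-in)
    starts = range(0, len(x) - window_size + 1, offset)
    return np.array([x[s:s + window_size] for s in starts])

frame_labels = np.array([0, 0, 0, 0, 1, 1, 1, 1])   # numeric per-frame labels
label_names = ['pull_object', 'push_object']

windows = window_series(frame_labels, window_size=3, offset=1)

window_labels = []
for w in windows:
    if len(set(w)) == 1:                  # window covers a single action
        window_labels.append(label_names[int(w[0])])
    else:                                 # window straddles an action boundary
        window_labels.append('transition')
# -> ['pull_object', 'pull_object', 'transition', 'transition',
#     'push_object', 'push_object']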
Example #11
    def convertToDict(self, rawData, mode, verbose):
        data = dict()
        firstPass = True
        jointsList = []
        objectsList = []

        # logging.info('*******************')
        # for j in self.paramsDict:
        #     logging.info(j, self.paramsDict[j]
        # logging.info('*******************')

        for t in rawData:
            # parse skeleton data which has 9 sections by (x,y,z)
            for i in range(self.numJoints):
                a = i * 4
                # if t[a] == 'shoulderCenter':
                #     t[a] = 'chest'

                if firstPass:
                    data[t[a]] = [None]
                    data[t[a]] = (np.array([float(t[a + 1]), float(t[a + 2]), float(t[a + 3])]))
                    jointsList.append(t[a])
                else:
                    arr = np.array([float(t[a + 1]), float(t[a + 2]), float(t[a + 3])])
                    if data[t[a]] is not None:
                        data[t[a]] = np.vstack((data[t[a]], arr))
                    else:
                        data[t[a]] = arr

            currIdx = (self.numJoints * 4 - 1)
            numObjs = (len(t) - currIdx) / 5

            for i in range(numObjs):
                a = currIdx + 1 + (i * 5)
                if t[a] in data:
                    arr = np.array([float(t[a + 1]), float(t[a + 2]), float(t[a + 3])])
                    if data[t[a]] is not None:
                        data[t[a]] = np.vstack((data[t[a]], arr))
                    else:
                        data[t[a]] = arr
                else:
                    data[t[a]] = [None]
                    data[t[a]] = np.array([float(t[a + 1]), float(t[a + 2]), float(t[a + 3])])
                    if mode == 'testing' or (mode != 'testing' and t[a+4] == '1'):
                        objectsList.append(t[a])

            firstPass = False
        if verbose:
            logging.info('data has length = ' + str(len(data)) + ' joints')
            logging.info('each joint has an array of shape ' + str(data['head'].shape))

        if self.paramsDict['filterData'] or 'vel' in self.paramsDict['components'] or \
                                            'acc' in self.paramsDict['components']:
            if verbose:
                logging.info('Filtering data with hamming window of size ' + str(self.paramsDict['filterWindow']))
            for j in data.keys():
                t1 = utils.smooth1D(data[j][:, 0], self.paramsDict['filterWindow'])
                t2 = utils.smooth1D(data[j][:, 1], self.paramsDict['filterWindow'])
                t3 = utils.smooth1D(data[j][:, 2], self.paramsDict['filterWindow'])
                data[j] = np.hstack([t1[:, None], t2[:, None], t3[:, None]])

        if verbose:
            logging.info('data has length = ' + str(len(data)) + ' joints')
            logging.info('each joint has an array of shape ' + str(data['head'].shape))
        # convert data and number labels into windows.
        # data is still in the form of a dictionary with the joints/objects as keys of the dict
        # Text labels contained in labels
        if verbose:
            logging.info('')
        noY = mode != 'testing'
        if mode == 'testing':
            offset = self.paramsDict['windowOffset']
        else:
            offset = 1

        data2 = dict()
        printExplanation = True
        for num, key in enumerate(data):
            data2[key] = None
            xx, yy = utils.transformTimeSeriesToSeq(data[key], timeWindow=self.paramsDict['windowSize'],
                                                    offset=offset,
                                                    normalised=self.paramsDict['normaliseWindow'],
                                                    reduced=self.paramsDict['reduce'], noY=noY)

            if self.paramsDict['thresholdMovement'] or 'vel' in self.paramsDict['components'] or 'acc' in \
                    self.paramsDict['components']:
                winSize = xx.shape[1] / 3
                g = xx.size / winSize
                xxshape1 = xx.shape[0]
                xxshape2 = xx.shape[1]

                flatxx = xx.flatten()
                f = flatxx.reshape([g, winSize])
                xx = f.reshape([xxshape1, xxshape2])

                if self.paramsDict['thresholdMovement']:
                    if printExplanation and verbose:
                        logging.info('thresholding movement <' + str(self.paramsDict['moveThresh']))
                    ranges = np.ptp(f, axis=1)
                    a = ranges < self.paramsDict['moveThresh']
                    b = ranges > -self.paramsDict['moveThresh']
                    res = list(np.where(np.logical_and(a, b))[0])
                    if self.paramsDict['normaliseWindow']:
                        f[res] = 0
                    else:
                        for ll in res:
                            f[ll] = f[ll][0]

                if 'vel' in self.paramsDict['components']:
                    if printExplanation and verbose:
                        logging.info('Adding velocity to the feature vector')
                    xxvel = np.diff(f)
                    xxvel = xxvel.reshape([xxshape1, xxshape2 - 3])
                    xx = np.hstack([xx, xxvel])

                if 'acc' in self.paramsDict['components']:
                    if printExplanation and verbose:
                        logging.info('Adding acceleration to the feature vector')
                    xxacc = np.diff(f, n=2)
                    xxacc = xxacc.reshape([xxshape1, xxshape2 - 6])
                    xx = np.hstack([xx, xxacc])

            data2[key] = xx
            printExplanation = False

        if verbose:
            logging.info('data has length = ' + str(len(data2)) + ' joints')
            logging.info('each joint has an array of shape ' + str(data2['head'].shape))

        return data2, jointsList, objectsList
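When 'vel' or 'acc' is requested, `convertToDict` appends first and second differences of each window to the feature vector via `np.diff`. The sketch below shows the augmentation on two toy windows, simplified to a single coordinate per window (the real code first reshapes because each window interleaves x, y, z):

import numpy as np

xx = np.array([[0.0, 0.1, 0.3, 0.6],     # two windows of four position samples
               [1.0, 1.0, 1.0, 1.0]])

xxvel = np.diff(xx, axis=1)               # first difference: 3 velocity terms
xxacc = np.diff(xx, n=2, axis=1)          # second difference: 2 acceleration terms

features = np.hstack([xx, xxvel, xxacc])  # position + velocity + acceleration
# features.shape -> (2, 4 + 3 + 2)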
Example #12
def calibrateMultipleModelRecall(thisModel):

    cmSize = len(thisModel[0].textLabels)
    confMatrix = np.zeros((cmSize, cmSize))

    # Create Validation set
    Y_valid = []
    Y_testing = []
    for i in range(len(thisModel)):
        if thisModel[i].SAMObject.model:
            # De-normalize from the model which stored this test data
            yy_test = thisModel[i].Ytestn.copy()
            yy_test *= thisModel[i].Ystd
            yy_test += thisModel[i].Ymean
            y_valid_tmp, y_test_tmp, _, _ = SAM_utils.random_data_split(
                yy_test, [0.5, 0.5])
            Y_valid.append(y_valid_tmp.copy())
            Y_testing.append(y_test_tmp.copy())

    # Compute familiarities in VALIDATION SET
    familiarities = [None] * (len(thisModel) - 1)
    for i in range(len(thisModel)):
        if thisModel[i].SAMObject.model:
            # N_test x N_labels matrix.
            familiarities[i - 1] = np.zeros(
                (Y_valid[i - 1].shape[0], (len(thisModel) - 1)))
            print("## True label is " + thisModel[i].modelLabel)
            for k in range(Y_valid[i - 1].shape[0]):
                sstest = []
                print('# k=' + str(k))
                for j in range(len(thisModel)):
                    if thisModel[j].SAMObject.model:
                        yy_test = Y_valid[i - 1][k, :][None, :].copy()
                        # Normalize according to the model to predict
                        yy_test -= thisModel[j].Ymean
                        yy_test /= thisModel[j].Ystd
                        sstest.append(
                            thisModel[j].SAMObject.familiarity(yy_test))
                        familiarities[i - 1][k, j - 1] = sstest[-1]
                for j in range(len(sstest)):
                    if j == np.argmax(sstest):
                        msg = '   *'
                    else:
                        msg = '    '
                    print(msg + '      Familiarity of model ' +
                          thisModel[j + 1].modelLabel + ' given label: ' +
                          thisModel[i].modelLabel + ' in valid: ' +
                          str(sstest[j]))

                confMatrix[i - 1, np.argmax(sstest)] += 1
    calculateData(thisModel[0].textLabels, confMatrix)

    # At this point we have:
    # familiarities[i][k,j] -> familiarity for true label i, instance k
    #                          predicted by model trained in label j
    # ############# Train Familiarity classifier in VALIDATION SET
    #
    classifiers = []
    classif_thresh = []
    familiarity_predictions = []
    tmp = []
    for i in range(len(thisModel[0].textLabels)):
        X_train = familiarities[0][:, i][:, None]
        y_train = np.zeros((familiarities[0][:, i][:, None].shape[0], 1))
        for j in range(1, len(thisModel[0].textLabels)):
            X_train = np.vstack((X_train, familiarities[j][:, i][:, None]))
            y_train = np.vstack((y_train, j + np.zeros(
                (familiarities[j][:, i][:, None].shape[0], 1))))
        tmp.append(X_train)
        n_classes = len(np.unique(y_train))

        # Try GMMs using different types of covariances.
        classifiers.append(
            GMM(n_components=n_classes,
                covariance_type='full',
                init_params='wc',
                n_iter=2000))

        # Since we have class labels for the training data, we can
        # initialize the GMM parameters in a supervised manner.
        classifiers[-1].means_ = np.array([
            X_train[y_train == kk].mean(axis=0) for kk in xrange(n_classes)
        ])[:, None]
        classifiers[-1].fit(X_train)
        familiarity_predictions.append(classifiers[-1].predict(X_train))

        # Find threshold of confident classification of model i predicting label i
        tmp_i = classifiers[i].predict_proba(X_train[y_train == i][:, None])[:,
                                                                             i]
        tmp_s = 0.8
        # If in the test phase we get a predict_proba which falls in the threshold i, then
        # model i is confident for this prediction.
        classif_thresh.append([
            tmp_i.mean() - tmp_s * tmp_i.std(),
            tmp_i.mean() + tmp_s * tmp_i.std()
        ])

    thisModel[0].classifiers = classifiers
    thisModel[0].classif_thresh = classif_thresh
Example #13
    def readData1(self, root_data_dir, participant_index, *args, **kw):
        self.rawData, labelsList = self.diskDataToLiveData(root_data_dir)
        data2, jointsList, objectsList = self.convertToDict(
            self.rawData, 'testing', verbose=self.verbose)

        # extract a set of labels
        labels = list(set(labelsList))
        labels.sort()

        logging.info('')
        # convert text labels into numbers
        labelNumsList = None
        for n, k in enumerate(labelsList):
            res = [m for m, l in enumerate(labels) if l == k]
            if n == 0:
                labelNumsList = np.array(res)
            else:
                labelNumsList = np.vstack([labelNumsList, res])
        logging.info('shape of number labels:' + str(labelNumsList.shape))

        uu, tmp = utils.transformTimeSeriesToSeq(
            labelNumsList, self.paramsDict['windowSize'],
            self.paramsDict['windowOffset'], False, False)
        data2NumLabels = uu
        logging.info('windowed number labels shape:' +
                     str(data2NumLabels.shape))

        # now that labels are in windowed form it is time to
        # assign them a text label again that describes them
        # the rule here is if the window appertains to the same label,
        # that label is assigned otherwise it is labelled as transition
        data2Labels = []
        for j in data2NumLabels:
            numItems = list(set(j))
            if len(numItems) == 1:
                l = labels[int(numItems[0])]
                data2Labels.append(l)
            else:
                # Another way to do this would be to label it according to 75% majority
                # This would decrease the region size of the transition blocks
                # which are currently dependent on windowSize
                data2Labels.append('transition')

        logging.info('windowed data labels compressed:' +
                     str(len(data2Labels)))

        logging.info('')
        # create list of specific joints to be used

        jointsToUse = []
        objectDict = dict()
        handDict = dict()
        for j in self.paramsDict['includeParts']:
            if j == 'object':
                for k in objectsList:
                    if k != 'partner':
                        objectDict[k] = (len(jointsToUse))
                        jointsToUse.append(k)
            elif 'hand' in j:
                handDict[j] = (len(jointsToUse))
                jointsToUse.append(j)
            else:
                jointsToUse.append(j)

        combineObjects = len(objectDict) > 1

        combineHands = len(handDict) > 1

        logging.info(jointsToUse)
        logging.info(objectDict)
        logging.info(handDict)

        # concatenate data for all joints in a single vector
        logging.info('')
        dataVecAll = None
        for j in jointsToUse:
            if dataVecAll is None:
                dataVecAll = data2[j]
            else:
                dataVecAll = np.hstack([dataVecAll, data2[j]])
        itemsPerJoint = dataVecAll.shape[1] / len(jointsToUse)
        logging.info(dataVecAll.shape)
        logging.info(itemsPerJoint)
        self.itemsPerJoint = itemsPerJoint
        logging.info('')

        # it is now time to combine objects if multiple exist
        #
        self.featureSequence = ['object']
        logging.info('')
        combinedObjs = None
        if combineObjects:
            logging.info('Combining Objects')
            for j in range(len(data2Labels)):
                #         logging.info(data2Labels[j])
                if len(data2Labels[j].split('_')) > 2:
                    idxBase = objectDict[data2Labels[j].split('_')
                                         [2]] * itemsPerJoint
                else:
                    idxBase = objectDict[objectDict.keys()[0]] * itemsPerJoint

                if combinedObjs is None:
                    combinedObjs = dataVecAll[j,
                                              idxBase:idxBase + itemsPerJoint]
                else:
                    combinedObjs = np.vstack([
                        combinedObjs,
                        dataVecAll[j, idxBase:idxBase + itemsPerJoint]
                    ])
            logging.info(combinedObjs.shape)

        logging.info(dataVecAll.shape)

        logging.info('')
        # it is now time to combine hands if multiple exist
        combinedHands = None
        if combineHands and self.paramsDict['combineHands']:
            logging.info('Combining hands')
            self.handsCombined = True
            self.featureSequence.append('hand')
            for j in range(len(data2Labels)):
                if len(data2Labels[j].split('_')) > 2:
                    idxBase = handDict[data2Labels[j].split('_')[3] + data2Labels[j].split('_')[4].capitalize()] * \
                              itemsPerJoint
                else:
                    idxBase = handDict[handDict.keys()[0]] * itemsPerJoint

                if combinedHands is None:
                    combinedHands = dataVecAll[j,
                                               idxBase:idxBase + itemsPerJoint]
                else:
                    combinedHands = np.vstack([
                        combinedHands,
                        dataVecAll[j, idxBase:idxBase + itemsPerJoint]
                    ])
            logging.info(dataVecAll.shape)
            logging.info(combinedHands.shape)
        else:
            self.handsCombined = False

        dataVecReq = None

        if combinedHands is not None:
            dataVecReq = combinedHands

        if combinedObjs is not None:
            if dataVecReq is None:
                dataVecReq = combinedObjs
            else:
                dataVecReq = np.hstack([dataVecReq, combinedObjs])

        logging.info(jointsToUse)
        for j, item in enumerate(jointsToUse):
            if self.handsCombined:
                if item not in handDict and item not in objectDict:
                    self.featureSequence.append(item)
                    idxBase = j * itemsPerJoint

                    if dataVecReq is None:
                        dataVecReq = dataVecAll[:, idxBase:idxBase +
                                                itemsPerJoint]
                    else:
                        dataVecReq = np.hstack([
                            dataVecReq,
                            dataVecAll[:, idxBase:idxBase + itemsPerJoint]
                        ])
            else:
                if item not in objectDict:
                    self.featureSequence.append(item)
                    idxBase = j * itemsPerJoint

                    if dataVecReq is None:
                        dataVecReq = dataVecAll[:, idxBase:idxBase +
                                                itemsPerJoint]
                    else:
                        dataVecReq = np.hstack([
                            dataVecReq,
                            dataVecAll[:, idxBase:idxBase + itemsPerJoint]
                        ])

        logging.info(dataVecReq.shape)
        logging.info(len(data2Labels))
        logging.info('')
        self.dataVec = copy.deepcopy(dataVecReq)

        data2ShortLabels = []
        for j in data2Labels:
            splitLabel = j.split('_')
            slabel = ('_'.join(splitLabel[:2]))

            if splitLabel[0] == 'push' or splitLabel[0] == 'pull':
                if splitLabel[-1] == 'no':
                    add = splitLabel[-2]
                else:
                    add = splitLabel[-1]

                if add == 'left' and self.paramsDict['flip']:
                    if splitLabel[0] == 'push':
                        splitLabel[0] = 'pull'
                    else:
                        splitLabel[0] = 'push'
                    slabel = ('_'.join(splitLabel[:2]))

                if self.paramsDict['sepRL']:
                    slabel += '_' + add

            data2ShortLabels.append(slabel)

        self.data2Labels = copy.deepcopy(data2ShortLabels)

        if self.paramsDict['sepRL']:
            if 'pull_object' in self.paramsDict['actionsAllowedList']:
                idx = self.paramsDict['actionsAllowedList'].index(
                    'pull_object')
                self.paramsDict['actionsAllowedList'][idx] = \
                    'pull_object_right'
                self.paramsDict['actionsAllowedList'].append(
                    'pull_object_left')

            if 'push_object' in self.paramsDict['actionsAllowedList']:
                idx = self.paramsDict['actionsAllowedList'].index(
                    'push_object')
                self.paramsDict['actionsAllowedList'][idx] = \
                    'push_object_right'
                self.paramsDict['actionsAllowedList'].append(
                    'push_object_left')

        # remove labels which will not be trained
        listToDelete = []
        for n in reversed(range(len(data2Labels))):
            if len([j for j in self.paramsDict['actionsAllowedList'] if j in data2Labels[n]]) == 0 or \
                            'no' in data2Labels[n]:
                listToDelete.append(n)

        dataVecReq = np.delete(dataVecReq, listToDelete, axis=0)
        npdata2ShortLabels = np.asarray(data2ShortLabels)
        npdata2ShortLabels = np.delete(npdata2ShortLabels,
                                       listToDelete,
                                       axis=0)
        # find left hand push and pull and label as pull and push respectively
        data2ShortLabels = np.ndarray.tolist(npdata2ShortLabels)

        self.Y = dataVecReq
        self.L = data2ShortLabels
        # logging.info('\n'.join(data2Labels))
        logging.info(self.Y.shape)
        logging.info(len(self.L))

        # now that all joints are in the form of a window, time to create
        # all possible vectors to classify

        self.allDataDict = dict()
        self.allDataDict['Y'] = self.dataVec
        self.allDataDict['L'] = self.data2Labels

        listOfVectorsToClassify = self.listOfClassificationVectors(
            self.featureSequence, objectsList)
        for j in listOfVectorsToClassify:
            logging.info(j)
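
The short-label logic above reduces raw labels such as 'push_object_blue_hand_left' to an action class, optionally flipping left-handed push/pull actions and appending the hand side. A minimal standalone sketch of that rule (shorten_label is a hypothetical helper, not part of the repository):

def shorten_label(label, flip=False, sep_rl=False):
    # 'push_object_blue_hand_left' -> 'push_object' (or 'pull_object' when flipped)
    parts = label.split('_')
    short = '_'.join(parts[:2])
    if parts[0] in ('push', 'pull'):
        hand = parts[-2] if parts[-1] == 'no' else parts[-1]
        if hand == 'left' and flip:
            # mirror left-handed actions so both hands share one class
            parts[0] = 'pull' if parts[0] == 'push' else 'push'
            short = '_'.join(parts[:2])
        if sep_rl:
            short += '_' + hand
    return short

# shorten_label('push_object_blue_hand_left', flip=True)   -> 'pull_object'
# shorten_label('pull_object_cup_hand_right', sep_rl=True) -> 'pull_object_right'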
Example #14
    def readData1(self, root_data_dir, participant_index, *args, **kw):
        self.rawData, labelsList = self.diskDataToLiveData(root_data_dir)
        data2, jointsList, objectsList = self.convertToDict(self.rawData, 'testing', verbose=self.verbose)

        # extract a set of labels
        labels = list(set(labelsList))
        labels.sort()

        logging.info('')
        # convert text labels into numbers 
        labelNumsList = None
        for n, k in enumerate(labelsList):
            res = [m for m, l in enumerate(labels) if l == k]
            if n == 0:
                labelNumsList = np.array(res)
            else:
                labelNumsList = np.vstack([labelNumsList, res])
        logging.info('shape of number labels:' + str(labelNumsList.shape))

        uu, tmp = utils.transformTimeSeriesToSeq(labelNumsList, self.paramsDict['windowSize'],
                                                 self.paramsDict['windowOffset'], False, False)
        data2NumLabels = uu
        logging.info('windowed number labels shape:' + str(data2NumLabels.shape))

        # now that labels are in windowed form it is time to
        # assign them a text label again that describes them
        # the rule here is: if the whole window belongs to the same label,
        # that label is assigned; otherwise it is labelled as a transition
        data2Labels = []
        for j in data2NumLabels:
            numItems = list(set(j))
            if len(numItems) == 1:
                l = labels[int(numItems[0])]
                data2Labels.append(l)
            else:
                # Another way to do this would be to label it according to a 75% majority.
                # This would decrease the region size of the transition blocks,
                # which are currently dependent on windowSize
                data2Labels.append('transition')

        logging.info('windowed data labels compressed:' + str(len(data2Labels)))

        logging.info('')
        # create list of specific joints to be used

        jointsToUse = []
        objectDict = dict()
        handDict = dict()
        for j in self.paramsDict['includeParts']:
            if j == 'object':
                for k in objectsList:
                    if k != 'partner':
                        objectDict[k] = (len(jointsToUse))
                        jointsToUse.append(k)
            elif 'hand' in j:
                handDict[j] = (len(jointsToUse))
                jointsToUse.append(j)
            else:
                jointsToUse.append(j)

        combineObjects = len(objectDict) > 1

        combineHands = len(handDict) > 1

        logging.info(jointsToUse)
        logging.info(objectDict)
        logging.info(handDict)

        # concatenate data for all joints in a single vector
        logging.info('')
        dataVecAll = None
        for j in jointsToUse:
            if dataVecAll is None:
                dataVecAll = data2[j]
            else:
                dataVecAll = np.hstack([dataVecAll, data2[j]])
        itemsPerJoint = dataVecAll.shape[1] / len(jointsToUse)
        logging.info(dataVecAll.shape)
        logging.info(itemsPerJoint)
        self.itemsPerJoint = itemsPerJoint
        logging.info('')

        # it is now time to combine objects if multiple exist
        #
        self.featureSequence = ['object']
        logging.info('')
        combinedObjs = None
        if combineObjects:
            logging.info('Combining Objects')
            for j in range(len(data2Labels)):
                #         logging.info(data2Labels[j])
                if len(data2Labels[j].split('_')) > 2:
                    idxBase = objectDict[data2Labels[j].split('_')[2]] * itemsPerJoint
                else:
                    idxBase = objectDict[objectDict.keys()[0]] * itemsPerJoint

                if combinedObjs is None:
                    combinedObjs = dataVecAll[j, idxBase:idxBase + itemsPerJoint]
                else:
                    combinedObjs = np.vstack([combinedObjs, dataVecAll[j, idxBase:idxBase + itemsPerJoint]])
            logging.info(combinedObjs.shape)

        logging.info(dataVecAll.shape)

        logging.info('')
        # it is now time to combine hands if multiple exist
        combinedHands = None
        if combineHands and self.paramsDict['combineHands']:
            logging.info('Combining hands')
            self.handsCombined = True
            self.featureSequence.append('hand')
            for j in range(len(data2Labels)):
                if len(data2Labels[j].split('_')) > 2:
                    idxBase = handDict[data2Labels[j].split('_')[3] + data2Labels[j].split('_')[4].capitalize()] * \
                              itemsPerJoint
                else:
                    idxBase = handDict[handDict.keys()[0]] * itemsPerJoint

                if combinedHands is None:
                    combinedHands = dataVecAll[j, idxBase:idxBase + itemsPerJoint]
                else:
                    combinedHands = np.vstack([combinedHands, dataVecAll[j, idxBase:idxBase + itemsPerJoint]])
            logging.info(dataVecAll.shape)
            logging.info(combinedHands.shape)
        else:
            self.handsCombined = False

        dataVecReq = None

        if combinedHands is not None:
            dataVecReq = combinedHands

        if combinedObjs is not None:
            if dataVecReq is None:
                dataVecReq = combinedObjs
            else:
                dataVecReq = np.hstack([dataVecReq, combinedObjs])

        logging.info(jointsToUse)
        for j, item in enumerate(jointsToUse):
            if self.handsCombined:
                if item not in handDict and item not in objectDict:
                    self.featureSequence.append(item)
                    idxBase = j * itemsPerJoint

                    if dataVecReq is None:
                        dataVecReq = dataVecAll[:, idxBase:idxBase + itemsPerJoint]
                    else:
                        dataVecReq = np.hstack([dataVecReq, dataVecAll[:, idxBase:idxBase + itemsPerJoint]])
            else:
                if item not in objectDict:
                    self.featureSequence.append(item)
                    idxBase = j * itemsPerJoint

                    if dataVecReq is None:
                        dataVecReq = dataVecAll[:, idxBase:idxBase + itemsPerJoint]
                    else:
                        dataVecReq = np.hstack([dataVecReq, dataVecAll[:, idxBase:idxBase + itemsPerJoint]])

        logging.info(dataVecReq.shape)
        logging.info(len(data2Labels))
        logging.info('')
        self.dataVec = copy.deepcopy(dataVecReq)

        data2ShortLabels = []
        for j in data2Labels:
            splitLabel = j.split('_')
            slabel = ('_'.join(splitLabel[:2]))

            if splitLabel[0] == 'push' or splitLabel[0] == 'pull':
                if splitLabel[-1] == 'no':
                    add = splitLabel[-2]
                else:
                    add = splitLabel[-1]

                if add == 'left' and self.paramsDict['flip']:
                    if splitLabel[0] == 'push':
                        splitLabel[0] = 'pull'
                    else:
                        splitLabel[0] = 'push'
                    slabel = ('_'.join(splitLabel[:2]))

                if self.paramsDict['sepRL']:
                    slabel += '_' + add

            data2ShortLabels.append(slabel)

        self.data2Labels = copy.deepcopy(data2ShortLabels)

        if self.paramsDict['sepRL']:
            if 'pull_object' in self.paramsDict['actionsAllowedList']:
                idx = self.paramsDict['actionsAllowedList'].index('pull_object')
                self.paramsDict['actionsAllowedList'][idx] = 'pull_object_right'
                self.paramsDict['actionsAllowedList'].append('pull_object_left')

            if 'push_object' in self.paramsDict['actionsAllowedList']:
                idx = self.paramsDict['actionsAllowedList'].index('push_object')
                self.paramsDict['actionsAllowedList'][idx] = 'push_object_right'
                self.paramsDict['actionsAllowedList'].append('push_object_left')

        # remove labels which will not be trained
        listToDelete = []
        for n in reversed(range(len(data2Labels))):
            if len([j for j in self.paramsDict['actionsAllowedList'] if j in data2Labels[n]]) == 0 or \
                            'no' in data2Labels[n]:
                listToDelete.append(n)

        dataVecReq = np.delete(dataVecReq, listToDelete, axis=0)
        npdata2ShortLabels = np.asarray(data2ShortLabels)
        npdata2ShortLabels = np.delete(npdata2ShortLabels, listToDelete, axis=0)
        # find left hand push and pull and label as pull and push respectively
        data2ShortLabels = np.ndarray.tolist(npdata2ShortLabels)

        self.Y = dataVecReq
        self.L = data2ShortLabels
        # logging.info('\n'.join(data2Labels))
        logging.info(self.Y.shape)
        logging.info(len(self.L))

        # now that all joints are in the form of a window, time to create
        # all possible vectors to classify

        self.allDataDict = dict()
        self.allDataDict['Y'] = self.dataVec
        self.allDataDict['L'] = self.data2Labels

        listOfVectorsToClassify = self.listOfClassificationVectors(self.featureSequence, objectsList)
        for j in listOfVectorsToClassify:
            logging.info(j)
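
The windowed-label compression used by readData1 can be isolated into a small pure function; the sketch below assumes integer label windows of the kind produced by utils.transformTimeSeriesToSeq:

import numpy as np

def compress_window_labels(windowed_nums, label_names):
    # a window keeps its label only when it is homogeneous;
    # mixed windows are marked as transitions between actions
    compressed = []
    for window in windowed_nums:
        unique = set(np.asarray(window).ravel())
        if len(unique) == 1:
            compressed.append(label_names[int(unique.pop())])
        else:
            compressed.append('transition')
    return compressed

# compress_window_labels(np.array([[0, 0, 0], [0, 1, 1]]), ['wave', 'push'])
# -> ['wave', 'transition']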
Example #15
def calibrateSingleModelRecall(thisModel):
    yCalib = formatDataFunc(thisModel[0].allDataDict['Y'])
    logging.info('entering segment testing')
    labelList, confMatrix, ret, variancesKnown, variancesUnknown = segmentTesting(thisModel, yCalib,
                                                                                  thisModel[0].allDataDict['L'],
                                                                                  thisModel[0].verbose, 'calib',
                                                                                  serialMode=False,
                                                                                  optimise=thisModel[0].optimiseRecall,
                                                                                  calibrate=True)
    thisModel[0].classificationDict = dict()

    if thisModel[0].useMaxDistance:
        [mk, vk, rk] = utils.meanVar_varianceDistribution(variancesKnown)
        [muk, vuk, ruk] = utils.meanVar_varianceDistribution(variancesUnknown)

        distance = []
        for j in range(len(mk)):
            distance.append(utils.bhattacharyya_distance(mk[j], muk[j], vk[j], vuk[j]))

        maxIdx = distance.index(max(distance))
        thisModel[0].classificationDict['bestDistanceIDX'] = maxIdx
        thisModel[0].classificationDict['bestDistance_props'] = {'KnownMean': mk[maxIdx], 'UnknownMean': muk[maxIdx],
                                                                 'KnownVar': vk[maxIdx], 'UnknownVar': vuk[maxIdx]}

        # if maxIdx < len(mk) - 2:
        #     thisModel[0].bestSegOperation = maxIdx
        # elif maxIdx == len(mk) - 2:
        #     thisModel[0].bestSegOperation = 'sum'
        # elif maxIdx == len(mk) - 1:
        #     thisModel[0].bestSegOperation = 'mean'

        intersection = utils.solve_intersections(mk[maxIdx], muk[maxIdx], np.sqrt(vk[maxIdx]), np.sqrt(vuk[maxIdx]))

        maxLim = max(rk[maxIdx][1], ruk[maxIdx][1])
        minLim = min(rk[maxIdx][0], ruk[maxIdx][0])

        delList = []
        for j in range(len(intersection)):
            if intersection[j] > maxLim or intersection[j] < minLim:
                delList.append(j)

        thisModel[0].classificationDict['segIntersections'] = np.delete(intersection, delList)
        thisModel[0].classificationDict['bhattaDistances'] = distance

        logging.info('Num Intersections: ' + str(len(thisModel[0].classificationDict['segIntersections'])))

        [thisModel[0].classificationDict['varianceThreshold'],
         thisModel[0].classificationDict['varianceDirection']] = \
            calculateVarianceThreshold(thisModel[0].classificationDict['segIntersections'], mk[maxIdx], muk[maxIdx],
                                       vk[maxIdx], vuk[maxIdx])

        logging.info('varianceThreshold ' + str(thisModel[0].classificationDict['varianceThreshold']))
        logging.info('varianceDirection ' + str(thisModel[0].classificationDict['varianceDirection']))
    else:
        variancesKnownArray = np.asarray(variancesKnown)
        variancesUnknownArray = np.asarray(variancesUnknown)
        varianceAllArray = np.vstack([variancesKnownArray, variancesUnknownArray])
        histKnown = [None] * (len(variancesKnownArray[0]) - 2)
        binEdges = [None] * (len(variancesKnownArray[0]) - 2)
        histUnknown = [None] * (len(variancesKnownArray[0]) - 2)

        thisModel[0].classificationDict['binWidth'] = thisModel[0].paramsDict['binWidth']
        thisModel[0].classificationDict['method'] = thisModel[0].paramsDict['method']

        numBins = np.ceil(np.max(varianceAllArray) / thisModel[0].classificationDict['binWidth'])

        bins = range(int(numBins))
        bins = np.multiply(bins, thisModel[0].classificationDict['binWidth'])

        for j in range(len(variancesKnown[0]) - 2):
            histKnown[j], binEdges[j] = np.histogram(variancesKnownArray[:, j], bins=bins)
            histKnown[j] = 1.0 * histKnown[j] / np.sum(histKnown[j])

            histUnknown[j], _ = np.histogram(variancesUnknownArray[:, j], bins=bins)
            histUnknown[j] = 1.0 * histUnknown[j] / np.sum(histUnknown[j])

        thisModel[0].classificationDict['histKnown'] = histKnown
        thisModel[0].classificationDict['binEdgesKnown'] = binEdges
        thisModel[0].classificationDict['histUnknown'] = histUnknown

    thisModel[0].calibrated = True
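
utils.bhattacharyya_distance above is assumed to score how separable the known and unknown variance distributions are; for two univariate Gaussians the distance has the closed form sketched here:

import numpy as np

def bhattacharyya_gauss(mu_k, mu_u, var_k, var_u):
    # closed-form Bhattacharyya distance between N(mu_k, var_k) and N(mu_u, var_u)
    return (0.25 * np.log(0.25 * (var_k / var_u + var_u / var_k + 2.0)) +
            0.25 * (mu_k - mu_u) ** 2 / (var_k + var_u))

# larger distance -> the known/unknown variance histograms overlap less,
# which is why the index with the maximum distance is used for thresholding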
Example #16
def singleRecall(thisModel, testInstance, verbose, visualiseInfo=None, optimise=100):
    # Returns the predictive mean, the predictive variance and the axis (pp) of the latent space backwards mapping.
    # mm,vv,pp=self.SAMObject.pattern_completion(testFace, visualiseInfo=visualiseInfo)
    # if verbose:
    # logging.info('single model recall')
    textStringOut = ''
    # normalize incoming data
    testValue = testInstance - thisModel.Ymean
    testValue /= thisModel.Ystd

    try:
        ret = thisModel.SAMObject.pattern_completion(testValue, visualiseInfo=visualiseInfo, optimise=optimise)
    except IndexError:
        return ['unknown', 0]
    mm = ret[0]
    vv = list(ret[1][0])
    svv = sum(vv)
    mvv = svv/len(vv)
    vv.append(svv)
    vv.append(mvv)

    # find nearest neighbour of mm and SAMObject.model.X

    k = np.matlib.repmat(mm[0].values, thisModel.SAMObject.model.X.mean.shape[0], 1)
    pow2 = np.power(thisModel.SAMObject.model.X.mean - k, 2)
    s = np.power(np.sum(pow2, 1), 0.5)
    nn = np.argmin(s)
    min_value = s[nn]

    if thisModel.SAMObject.type == 'mrd':
        classLabel = thisModel.textLabels[int(thisModel.SAMObject.model.bgplvms[1].Y[nn, :])]
    elif thisModel.SAMObject.type == 'bgplvm':
        classLabel = thisModel.textLabels[int(thisModel.L[nn, :])]

    known = True
    if thisModel.calibrated:
        if thisModel.useMaxDistance:
            known = utils.varianceClass(thisModel.classificationDict['varianceDirection'],
                                vv[thisModel.classificationDict['bestDistanceIDX']],
                                thisModel.classificationDict['varianceThreshold'])

            details = str(thisModel.classificationDict['varianceThreshold']) + ' ' + \
                      str(thisModel.classificationDict['varianceDirection'])

            probClass = vv[thisModel.classificationDict['bestDistanceIDX']]
        else:
            P_Known_given_X = utils.PfromHist(vv[:-2], thisModel.classificationDict['histKnown'],
                                              thisModel.classificationDict['binWidth'])
            P_Unknown_given_X = utils.PfromHist(vv[:-2], thisModel.classificationDict['histUnknown'],
                                                thisModel.classificationDict['binWidth'])

            if thisModel.classificationDict['method'] == 'mulProb':
                s1 = reduce(lambda x, y: x * y, P_Known_given_X)
                s2 = reduce(lambda x, y: x * y, P_Unknown_given_X)
                known = s1 > s2
            else:
                s1 = np.sum(P_Known_given_X)
                s2 = np.sum(P_Unknown_given_X)
                known = s1 > s2

            if known:
                probClass = s1
                details = str(s1) + ' > ' + str(s2)
            else:
                probClass = s2
                details = str(s2) + ' > ' + str(s1)

    if thisModel.calibrated:
        if known:
            textStringOut = classLabel
        else:
            textStringOut = 'unknown'
            runnerUp = classLabel
    else:
        textStringOut = classLabel

    if verbose:
        if thisModel.calibrated:
            if textStringOut == 'unknown':
                logging.info("With " + str(probClass) + " prob. error the new instance is " + str(runnerUp))
                logging.info('But ' + str(details) + ', so classifying as ' + str(textStringOut))
            else:
                logging.info("With " + str(probClass) + " prob. error the new instance is " + str(textStringOut))
        else:
            logging.info("With " + str(vv) + " prob. error the new instance is " + str(textStringOut))

    if thisModel.calibrated:
        return [textStringOut, probClass/len(vv)]
    else:
        return [textStringOut, vv]
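
The repmat/power/argmin block in singleRecall is a plain Euclidean nearest-neighbour search over the latent means; an equivalent, more direct sketch (names are illustrative):

import numpy as np

def nearest_latent_point(latent_means, query):
    # latent_means: (N, Q) latent training points; query: (Q,) projected test point
    dists = np.linalg.norm(latent_means - query, axis=1)
    nn = int(np.argmin(dists))
    return nn, dists[nn]  # index used to look up the class label, plus its distance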
Example #17
def calibrateMultipleModelRecall(thisModel):
    cmSize = len(thisModel[0].textLabels)
    confMatrix = np.zeros((cmSize, cmSize))

    # Create Validation set
    Y_valid = []
    Y_testing = []
    for i in range(len(thisModel)):
        if thisModel[i].SAMObject.model:
            # De-normalize from the model which stored this test data
            yy_test = thisModel[i].Ytestn.copy()
            yy_test *= thisModel[i].Ystd
            yy_test += thisModel[i].Ymean
            y_valid_tmp, y_test_tmp, _, _ = utils.random_data_split(yy_test, [0.5, 0.5])
            Y_valid.append(y_valid_tmp.copy())
            Y_testing.append(y_test_tmp.copy())

    # Compute familiarities in VALIDATION SET
    familiarities = [None] * (len(thisModel) - 1)
    for i in range(len(thisModel)):
        if thisModel[i].SAMObject.model:
            # N_test x N_labels matrix.
            familiarities[i - 1] = np.zeros((Y_valid[i - 1].shape[0], (len(thisModel) - 1)))
            logging.info("## True label is " + thisModel[i].modelLabel)
            for k in range(Y_valid[i - 1].shape[0]):
                sstest = []
                logging.info('# k= ' + str(k))
                for j in range(len(thisModel)):
                    if thisModel[j].SAMObject.model:
                        yy_test = Y_valid[i - 1][k, :][None, :].copy()
                        # Normalize according to the model to predict
                        yy_test -= thisModel[j].Ymean
                        yy_test /= thisModel[j].Ystd
                        sstest.append(thisModel[j].SAMObject.familiarity(yy_test, optimise=thisModel[0].optimiseRecall))
                        familiarities[i - 1][k, j - 1] = sstest[-1]
                msg = ''
                for j in range(len(sstest)):
                    if j == np.argmax(sstest):
                        msg = '   *'
                    else:
                        msg = '    '
                    logging.info(msg + '      Familiarity of model ' + thisModel[j + 1].modelLabel + ' given label: ' +
                          thisModel[i].modelLabel + ' in valid: ' + str(sstest[j]))

                confMatrix[i - 1, np.argmax(sstest)] += 1
    calculateData(thisModel[0].textLabels, confMatrix)

    # At this point we have:
    # familiarities[i][k,j] -> familiarity for true label i, instance k
    #                          predicted by model trained in label j
    # ############# Train Familiarity classifier in VALIDATION SET
    #
    classifiers = []
    classif_thresh = []
    familiarity_predictions = []
    tmp = []
    for i in range(len(thisModel[0].textLabels)):
        X_train = familiarities[0][:, i][:, None]
        y_train = np.zeros((familiarities[0][:, i][:, None].shape[0], 1))
        for j in range(1, len(thisModel[0].textLabels)):
            X_train = np.vstack((X_train, familiarities[j][:, i][:, None]))
            y_train = np.vstack((y_train, j + np.zeros((familiarities[j][:, i][:, None].shape[0], 1))))
        tmp.append(X_train)
        n_classes = len(np.unique(y_train))

        # Try GMMs using different types of covariances.
        classifiers.append(GMM(n_components=n_classes, covariance_type='full', init_params='wc', n_iter=2000))

        # Since we have class labels for the training data, we can
        # initialize the GMM parameters in a supervised manner.
        classifiers[-1].means_ = np.array([X_train[y_train == kk].mean(axis=0)
                                           for kk in xrange(n_classes)])[:, None]
        classifiers[-1].fit(X_train)
        familiarity_predictions.append(classifiers[-1].predict(X_train))

        # Find threshold of confident classification of model i predicting label i
        tmp_i = classifiers[i].predict_proba(X_train[y_train == i][:, None])[:, i]
        tmp_s = 0.8
        # If in the test phase we get a predict_proba which falls in the threshold i, then
        # model i is confident for this prediction.
        classif_thresh.append([tmp_i.mean() - tmp_s * tmp_i.std(), tmp_i.mean() + tmp_s * tmp_i.std()])

    thisModel[0].classificationDict['classifiers'] = classifiers
    thisModel[0].classificationDict['classif_thresh'] = classif_thresh
    thisModel[0].calibrated = True
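
The classif_thresh entries computed above define, per label, a band of predict_proba values inside which a model counts as confident; a sketch of how such a band might be derived and applied at test time (helper names are hypothetical):

import numpy as np

def confidence_band(calib_probs, spread=0.8):
    # band of 'confident' probabilities: mean +/- spread * std over calibration data
    m, s = np.mean(calib_probs), np.std(calib_probs)
    return [m - spread * s, m + spread * s]

def is_confident(prob, band):
    # test-time check: a prediction is trusted only inside the calibrated band
    return band[0] <= prob <= band[1]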
Example #18
    def readData(self, root_data_dir, participant_index, *args, **kw):
        self.rawData, labelsList = self.diskDataToLiveData(root_data_dir)
        data2, jointsList, objectsList = self.convertToDict(self.rawData, 'testing', verbose=self.verbose)
        logging.info('unique labels' + str(set(labelsList)))
        # extract a set of labels
        labels = list(set(labelsList))
        labels.sort()

        logging.info('')
        # convert text labels into numbers
        labelNumsList = None
        for n, k in enumerate(labelsList):
            res = [m for m, l in enumerate(labels) if l == k]
            if n == 0:
                labelNumsList = np.array(res)
            else:
                labelNumsList = np.vstack([labelNumsList, res])
        logging.info('shape of number labels: ' + str(labelNumsList.shape))

        uu, tmp = utils.transformTimeSeriesToSeq(labelNumsList, self.paramsDict['windowSize'],
                                                 self.paramsDict['windowOffset'], False, False)
        data2NumLabels = uu
        logging.info('windowed number labels shape:' + str(data2NumLabels.shape))

        # now that labels are in windowed form it is time to
        # assign them a text label again that describes them
        # the rule here is: if the whole window belongs to the same label,
        # that label is assigned; otherwise it is labelled as a transition
        data2Labels = []
        for j in data2NumLabels:
            numItems = list(set(j))
            if len(numItems) == 1:
                l = labels[int(numItems[0])]
                data2Labels.append(l)
            else:
                # Another way to do this would be to label it according to a 75% majority.
                # This would decrease the region size of the transition blocks,
                # which are currently dependent on windowSize
                data2Labels.append('transition')
        logging.info('after transition unique set ' + str(set(data2Labels)))
        logging.info('windowed data labels compressed: ' + str(len(data2Labels)))

        logging.info('')
        # create list of specific joints to be used

        jointsToUse = []
        objectDict = dict()
        handDict = dict()
        for j in self.paramsDict['includeParts']:
            if j == 'object':
                for k in objectsList:
                    if k != 'partner':
                        objectDict[k] = (len(jointsToUse))
                        jointsToUse.append(k)
            elif 'hand' in j:
                handDict[j] = (len(jointsToUse))
                jointsToUse.append(j)
            else:
                jointsToUse.append(j)

        combineObjects = len(objectDict) > 1

        combineHands = len(handDict) > 1

        logging.info(jointsToUse)
        logging.info(objectDict)
        logging.info(handDict)

        # concatenate data for all joints in a single vector
        logging.info('')
        dataVecAll = None
        for j in jointsToUse:
            if dataVecAll is None:
                dataVecAll = data2[j]
            else:
                dataVecAll = np.hstack([dataVecAll, data2[j]])
        itemsPerJoint = dataVecAll.shape[1] / len(jointsToUse)
        logging.info(dataVecAll.shape)
        logging.info(itemsPerJoint)
        self.itemsPerJoint = itemsPerJoint
        logging.info('')
        # ------------------------------------------------------------------
        # it is now time to combine objects if multiple exist
        #

        logging.info('')
        self.featureSequence = []
        combinedObjs = dict()
        if combineObjects and 'object' in self.paramsDict['includeParts']:
            self.featureSequence.append('object')
            logging.info('Combining Objects')
            for n in objectDict:
                idxBase = objectDict[n] * itemsPerJoint
                combinedObjs[n] = dataVecAll[:, idxBase:idxBase + itemsPerJoint]

                logging.info(combinedObjs[n].shape)

        logging.info(dataVecAll.shape)

        logging.info('')
        # it is now time to combine hands if multiple exist
        combinedHands = dict()
        if combineHands and self.paramsDict['combineHands'] and \
           len([s for s in self.paramsDict['includeParts'] if 'hand' in s]) > 0:
            logging.info('Combining hands')
            self.handsCombined = True
            self.featureSequence.append('hand')
            for n in handDict:
                idxBase = handDict[n] * itemsPerJoint
                combinedHands[n] = dataVecAll[:, idxBase:idxBase + itemsPerJoint]

                logging.info(combinedHands[n].shape)
            logging.info(dataVecAll.shape)
        else:
            self.handsCombined = False

        logging.info(jointsToUse)
        otherJoints = None
        for j, item in enumerate(jointsToUse):
            if self.handsCombined:
                if item not in handDict and item not in objectDict:
                    self.featureSequence.append(item)
                    idxBase = j * itemsPerJoint

                    if otherJoints is None:
                        otherJoints = dataVecAll[:, idxBase:idxBase + itemsPerJoint]
                    else:
                        otherJoints = np.hstack([otherJoints, dataVecAll[:, idxBase:idxBase + itemsPerJoint]])
            else:
                if item not in objectDict:
                    self.featureSequence.append(item)
                    idxBase = j * itemsPerJoint

                    if otherJoints is None:
                        otherJoints = dataVecAll[:, idxBase:idxBase + itemsPerJoint]
                    else:
                        otherJoints = np.hstack([otherJoints, dataVecAll[:, idxBase:idxBase + itemsPerJoint]])
        if otherJoints is not None:
            logging.info(otherJoints.shape)

        self.listOfVectorsToClassify = []
        for j in self.featureSequence:
            if j == 'object':
                for k in objectsList:
                    if k != 'partner':
                        self.listOfVectorsToClassify.append([k])

            elif 'hand' in j:
                if self.handsCombined:
                    a = copy.deepcopy(self.listOfVectorsToClassify)
                    b = copy.deepcopy(self.listOfVectorsToClassify)
                    if len(self.listOfVectorsToClassify) > 0:
                        for l, m in enumerate(self.listOfVectorsToClassify):
                            a[l].append('handLeft')
                            b[l].append('handRight')
                        self.listOfVectorsToClassify = a + b
                    else:
                        self.listOfVectorsToClassify.append(['handLeft'])
                        self.listOfVectorsToClassify.append(['handRight'])

                else:
                    for l, m in enumerate(self.listOfVectorsToClassify):
                        self.listOfVectorsToClassify[l].append(j)

            else:
                for l, m in enumerate(self.listOfVectorsToClassify):
                    self.listOfVectorsToClassify[l].append(j)
        logging.info('Vectors to Classify:')
        for j in self.listOfVectorsToClassify:
            logging.info("\t" + str(j))

        dataVecReq = None
        objSection = None
        if combinedObjs:
            objSection = None
            for j in self.listOfVectorsToClassify:
                logging.info(str(j[0]))
                if objSection is None:
                    objSection = combinedObjs[j[0]]
                else:
                    objSection = np.vstack([objSection, combinedObjs[j[0]]])
            dataVecReq = objSection
            logging.info(str(objSection.shape))

        handsSection = None
        if combinedHands:
            for j in self.listOfVectorsToClassify:
                for l in j:
                    if 'hand' in l:
                        if handsSection is None:
                            handsSection = combinedHands[l]
                        else:
                            handsSection = np.vstack([handsSection, combinedHands[l]])
            if dataVecReq is None:
                dataVecReq = handsSection
            else:
                dataVecReq = np.hstack([dataVecReq, handsSection])
            logging.info(str(handsSection.shape))

        othersSection = None
        if otherJoints is not None:
            for j in self.listOfVectorsToClassify:
                logging.info(str(j[:]))
                if othersSection is None:
                    othersSection = otherJoints
                else:
                    othersSection = np.vstack([othersSection, otherJoints])

            if dataVecReq is None:
                dataVecReq = othersSection
            else:
                dataVecReq = np.hstack([dataVecReq, othersSection])

        logging.info(str(dataVecReq.shape))
        del handsSection, othersSection, objSection, combinedHands, combinedObjs, otherJoints

        # Also augment the labels list
        data2LabelsAugment = []
        for j in self.listOfVectorsToClassify:
            data2LabelsAugment.append([])

        for j in data2Labels:
            splitLabel = j.split('_')
            action = '_'.join(splitLabel[:2])

            if len(splitLabel) > 2:
                obj = splitLabel[2]
                hand = splitLabel[4]

                if combineHands:
                    handSubList = [k for k in self.listOfVectorsToClassify if 'hand' + hand.capitalize() in k]
                    if combineObjects:
                        vec = [f for f in handSubList if obj in f][0]
                    else:
                        vec = handSubList[0]
                else:
                    vec = [f for f in self.listOfVectorsToClassify if obj in f][0]
                # logging.info(data2Labels.index(j), vec)

                for n, k in enumerate(self.listOfVectorsToClassify):
                    if vec == k:
                        data2LabelsAugment[n].append(action)
                    else:
                        data2LabelsAugment[n].append('idle')
            else:
                for n, k in enumerate(self.listOfVectorsToClassify):
                    data2LabelsAugment[n].append(action)
        data2Labels = []
        for j in data2LabelsAugment:
            data2Labels += j
        logging.info('labels ' + str(len(data2Labels)))
        logging.info('data ' + str(dataVecReq.shape))
        self.allDataDict = dict()
        self.allDataDict['Y'] = copy.deepcopy(dataVecReq)
        self.allDataDict['L'] = copy.deepcopy(data2Labels)

        # ---------------------------------------------------------------------------------

        data2ShortLabels = []
        for j in data2Labels:
            splitLabel = j.split('_')
            slabel = ('_'.join(splitLabel[:2]))

            if splitLabel[0] == 'push' or splitLabel[0] == 'pull':
                if splitLabel[-1] == 'no':
                    add = splitLabel[-2]
                else:
                    add = splitLabel[-1]

                if add == 'left' and self.paramsDict['flip']:
                    if splitLabel[0] == 'push':
                        splitLabel[0] = 'pull'
                    else:
                        splitLabel[0] = 'push'
                    slabel = ('_'.join(splitLabel[:2]))

                if self.paramsDict['sepRL']:
                    slabel += '_' + add

            data2ShortLabels.append(slabel)

        self.data2Labels = copy.deepcopy(data2ShortLabels)
        logging.info('short label set: ' + str(set(self.data2Labels)))

        if self.paramsDict['sepRL']:
            if 'pull_object' in self.paramsDict['actionsAllowedList']:
                idx = self.paramsDict['actionsAllowedList'].index('pull_object')
                self.paramsDict['actionsAllowedList'][idx] = 'pull_object_right'
                self.paramsDict['actionsAllowedList'].append('pull_object_left')

            if 'push_object' in self.paramsDict['actionsAllowedList']:
                idx = self.paramsDict['actionsAllowedList'].index('push_object')
                self.paramsDict['actionsAllowedList'][idx] = 'push_object_right'
                self.paramsDict['actionsAllowedList'].append('push_object_left')

        # remove labels which will not be trained
        logging.info('actions allowed: ' + str(self.paramsDict['actionsAllowedList']))
        listToDelete = []
        for n in reversed(range(len(data2Labels))):
            if len([j for j in self.paramsDict['actionsAllowedList'] if j in data2Labels[n]]) == 0 or \
                            'no' in data2Labels[n]:
                listToDelete.append(n)

        dataVecReq = np.delete(dataVecReq, listToDelete, axis=0)
        npdata2ShortLabels = np.asarray(data2ShortLabels)
        npdata2ShortLabels = np.delete(npdata2ShortLabels, listToDelete, axis=0)
        # find left hand push and pull and label as pull and push respectively
        data2ShortLabels = np.ndarray.tolist(npdata2ShortLabels)

        self.Y = dataVecReq
        self.L = data2ShortLabels
        # logging.info('\n'.join(data2Labels))
        logging.info(self.Y.shape)
        logging.info(len(self.L))
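
Reading the loop that builds self.listOfVectorsToClassify, each vector is an (object, hand) combination followed by the remaining joints; a compact sketch with that interpretation (build order mirrors the left-then-right expansion above):

def build_classification_vectors(objects, hands, other_joints):
    # start from one vector per object, expand by hand, then append shared joints
    vecs = [[o] for o in objects] if objects else [[]]
    if hands:
        vecs = [v + [h] for h in hands for v in vecs]
    return [v + list(other_joints) for v in vecs]

# build_classification_vectors(['cup', 'ball'], ['handLeft', 'handRight'], ['head'])
# -> [['cup', 'handLeft', 'head'], ['ball', 'handLeft', 'head'],
#     ['cup', 'handRight', 'head'], ['ball', 'handRight', 'head']]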
Example #19
    def convertToDict(self, rawData, mode, verbose):
        data = dict()
        firstPass = True
        jointsList = []
        objectsList = []

        # logging.info('*******************')
        # for j in self.paramsDict:
        #     logging.info(j, self.paramsDict[j]
        # logging.info('*******************')

        for t in rawData:
            # parse skeleton data which has 9 sections by (x,y,z)
            for i in range(self.numJoints):
                a = i * 4
                # if t[a] == 'shoulderCenter':
                #     t[a] = 'chest'

                if firstPass:
                    data[t[a]] = np.array(
                        [float(t[a + 1]),
                         float(t[a + 2]),
                         float(t[a + 3])])
                    jointsList.append(t[a])
                else:
                    arr = np.array(
                        [float(t[a + 1]),
                         float(t[a + 2]),
                         float(t[a + 3])])
                    if data[t[a]] is not None:
                        data[t[a]] = np.vstack((data[t[a]], arr))
                    else:
                        data[t[a]] = arr

            currIdx = (self.numJoints * 4 - 1)
            numObjs = (len(t) - currIdx) / 5

            for i in range(numObjs):
                a = currIdx + 1 + (i * 5)
                if t[a] in data:
                    arr = np.array(
                        [float(t[a + 1]),
                         float(t[a + 2]),
                         float(t[a + 3])])
                    if data[t[a]] is not None:
                        data[t[a]] = np.vstack((data[t[a]], arr))
                    else:
                        data[t[a]] = arr
                else:
                    data[t[a]] = np.array(
                        [float(t[a + 1]),
                         float(t[a + 2]),
                         float(t[a + 3])])
                    if mode == 'testing' or t[a + 4] == '1':
                        objectsList.append(t[a])

            firstPass = False
        if verbose:
            logging.info('data has length = ' + str(len(data)) + ' joints')
            logging.info('each joint has an array of shape ' +
                         str(data['head'].shape))

        if self.paramsDict['filterData'] or 'vel' in self.paramsDict['components'] or \
                                            'acc' in self.paramsDict['components']:
            if verbose:
                logging.info('Filtering data with hamming window of size ' +
                             str(self.paramsDict['filterWindow']))
            for j in data.keys():
                t1 = utils.smooth1D(data[j][:, 0],
                                    self.paramsDict['filterWindow'])
                t2 = utils.smooth1D(data[j][:, 1],
                                    self.paramsDict['filterWindow'])
                t3 = utils.smooth1D(data[j][:, 2],
                                    self.paramsDict['filterWindow'])
                data[j] = np.hstack([t1[:, None], t2[:, None], t3[:, None]])

        if verbose:
            logging.info('data has length = ' + str(len(data)) + ' joints')
            logging.info('each joint has an array of shape ' +
                         str(data['head'].shape))
        # convert data and number labels into windows.
        # data is still in the form of a dictionary with the joints/objects as keys of the dict
        # Text labels contained in labels
        if verbose:
            logging.info('')
        noY = mode != 'testing'
        if mode == 'testing':
            offset = self.paramsDict['windowOffset']
        else:
            offset = 1

        data2 = dict()
        printExplanation = True
        for num, key in enumerate(data):
            data2[key] = None
            xx, yy = utils.transformTimeSeriesToSeq(
                data[key],
                timeWindow=self.paramsDict['windowSize'],
                offset=offset,
                normalised=self.paramsDict['normaliseWindow'],
                reduced=self.paramsDict['reduce'],
                noY=noY)

            if self.paramsDict['thresholdMovement'] or 'vel' in self.paramsDict['components'] or 'acc' in \
                    self.paramsDict['components']:
                winSize = xx.shape[1] / 3
                g = xx.size / winSize
                xxshape1 = xx.shape[0]
                xxshape2 = xx.shape[1]

                flatxx = xx.flatten()
                f = flatxx.reshape([g, winSize])
                xx = f.reshape([xxshape1, xxshape2])

                if self.paramsDict['thresholdMovement']:
                    if printExplanation and verbose:
                        logging.info('thresholding movement <' +
                                     str(self.paramsDict['moveThresh']))
                    ranges = np.ptp(f, axis=1)
                    a = ranges < self.paramsDict['moveThresh']
                    b = ranges > -self.paramsDict['moveThresh']
                    res = list(np.where(np.logical_and(a, b))[0])
                    if self.paramsDict['normaliseWindow']:
                        f[res] = 0
                    else:
                        for ll in res:
                            f[ll] = f[ll][0]

                if 'vel' in self.paramsDict['components']:
                    if printExplanation and verbose:
                        logging.info('Adding velocity to the feature vector')
                    xxvel = np.diff(f)
                    xxvel = xxvel.reshape([xxshape1, xxshape2 - 3])
                    xx = np.hstack([xx, xxvel])

                if 'acc' in self.paramsDict['components']:
                    if printExplanation and verbose:
                        logging.info(
                            'Adding acceleration to the feature vector')
                    xxacc = np.diff(f, n=2)
                    xxacc = xxacc.reshape([xxshape1, xxshape2 - 6])
                    xx = np.hstack([xx, xxacc])

            data2[key] = xx
            printExplanation = False

        if verbose:
            logging.info('data has length = ' + str(len(data2)) + ' joints')
            logging.info('each joint has an array of shape ' +
                         str(data2['head'].shape))

        return data2, jointsList, objectsList
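
The velocity and acceleration channels appended inside convertToDict are first and second differences over each per-axis window; a minimal sketch of that feature construction, assuming each row is one windowed 1-D signal:

import numpy as np

def add_dynamics(windows, use_vel=True, use_acc=True):
    # windows: (N, T) array; velocity drops one column, acceleration two
    feats = [windows]
    if use_vel:
        feats.append(np.diff(windows, n=1, axis=1))
    if use_acc:
        feats.append(np.diff(windows, n=2, axis=1))
    return np.hstack(feats)

# a (N, 10) window block becomes (N, 10 + 9 + 8) once both channels are added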
Example #20
# #----------- Train a standard model
# m1=SAM.SAM_Core.LFM()
# m1.store(observed={'Y':Y1[0:Ntr,:]}, inputs=X1[0:Ntr,:], Q=None, kernel=None, num_inducing=num_inducing)
# m1.learn()
# ret = m1.visualise()
# y_pred_standard = m1.pattern_completion(X1[Ntr:,:])[0]
# pb.figure()
# pb.plot(X1[Ntr:,:],Y1[Ntr:,:], 'x-')
# pb.plot(X1[Ntr:,:],y_pred_standard, 'ro-')
# pb.legend(('True','Pred'))
# pb.title('Standard GP')
# # ---------------------------------------

# Create transformed data (autoregressive dataset)
ws = 10  # window size
xx, yy = autoregressive.transformTimeSeriesToSeq(Y1, ws)

# uu,tmp = transformTimeSeriesToSeq(U1, ws)
# Test the above: np.sin(uu) - xx

# uu = yy**2 -2*yy + 5 + np.random.randn(*yy.shape) * 0.005
U1 = Y1**2 - 2 * Y1 + 5 + np.random.randn(*Y1.shape) * 0.005
uu, tmp = autoregressive.transformTimeSeriesToSeq(U1, ws)

Xtr = xx[0:Ntr, :]
Xts = xx[Ntr:, :]
Ytr = yy[0:Ntr, :]
Yts = yy[Ntr:, :]
Utr = uu[0:Ntr, :]
Uts = uu[Ntr:, :]
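
autoregressive.transformTimeSeriesToSeq is assumed to produce the standard NARX-style dataset: each input row stacks ws consecutive samples and the target is the sample that follows. A sketch of that transform:

import numpy as np

def to_autoregressive(y, ws):
    # y: (N, D) time series -> xx: (N - ws, ws * D) windows, yy: (N - ws, D) next samples
    N = y.shape[0]
    xx = np.hstack([y[i:N - ws + i, :] for i in range(ws)])
    yy = y[ws:, :]
    return xx, yy

# with ws = 10 as above, row t of xx holds y[t], ..., y[t+9] and yy[t] is y[t+10]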