def testMultiModalityDBN(opts):
    """Show how to use a multi-modality DBN to do classification.

    Trains a two-modality DBN (view-based + shape-based features) on
    pre-split train/test data, then reports and optionally saves the
    classification accuracy.  The code can be extended to more modalities.

    NOTE(review): this file defines testMultiModalityDBN more than once;
    on a plain import the last definition wins -- confirm which variant
    is intended before relying on this one.

    :param opts: parsed options; must provide trainViewBasedFeature,
        trainShapeBasedFeature, trainLabel, testViewBasedFeature,
        testShapeBasedFeature, testLabel, maxEpoch, model, isSaveResult
        and resultName.
    """
    # Hidden-layer sizes per modality and the width of the joint layer.
    nHid = [[2000, 1000], [5000, 2000]]
    nJoint = 3000

    # Load pre-split features (one .npy file per modality) and labels.
    trainViewBasedData = np.load(opts.trainViewBasedFeature)
    trainShapeBasedData = np.load(opts.trainShapeBasedFeature)
    trainLabel = np.load(opts.trainLabel)
    testViewBasedData = np.load(opts.testViewBasedFeature)
    testShapeBasedData = np.load(opts.testShapeBasedFeature)
    testLabel = np.load(opts.testLabel)
    nTrain = trainLabel.size
    nTest = testLabel.size

    # Per-layer training parameters.
    # view based layer1 (BB = Bernoulli-Bernoulli RBM)
    pV1 = {"maxEpoch": opts.maxEpoch, "modelType": "BB"}
    # view based layer2
    pV2 = {"maxEpoch": opts.maxEpoch}
    p1 = {"layer1": pV1, "layer2": pV2}
    # shape based layer1
    pS1 = {"maxEpoch": opts.maxEpoch, "modelType": "BB"}
    # shape based layer2
    pS2 = {"maxEpoch": opts.maxEpoch}
    p2 = {"layer1": pS1, "layer2": pS2}
    # joint layer
    pJ = {"maxEpoch": opts.maxEpoch}
    p = {"modality1": p1, "modality2": p2, "joint": pJ}

    # Train the multi-modality model.
    model = multiModalityDBNFit.multiModalityDBNFit(
        [trainViewBasedData, trainShapeBasedData],
        trainLabel, nHid, nJoint,
        isSaveModel=True, modelName=opts.model, **p)

    # Predict for the training set and the testing set.
    [trainR, F1] = multiModalityDBNPredict.multiModalityDBNPredict(
        model, [trainViewBasedData, trainShapeBasedData])
    [testR, F2] = multiModalityDBNPredict.multiModalityDBNPredict(
        model, [testViewBasedData, testShapeBasedData])

    # Count misclassified samples once and reuse the rates for both
    # reporting and saving (originally the same expressions were
    # evaluated twice, risking drift if one copy was edited).
    trainErr = sum(1 for x in range(nTrain) if trainLabel[x] != trainR[x])
    testErr = sum(1 for x in range(nTest) if testLabel[x] != testR[x])
    trainAcc = 1 - trainErr * 1.0 / nTrain
    testAcc = 1 - testErr * 1.0 / nTest

    # Single-argument print(...) behaves identically under Python 2's
    # print statement and Python 3's print function.
    print("---------------------------------------")
    print("train classification rate : %f " % trainAcc)
    print("test classification rate : %f " % testAcc)
    print("---------------------------------------")

    if opts.isSaveResult:
        # try/finally guarantees the shelf is closed even if a store fails.
        result = shelve.open(opts.resultName)
        try:
            result["nHid"] = nHid
            result["nJoint"] = nJoint
            result["maxEpoch"] = opts.maxEpoch
            result["trainAcc"] = trainAcc
            result["testAcc"] = testAcc
        finally:
            result.close()
def testMultiModalityDBN(opts):
    """Show how to use a multi-modality DBN to do classification.

    Variant that loads a single dataset per modality, shuffles it, and
    splits it into train/test by opts.trainPercent.  Only two modalities
    (view-based and shape-based) are used here, but the code can be
    extended to more modalities.

    NOTE(review): this file defines testMultiModalityDBN more than once;
    on a plain import the last definition wins -- confirm which variant
    is intended before relying on this one.

    :param opts: parsed options; must provide viewBasedFeature,
        shapeBasedFeature, label, trainPercent, maxEpoch, model,
        isSaveResult and resultName.
    """
    # Load one feature file per modality plus the shared labels.
    viewBasedData = np.load(opts.viewBasedFeature)
    shapeBasedData = np.load(opts.shapeBasedFeature)
    label = np.load(opts.label)
    if viewBasedData.shape[0] != shapeBasedData.shape[0]:
        print("different modalities must have the same number of samples")
        sys.exit()

    # Hidden-layer sizes per modality and the width of the joint layer.
    nHid = [[1000, 800], [5000, 2000]]
    nJoint = 2800

    # Shuffle all data and labels together so rows stay aligned.
    [viewBasedData, shapeBasedData, label] = util.shuffleMore(
        viewBasedData, shapeBasedData, label)

    # Split into train/test by the requested fraction.
    percent = opts.trainPercent
    nCase = viewBasedData.shape[0]
    nTrain = int(nCase * percent)
    nTest = nCase - nTrain
    # NOTE(review): the [i, :] slicing assumes label is 2-D
    # (nCase x k, e.g. one-hot) -- confirm against the data files.
    trainViewBasedData = viewBasedData[0:nTrain, :]
    trainShapeBasedData = shapeBasedData[0:nTrain, :]
    trainLabel = label[0:nTrain, :]
    testViewBasedData = viewBasedData[nTrain:, :]
    testShapeBasedData = shapeBasedData[nTrain:, :]
    testLabel = label[nTrain:, :]

    # Per-layer training parameters.
    # view based layer1 (BB = Bernoulli-Bernoulli RBM)
    pV1 = {"maxEpoch": opts.maxEpoch, "modelType": "BB"}
    # view based layer2
    pV2 = {"maxEpoch": opts.maxEpoch}
    p1 = {"layer1": pV1, "layer2": pV2}
    # shape based layer1
    pS1 = {"maxEpoch": opts.maxEpoch, "modelType": "BB"}
    # shape based layer2
    pS2 = {"maxEpoch": opts.maxEpoch}
    p2 = {"layer1": pS1, "layer2": pS2}
    # joint layer
    pJ = {"maxEpoch": opts.maxEpoch}
    p = {"modality1": p1, "modality2": p2, "joint": pJ}

    # Train the multi-modality model.
    model = multiModalityDBNFit.multiModalityDBNFit(
        [trainViewBasedData, trainShapeBasedData],
        trainLabel, nHid, nJoint,
        isSaveModel=True, modelName=opts.model, **p)

    # Predict for the training set and the testing set.
    [trainR, F1] = multiModalityDBNPredict.multiModalityDBNPredict(
        model, [trainViewBasedData, trainShapeBasedData])
    [testR, F2] = multiModalityDBNPredict.multiModalityDBNPredict(
        model, [testViewBasedData, testShapeBasedData])

    # Count misclassified samples once and reuse the rates for both
    # reporting and saving (originally the same expressions were
    # evaluated twice, risking drift if one copy was edited).
    trainErr = sum(1 for x in range(nTrain) if trainLabel[x] != trainR[x])
    testErr = sum(1 for x in range(nTest) if testLabel[x] != testR[x])
    trainAcc = 1 - trainErr * 1.0 / nTrain
    testAcc = 1 - testErr * 1.0 / nTest

    # Single-argument print(...) behaves identically under Python 2's
    # print statement and Python 3's print function.
    print("---------------------------------------")
    print("train classification rate : %f " % trainAcc)
    print("test classification rate : %f " % testAcc)
    print("---------------------------------------")

    if opts.isSaveResult:
        # try/finally guarantees the shelf is closed even if a store fails.
        result = shelve.open(opts.resultName)
        try:
            result["nHid"] = nHid
            result["nJoint"] = nJoint
            result["maxEpoch"] = opts.maxEpoch
            result["trainPercent"] = opts.trainPercent
            result["trainAcc"] = trainAcc
            result["testAcc"] = testAcc
            result["trainLabel"] = trainLabel
            result["trainR"] = trainR
            result["testLabel"] = testLabel
            result["testR"] = testR
        finally:
            result.close()
def testMultiModalityDBN(opts):
    """Show how to use a multi-modality DBN to do classification.

    Trains a two-modality DBN (view-based + shape-based features) on
    pre-split train/test data, then reports and optionally saves the
    classification accuracy.

    NOTE(review): this is a byte-for-byte duplicate of an earlier
    definition in this file; since the last definition wins on import,
    consider deleting one copy.

    :param opts: parsed options; must provide trainViewBasedFeature,
        trainShapeBasedFeature, trainLabel, testViewBasedFeature,
        testShapeBasedFeature, testLabel, maxEpoch, model, isSaveResult
        and resultName.
    """
    # Hidden-layer sizes per modality and the width of the joint layer.
    nHid = [[2000, 1000], [5000, 2000]]
    nJoint = 3000

    # Load pre-split features (one .npy file per modality) and labels.
    trainViewBasedData = np.load(opts.trainViewBasedFeature)
    trainShapeBasedData = np.load(opts.trainShapeBasedFeature)
    trainLabel = np.load(opts.trainLabel)
    testViewBasedData = np.load(opts.testViewBasedFeature)
    testShapeBasedData = np.load(opts.testShapeBasedFeature)
    testLabel = np.load(opts.testLabel)
    nTrain = trainLabel.size
    nTest = testLabel.size

    # Per-layer training parameters.
    # view based layer1 (BB = Bernoulli-Bernoulli RBM)
    pV1 = {"maxEpoch": opts.maxEpoch, "modelType": "BB"}
    # view based layer2
    pV2 = {"maxEpoch": opts.maxEpoch}
    p1 = {"layer1": pV1, "layer2": pV2}
    # shape based layer1
    pS1 = {"maxEpoch": opts.maxEpoch, "modelType": "BB"}
    # shape based layer2
    pS2 = {"maxEpoch": opts.maxEpoch}
    p2 = {"layer1": pS1, "layer2": pS2}
    # joint layer
    pJ = {"maxEpoch": opts.maxEpoch}
    p = {"modality1": p1, "modality2": p2, "joint": pJ}

    # Train the multi-modality model.
    model = multiModalityDBNFit.multiModalityDBNFit(
        [trainViewBasedData, trainShapeBasedData],
        trainLabel, nHid, nJoint,
        isSaveModel=True, modelName=opts.model, **p)

    # Predict for the training set and the testing set.
    [trainR, F1] = multiModalityDBNPredict.multiModalityDBNPredict(
        model, [trainViewBasedData, trainShapeBasedData])
    [testR, F2] = multiModalityDBNPredict.multiModalityDBNPredict(
        model, [testViewBasedData, testShapeBasedData])

    # Count misclassified samples once and reuse the rates for both
    # reporting and saving (originally the same expressions were
    # evaluated twice, risking drift if one copy was edited).
    trainErr = sum(1 for x in range(nTrain) if trainLabel[x] != trainR[x])
    testErr = sum(1 for x in range(nTest) if testLabel[x] != testR[x])
    trainAcc = 1 - trainErr * 1.0 / nTrain
    testAcc = 1 - testErr * 1.0 / nTest

    # Single-argument print(...) behaves identically under Python 2's
    # print statement and Python 3's print function.
    print("---------------------------------------")
    print("train classification rate : %f " % trainAcc)
    print("test classification rate : %f " % testAcc)
    print("---------------------------------------")

    if opts.isSaveResult:
        # try/finally guarantees the shelf is closed even if a store fails.
        result = shelve.open(opts.resultName)
        try:
            result["nHid"] = nHid
            result["nJoint"] = nJoint
            result["maxEpoch"] = opts.maxEpoch
            result["trainAcc"] = trainAcc
            result["testAcc"] = testAcc
        finally:
            result.close()