# NOTE(review): this chunk opens mid-function -- the `def` line lies before the
# start of this view.  The header and argument unpacking below are inferred
# from the visible call site `pool.imap(computeTestAuc, paramList, chunkSize)`
# and from the 5-tuples appended to paramList further down; confirm against
# the full file.
def computeTestAuc(args):
    """Train a model on trainX and return averaged ROC curves for train and test.

    args is a 5-tuple (trainX, testX, maxLocalAuc, U, V) as built in the
    saveResults block below.  Returns (fprTrain, tprTrain, fprTest, tprTest).
    """
    trainX, testX, maxLocalAuc, U, V = args  # inferred unpacking -- verify
    # Fixed seed so every worker process trains reproducibly.
    numpy.random.seed(21)
    logging.debug(maxLocalAuc)
    #maxLocalAuc.learningRateSelect(trainX)
    U, V, trainMeasures, testMeasures, iterations, time = maxLocalAuc.learnModel(trainX, U=U, V=V, verbose=True)
    fprTrain, tprTrain = MCEvaluator.averageRocCurve(trainX, U, V)
    fprTest, tprTest = MCEvaluator.averageRocCurve(testX, U, V)
    return fprTrain, tprTrain, fprTest, tprTest

if saveResults:
    # Build one (trainX, testX, model, U, V) work item per (loss, rho) setting
    # and per train/test fold, then fan the training out across all cores.
    paramList = []
    chunkSize = 1
    U, V = maxLocalAuc.initUV(X)

    for loss, rho in losses:
        for trainX, testX in trainTestXs:
            maxLocalAuc.loss = loss
            maxLocalAuc.rho = rho
            # copy() so each worker gets an independent model and independent
            # initial factors rather than sharing mutable state.
            paramList.append((trainX, testX, maxLocalAuc.copy(), U.copy(), V.copy()))

    # maxtasksperchild bounds per-worker memory growth across many fits.
    pool = multiprocessing.Pool(maxtasksperchild=100, processes=multiprocessing.cpu_count())
    resultsIterator = pool.imap(computeTestAuc, paramList, chunkSize)
    #import itertools
    #resultsIterator = itertools.imap(computeTestAuc, paramList)

    meanFprTrains = []
    meanTprTrains = []
def computeObjectives(args):
    """Train a model on trainX and return its final training objective.

    args is a tuple (trainX, maxLocalAuc, U, V).  Returns trainMeasures[-1, 0],
    i.e. the first measure column after the last recorded iteration.
    """
    trainX, maxLocalAuc, U, V = args
    # Fixed seed so runs are reproducible across worker processes.
    numpy.random.seed(21)
    logging.debug(maxLocalAuc)
    U, V, trainMeasures, testMeasures, iterations, time = maxLocalAuc.learnModel(trainX, U=U, V=V, verbose=True)
    return trainMeasures[-1, 0]

if saveResults:
    # First, a long run (5000 iterations; "low learning rate" per the original
    # note) to get a near-optimal solution whose objective trajectory serves as
    # the ideal baseline for the parameter sweep below.
    U, V = maxLocalAuc.initUV(trainX)
    maxLocalAuc.maxIterations = 5000
    U2, V2, trainMeasures, testMeasures, iterations, time = maxLocalAuc.learnModel(trainX, U=U, V=V, verbose=True)
    idealTrainMeasures = trainMeasures[:, 0]

    # Then sweep the learning-rate parameters with a short budget per run.
    maxLocalAuc.maxIterations = 100
    paramList = []
    # One objective cell per (t0, alpha, eta, startAverage, fold) combination.
    objectives1 = numpy.zeros((t0s.shape[0], alphas.shape[0], etas.shape[0], startAverages.shape[0], folds))

    for t0 in t0s:
        for alpha in alphas:
            for eta in etas:
                for startAverage in startAverages:
                    for trainX, testX in trainTestXs:
                        # NOTE(review): the innermost loop body continues past
                        # the end of this chunk; placeholder keeps this view
                        # syntactically valid.
                        pass