Example #1
0
def postTreatDev(cccs, preds, gs, nDim):
	"""Post-treat the dev predictions for one dimension: try a bias (mean
	shift) then a scale (std ratio), keeping each only if it improves the
	dev CCC.

	Returns (dev CCC, dev predictions, bias, scale). A returned bias or
	scale of 0.0 means that step was NOT kept — callers (see the companion
	test-side routine) test against 0.0 to decide whether to replay it.
	"""
	#Bias: shift the predictions so their mean matches the gold standard
	gsMean = np.nanmean(gs['dev'][nDim])
	predMean = np.nanmean(preds['dev'])
	bias = gsMean - predMean
	#Apply the bias and keep it only on improvement
	predCenter = preds['dev'] + bias
	cccBias = cccCalc(predCenter, gs['dev'][nDim])
	if (cccBias > cccs['dev']):
		cccs['dev'] = cccBias
		preds['dev'] = predCenter
	else :
		bias = 0.0
	#Scale: match the standard deviation of the gold standard
	stdGs = np.nanstd(gs['dev'][nDim])
	stdPred = np.nanstd(preds['dev'])
	#Guard against a constant prediction (std == 0): the division would
	#emit a RuntimeWarning and yield an inf/nan scale; the nan CCC would
	#then fail the comparison anyway, so fall straight back to "not kept"
	if (stdPred != 0.0):
		scale = stdGs/stdPred
		#Apply the scale and keep it only on improvement
		predScale = np.multiply(preds['dev'], scale)
		cccScale = cccCalc(predScale, gs['dev'][nDim])
		if (cccScale > cccs['dev']) :
			cccs['dev'] = cccScale
			preds['dev'] = predScale
		else :
			scale = 0.0
	else :
		scale = 0.0
	return cccs['dev'], preds['dev'], bias, scale
Example #2
0
def postTreatTest(gs, pred, ccc, bias, scale, nDim):
    """Replay on every partition the post-treatment selected on dev.

    `bias` and `scale` come from the dev-side post-treatment; a value of
    0.0 means the corresponding step was not kept there and is skipped
    here. The CCC is recomputed after each applied step (no improvement
    check — dev already made that decision).
    """
    for part in v.aPart:
        gold = np.array(gs[part])[nDim]
        #Shift the predictions by the dev-selected bias, if any
        if bias != 0.0:
            pred[part] = np.array(pred[part]) + bias
            ccc[part] = cccCalc(pred[part], gold)
        #Rescale the predictions by the dev-selected scale, if any
        if scale != 0.0:
            pred[part] = np.multiply(pred[part], scale)
            ccc[part] = cccCalc(pred[part], gold)
    return ccc, pred
Example #3
0
def unimodalPredDev(gs, feats, nDim):
	"""Search for the best unimodal regressor on the dev partition for one
	dimension: liblinear SVR over the C values in v.C, and (in full mode)
	the sklearn regressors listed in v.lFunc over their parameter grids.

	Returns (cccs, preds, function, alpha): best CCC and predictions per
	partition, plus the name and regularisation parameter of the winning
	model. `function`/`alpha` are None if no candidate ever beat the
	initial -1.0 CCC (previously this raised UnboundLocalError at return,
	e.g. when cccCalc yields nan and every comparison fails).
	"""
	parts = ['dev']
	[cccs, preds] = [{} for i in range(2)]
	for s in parts:
		cccs[s] = -1.0
	#Initialise the returned meta-parameters so they are always defined
	function = None
	alpha = None
	warnings.filterwarnings('ignore', category=ConvergenceWarning)
	#Liblinear SVR candidates
	for comp in v.C:
		#Options for liblinear
		options = "-s "+str(v.sVal)+" -c "+str(comp)+" -B 1 -q"
		#We learn the model on train
		model = train(gs['train'][nDim],feats['train'],options)
		#We predict on data
		for s in parts:
			pred = np.array(predict(gs[s][nDim],feats[s],model,"-q"))[0]
			#We calculate the correlation and store it on improvement
			ccc = cccCalc(np.array(pred),gs[s][nDim])
			if (ccc > cccs[s]):
				preds[s] = pred
				cccs[s] = ccc
				function = "SVR"
				alpha = comp
	if (v.fullMode == True):
		#We see if we can do better with sklearn
		for nbFunc in range(len(v.lFunc)):
			for c in v.parFunc[nbFunc]:
				func = v.lFunc[nbFunc]
				reg = func[0](alpha=c)
				#One task prediction
				if (func[1] == 0):
					reg.fit(feats['train'],gs['train'][nDim])
					for s in parts:
						#feats[s] (== feats['dev'] since parts is ['dev'])
						p = reg.predict(feats[s])
						ccc = cccCalc(p,gs[s][nDim])
						if (ccc > cccs[s]) :
							preds[s] = p
							cccs[s] = ccc
							function = func[2]
							alpha = c
				#Multi task prediction: fit all dims, keep column nDim
				else :
					reg.fit(feats['train'],np.transpose(gs['train']))
					for s in parts:
						p = reg.predict(feats[s])[:,nDim]
						ccc = cccCalc(p,gs[s][nDim])
						if (ccc > cccs[s]) :
							preds[s] = p
							cccs[s] = ccc
							function = func[2]
							alpha = c
	return cccs, preds, function, alpha
Example #4
0
def linRegMult(datas, func, c, part, cMode, cSize):
    """Fuse the per-modality predictions with a linear regression learned
    on dev, then re-predict on every partition in `part`.

    Returns res = [func, c, coefficients, per-dim CCC lists,
    {partition: predictions per dim}, cMode, cSize].
    """
    res = [func, c, [], [], {}, cMode, cSize]
    warnings.filterwarnings('ignore', category=ConvergenceWarning)
    #Instantiate the regressor; c == 0 means "no regularisation parameter"
    if (c != 0):
        reg = func[0](alpha=c)
    else:
        reg = func[0]()
    #Build the dev feature matrix by concatenating the modalities.
    #NOTE(review): `preds` is reset whenever nMod == 0, so only the LAST
    #nDim's concatenation survives — presumably the per-modality features
    #are identical across dims; verify against the data layout.
    for nDim in range(len(v.eName)):
        for nMod in range(len(datas['dev'][nDim])):
            if (nMod == 0):
                preds = datas['dev'][nDim][nMod]
            else:
                preds = np.concatenate((preds, datas['dev'][nDim][nMod]),
                                       axis=1)
    reg.fit(preds, np.transpose(datas['gsdev']))
    res[2] = reg.coef_
    #Doing the new prediction for each dimension and partition
    for nDim in range(len(v.eName)):
        cccs = []
        for s in part:
            #First dimension for this partition: create its list
            res[4].setdefault(s, [])
            res[4][s].append(predMulti(reg.coef_, datas[s], nDim, 1, cSize))
            cccs.append(
                round(cccCalc(res[4][s][nDim], datas['gs' + s][nDim]), 3))
        res[3].append(cccs)
    return res
Example #5
0
def unimodalPredTest(gs, feats, nDim, func, c):
    """Re-train the model selected on dev (name `func`, parameter `c`) and
    predict on every partition in v.aPart for one dimension.

    Returns (cccs, preds, func, c): CCC and predictions per partition,
    echoing back the model name and parameter.
    Raises ValueError if `func` is neither "SVR" nor a name listed in
    v.lFunc (previously this surfaced as an opaque UnboundLocalError).
    """
    [cccs, preds] = [{} for i in range(2)]
    for s in v.aPart:
        cccs[s] = -1.0
    warnings.filterwarnings('ignore', category=ConvergenceWarning)
    if (func == "SVR"):
        #Options for liblinear
        options = "-s " + str(v.sVal) + " -c " + str(c) + " -B 1 -q"
        #We learn the model on train
        model = train(gs['train'][nDim], feats['train'], options)
        #We predict on data
        for s in v.aPart:
            pred = np.array(predict(gs[s][nDim], feats[s], model, "-q"))[0]
            #We calculate the correlation and store it
            ccc = cccCalc(np.array(pred), gs[s][nDim])
            if (ccc > cccs[s]):
                preds[s] = pred
                cccs[s] = ccc
    else:
        #Look up the sklearn function by name (last match wins, as before);
        #fail fast with a clear error instead of an UnboundLocalError
        fun = None
        for f in v.lFunc:
            if (f[2] == func):
                fun = f
        if fun is None:
            raise ValueError("Unknown regression function: " + str(func))
        reg = fun[0](alpha=c)
        if (fun[1] == 0):
            #One-task: fit the single requested dimension
            reg.fit(feats['train'], gs['train'][nDim])
            for s in v.aPart:
                p = reg.predict(feats[s])
                ccc = cccCalc(p, gs[s][nDim])
                if (ccc > cccs[s]):
                    preds[s] = p
                    cccs[s] = ccc
        else:
            #Multi-task: fit all dimensions, keep column nDim
            reg.fit(feats['train'], np.transpose(gs['train']))
            for s in v.aPart:
                p = reg.predict(feats[s])[:, nDim]
                ccc = cccCalc(p, gs[s][nDim])
                if (ccc > cccs[s]):
                    preds[s] = p
                    cccs[s] = ccc
    return cccs, preds, func, c