def evaluate(Y, Yhat, S2=None, mY=None, sY=None,
             metrics=['Rho', 'RMSE', 'SMSE', 'EXPV', 'MSLL']):
    """ Compute error metrics for predicted responses.

    :param Y: true response variables (subjects x features)
    :param Yhat: predicted response variables (subjects x features)
    :param S2: predictive variance (required for MSLL)
    :param mY: mean of the training responses (required for MSLL)
    :param sY: standard deviation of the training responses (required for MSLL)
    :param metrics: list of metrics to compute

    :returns results: dictionary containing the requested metrics
    """

    feature_num = Y.shape[1]

    # find and remove bad variables from the response variables
    nz = np.where(np.bitwise_and(np.isfinite(Y).any(axis=0),
                                 np.var(Y, axis=0) != 0))[0]

    MSE = np.mean((Y - Yhat)**2, axis=0)

    results = dict()

    if 'RMSE' in metrics:
        RMSE = np.sqrt(MSE)
        results['RMSE'] = RMSE

    if 'Rho' in metrics:
        Rho = np.zeros(feature_num)
        pRho = np.ones(feature_num)
        Rho[nz], pRho[nz] = compute_pearsonr(Y[:, nz], Yhat[:, nz])
        results['Rho'] = Rho
        results['pRho'] = pRho

    if 'SMSE' in metrics:
        SMSE = np.zeros_like(MSE)
        SMSE[nz] = MSE[nz] / np.var(Y[:, nz], axis=0)
        results['SMSE'] = SMSE

    if 'EXPV' in metrics:
        EXPV = np.zeros(feature_num)
        EXPV[nz] = explained_var(Y[:, nz], Yhat[:, nz])
        results['EXPV'] = EXPV

    if 'MSLL' in metrics:
        if (S2 is not None) and (mY is not None) and (sY is not None):
            MSLL = np.zeros(feature_num)
            MSLL[nz] = compute_MSLL(Y[:, nz], Yhat[:, nz], S2[:, nz],
                                    mY[nz].reshape(-1, 1).T,
                                    (sY[nz]**2).reshape(-1, 1).T)
            results['MSLL'] = MSLL

    return results
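# A minimal, hypothetical usage sketch (not part of the original module): it
# assumes only numpy (imported as np above) and the evaluate() function, and
# illustrates the expected layout of subjects in rows and response variables
# in columns. The MSLL metric is omitted because it additionally needs S2, mY
# and sY.
def _example_evaluate():
    rng = np.random.default_rng(0)
    Y = rng.normal(size=(100, 5))               # "true" responses
    Yhat = Y + 0.1 * rng.normal(size=Y.shape)   # noisy predictions
    results = evaluate(Y, Yhat, metrics=['Rho', 'RMSE', 'SMSE', 'EXPV'])
    print(results['RMSE'])                      # one value per response variable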
def estimate(args):
    """ Estimate a normative model based on a neural process (NP), using the
    settings in the parsed argument namespace `args`, and write the results
    to `args.outdir`. """

    torch.set_default_dtype(torch.float32)
    args.type = 'MT'

    print('Loading the input Data ...')
    responses = fileio.load_nifti(args.respfile,
                                  vol=True).transpose([3, 0, 1, 2])
    response_shape = responses.shape
    with open(args.covfile, 'rb') as handle:
        covariates = pickle.load(handle)['covariates']
    with open(args.testcovfile, 'rb') as handle:
        test_covariates = pickle.load(handle)['test_covariates']
    if args.mask is not None:
        mask = fileio.load_nifti(args.mask, vol=True)
        mask = fileio.create_mask(mask, mask=None)
    else:
        mask = fileio.create_mask(responses[0, :, :, :], mask=None)
    if args.testrespfile is not None:
        test_responses = fileio.load_nifti(args.testrespfile,
                                           vol=True).transpose([3, 0, 1, 2])
        test_responses_shape = test_responses.shape

    print('Normalizing the input Data ...')
    covariates_scaler = StandardScaler()
    covariates = covariates_scaler.fit_transform(covariates)
    test_covariates = covariates_scaler.transform(test_covariates)
    response_scaler = MinMaxScaler()
    responses = unravel_2D(response_scaler.fit_transform(ravel_2D(responses)),
                           response_shape)
    if args.testrespfile is not None:
        test_responses = unravel_2D(
            response_scaler.transform(ravel_2D(test_responses)),
            test_responses_shape)
        test_responses = np.expand_dims(test_responses, axis=1)

    factor = args.m

    x_context = np.zeros([covariates.shape[0], factor, covariates.shape[1]],
                         dtype=np.float32)
    y_context = np.zeros([responses.shape[0], factor, responses.shape[1],
                          responses.shape[2], responses.shape[3]],
                         dtype=np.float32)
    x_all = np.zeros([covariates.shape[0], factor, covariates.shape[1]],
                     dtype=np.float32)
    x_context_test = np.zeros([test_covariates.shape[0], factor,
                               test_covariates.shape[1]], dtype=np.float32)
    y_context_test = np.zeros([test_covariates.shape[0], factor,
                               responses.shape[1], responses.shape[2],
                               responses.shape[3]], dtype=np.float32)

    print('Estimating the fixed-effects ...')
    # each of the `factor` context sets is built from a bootstrap resample of
    # the subjects, fitted with either a per-voxel linear regression ('ST')
    # or a multi-task lasso ('MT')
    for i in range(factor):
        x_context[:, i, :] = covariates[:, :]
        x_context_test[:, i, :] = test_covariates[:, :]
        idx = np.random.randint(0, covariates.shape[0], covariates.shape[0])
        if args.estimator == 'ST':
            for j in range(responses.shape[1]):
                for k in range(responses.shape[2]):
                    for l in range(responses.shape[3]):
                        reg = LinearRegression()
                        reg.fit(x_context[idx, i, :], responses[idx, j, k, l])
                        y_context[:, i, j, k, l] = reg.predict(
                            x_context[:, i, :])
                        y_context_test[:, i, j, k, l] = reg.predict(
                            x_context_test[:, i, :])
        elif args.estimator == 'MT':
            reg = MultiTaskLasso(alpha=0.1)
            reg.fit(x_context[idx, i, :],
                    np.reshape(responses[idx, :, :, :],
                               [covariates.shape[0],
                                np.prod(responses.shape[1:])]))
            y_context[:, i, :, :, :] = np.reshape(
                reg.predict(x_context[:, i, :]),
                [x_context.shape[0], responses.shape[1],
                 responses.shape[2], responses.shape[3]])
            y_context_test[:, i, :, :, :] = np.reshape(
                reg.predict(x_context_test[:, i, :]),
                [x_context_test.shape[0], responses.shape[1],
                 responses.shape[2], responses.shape[3]])
        print('Fixed-effect %d of %d is computed!' % (i + 1, factor))

    x_all = x_context
    responses = np.expand_dims(responses, axis=1).repeat(factor, axis=1)

    ################################## TRAINING #################################

    encoder = Encoder(x_context, y_context, args).to(args.device)
    args.cnn_feature_num = encoder.cnn_feature_num
    decoder = Decoder(x_context, y_context, args).to(args.device)
    model = NP(encoder, decoder, args).to(args.device)

    print('Estimating the Random-effect ...')
    k = 1
    # four training phases with decreasing learning rates (1e-2 down to 1e-5)
    epochs = [int(args.epochs / 4), int(args.epochs / 2),
              int(args.epochs / 5),
              int(args.epochs - args.epochs / 4 - args.epochs / 2 -
                  args.epochs / 5)]
    mini_batch_num = args.batchnum
    batch_size = int(x_context.shape[0] / mini_batch_num)
    model.train()
    for e in range(len(epochs)):
        optimizer = optim.Adam(model.parameters(), lr=10**(-e - 2))
        for j in range(epochs[e]):
            train_loss = 0
            rand_idx = np.random.permutation(x_context.shape[0])
            for i in range(mini_batch_num):
                optimizer.zero_grad()
                idx = rand_idx[i * batch_size:(i + 1) * batch_size]
                y_hat, z_all, z_context, dummy = model(
                    torch.tensor(x_context[idx, :, :], device=args.device),
                    torch.tensor(y_context[idx, :, :, :, :],
                                 device=args.device),
                    torch.tensor(x_all[idx, :, :], device=args.device),
                    torch.tensor(responses[idx, :, :, :, :],
                                 device=args.device))
                loss = np_loss(y_hat,
                               torch.tensor(responses[idx, :, :, :, :],
                                            device=args.device),
                               z_all, z_context)
                loss.backward()
                train_loss += loss.item()
                optimizer.step()
            print('Epoch: %d, Loss:%f, Average Loss:%f'
                  % (k, train_loss, train_loss / responses.shape[0]))
            k += 1

    ################################## Evaluation #################################

    print('Predicting on Test Data ...')
    model.eval()
    # keep dropout active at test time so repeated forward passes (n=15)
    # yield a predictive distribution
    model.apply(apply_dropout_test)
    with torch.no_grad():
        y_hat, z_all, z_context, y_sigma = model(
            torch.tensor(x_context_test, device=args.device),
            torch.tensor(y_context_test, device=args.device), n=15)

    # the extreme-value parameters are only available when test responses are
    # provided; initialise them so the model pickle below is always valid
    EVD_params = None
    abnormal_probs = None
    if args.testrespfile is not None:
        test_loss = np_loss(y_hat[0:test_responses_shape[0], :],
                            torch.tensor(test_responses, device=args.device),
                            z_all, z_context).item()
        print('Average Test Loss:%f' % (test_loss / test_responses_shape[0]))

        RMSE = np.sqrt(
            np.mean((test_responses -
                     y_hat[0:test_responses_shape[0], :].cpu().numpy())**2,
                    axis=0)).squeeze() * mask
        SMSE = RMSE**2 / np.var(test_responses, axis=0).squeeze()
        Rho, pRho = compute_pearsonr(
            test_responses.squeeze(),
            y_hat[0:test_responses_shape[0], :].cpu().numpy().squeeze())
        EXPV = explained_var(
            test_responses.squeeze(),
            y_hat[0:test_responses_shape[0], :].cpu().numpy().squeeze()) * mask
        MSLL = compute_MSLL(
            test_responses.squeeze(),
            y_hat[0:test_responses_shape[0], :].cpu().numpy().squeeze(),
            y_sigma[0:test_responses_shape[0], :].cpu().numpy().squeeze()**2,
            train_mean=test_responses.mean(0),
            train_var=test_responses.var(0)).squeeze() * mask

        # normative probability maps (voxel-wise z-scores) for the test subjects
        NPMs = (test_responses -
                y_hat[0:test_responses_shape[0], :].cpu().numpy()) / (
                    y_sigma[0:test_responses_shape[0], :].cpu().numpy())
        NPMs = NPMs.squeeze()
        NPMs = NPMs * mask
        NPMs = np.nan_to_num(NPMs)

        # extreme-value statistics over the deviation maps
        temp = NPMs.reshape([NPMs.shape[0],
                             NPMs.shape[1] * NPMs.shape[2] * NPMs.shape[3]])
        EVD_params = extreme_value_prob_fit(temp, 0.01)
        abnormal_probs = extreme_value_prob(EVD_params, temp, 0.01)

    ############################## SAVING RESULTS #################################

    print('Saving Results to: %s' % (args.outdir))
    exfile = args.respfile
    y_hat = y_hat.squeeze().cpu().numpy()
    y_hat = response_scaler.inverse_transform(ravel_2D(y_hat))
    y_hat = y_hat[:, mask.flatten()]
    fileio.save(y_hat.T, args.outdir + '/yhat.nii.gz', example=exfile,
                mask=mask)

    ys2 = y_sigma.squeeze().cpu().numpy()
    ys2 = ravel_2D(ys2) * (response_scaler.data_max_ -
                           response_scaler.data_min_)
    ys2 = ys2**2
    ys2 = ys2[:, mask.flatten()]
    fileio.save(ys2.T, args.outdir + '/ys2.nii.gz', example=exfile, mask=mask)

    if args.testrespfile is not None:
        NPMs = ravel_2D(NPMs)[:, mask.flatten()]
        fileio.save(NPMs.T, args.outdir + '/Z.nii.gz', example=exfile,
                    mask=mask)
        fileio.save(Rho.flatten()[mask.flatten()],
                    args.outdir + '/Rho.nii.gz', example=exfile, mask=mask)
        fileio.save(pRho.flatten()[mask.flatten()],
                    args.outdir + '/pRho.nii.gz', example=exfile, mask=mask)
        fileio.save(RMSE.flatten()[mask.flatten()],
                    args.outdir + '/rmse.nii.gz', example=exfile, mask=mask)
        fileio.save(SMSE.flatten()[mask.flatten()],
                    args.outdir + '/smse.nii.gz', example=exfile, mask=mask)
        fileio.save(EXPV.flatten()[mask.flatten()],
                    args.outdir + '/expv.nii.gz', example=exfile, mask=mask)
        fileio.save(MSLL.flatten()[mask.flatten()],
                    args.outdir + '/msll.nii.gz', example=exfile, mask=mask)

    with open(args.outdir + '/model.pkl', 'wb') as handle:
        pickle.dump({'model': model,
                     'covariates_scaler': covariates_scaler,
                     'response_scaler': response_scaler,
                     'EVD_params': EVD_params,
                     'abnormal_probs': abnormal_probs},
                    handle, protocol=pickle.HIGHEST_PROTOCOL)

    ###############################################################################
    print('DONE!')
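# Hypothetical invocation sketch (not part of the original source): estimate()
# above expects an argparse-style namespace; the paths and settings below are
# placeholders, and the attribute names simply mirror what the function reads
# (respfile, covfile, testcovfile, testrespfile, mask, m, estimator, epochs,
# batchnum, device, outdir).
def _example_np_estimate():
    from types import SimpleNamespace
    args = SimpleNamespace(
        respfile='responses.nii.gz',        # placeholder 4D response volume
        covfile='covariates.pkl',           # pickle holding key 'covariates'
        testcovfile='test_covariates.pkl',  # pickle holding key 'test_covariates'
        testrespfile=None,                  # optional test response volume
        mask=None,                          # optional explicit mask volume
        m=10,                               # number of fixed-effect context sets
        estimator='MT',                     # 'ST' (per-voxel) or 'MT' (multi-task)
        epochs=100,
        batchnum=10,
        device='cpu',
        outdir='./results')
    estimate(args)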
def estimate(respfile, covfile, maskfile=None, cvfolds=None,
             testcov=None, testresp=None, alg='gpr', configparam=None,
             saveoutput=True, outputsuffix=None, standardize=True):
    """ Estimate a normative model

    This will estimate a model in one of two settings according to the
    particular parameters specified (see below):

    * under k-fold cross-validation
      required settings: 1) respfile 2) covfile 3) cvfolds>=2
    * estimating a training dataset then applying to a second test dataset
      required settings: 1) respfile 2) covfile 3) testcov 4) testresp
    * estimating on a training dataset then outputting the forward maps
      (mean and se)
      required settings: 1) respfile 2) covfile 3) testcov

    The models are estimated on the basis of data stored on disk in ascii or
    neuroimaging data formats (nifti or cifti). Ascii data should be in
    tab or space delimited format with the number of subjects in rows and the
    number of variables in columns. Neuroimaging data will be reshaped
    into the appropriate format

    Basic usage::

        estimate(respfile, covfile, [extra_arguments])

    where the variables are defined below. Note that either the cvfolds
    parameter or (testcov, testresp) should be specified, but not both.

    :param respfile: response variables for the normative model
    :param covfile: covariates used to predict the response variable
    :param maskfile: mask used to apply to the data (nifti only)
    :param cvfolds: Number of cross-validation folds
    :param testcov: Test covariates
    :param testresp: Test responses
    :param alg: Algorithm for normative model
    :param configparam: Parameters controlling the estimation algorithm
    :param saveoutput: Save the output to disk? Otherwise returned as arrays
    :param outputsuffix: Text string to add to the output filenames

    All outputs are written to disk in the same format as the input. These
    are:

    :outputs: * yhat - predictive mean
              * ys2 - predictive variance
              * Hyp - hyperparameters
              * Z - deviance scores
              * Rho - Pearson correlation between true and predicted responses
              * pRho - parametric p-value for this correlation
              * rmse - root mean squared error between true/predicted responses
              * smse - standardised mean squared error

    The outputsuffix may be useful to estimate multiple normative models in
    the same directory (e.g. for custom cross-validation schemes)
    """

    # load data
    print("Processing data in " + respfile)
    X = fileio.load(covfile)
    Y, maskvol = load_response_vars(respfile, maskfile)
    if len(Y.shape) == 1:
        Y = Y[:, np.newaxis]
    if len(X.shape) == 1:
        X = X[:, np.newaxis]
    Nmod = Y.shape[1]

    if testcov is not None:
        # we have a separate test dataset
        Xte = fileio.load(testcov)
        testids = range(X.shape[0], X.shape[0] + Xte.shape[0])
        if len(Xte.shape) == 1:
            Xte = Xte[:, np.newaxis]
        if testresp is not None:
            Yte, testmask = load_response_vars(testresp, maskfile)
            if len(Yte.shape) == 1:
                Yte = Yte[:, np.newaxis]
        else:
            sub_te = Xte.shape[0]
            Yte = np.zeros([sub_te, Nmod])

        # Initialise normative model
        nm = norm_init(X, alg=alg, configparam=configparam)

        # treat as a single train-test split
        splits = CustomCV((range(0, X.shape[0]), ), (testids, ))

        Y = np.concatenate((Y, Yte), axis=0)
        X = np.concatenate((X, Xte), axis=0)

        # force the number of cross-validation folds to 1
        if cvfolds is not None and cvfolds != 1:
            print("Ignoring cross-validation specification (test data given)")
        cvfolds = 1
    else:
        # we are running under cross-validation
        splits = KFold(n_splits=cvfolds)
        testids = range(0, X.shape[0])
        # Initialise normative model
        nm = norm_init(X, alg=alg, configparam=configparam)

    # find and remove bad variables from the response variables
    # note: the covariates are assumed to have already been checked
    nz = np.where(np.bitwise_and(np.isfinite(Y).any(axis=0),
                                 np.var(Y, axis=0) != 0))[0]

    # run cross-validation loop
    Yhat = np.zeros_like(Y)
    S2 = np.zeros_like(Y)
    Hyp = np.zeros((Nmod, nm.n_params, cvfolds))
    Z = np.zeros_like(Y)
    nlZ = np.zeros((Nmod, cvfolds))

    for fold, (tr, te) in enumerate(splits.split(X)):

        # standardize responses and covariates, ignoring invalid entries
        iy, jy = np.ix_(tr, nz)
        mY = np.mean(Y[iy, jy], axis=0)
        sY = np.std(Y[iy, jy], axis=0)
        if standardize:
            Yz = np.zeros_like(Y)
            Yz[:, nz] = (Y[:, nz] - mY) / sY
            mX = np.mean(X[tr, :], axis=0)
            sX = np.std(X[tr, :], axis=0)
            Xz = (X - mX) / sX
        else:
            Yz = Y
            Xz = X

        # estimate the models for all subjects
        for i in range(0, len(nz)):
            print("Estimating model ", i + 1, "of", len(nz))
            try:
                nm = norm_init(Xz[tr, :], Yz[tr, nz[i]], alg=alg,
                               configparam=configparam)
                Hyp[nz[i], :, fold] = nm.estimate(Xz[tr, :], Yz[tr, nz[i]])

                if alg == 'hbr':
                    if nm.configs['new_site']:
                        # the test/train division is done internally
                        nm.estimate_on_new_sites(Xz[te, :], Y[te, nz[i]])
                        yhat, s2 = nm.predict_on_new_sites(Xz[te, :])
                    else:
                        yhat, s2 = nm.predict(Xz[te, :], Xz[tr, :],
                                              Yz[tr, nz[i]],
                                              Hyp[nz[i], :, fold])
                else:
                    yhat, s2 = nm.predict(Xz[te, :], Xz[tr, :], Yz[tr, nz[i]],
                                          Hyp[nz[i], :, fold])

                if standardize:
                    Yhat[te, nz[i]] = yhat * sY[i] + mY[i]
                    S2[te, nz[i]] = s2 * sY[i]**2
                else:
                    Yhat[te, nz[i]] = yhat
                    S2[te, nz[i]] = s2

                nlZ[nz[i], fold] = nm.neg_log_lik
                if testcov is None:
                    Z[te, nz[i]] = (Y[te, nz[i]] - Yhat[te, nz[i]]) / \
                        np.sqrt(S2[te, nz[i]])
                else:
                    if testresp is not None:
                        Z[te, nz[i]] = (Y[te, nz[i]] - Yhat[te, nz[i]]) / \
                            np.sqrt(S2[te, nz[i]])

            except Exception as e:
                exc_type, exc_obj, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                print("Model ", i + 1, "of", len(nz),
                      "FAILED!..skipping and writing NaN to outputs")
                print("Exception:")
                print(e)
                print(exc_type, fname, exc_tb.tb_lineno)
                Hyp[nz[i], :, fold] = float('nan')
                Yhat[te, nz[i]] = float('nan')
                S2[te, nz[i]] = float('nan')
                nlZ[nz[i], fold] = float('nan')
                if testcov is None:
                    Z[te, nz[i]] = float('nan')
                else:
                    if testresp is not None:
                        Z[te, nz[i]] = float('nan')

    # compute performance metrics
    if testcov is None:
        MSE = np.mean((Y[testids, :] - Yhat[testids, :])**2, axis=0)
        RMSE = np.sqrt(MSE)
        # for the remaining variables, we need to ignore zero variances
        SMSE = np.zeros_like(MSE)
        Rho = np.zeros(Nmod)
        pRho = np.ones(Nmod)
        EXPV = np.zeros(Nmod)
        MSLL = np.zeros(Nmod)
        iy, jy = np.ix_(testids, nz)  # ids for tested samples with nonzero values
        SMSE[nz] = MSE[nz] / np.var(Y[iy, jy], axis=0)
        Rho[nz], pRho[nz] = compute_pearsonr(Y[iy, jy], Yhat[iy, jy])
        EXPV[nz] = explained_var(Y[iy, jy], Yhat[iy, jy])
        MSLL[nz] = compute_MSLL(Y[iy, jy], Yhat[iy, jy], S2[iy, jy],
                                mY.reshape(-1, 1).T,
                                (sY**2).reshape(-1, 1).T)
    else:
        if testresp is not None:
            MSE = np.mean((Y[testids, :] - Yhat[testids, :])**2, axis=0)
            RMSE = np.sqrt(MSE)
            # for the remaining variables, we need to ignore zero variances
            SMSE = np.zeros_like(MSE)
            Rho = np.zeros(Nmod)
            pRho = np.ones(Nmod)
            EXPV = np.zeros(Nmod)
            MSLL = np.zeros(Nmod)
            iy, jy = np.ix_(testids, nz)  # ids for tested samples with nonzero values
            SMSE[nz] = MSE[nz] / np.var(Y[iy, jy], axis=0)
            Rho[nz], pRho[nz] = compute_pearsonr(Y[iy, jy], Yhat[iy, jy])
            EXPV[nz] = explained_var(Y[iy, jy], Yhat[iy, jy])
            MSLL[nz] = compute_MSLL(Y[iy, jy], Yhat[iy, jy], S2[iy, jy],
                                    mY.reshape(-1, 1).T,
                                    (sY**2).reshape(-1, 1).T)

    # Set writing options
    if saveoutput:
        print("Writing output ...")
        if (fileio.file_type(respfile) == 'cifti' or
                fileio.file_type(respfile) == 'nifti'):
            exfile = respfile
        else:
            exfile = None
        if outputsuffix is not None:
            ext = str(outputsuffix) + fileio.file_extension(respfile)
        else:
            ext = fileio.file_extension(respfile)

        # Write output
        if testcov is None:
            fileio.save(Yhat[testids, :], 'yhat' + ext, example=exfile,
                        mask=maskvol)
            fileio.save(S2[testids, :], 'ys2' + ext, example=exfile,
                        mask=maskvol)
            fileio.save(Z[testids, :], 'Z' + ext, example=exfile,
                        mask=maskvol)
            fileio.save(Rho, 'Rho' + ext, example=exfile, mask=maskvol)
            fileio.save(pRho, 'pRho' + ext, example=exfile, mask=maskvol)
            fileio.save(RMSE, 'rmse' + ext, example=exfile, mask=maskvol)
            fileio.save(SMSE, 'smse' + ext, example=exfile, mask=maskvol)
            fileio.save(EXPV, 'expv' + ext, example=exfile, mask=maskvol)
            fileio.save(MSLL, 'msll' + ext, example=exfile, mask=maskvol)
            if cvfolds is None:
                fileio.save(Hyp[:, :, 0], 'Hyp' + ext, example=exfile,
                            mask=maskvol)
            else:
                for fold, _ in enumerate(splits.split(X)):
                    fileio.save(Hyp[:, :, fold].T,
                                'Hyp_' + str(fold + 1) + ext,
                                example=exfile, mask=maskvol)
        else:
            if testresp is None:
                fileio.save(Yhat[testids, :], 'yhat' + ext, example=exfile,
                            mask=maskvol)
                fileio.save(S2[testids, :], 'ys2' + ext, example=exfile,
                            mask=maskvol)
                fileio.save(Hyp[:, :, 0], 'Hyp' + ext, example=exfile,
                            mask=maskvol)
            else:
                fileio.save(Yhat[testids, :], 'yhat' + ext, example=exfile,
                            mask=maskvol)
                fileio.save(S2[testids, :], 'ys2' + ext, example=exfile,
                            mask=maskvol)
                fileio.save(Z[testids, :], 'Z' + ext, example=exfile,
                            mask=maskvol)
                fileio.save(Rho, 'Rho' + ext, example=exfile, mask=maskvol)
                fileio.save(pRho, 'pRho' + ext, example=exfile, mask=maskvol)
                fileio.save(RMSE, 'rmse' + ext, example=exfile, mask=maskvol)
                fileio.save(SMSE, 'smse' + ext, example=exfile, mask=maskvol)
                fileio.save(EXPV, 'expv' + ext, example=exfile, mask=maskvol)
                fileio.save(MSLL, 'msll' + ext, example=exfile, mask=maskvol)
                if cvfolds is None:
                    fileio.save(Hyp[:, :, 0].T, 'Hyp' + ext, example=exfile,
                                mask=maskvol)
                else:
                    for fold, _ in enumerate(splits.split(X)):
                        fileio.save(Hyp[:, :, fold].T,
                                    'Hyp_' + str(fold + 1) + ext,
                                    example=exfile, mask=maskvol)
    else:
        if testcov is None:
            output = (Yhat[testids, :], S2[testids, :], Hyp, Z[testids, :],
                      Rho, pRho, RMSE, SMSE, EXPV, MSLL)
        else:
            if testresp is None:
                output = (Yhat[testids, :], S2[testids, :], Hyp)
            else:
                output = (Yhat[testids, :], S2[testids, :], Hyp,
                          Z[testids, :], Rho, pRho, RMSE, SMSE, EXPV, MSLL)

        return output
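# Hypothetical usage sketch (file names are placeholders): the two estimation
# modes described in the docstring map onto the keyword arguments as follows.
def _example_normative_estimate():
    # 10-fold cross-validation on a single dataset (outputs written to disk)
    estimate('responses.txt', 'covariates.txt', cvfolds=10, alg='gpr')

    # train/test split with a separate test dataset; with saveoutput=False the
    # results are returned as arrays instead of being written to disk
    results = estimate('responses.txt', 'covariates.txt',
                       testcov='test_covariates.txt',
                       testresp='test_responses.txt',
                       alg='gpr', saveoutput=False)
    Yhat, S2, Hyp, Z, Rho, pRho, RMSE, SMSE, EXPV, MSLL = results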