# NOTE(review): whitespace-flattened fragment — the tail of a reference-loss routine
# (its enclosing `def` starts outside this view) fused with a script entry point.
# It pairs each "R_no_atoms" validation image with its "A_no_atoms" counterpart,
# center-crops both to outL x outL, scales by 1/4294967295 (2**32 - 1, i.e. presumably
# 32-bit integer images — TODO confirm bit depth), and records the per-pair MSE.
# `generate_referance_loss` is misspelled ("referance"); its definition is off-screen,
# so the call site is left untouched here to avoid breaking the name binding.
refDir = valR[i].replace("R_no_atoms", "A_no_atoms") Y = np.array(np.array( io.imread( valR[i])[int(inL / 2 - outL / 2):int(inL / 2 + outL / 2), int(inL / 2 - outL / 2):int(inL / 2 + outL / 2)]), dtype=np.dtype('float32')) / 4294967295 ref = np.array(np.array( io.imread(refDir)[int(inL / 2 - outL / 2):int(inL / 2 + outL / 2), int(inL / 2 - outL / 2):int(inL / 2 + outL / 2)]), dtype=np.dtype('float32')) / 4294967295 valRloss[i] = np.average((ref - Y)**2) np.save('referenceMSEvalR.npy', valRloss) referenceMSE = np.mean( np.concatenate((trainAloss, trainRloss, valAloss, valRloss))) print('reference MSE is ' + str(referenceMSE)) return referenceMSE if __name__ == '__main__': from unet import unet_model model = unet_model(476, 192) model = initialize_model(model) from preprocessing import prepare_datasets trainList, valList, testList = prepare_datasets(476, 442, 804, 0.2) referenceMSE = generate_referance_loss(476, 192, trainList, valList)
# NOTE(review): whitespace-flattened fragment — a revised variant of the same
# reference-loss tail plus script entry point. Differences visible here: the function
# is now spelled `generate_reference_loss`, and `prepare_datasets` is called with
# (804, 0.2) instead of the old four-argument form (the old call is kept as a
# commented-out line). The enclosing `def` starts outside this view, so the code is
# left byte-identical. Division by 4294967295 (2**32 - 1) presumably normalizes
# 32-bit integer images to [0, 1] — TODO confirm bit depth against the dataset.
valR = [s for s in valList if "R_no_atoms" in s] valRloss = np.empty(len(valR)) for i in tqdm(range(len(valR))): refDir = valR[i].replace("R_no_atoms", "A_no_atoms") Y = np.array(np.array(io.imread(valR[i])[int(inL / 2 - outL / 2):int(inL / 2 + outL / 2), int(inL / 2 - outL / 2):int(inL / 2 + outL / 2)]), dtype=np.dtype('float32')) / 4294967295 ref = np.array(np.array(io.imread(refDir)[int(inL / 2 - outL / 2):int(inL / 2 + outL / 2), int(inL / 2 - outL / 2):int(inL / 2 + outL / 2)]), dtype=np.dtype('float32')) / 4294967295 valRloss[i] = np.average((ref - Y) ** 2) np.save('referenceMSEvalR.npy', valRloss) referenceMSE = np.mean(np.concatenate((trainAloss, trainRloss, valAloss, valRloss))) print('reference MSE is ' + str(referenceMSE)) return referenceMSE if __name__ == '__main__': from unet import unet_model model = unet_model(476, 192) model = initialize_model(model) from preprocessing import prepare_datasets # trainList, valList, testList = prepare_datasets(476, 442, 804, 0.2) trainList, valList, testList = prepare_datasets(804, 0.2) referenceMSE = generate_reference_loss(476, 192, trainList, valList)
# NOTE(review): whitespace-flattened fragment cut at both ends — the closing strings of
# a `get_parser()` argument definition, then a training entry point: parse args, build
# the crop mask, load the dataset splits, construct/initialize the U-Net, and compile
# with MSE loss under either SGD (fixed schedule) or Adam (configurable lr). The first
# `model.compile` occurrence is commented out, so only one compile runs. Code tokens
# left byte-identical; only the "referance" comment typo is corrected.
'use this flag if you don\'t have the same dataset structure as the original: ' 'A_no_atoms, R_no_atoms, A_with_atoms, R_with_atoms') return parser if __name__ == '__main__': ## params parser = get_parser() args = parser.parse_args() outL = 2 * args.maskR # Output size mask = generate_mask(args.inL, args.maskR) ## get data trainList, valList, testList = prepare_datasets(args.inL, args.centerVer, args.centerHor, 0.2) ## build model K.clear_session() model = unet_model(args.inL, outL) model.summary() # display model summary model, epochNum, trainLoss, valLoss = initialize_model(model) if args.SGD: opt = SGD(lr=1e-2, momentum=0.9, decay=1e-4 / args.max_epochs) else: opt = Adam(lr=args.learning_rate) #model.compile(optimizer=opt, loss='mse') model.compile(optimizer=opt, loss='mse') ## calculate reference loss
'use this flag if you don\'t have the same dataset structure as the original: ' 'A_no_atoms, R_no_atoms, A_with_atoms, R_with_atoms') return parser if __name__ == '__main__': ## params parser = get_parser() args = parser.parse_args() outL = 2 * args.maskR # Output size mask = generate_mask(args.inL, args.maskR) ## get data trainList, valList, testList = prepare_datasets(args, 0.2) ## build model K.clear_session() model = unet_model(args.inL, outL) model.summary() # display model summary model, epochNum, trainLoss, valLoss = initialize_model(model) if args.SGD: opt = SGD(lr=1e-2, momentum=0.9, decay=1e-4 / args.max_epochs) else: opt = Adam(lr=args.learning_rate) model.compile(optimizer=opt, loss='mse') model.compile(optimizer=opt, loss='mse') ## calculate referance loss if not args.skip_reference_comparison: