def initialize_parameters():
    """Build the t29 resnet CANDLE benchmark and return its parsed parameters."""
    # Benchmark descriptor; defaults come from 't29_default_model.txt'.
    bench = candle.Benchmark(file_path,
                             't29_default_model.txt',
                             'keras',
                             prog='t29res.py',
                             desc='resnet')
    # Need a pointer to the docs showing what is provided by default.
    extra_defs = [
        {'name': 'connections',
         'default': 1,
         'type': int,
         'help': 'The number of residual connections.'},
        {'name': 'distance',
         'default': 1,
         'type': int,
         'help': 'Residual connection distance between dense layers.'},
    ]
    bench.additional_definitions = extra_defs
    return candle.initialize_parameters(bench)
def initialize_parameters():
    """Build the UNET example benchmark object and return its parsed parameters."""
    bench = unet.UNET(unet.file_path,
                      'unet_params.txt',
                      'keras',
                      prog='unet_example',
                      desc='UNET example')
    # Parse command line / config file into the parameter dictionary.
    return candle.initialize_parameters(bench)
def initialize_parameters():
    """Create the Pilot 3 Benchmark 1 object and return its parsed parameters."""
    bench = bmk.BenchmarkP3B1(
        bmk.file_path,
        'p3b1_default_model.txt',
        'keras',
        prog='p3b1_baseline',
        desc='Multi-task (DNN) for data extraction from clinical reports - Pilot 3 Benchmark 1')
    # Parse defaults plus any command-line overrides.
    return candle.initialize_parameters(bench)
def initialize_parameters():
    """Create the Pilot 3 Benchmark 4 object and return its parsed parameters."""
    # NOTE(review): the class used is BenchmarkP3B3 while the config file,
    # prog name, and description all say p3b4 — confirm against the bmk
    # module whether a BenchmarkP3B4 class was intended here.
    bench = bmk.BenchmarkP3B3(
        bmk.file_path,
        'p3b4_default_model.txt',
        'keras',
        prog='p3b4_baseline',
        desc='Hierarchical Convolutional Attention Networks for data extraction from clinical reports - Pilot 3 Benchmark 4')
    return candle.initialize_parameters(bench)
def initialize_parameters():
    """Build the MNIST MLP example benchmark and return its parsed parameters.

    Returns:
        The parameter dictionary produced by candle.initialize_parameters.
    """
    mnist_common = mnist.MNIST(mnist.file_path,
                               'mnist_params.txt',
                               'keras',
                               prog='mnist_mlp',
                               desc='MNIST example')
    gParameters = candle.initialize_parameters(mnist_common)
    # Bug fix: the original created
    #   CSVLogger('{}/params.log'.format(gParameters))
    # which interpolated the ENTIRE parameter dict into the file path
    # (producing an invalid filename) and then discarded the logger without
    # ever using it. That dead, broken statement is removed; if CSV logging
    # is wanted, construct a CSVLogger at training time with a real output
    # directory taken from the parameters (e.g. an output-dir entry —
    # confirm the key name against the config file).
    return gParameters
def initialize_parameters():
    """Create the Pilot 1 Benchmark 2 classifier benchmark and return its parameters."""
    bench = p1b2.BenchmarkP1B2(p1b2.file_path,
                               'p1b2_default_model.txt',
                               'keras',
                               prog='p1b2_baseline',
                               desc='Train Classifier - Pilot 1 Benchmark 2')
    # Parse defaults plus any command-line overrides.
    return candle.initialize_parameters(bench)
def initialize_parameters():
    """Create the Uno benchmark object and return its parsed parameters."""
    bench = benchmark.BenchmarkUno(
        benchmark.file_path,
        'uno_default_model.txt',
        'keras',
        prog='uno_baseline',
        desc='Build neural network based models to predict tumor response to single and paired drugs.')
    # Parse defaults plus any command-line overrides.
    return candle.initialize_parameters(bench)
def initialize_parameters():
    """Build the t29 resnet benchmark (inference variant) and return its parameters."""
    # Benchmark descriptor; defaults come from 't29_default_model.txt'.
    bench = candle.Benchmark(file_path,
                             't29_default_model.txt',
                             'keras',
                             prog='t29res.py',
                             desc='resnet')
    # Need a pointer to the docs showing what is provided by default.
    extra_defs = [
        {'name': 'connections',
         'default': 1,
         'type': int,
         'help': 'The number of residual connections.'},
        {'name': 'distance',
         'default': 1,
         'type': int,
         'help': 'Residual connection distance between dense layers.'},
        {'name': 'model',
         'default': 'model.json',
         'type': str,
         'help': 'Name of json model description file.'},
        {'name': 'weights',
         'default': 'model.h5',
         'type': str,
         'help': 'Name of h5 weights file.'},
        {'name': 'n_pred',
         'default': 1,
         'type': int,
         'help': 'Number of predictions to do on each sample.'},
    ]
    bench.additional_definitions = extra_defs
    return candle.initialize_parameters(bench)
def initialize_parameters():
    """Create the Combo benchmark object and return its parsed parameters."""
    bench = combo.BenchmarkCombo(
        combo.file_path,
        'combo_default_model.txt',
        'keras',
        prog='combo_baseline',
        desc='Build neural network based models to predict tumor response to drug pairs.')
    # Parse defaults plus any command-line overrides.
    return candle.initialize_parameters(bench)
def initialize_parameters():
    """Create the unoMT (pytorch) benchmark object and return its parsed parameters."""
    bench = unoMT.unoMTBk(
        unoMT.file_path,
        'unoMT_default_model.txt',
        'pytorch',
        prog='unoMT_baseline',
        desc='Multi-task combined single and combo drug prediction for cross-study data - Pilot 1')
    print("Created unoMT benchmark")
    params = candle.initialize_parameters(bench)
    print("Parameters initialized")
    return params
def initialize_parameters():
    """Build the P2B1 benchmark, parse parameters, validate the Keras backend,
    and configure the image data format.

    Side effects: prints all parameters, sets the KERAS_BACKEND environment
    variable, reloads the Keras backend module K, and sets the image data
    format to 'channels_last'. Exits the process if the configured backend is
    neither 'theano' nor 'tensorflow'.

    Returns the parsed parameter dictionary (GP).
    """
    # Build benchmark object
    p2b1Bmk = p2b1.BenchmarkP2B1(
        p2b1.file_path,
        'p2b1_default_model.txt',
        'keras',
        prog='p2b1_baseline',
        desc='Train Molecular Frame Autoencoder - Pilot 2 Benchmark 1')
    # Initialize parameters
    GP = candle.initialize_parameters(p2b1Bmk)
    #p2b1.logger.info('Params: {}'.format(gParameters))
    # Echo every parameter so runs are self-describing in the console log.
    print('\nTraining parameters:')
    for key in sorted(GP):
        print("\t%s: %s" % (key, GP[key]))
    # print json.dumps(GP, indent=4, skipkeys=True, sort_keys=True)
    # Only the two supported Keras backends are accepted; anything else is a
    # hard configuration error.
    if GP['backend'] != 'theano' and GP['backend'] != 'tensorflow':
        sys.exit('Invalid backend selected: %s' % GP['backend'])
    # The env var must be set BEFORE reloading K so Keras picks up the
    # requested backend on re-import.
    os.environ['KERAS_BACKEND'] = GP['backend']
    # NOTE(review): relies on a reload() name being in scope at module level
    # (builtin in py2; importlib.reload in py3) — confirm the file's imports.
    reload(K)
    '''
    if GP['backend'] == 'theano':
        K.set_image_dim_ordering('th')
    elif GP['backend'] == 'tensorflow':
        K.set_image_dim_ordering('tf')
    '''
    # Force a single image layout regardless of backend (replaces the
    # disabled per-backend dim-ordering logic above).
    K.set_image_data_format('channels_last')
    #"th" format means that the convolutional kernels will have the shape (depth, input_depth, rows, cols)
    #"tf" format means that the convolutional kernels will have the shape (rows, cols, input_depth, depth)
    print("Image data format: ", K.image_data_format())
    # print "Image ordering: ", K.image_dim_ordering()
    return GP