def test_resnet3d_18(resnet3d_test):
    """Test 18."""
    # Exercise the builder under both backend image-data layouts.
    for data_format, input_shape in (
        ('channels_last', (224, 224, 224, 1)),
        ('channels_first', (1, 512, 512, 256)),
    ):
        K.set_image_data_format(data_format)
        resnet3d_test(Resnet3DBuilder.build_resnet_18(input_shape, 2))
def test_resnet3d_18():
    """Build and compile ResNet3D-18 under both image data formats.

    The test passes when construction and compilation raise no exception;
    any backend failure surfaces as an error from ``build_resnet_18`` or
    ``compile`` itself.  (The original ``assert True, "..."`` statements
    could never fail and have been removed as dead code.)
    """
    # channels_last layout: spatial dims first, channel axis last.
    K.set_image_data_format('channels_last')
    model = Resnet3DBuilder.build_resnet_18((224, 224, 224, 1), 2)
    model.compile(loss="categorical_crossentropy", optimizer="sgd")
    # channels_first layout: channel axis first.
    K.set_image_data_format('channels_first')
    model = Resnet3DBuilder.build_resnet_18((1, 512, 512, 256), 2)
    model.compile(loss="categorical_crossentropy", optimizer="sgd")
def create_reg_resnet18_3D(img_x, img_y, ch_n, seq_len, tgt_size):
    """Build a regression model on top of a 3D ResNet-18 backbone.

    Args:
        img_x, img_y: spatial dimensions of each frame.
        ch_n: number of input channels.
        seq_len: sequence (depth) length of the volume.
        tgt_size: class count passed to the classification builder
            (its head is discarded below).

    Returns:
        A Keras ``Model`` mapping a ``(seq_len, img_x, img_y, ch_n)``
        volume to a single linear output.
    """
    resnet18_3D = Resnet3DBuilder.build_resnet_18(
        (seq_len, img_x, img_y, ch_n), tgt_size)
    # Detach the original classification head (legacy Keras layer surgery).
    # NOTE: the original code also created an unused
    # ``Dense(32)(resnet18_3D.output)`` layer here; it never fed the
    # returned model and has been removed as dead code.
    resnet18_3D.layers.pop()
    resnet18_3D.layers[-1].outbound_nodes = []
    resnet18_3D.outputs = [resnet18_3D.layers[-1].output]
    # Attach a small regression head onto the flattened features.
    output = resnet18_3D.get_layer('flatten_1').output
    output = Dense(32, activation='relu')(output)
    output = Dense(1, activation='linear')(output)
    return Model(resnet18_3D.input, output)
# NOTE(review): this chunk begins mid-expression -- the first line below
# closes a constructor call (presumably building ``val_datagen``) whose
# opening lies outside this view.
**init_args['volume_image_data_generator']['val']['init'])
train_vol_loader = NPYDataLoader(
    **init_args['volume_image_data_loader']['train'])
val_vol_loader = NPYDataLoader(**init_args['volume_image_data_loader']['val'])
# Wire the NPY loaders into the generators' flow_from_loader kwargs.
train_iter_args = init_args['volume_image_data_generator']['train'][
    'flow_from_loader']
train_iter_args['volume_image_data_loader'] = train_vol_loader
val_iter_args = init_args['volume_image_data_generator']['val'][
    'flow_from_loader']
val_iter_args['volume_image_data_loader'] = val_vol_loader
image_shape = train_datagen.image_shape
regularization_factor = 1
model = Resnet3DBuilder.build_resnet_18(image_shape, nb_classes,
                                        regularization_factor)
# Resume from previously saved weights before continuing training.
model.load_weights(weights)
compile_args = init_args['model']['compile']
# Replace the configured optimizer with Adam at lr=1e-4.
compile_args['optimizer'] = Adam(lr=1e-4)
model.compile(**compile_args)
model_fit_args = init_args['model']['fit_generator']
model_fit_args['generator'] = train_datagen.flow_from_loader(**train_iter_args)
model_fit_args['validation_data'] = val_datagen.flow_from_loader(
    **val_iter_args)
model_fit_args['callbacks'] = [
    checkpointer, lr_reducer, early_stopper, csv_logger
]
model.fit_generator(**model_fit_args)
# '_ctd' suffix marks a continued-training checkpoint.
model.save('output/resnet18_{}_ctd.h5'.format(title))
# Inspect the opened HDF5 dataset and report its basic properties.
print("Valid hdf5 file in 'read' mode: " + str(hdf5_file))
file_size = os.path.getsize(path_to_hdf5)
# Report file size in GiB (2**30 bytes).
print('Size of hdf5 file: {:.3f} GB'.format(file_size/2.0**30))
num_rows = hdf5_file['input'].shape[0]
print("There are {} images in the dataset.".format(num_rows))
print("The datasets within the HDF5 file are:\n {}".format(list(hdf5_file.values())))
# Per-sample tensor shape stored as an attribute on the 'input' dataset.
input_shape = tuple(list(hdf5_file["input"].attrs["lshape"]))
batch_size = args.batchsize  # Batch size to use
print ("Input shape of tensor = {}".format(input_shape))
from resnet3d import Resnet3DBuilder
model = Resnet3DBuilder.build_resnet_18((64, 64, 64, 1), 1)  # (input tensor shape, number of outputs)
# TensorBoard logging callback.
tb_log = keras.callbacks.TensorBoard(log_dir=TB_LOG_DIR,
                                     histogram_freq=0,
                                     batch_size=batch_size,
                                     write_graph=True,
                                     write_grads=True,
                                     write_images=True,
                                     embeddings_freq=0,
                                     embeddings_layer_names=None,
                                     embeddings_metadata=None)
# Checkpoint on validation loss.
# NOTE(review): this chunk is truncated -- the ModelCheckpoint call below
# continues past the end of this view.
checkpointer = keras.callbacks.ModelCheckpoint(filepath=CHECKPOINT_FILENAME,
                                               monitor="val_loss",
                                               verbose=1,
"""Smoke test: fit ResNet3D-18 on random pseudo-volumetric data."""
import numpy as np

from resnet3d import Resnet3DBuilder

# Fabricate a tiny batch of random volumes with one-hot binary labels.
n_samples, n_classes = 10, 2
X_train = np.random.rand(n_samples, 64, 64, 32, 1)
labels = np.random.randint(0, n_classes, size=[n_samples])
y_train = np.eye(n_classes)[labels]

# Build, compile, and run a single training pass over the batch.
model = Resnet3DBuilder.build_resnet_18((64, 64, 32, 1), n_classes)
model.compile(loss="categorical_crossentropy", optimizer="sgd")
model.fit(X_train, y_train, batch_size=n_samples)