def top_level_task():
    backend.set_image_data_format('channels_first')
    num_classes = 10
    num_samples = 10000

    (x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)

    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    print("shape: ", x_train.shape)

    input_tensor1 = Input(shape=(3, 32, 32), dtype="float32")

    o1 = Conv2D(filters=32, input_shape=(3, 32, 32), kernel_size=(3, 3),
                strides=(1, 1), padding="valid", activation="relu")(input_tensor1)
    o2 = Conv2D(filters=32, input_shape=(3, 32, 32), kernel_size=(3, 3),
                strides=(1, 1), padding="valid", activation="relu")(input_tensor1)
    output_tensor = Concatenate(axis=1)([o1, o2])
    output_tensor = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                           padding="valid", activation="relu")(output_tensor)
    output_tensor = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                                 padding="valid")(output_tensor)
    output_tensor = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                           padding="valid", activation="relu")(output_tensor)
    output_tensor = Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
                           padding="valid", activation="relu")(output_tensor)
    output_tensor = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                                 padding="valid")(output_tensor)
    output_tensor = Flatten()(output_tensor)
    output_tensor = Dense(512, activation="relu")(output_tensor)
    output_tensor = Dense(num_classes)(output_tensor)
    output_tensor = Activation("softmax")(output_tensor)

    model = Model({1: input_tensor1}, output_tensor)

    opt = optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])
    print(model.summary())

    model.fit(x_train, y_train, epochs=1)
def CreateGraph(model, input_quantizers=None,
                default_source_quantizer=cfg.default_source_quantizer,
                debug=False):
    """Create graph."""
    K.set_image_data_format("channels_last")
    (graph, source_quantizer_list) = GenerateGraphFromModel(
        model, input_quantizers, default_source_quantizer)
    GraphAddSingleSourceSingleSink(graph)
    GraphRemoveNodeWithNodeType(graph, "Dropout")
    GraphRemoveNodeWithNodeType(graph, "InputLayer")
    scheduler = list(nx.topological_sort(graph))
    if debug:
        for vertex in scheduler[1:-1]:
            for _, v in graph.edges(vertex):
                if v == SINK:
                    continue
                print("... calling", graph.nodes[v]["layer"][0].name,
                      graph.nodes[v]["type"])
    return (graph, source_quantizer_list)
def test_bn_feature_net_2D(self, include_top, padding, padding_mode, shape,
                           dilated, multires, location, data_format):
    n_features = 3
    n_dense_filters = 200

    # BAD: dilated=True, include_top=False
    # BAD: inputs != None
    with self.cached_session():
        K.set_image_data_format(data_format)
        model = featurenet.bn_feature_net_2D(
            include_top=include_top,
            dilated=dilated,
            input_shape=shape,
            n_features=n_features,
            n_dense_filters=n_dense_filters,
            padding=padding,
            padding_mode=padding_mode,
            multires=multires,
            VGG_mode=multires,
            location=location)

        self.assertEqual(len(model.output_shape), 4)
        output = n_features if include_top else n_dense_filters
        axis = 1 if data_format == 'channels_first' else -1
        self.assertEqual(model.output_shape[axis], output)
def test_bn_feature_net_3D(self, include_top, padding, padding_mode, shape,
                           dilated, multires, location, data_format,
                           temporal, residual, temporal_kernel_size):
    n_features = 3
    n_dense_filters = 200
    n_frames = 5
    # input_shape = (10, 32, 32, 1)

    with self.cached_session():
        K.set_image_data_format(data_format)
        model = featurenet.bn_feature_net_3D(
            include_top=include_top,
            dilated=dilated,
            n_frames=n_frames,
            input_shape=shape,
            n_features=n_features,
            n_dense_filters=n_dense_filters,
            padding=padding,
            padding_mode=padding_mode,
            multires=multires,
            VGG_mode=multires,
            location=location,
            temporal=temporal,
            residual=residual,
            temporal_kernel_size=temporal_kernel_size)

        self.assertEqual(len(model.output_shape), 5 if dilated else 2)
        channel_axis = 1 if data_format == 'channels_first' else -1
        self.assertEqual(model.output_shape[channel_axis], n_features)
def top_level_task():
    backend.set_image_data_format('channels_first')
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print("shape: ", x_train.shape)

    # shape must be a tuple; (784) without the trailing comma is just an int
    input_tensor = Input(shape=(784,))

    output = Dense(512, activation="relu")(input_tensor)
    output = Dense(512, activation="relu")(output)
    output = Dense(num_classes)(output)
    output = Activation("softmax")(output)

    model = Model(inputs={1: input_tensor}, outputs=output)
    print(model.summary())

    opt = optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])

    model.fit(x_train, y_train, batch_size=64, epochs=1)
def test_bn_feature_net_2D_skip(self, data_format):
    receptive_field = 61
    n_features = 3
    n_dense_filters = 300
    input_shape = (256, 256, 1)
    n_skips = 1

    with self.cached_session():
        K.set_image_data_format(data_format)
        axis = 1 if data_format == 'channels_first' else -1

        fgbg_model = featurenet.bn_feature_net_skip_2D(
            receptive_field=receptive_field,
            input_shape=input_shape,
            n_features=n_features,
            n_dense_filters=n_dense_filters,
            n_skips=n_skips,
            last_only=False)

        self.assertIsInstance(fgbg_model.output, list)
        self.assertEqual(len(fgbg_model.output), n_skips + 1)

        model = featurenet.bn_feature_net_skip_2D(
            receptive_field=receptive_field,
            input_shape=input_shape,
            fgbg_model=fgbg_model,
            n_features=n_features,
            n_dense_filters=n_dense_filters,
            n_skips=n_skips,
            last_only=True)

        self.assertEqual(len(model.output_shape), 4)
        self.assertEqual(model.output_shape[axis], n_features)
def with_all_data_formats(func):
    # NOTE: the enclosing decorator and its name are assumptions; only the
    # inner `wrapper` appeared in the source snippet, closing over `func`.
    def wrapper(*args, **kwargs):
        for data_format in {'channels_first', 'channels_last'}:
            K.set_image_data_format(data_format)
            func(*args, **kwargs)
            if K.backend() == 'tensorflow':
                K.clear_session()
                tf.reset_default_graph()
    return wrapper
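# Hedged usage sketch for the decorator above (decorator name assumed, see
# the note there; `K` is the Keras backend imported at module level). The
# decorated function runs once per image data format, with the session
# cleared between runs.
@with_all_data_formats
def check_format():
    print("running with", K.image_data_format())

check_format()  # executes for both 'channels_first' and 'channels_last'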
def __init__(self):
    super().__init__()
    self.ui = Ui_Dialog()
    self.ui.setupUi(self)

    # prepare face detection
    self.detector = MTCNN()
    K.set_image_data_format('channels_first')
    self.FRmodel = faceRecoModel(input_shape=(3, 96, 96))
    self.FRmodel.compile(optimizer='adam', loss=self.triplet_loss,
                         metrics=['accuracy'])
    load_weights_from_FaceNet(self.FRmodel)

    # connect to the database server
    self.myclient = pymongo.MongoClient(
        "mongodb+srv://VuGiaBao:[email protected]/face_recognition?retryWrites=true&w=majority"
    )
    self.mydb = self.myclient["Attendance_checking"]
    self.CSDL_col = self.mydb["CSDL"]
    self.Cham_cong_col = self.mydb["Cham_cong"]

    # load the face database
    self.data = self.prepare_database()

    # create a timer and set its timeout callback
    self.timer = QTimer()
    self.timer.timeout.connect(self.recog_pushdata)

    # set control_bt callback clicked function
    self.ui.Open_bt.clicked.connect(self.controlTimer)
def __init__(self, input_shape=(28, 28, 1), num_class=2,
             loss='sparse_categorical_crossentropy', epochs=200,
             batch_size=100,
             optimizer=Adam(beta_1=0.9, beta_2=0.999, epsilon=1e-08),
             lr=1e-5, min_lr=1e-5, factor=0.25, patience=10, es_patience=20,
             verbose=1, log_path='logs', model_name='SpectralSpatialCNN',
             **kwargs):
    self.input_shape = input_shape
    self.num_class = num_class
    self.loss = loss
    self.epochs = epochs
    self.batch_size = batch_size
    self.optimizer = optimizer
    self.optimizer.lr = lr
    self.lr = lr
    self.min_lr = min_lr
    self.factor = factor
    self.patience = patience
    self.es_patience = es_patience
    self.verbose = verbose
    self.log_path = log_path
    self.model_name = model_name
    self.weights_dir = log_path + '/' + model_name + '_out_weights.h5'
    self.csv_dir = log_path + '/' + model_name + '_out_log.log'
    self.time_log = log_path + '/' + model_name + '_time_log.csv'

    # Use **kwargs to override the default values of the attributes below.
    self.n_subbands = 20
    self.dropout_rate = 0.5
    self.f1_average = 'binary' if self.num_class == 2 else 'macro'
    self.data_format = 'channels_last'
    self.shuffle = False
    self.metrics = 'accuracy'
    self.monitor = 'val_loss'
    self.mode = 'min'
    self.save_best_only = True
    self.save_weight_only = True
    self.seed = 1234
    self.class_balancing = False
    self.class_weight = None
    for k in kwargs.keys():
        self.__setattr__(k, kwargs[k])

    np.random.seed(self.seed)
    tf.random.set_seed(self.seed)
    K.set_image_data_format(self.data_format)
    if not os.path.exists(self.log_path):
        os.makedirs(self.log_path)
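# Hedged usage sketch: extra keywords land in **kwargs and overwrite the
# matching default attribute set in the constructor. The class name
# `SpectralSpatialCNN` is inferred from the default `model_name` and is an
# assumption, not taken from the source.
clf = SpectralSpatialCNN(input_shape=(28, 28, 1), num_class=2,
                         dropout_rate=0.3,  # overrides the 0.5 default
                         seed=42)           # overrides the 1234 default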
def load_digits8x8():
    """Load the 8x8 digits image dataset."""
    data = load_digits()
    data.data = data.data.reshape([data.data.shape[0], 1, 8, 8]) / 16.0
    # Convert NCHW to NHWC; keep the data as a numpy array,
    # or sklearn funcs (GridSearchCV, etc.) WILL fail.
    data.data = np.transpose(data.data, [0, 2, 3, 1])
    K.set_image_data_format("channels_last")
    return data
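# Hedged usage sketch: after the transpose the array is NHWC, matching the
# 'channels_last' format set above.
data = load_digits8x8()
assert data.data.shape[1:] == (8, 8, 1)  # (height, width, channels)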
def test_get_backbone(self, backbone):
    with self.cached_session():
        K.set_image_data_format('channels_last')
        inputs = Input(shape=(256, 256, 3))
        model, output_dict = backbone_utils.get_backbone(
            backbone, inputs, return_dict=True)
        assert isinstance(output_dict, dict)
        assert all(k.startswith('C') for k in output_dict)
        assert isinstance(model, Model)
def CNN_B3(dropout=0.5):
    K.set_image_data_format("channels_first")
    clear_session()

    channels = 17
    timesteps = 87  # upsampled 58 fps

    inputs = Input(shape=(channels, timesteps))
    input_permute = Permute((1, 2), input_shape=(channels, timesteps))(inputs)
    x = Reshape((1, channels, timesteps))(input_permute)

    x = Conv2D(16, (4, 4), activation="linear",
               input_shape=(channels, timesteps), padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(16, (4, 4), activation="linear", padding="same")(x)
    x = BatchNormalization()(x)
    x = AveragePooling2D((2, 2))(x)
    x = Dropout(rate=dropout)(x)

    x = Conv2D(32, (4, 4), activation="linear", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(32, (4, 4), activation="linear", padding="same")(x)
    x = BatchNormalization()(x)
    x = AveragePooling2D((2, 2))(x)
    x = Dropout(rate=dropout)(x)

    x = Conv2D(64, (4, 4), activation="linear", padding="same")(x)
    x = BatchNormalization()(x)
    x = Conv2D(64, (4, 4), activation="linear", padding="same")(x)
    x = BatchNormalization()(x)
    x = AveragePooling2D((2, 2))(x)

    x = Flatten()(x)
    x = Dense(256, activation="relu", name="d1")(x)
    x = Dense(128, activation="relu", name="d2")(x)
    x = Dense(64, activation="relu", name="embedding")(x)
    predictions = Dense(1, activation="sigmoid", name="predictions")(x)

    return Model(inputs=inputs, outputs=predictions)
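# Hedged usage sketch: with channels_first input of shape (17, 87), each conv
# block halves the spatial grid via 2x2 average pooling before the dense head.
# The loss choice below is an assumption implied by the sigmoid output, not
# taken from the source.
model = CNN_B3(dropout=0.5)
model.compile(optimizer="adam", loss="binary_crossentropy")
model.summary()  # ends in Dense(1, sigmoid) for binary prediction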
def face_detector():
    if CNN_DETECTOR:
        K.set_image_data_format('channels_last')
        return MTCNN()
    # fall back to OpenCV's Haar cascade detector
    model = cv.CascadeClassifier(
        str(Path(cv.data.haarcascades) / 'haarcascade_frontalface_default.xml'))
    return model
def test_get_img_shape_on_2d_image():
    n = 5
    channels = 4
    dim1 = 1
    dim2 = 2

    K.set_image_data_format('channels_first')
    assert (n, channels, dim1, dim2) == utils.get_img_shape(
        K.ones(shape=(n, channels, dim1, dim2)))

    K.set_image_data_format('channels_last')
    assert (n, channels, dim1, dim2) == utils.get_img_shape(
        K.ones(shape=(n, dim1, dim2, channels)))
def top_level_task(): backend.set_image_data_format('channels_first') num_classes = 10 (x_train, y_train), (x_test, y_test) = mnist.load_data() x_train = x_train.reshape(60000, 784) x_train = x_train.astype('float32') x_train /= 255 y_train = y_train.astype('int32') y_train = np.reshape(y_train, (len(y_train), 1)) print("shape: ", x_train.shape) input_tensor1 = Input(shape=(784, )) input_tensor2 = Input(shape=(784, )) input_tensor3 = Input(shape=(784, )) input_tensor4 = Input(shape=(784, )) t1 = Dense(512, activation="relu", name="dense1")(input_tensor1) t1 = Dense(512, activation="relu", name="dense12")(t1) model1 = Model(input_tensor1, t1) t2 = Dense(512, activation="relu", name="dense2")(input_tensor2) t2 = Dense(512, activation="relu", name="dense22")(t2) model2 = Model(input_tensor2, t2) t3 = Dense(512, activation="relu", name="dense3")(input_tensor3) t3 = Dense(512, activation="relu", name="dense33")(t3) model3 = Model(input_tensor3, t3) t4 = Dense(512, activation="relu", name="dense4")(input_tensor4) t4 = Dense(512, activation="relu", name="dense44")(t4) model4 = Model(input_tensor4, t4) input_tensor1 = Input(shape=(784, )) input_tensor2 = Input(shape=(784, )) t1 = model1(input_tensor1) t2 = model2(input_tensor1) t3 = model3(input_tensor2) t4 = model4(input_tensor2) output = Concatenate(axis=1)([t1, t2, t3, t4]) output = Dense(num_classes)(output) output = Activation("softmax")(output) model = Model({5: input_tensor1, 6: input_tensor2}, output) opt = optimizers.SGD(learning_rate=0.01) model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy', 'sparse_categorical_crossentropy']) print(model.summary()) model.fit([x_train, x_train], y_train, epochs=1)
def get_models(trial_type, nb_classes, samples, use_cpu):
    if use_cpu:
        K.set_image_data_format('channels_last')
    else:
        K.set_image_data_format('channels_first')

    return {
        'EEGNet_fusion': model.Model(
            'EEGNet_fusion', trial_type, [(0, 8), (14, 22), (28, 36)],
            EEGNet_fusion(nb_classes, Samples=samples, cpu=use_cpu),
            multi_branch=True),
        'EEGNet': model.Model(
            'EEGNet', trial_type, [(0, 8)],
            EEGNet(nb_classes, Samples=samples, cpu=use_cpu)),
        'ShallowConvNet': model.Model(
            'ShallowConvNet', trial_type, [(0, 2)],
            ShallowConvNet(nb_classes, Samples=samples, cpu=use_cpu)),
        'DeepConvNet': model.Model(
            'DeepConvNet', trial_type, [(0, 8), (14, 22), (28, 36)],
            DeepConvNet(nb_classes, Samples=samples, cpu=use_cpu)),
    }
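# Hedged usage sketch: the argument values below are placeholders, not taken
# from the source.
models = get_models(trial_type='executed', nb_classes=2, samples=640,
                    use_cpu=True)
eegnet = models['EEGNet']  # wrapped together with its disabled-layer ranges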
def test_trim_padding(self):
    # test 2d image
    img_size = 512
    win_x, win_y = 30, 30
    x_trim = img_size - 2 * win_x
    y_trim = img_size - 2 * win_y

    K.set_image_data_format('channels_last')
    arr = np.zeros((1, img_size, img_size, 1))
    arr_trim = data_utils.trim_padding(arr, win_x, win_y)
    self.assertEqual(arr_trim.shape, (1, x_trim, y_trim, 1))

    # test channels_first
    K.set_image_data_format('channels_first')
    arr = np.zeros((1, 1, img_size, img_size))
    arr_trim = data_utils.trim_padding(arr, win_x, win_y)
    self.assertEqual(arr_trim.shape, (1, 1, x_trim, y_trim))

    # test 3d image stack
    img_size = 256
    frames = 30
    win_x, win_y = 20, 30
    win_z = 2
    x_trim = img_size - 2 * win_x
    y_trim = img_size - 2 * win_y
    z_trim = frames - 2 * win_z

    K.set_image_data_format('channels_last')
    arr = np.zeros((1, frames, img_size, img_size, 1))
    # trim win_z
    arr_trim = data_utils.trim_padding(arr, win_x, win_y, win_z)
    self.assertEqual(arr_trim.shape, (1, z_trim, x_trim, y_trim, 1))
    # don't trim win_z
    arr_trim = data_utils.trim_padding(arr, win_x, win_y)
    self.assertEqual(arr_trim.shape, (1, frames, x_trim, y_trim, 1))

    # test channels_first
    K.set_image_data_format('channels_first')
    arr = np.zeros((1, 1, frames, img_size, img_size))
    # trim win_z
    arr_trim = data_utils.trim_padding(arr, win_x, win_y, win_z)
    self.assertEqual(arr_trim.shape, (1, 1, z_trim, x_trim, y_trim))
    # don't trim win_z
    arr_trim = data_utils.trim_padding(arr, win_x, win_y)
    self.assertEqual(arr_trim.shape, (1, 1, frames, x_trim, y_trim))

    # test bad input
    with self.assertRaises(ValueError):
        small_arr = np.zeros((img_size, img_size, 1))
        data_utils.trim_padding(small_arr, 10, 10)
    with self.assertRaises(ValueError):
        big_arr = np.zeros((1, 1, 30, img_size, img_size, 1))
        data_utils.trim_padding(big_arr, 10, 10)
def test_save_model_output(self):
    temp_dir = self.get_temp_dir()
    batches = 1
    features = 3
    img_w, img_h, frames = 30, 30, 5

    # test channels_last
    K.set_image_data_format('channels_last')
    # test 2D output
    output = np.random.random((batches, img_w, img_h, features))
    io_utils.save_model_output(output, temp_dir, 'test', channel=None)
    # test saving only one channel
    io_utils.save_model_output(output, temp_dir, 'test', channel=1)

    # test 3D output
    output = np.random.random((batches, frames, img_w, img_h, features))
    io_utils.save_model_output(output, temp_dir, 'test', channel=None)
    # test saving only one channel
    io_utils.save_model_output(output, temp_dir, 'test', channel=1)

    # test channels_first 2D
    output = np.random.random((batches, features, img_w, img_h))
    io_utils.save_model_output(output, temp_dir, 'test', channel=None,
                               data_format='channels_first')

    # test channels_first 3D
    output = np.random.random((batches, features, frames, img_w, img_h))
    io_utils.save_model_output(output, temp_dir, 'test', channel=None,
                               data_format='channels_first')

    # test bad channel
    with self.assertRaises(ValueError):
        output = np.random.random((batches, features, img_w, img_h))
        io_utils.save_model_output(output, temp_dir, 'test', channel=-1)
        io_utils.save_model_output(output, temp_dir, 'test',
                                   channel=features + 1)

    # test no output directory
    with self.assertRaises(IOError):
        bad_dir = os.path.join(temp_dir, 'test')
        io_utils.save_model_output(output, bad_dir, 'test', channel=None)
def main():
    # parse args --------------------------------------------------------------
    args = _parse_args()

    # set device --------------------------------------------------------------
    os.environ['CUDA_VISIBLE_DEVICES'] = args.devices

    # set learning phase ------------------------------------------------------
    K.set_learning_phase(0)

    # set data format ---------------------------------------------------------
    if args.devices == '' or args.model == 'mobilenet_v2':
        # note: tensorflow supports b01c pooling on cpu only
        K.set_image_data_format('channels_last')
    else:
        K.set_image_data_format('channels_first')

    # set dtype ---------------------------------------------------------------
    K.set_floatx(args.dtype)

    # load model --------------------------------------------------------------
    model_module = globals()[args.model]
    model_kwargs = {}
    if args.model == 'mobilenet_v2':
        model_kwargs['alpha'] = args.mobilenet_v2_alpha
    model = model_module.get_model(input_type=args.input_type,
                                   input_shape=(args.input_height,
                                                args.input_width),
                                   output_type=args.output_type,
                                   n_classes=args.n_classes,
                                   sampling=False,
                                   **model_kwargs)

    # create frozen graph
    sess = K.get_session()
    out_name = [out.op.name for out in model.outputs]
    frozen_graph = _freeze_session(sess, output_names=out_name)
    dirname = os.path.dirname(args.output_filepath)
    filename = os.path.basename(args.output_filepath)
    assert os.path.splitext(filename)[1] == '.pb'
    tf.train.write_graph(frozen_graph, dirname, filename, as_text=False)

    # store input and output names as json file
    write_json(args.output_filepath + '.json',
               {'input_names': [input.op.name for input in model.inputs],
                'output_names': [output.op.name for output in model.outputs]})
def test_retinanet(self, pooling, panoptic, location, frames,
                   pyramid_levels, data_format):
    num_classes = 3
    norm_method = None

    # not all backbones work with channels_first
    backbone = 'featurenet'

    # TODO: TimeDistributed is incompatible with channels_first
    if frames > 1 and data_format == 'channels_first':
        return

    with self.cached_session():
        K.set_image_data_format(data_format)
        if data_format == 'channels_first':
            axis = 1
            input_shape = (1, 32, 32)
        else:
            axis = -1
            input_shape = (32, 32, 1)

        num_semantic_classes = [3, 4]
        if frames > 1:
            # TODO: 3D and semantic heads is not implemented.
            num_semantic_classes = []

        model = RetinaNet(
            backbone=backbone,
            num_classes=num_classes,
            input_shape=input_shape,
            norm_method=norm_method,
            location=location,
            pooling=pooling,
            panoptic=panoptic,
            frames_per_batch=frames,
            num_semantic_heads=len(num_semantic_classes),
            num_semantic_classes=num_semantic_classes,
            backbone_levels=['C3', 'C4', 'C5'],
            pyramid_levels=pyramid_levels,
        )

        expected_size = 2 + panoptic * len(num_semantic_classes)
        self.assertIsInstance(model.output_shape, list)
        self.assertEqual(len(model.output_shape), expected_size)
        self.assertEqual(model.output_shape[0][-1], 4)
        self.assertEqual(model.output_shape[1][-1], num_classes)
        if panoptic:
            for i, n in enumerate(num_semantic_classes):
                self.assertEqual(model.output_shape[i + 2][axis], n)
def test_inner_distance_transform_movie(self):
    mask_stack = np.array(_generate_test_masks())
    unique = np.zeros(mask_stack.shape)

    for i, mask in enumerate(_generate_test_masks()):
        unique[i] = label(mask)

    K.set_image_data_format('channels_last')

    bins = None
    distance = transform_utils.inner_distance_transform_movie(unique, bins=bins)
    self.assertEqual(np.expand_dims(distance, axis=-1).shape, unique.shape)

    bins = 3
    distance = transform_utils.inner_distance_transform_movie(unique, bins=bins)
    distance = np.expand_dims(distance, axis=-1)
    self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
    self.assertEqual(distance.shape, unique.shape)

    bins = 4
    distance = transform_utils.inner_distance_transform_movie(unique, bins=bins)
    distance = np.expand_dims(distance, axis=-1)
    self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
    self.assertEqual(distance.shape, unique.shape)

    K.set_image_data_format('channels_first')
    unique = np.rollaxis(unique, -1, 1)

    bins = None
    distance = transform_utils.inner_distance_transform_movie(unique, bins=bins)
    self.assertEqual(np.expand_dims(distance, axis=1).shape, unique.shape)

    bins = 3
    distance = transform_utils.inner_distance_transform_movie(unique, bins=bins)
    distance = np.expand_dims(distance, axis=1)
    self.assertAllEqual(np.unique(distance), np.array([0, 1, 2]))
    self.assertEqual(distance.shape, unique.shape)

    bins = 4
    distance = transform_utils.inner_distance_transform_movie(unique, bins=bins)
    distance = np.expand_dims(distance, axis=1)
    self.assertAllEqual(np.unique(distance), np.array([0, 1, 2, 3]))
    self.assertEqual(distance.shape, unique.shape)
def IntinialDefine(self):
    print("Function IntinialDefine")
    # K.set_image_dim_ordering("th")
    K.set_image_data_format('channels_first')

    global img_size
    global label_num
    global LR
    global smooth
    global imageIndex
    global OutputPath
    global SubOutputPath_No
    global SubOutputPath_Yes
    # initial folder path and file path for this project
    # global BRAT2019_DATA_PATH_HGG
    global WEIGHTS_FULL_BEST_FILE_PATH
    global WEIGHTS_CORE_BEST_FILE_PATH
    global WEIGHTS_ET_BEST_FILE_PATH

    OutputPath = 'data/'
    SubOutputPath_Yes = 'yes/'
    SubOutputPath_No = 'no/'
    if not os.path.isdir(OutputPath):
        os.makedirs(OutputPath)
    if not os.path.isdir(OutputPath + SubOutputPath_Yes):
        os.makedirs(OutputPath + SubOutputPath_Yes)
    if not os.path.isdir(OutputPath + SubOutputPath_No):
        os.makedirs(OutputPath + SubOutputPath_No)

    BRAT2019_DATA_PATH_HGG = "projectClone\\MICCAI_BraTS2020_TrainingData\\"  # HGG\\
    WEIGHTS_FULL_BEST_FILE_PATH = "BraTSDataModel\\weighsts\\weights-full-best.h5"
    WEIGHTS_CORE_BEST_FILE_PATH = "BraTSDataModel\\weighsts\\weights-core-best.h5"
    WEIGHTS_ET_BEST_FILE_PATH = "BraTSDataModel\\weighsts\\weights-ET-best.h5"
    print("WEIGHTS_FULL_BEST_FILE_PATH=", WEIGHTS_FULL_BEST_FILE_PATH)

    imageIndex = 3
    img_size = 240  # original img size is 240*240
    smooth = 0.005
    num_of_aug = 2
    num_epoch = 30
    pul_seq = 'Flair'
    sharp = False  # sharpen filter
    LR = 1e-4
    num_of_patch = 4  # must be a square number
    label_num = 5  # 1 = necrosis+NET, 2 = tumor core, 3 = original, 4 = ET, 5 = complete tumor
def __init__(self, transfer_learning, run_type, subject_id, selected_model,
             models):
    self.DISABLED_LAYERS = {
        'EEGNet_Fusion': [(0, 8), (14, 22), (28, 36)],
        'EEGNet': [(0, 8)],
        'ShallowConvNet': [(0, 2)],
        'DeepConvNet': [(0, 15)]
    }
    self.transfer_learning = transfer_learning
    self.run_type = run_type
    self.subject_id = subject_id
    self.selected_model = selected_model
    self.selected_model_name = selected_model.get_name()
    self.models = models
    self.tl_file_name = self.get_tl_file_name(self.selected_model_name)
    K.set_image_data_format('channels_last')
def run_model(args):
    if args.lms:
        tf.config.experimental.set_lms_enabled(True)

    image_dim = args.image_size
    opt = tf.keras.optimizers.RMSprop()

    if hvd:
        # Horovod: pin GPU to be used to process local rank (one GPU per process)
        gpus = tf.config.list_physical_devices('GPU')
        tf.config.experimental.set_memory_growth(gpus[hvd.local_rank()], True)
        tf.config.set_visible_devices(gpus[hvd.local_rank()], 'GPU')
        # Horovod: add Horovod DistributedOptimizer.
        opt = hvd.DistributedOptimizer(opt)
        steps_per_epoch = max(1, args.steps // hvd.size())
        experimental_run_tf_function = False
    else:
        steps_per_epoch = args.steps
        experimental_run_tf_function = True

    if args.channels_last:
        K.set_image_data_format('channels_last')
        input_shape = (image_dim, image_dim, 3)
    else:
        K.set_image_data_format('channels_first')
        input_shape = (3, image_dim, image_dim)

    num_classes = 15
    batch_size = args.batch_size

    model_class = model_choices.get(args.model)
    model = model_class(weights=None, include_top=True,
                        input_shape=input_shape, classes=num_classes)
    model.compile(optimizer=opt, loss='categorical_crossentropy',
                  experimental_run_tf_function=experimental_run_tf_function)

    random_generator = random_image_generator(batch_size, num_classes,
                                              input_shape)
    model.fit(random_generator, steps_per_epoch=steps_per_epoch,
              verbose=1 if not hvd or hvd.rank() == 0 else 0,
              epochs=args.epochs, callbacks=get_callbacks(args))
def test_get_featurenet3d_backbone(self, data_format):
    backbone = 'featurenet3d'
    input_shape = (40, 256, 256, 3)
    inputs = Input(shape=input_shape)
    with self.cached_session():
        K.set_image_data_format(data_format)
        model, output_dict = backbone_utils.get_backbone(
            backbone, inputs, return_dict=True)
        assert isinstance(output_dict, dict)
        assert all(k.startswith('C') for k in output_dict)
        assert isinstance(model, Model)

        # No imagenet weights for featurenet backbone
        with self.assertRaises(ValueError):
            backbone_utils.get_backbone(backbone, inputs, use_imagenet=True)
def initialise_tf(experiment_number):
    hyper_parameters = EXPERIMENT_HYPER_PARAMETERS[experiment_number]

    os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true"
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
    """
    0 = all messages are logged (default behavior)
    1 = INFO messages are not printed
    2 = INFO and WARNING messages are not printed
    3 = INFO, WARNING, and ERROR messages are not printed
    """

    # Set which GPUs will be visible.
    gpus = "".join([str(_) for _ in hyper_parameters["visible_gpus"]])
    # TODO needs to be moved to above tf import
    os.environ["CUDA_VISIBLE_DEVICES"] = gpus

    # Set this to the number of physical CPU cores in the system.
    # This will greatly speed up ICP and asynchronous data loading for PyTorch.
    os.environ["OMP_NUM_THREADS"] = str(int(mp.cpu_count() / 2))

    if hyper_parameters["enable_xla"]:
        os.environ["TF_XLA_FLAGS"] = "--tf_xla_cpu_global_jit"

    # The imports must be performed after setting the environment variables
    # for them to take effect.
    import tensorflow as tf
    from tensorflow.keras.mixed_precision import (
        experimental as mixed_precision,
    )
    import tensorflow.keras.backend as K

    if hyper_parameters["enable_xla"]:
        tf.config.optimizer.set_jit(True)
        print("Enabled XLA (Accelerated Linear Algebra)")

    if hyper_parameters["enable_amp"]:
        policy = mixed_precision.Policy("mixed_float16")
        mixed_precision.set_policy(policy)
        print("Enabled AMP (Automatic Mixed Precision)")

    # Set the Keras image data format.
    K.set_image_data_format(hyper_parameters["image_data_format"])
def test_panopticnet(self, pooling, location, frames_per_batch,
                     data_format, upsample_type, pyramid_levels):
    norm_method = None

    # not all backbones work with channels_first
    backbone = 'featurenet'

    # TODO: PanopticNet fails with channels_first and frames_per_batch > 1
    if frames_per_batch > 1 and data_format == 'channels_first':
        return

    with self.cached_session():
        K.set_image_data_format(data_format)
        if data_format == 'channels_first':
            axis = 1
            input_shape = (1, 32, 32)
        else:
            axis = -1
            input_shape = (32, 32, 1)

        num_semantic_classes = [1, 3]

        # temporal_mode=None,
        # lite=False,
        # interpolation='bilinear',
        model = PanopticNet(
            backbone=backbone,
            input_shape=input_shape,
            frames_per_batch=frames_per_batch,
            pyramid_levels=pyramid_levels,
            norm_method=norm_method,
            location=location,
            pooling=pooling,
            upsample_type=upsample_type,
            num_semantic_classes=num_semantic_classes,
            use_imagenet=False,
        )

        self.assertIsInstance(model.output_shape, list)
        self.assertEqual(len(model.output_shape), len(num_semantic_classes))
        for i, s in enumerate(num_semantic_classes):
            self.assertEqual(model.output_shape[i][axis], s)
def train(self):
    print("[INFO] Training stage...")
    K.set_image_data_format("channels_last")

    x_train, x_test, y_train, y_test = self.preprocess()

    model = self.base_model()
    model.compile(loss=keras_batch_hard_triplet_loss,
                  optimizer=tf.keras.optimizers.Adam(1e-4))
    # model.compile(loss=tf.keras.losses.CosineSimilarity(),
    #               optimizer=tf.keras.optimizers.Adam(1e-4))
    # model.compile(loss=triplet_loss, optimizer=Adam(lr=0.0001))
    # Uses 'dummy' embeddings + dummy gt labels. Will be removed as soon as
    # loaded, to free memory
    model.summary()

    filepath = "../../checkpoints/checkpoint_%s_ep50_BS%d.hdf5" % (
        self.data_dir, self.batch_size)
    # checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1,
    #                              save_best_only=False, period=50)
    # callbacks_list = [checkpoint]

    # assumes square images: both spatial dims use x_train.shape[1]
    x_train = np.reshape(
        x_train, (len(x_train), x_train.shape[1], x_train.shape[1], 3))

    H = model.fit(
        x_train, y_train,
        batch_size=self.batch_size,
        epochs=self.epochs,
        validation_data=(x_test, y_test),
        # callbacks=callbacks_list
    )

    print("[INFO] Plotting the loss...")
    plt.figure(figsize=(8, 8))
    plt.plot(H.history['loss'], label='training loss')
    plt.plot(H.history['val_loss'], label='validation loss')
    plt.legend()
    plt.title('Train/validation loss')
    plt.savefig("../../plotting_results/%s (%d epochs) loss.jpg" %
                (time, self.epochs))
    # plt.show()

    # model = load_model('checkpoints/checkpoint_%s_ep50_BS%d.hdf5' % (self.data_dir, self.batch_size),
    #                    custom_objects={'keras_batch_hard_triplet_loss': keras_batch_hard_triplet_loss})
    testing_embeddings = self.base_model()
    model.save("%s_model.h5" % self.data_name)
def test_vgg19():
    for data_format in ['channels_first', 'channels_last']:
        K.set_image_data_format(data_format)
        if K.image_data_format() == 'channels_first':
            x = Input(shape=(3, 500, 500))
            pool1_shape = (None, 64, 250, 250)
            pool2_shape = (None, 128, 125, 125)
            pool3_shape = (None, 256, 63, 63)
            pool4_shape = (None, 512, 32, 32)
            drop7_shape = (None, 4096, 16, 16)
            conv1_weight = -0.35009676
        else:
            x = Input(shape=(500, 500, 3))
            pool1_shape = (None, 250, 250, 64)
            pool2_shape = (None, 125, 125, 128)
            pool3_shape = (None, 63, 63, 256)
            pool4_shape = (None, 32, 32, 512)
            drop7_shape = (None, 16, 16, 4096)
            conv1_weight = 0.429471

        encoder = VGG19(x, weights='imagenet', trainable=False)
        feat_pyramid = encoder.outputs

        assert len(feat_pyramid) == 5
        assert K.int_shape(feat_pyramid[0]) == drop7_shape
        assert K.int_shape(feat_pyramid[1]) == pool4_shape
        assert K.int_shape(feat_pyramid[2]) == pool3_shape
        assert K.int_shape(feat_pyramid[3]) == pool2_shape
        assert K.int_shape(feat_pyramid[4]) == pool1_shape

        for layer in encoder.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is False
                weights = K.eval(layer.weights[0])
                assert np.allclose(weights[0, 0, 0, 0], conv1_weight)

        encoder_from_scratch = VGG19(x, weights=None, trainable=True)
        for layer in encoder_from_scratch.layers:
            if layer.name == 'block1_conv1':
                assert layer.trainable is True
                weights = K.eval(layer.weights[0])
                assert not np.allclose(weights[0, 0, 0, 0], conv1_weight)
def test_pixelwise_transform_2d(self):
    with self.cached_session():
        K.set_image_data_format('channels_last')

        # test single edge class
        for img in _generate_test_masks():
            img = label(img)
            img = np.squeeze(img)

            pw_img = transform_utils.pixelwise_transform(
                img, data_format=None, separate_edge_classes=False)
            pw_img_dil = transform_utils.pixelwise_transform(
                img, dilation_radius=1, data_format='channels_last',
                separate_edge_classes=False)

            self.assertEqual(pw_img.shape[-1], 3)
            self.assertEqual(pw_img_dil.shape[-1], 3)
            assert np.all(np.equal(pw_img[..., 0] + pw_img[..., 1], img > 0))
            self.assertGreater(
                pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(),
                pw_img[..., 0].sum() + pw_img[..., 1].sum())

        # test separate edge classes
        for img in _generate_test_masks():
            img = label(img)
            img = np.squeeze(img)

            pw_img = transform_utils.pixelwise_transform(
                img, data_format=None, separate_edge_classes=True)
            pw_img_dil = transform_utils.pixelwise_transform(
                img, dilation_radius=1, data_format='channels_last',
                separate_edge_classes=True)

            self.assertEqual(pw_img.shape[-1], 4)
            self.assertEqual(pw_img_dil.shape[-1], 4)
            assert np.all(np.equal(
                pw_img[..., 0] + pw_img[..., 1] + pw_img[..., 2], img > 0))
            self.assertGreater(
                pw_img_dil[..., 0].sum() + pw_img_dil[..., 1].sum(),
                pw_img[..., 0].sum() + pw_img[..., 1].sum())