def train(dataset, model_name, learning_rate, weight_decay, num_epochs,
          max_patience, batch_size, optimizer, savepath, train_path,
          valid_path, test_path, crop_size=(224, 224),
          in_shape=(3, None, None), n_classes=5, gtSet=None, void_class=[4],
          w_balance=None, weights_file=False, show_model=False,
          plot_hist=True, train_model=True):
    """Train, validate and test a semantic-segmentation model (polyp data).

    Builds the model named by ``model_name`` (only ``'fcn8'`` is supported),
    compiles it with a flattened categorical cross-entropy that ignores the
    void classes, trains it with Jaccard-based early stopping / checkpointing,
    and finally evaluates the best checkpoint on the test set, saving the
    training history and test metrics to ``savepath + 'history.pickle'``.

    Parameters largely mirror an experiment config: paths to the train /
    valid / test image folders, optimization hyper-parameters, and data
    options. ``void_class`` lists label ids excluded from loss and metrics
    (NOTE: the mutable default ``[4]`` is never mutated here, so it is safe).
    ``gtSet`` selects an alternative ground-truth mask folder suffix.

    Fixes vs. previous revision: pickle files are opened in binary mode
    (required by ``pickle`` on Python 3), a no-op ``model.output`` statement
    was removed, and the local ``mask_floder`` typo was corrected.
    """
    # Remove void classes from number of classes
    n_classes = n_classes - len(void_class)

    # Mask folder (for different polyp ground-truth sets)
    if gtSet is not None:
        mask_folder = 'masks' + str(gtSet)
    else:
        mask_folder = 'masks'

    # TODO: Get the number of images directly from data loader
    n_images_train = 30  # 547
    n_images_val = 20  # 183
    n_images_test = 20  # 182

    # Normalization mean and std computed on training set for RGB pixel
    # values.  The dataset-statistics branch is deliberately disabled
    # (``if False``); plain 1/255 rescaling is used instead.
    print ('\n > Computing mean and std for normalization...')
    if False:
        rgb_mean, rgb_std = compute_mean_std(
            os.path.join(train_path, 'images'),
            os.path.join(train_path, mask_folder),
            n_classes)
        rescale = None
    else:
        rgb_mean = None
        rgb_std = None
        rescale = 1/255.
    print ('Mean: ' + str(rgb_mean))
    print ('Std: ' + str(rgb_std))

    # Compute class balance weights (optional, method chosen by w_balance)
    if w_balance is not None:
        class_balance_weights = compute_class_balance(
            masks_path=train_path + mask_folder,
            n_classes=n_classes,
            method=w_balance,
            void_labels=void_class)
        print ('Class balance weights: ' + str(class_balance_weights))
    else:
        class_balance_weights = None

    # Build model
    print ('\n > Building model (' + model_name + ')...')
    if model_name == 'fcn8':
        model = build_fcn8(in_shape, l2_reg=weight_decay, nclasses=n_classes,
                           weights_file=weights_file, deconv='deconv')
    else:
        raise ValueError('Unknown model')

    # Create the optimizer
    print ('\n > Creating optimizer ({}) with lr ({})...'.format(optimizer,
                                                                 learning_rate))
    if optimizer == 'rmsprop':
        opt = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-8, clipnorm=10)
    else:
        raise ValueError('Unknown optimizer')

    # Compile model (loss ignores void labels, optionally class-weighted)
    print ('\n > Compiling model...')
    model.compile(loss=cce_flatt(void_class, class_balance_weights),
                  optimizer=opt)

    # Show model structure
    if show_model:
        model.summary()
        plot(model, to_file=savepath + 'model.png')

    # Create the data generators
    print ('\n > Reading training set...')
    dg_tr = ImageDataGenerator(crop_size=crop_size,          # Crop the image to a fixed size
                               featurewise_center=False,     # Substract mean - dataset
                               samplewise_center=False,      # Substract mean - sample
                               featurewise_std_normalization=False,  # Divide std - dataset
                               samplewise_std_normalization=False,   # Divide std - sample
                               rgb_mean=rgb_mean,
                               rgb_std=rgb_std,
                               gcn=False,                    # Global contrast normalization
                               zca_whitening=False,          # Apply ZCA whitening
                               rotation_range=180,           # Rnd rotation degrees 0-180
                               width_shift_range=0.0,        # Rnd horizontal shift
                               height_shift_range=0.0,       # Rnd vertical shift
                               shear_range=0.5,              # Shear in radians
                               zoom_range=0.1,               # Zoom
                               channel_shift_range=0.,       # Channel shifts
                               fill_mode='constant',         # Fill mode
                               cval=0.,                      # Void image value
                               void_label=void_class[0],     # Void class value
                               horizontal_flip=True,         # Rnd horizontal flip
                               vertical_flip=True,           # Rnd vertical flip
                               rescale=rescale,              # Rescaling factor
                               spline_warp=False,            # Enable elastic deformation
                               warp_sigma=10,                # Elastic deformation sigma
                               warp_grid_size=3              # Elastic deformation gridSize
                               )
    train_gen = dg_tr.flow_from_directory(train_path + 'images',
                                          batch_size=batch_size,
                                          gt_directory=train_path + mask_folder,
                                          target_size=crop_size,
                                          class_mode='seg_map',
                                          classes=n_classes,
                                          # save_to_dir=savepath,  # Save DA
                                          save_prefix='data_augmentation',
                                          save_format='png')

    print ('\n > Reading validation set...')
    dg_va = ImageDataGenerator(rgb_mean=rgb_mean, rgb_std=rgb_std,
                               rescale=rescale)
    valid_gen = dg_va.flow_from_directory(valid_path + 'images',
                                          batch_size=1,
                                          gt_directory=valid_path + mask_folder,
                                          target_size=None,
                                          class_mode='seg_map',
                                          classes=n_classes)

    print ('\n > Reading testing set...')
    dg_ts = ImageDataGenerator(rgb_mean=rgb_mean, rgb_std=rgb_std,
                               rescale=rescale)
    test_gen = dg_ts.flow_from_directory(test_path + 'images',
                                         batch_size=1,
                                         gt_directory=test_path + mask_folder,
                                         target_size=None,
                                         class_mode='seg_map',
                                         classes=n_classes,
                                         shuffle=False)

    # Define the jaccard validation callback
    eval_model = Evaluate_model(n_classes=n_classes,
                                void_label=void_class[0],
                                save_path=savepath,
                                valid_gen=valid_gen,
                                valid_epoch_length=n_images_val,
                                valid_metrics=['val_loss', 'val_jaccard',
                                               'val_acc',
                                               'val_jaccard_perclass'])

    # Define early stopping callbacks
    early_stop_jac = EarlyStopping(monitor='val_jaccard', mode='max',
                                   patience=max_patience, verbose=0)
    # NOTE(review): the per-class early-stopping callbacks below are built
    # but never added to the callback list `cb` — confirm whether intended.
    early_stop_jac_class = []
    for i in range(n_classes):
        early_stop_jac_class += [EarlyStopping(monitor=str(i) + '_val_jacc_percl',
                                               mode='max',
                                               patience=max_patience,
                                               verbose=0)]

    # Define model saving callbacks (best overall + best per class)
    checkp_jac = ModelCheckpoint(filepath=savepath + "weights.hdf5",
                                 verbose=0, monitor='val_jaccard', mode='max',
                                 save_best_only=True, save_weights_only=True)
    checkp_jac_class = []
    for i in range(n_classes):
        checkp_jac_class += [ModelCheckpoint(
            filepath=savepath + "weights" + str(i) + ".hdf5",
            verbose=0, monitor=str(i) + '_val_jacc_percl', mode='max',
            save_best_only=True, save_weights_only=True)]

    # Train the model
    if train_model:
        print('\n > Training the model...')
        cb = [eval_model, early_stop_jac, checkp_jac] + checkp_jac_class
        hist = model.fit_generator(train_gen,
                                   samples_per_epoch=n_images_train,
                                   nb_epoch=num_epochs,
                                   callbacks=cb)

    # Compute test metrics using the best (checkpointed) weights
    print('\n > Testing the model...')
    model.load_weights(savepath + "weights.hdf5")
    color_map = [
        (255/255., 0, 0),                # Background
        (192/255., 192/255., 128/255.),  # Polyp
        (128/255., 64/255., 128/255.),   # Lumen
        (0, 0, 255/255.),                # Specularity
        (0, 255/255., 0),                #
        (192/255., 128/255., 128/255.),  #
        (64/255., 64/255., 128/255.),    #
    ]
    test_metrics = compute_metrics(model, test_gen, n_images_test, n_classes,
                                   metrics=['test_loss', 'test_jaccard',
                                            'test_acc',
                                            'test_jaccard_perclass'],
                                   color_map=color_map, tag="test",
                                   void_label=void_class[0],
                                   out_images_folder=savepath, epoch=0,
                                   save_all_images=True, useCRF=False)
    for k in sorted(test_metrics.keys()):
        print('{}: {}'.format(k, test_metrics[k]))

    if train_model:
        # Save the results (binary mode is required by pickle on Python 3)
        print ("\n > Saving history...")
        with open(savepath + "history.pickle", 'wb') as f:
            pickle.dump([hist.history, test_metrics], f)

    # Load the results
    print ("\n > Loading history...")
    with open(savepath + "history.pickle", 'rb') as f:
        history, test_metrics = pickle.load(f)
    # print (str(test_metrics))

    # Show the trained model history
    if plot_hist:
        print('\n > Show the trained model history...')
        plot_history(history, savepath, n_classes)
# plot data if(not args.nodisplay): for idx in range(25): plt.subplot(5,10,2*idx+1) plt.imshow(x_train[idx,:,:,0]) plt.subplot(5,10,2*idx+2) plt.imshow(y_train[idx,:,:,0]) plt.show() if(not args.nomodel): loss = cce_flatt(void_class, None) metrics = [IoU(n_classes, void_class)] #opt = RMSprop(lr=0.001, clipnorm=10) opt = Nadam(lr=0.002) model = build_fcn8(in_shape, n_classes, 0.) model.compile(loss=loss, metrics=metrics, optimizer=opt) cb = [EarlyStopping(monitor='val_loss', min_delta = 0.0001, patience=2)] model.fit(x_train, y_train, epochs=1000, batch_size=16, callbacks=cb, validation_data=(x_valid,y_valid)) score = model.evaluate(x_test, y_test) #, batch_size=128) y_pred = model.predict(x_test) print(score) for sample in range(y_test.shape[0]): print('sample: ' + str(sample)) print('actual:') print(y_test[sample,:,:,0]) print('predicted:')
def make_one_net_model(self, cf, in_shape, loss, metrics, optimizer):
    """Build, optionally warm-start, and compile a single-network model.

    Dispatches on ``cf.model_name`` to the matching builder function,
    optionally loads pretrained weights from ``cf.weights_file``, compiles
    the Keras model with the given loss/metrics/optimizer and wraps it in a
    ``One_Net_Model``.

    ``cf`` is an experiment configuration object; the attributes read here
    are ``model_name``, ``dataset.n_classes``, ``dataset.n_priors``,
    ``weight_decay``, ``freeze_layers_from``, ``load_imageNet``,
    ``load_pretrained``, ``weights_file``, ``show_model`` and ``savepath``.

    Raises:
        ValueError: if ``cf.model_name`` is not a known model.
    """
    # Create the *Keras* model
    if cf.model_name == 'fcn8':
        model = build_fcn8(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           path_weights=cf.load_imageNet)
    elif cf.model_name == 'unet':
        model = build_unet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           path_weights=None)
    elif cf.model_name == 'segnet_basic':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=True)
    elif cf.model_name == 'segnet_vgg':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=False)
    elif cf.model_name == 'resnetFCN':
        model = build_resnetFCN(in_shape, cf.dataset.n_classes,
                                cf.weight_decay,
                                freeze_layers_from=cf.freeze_layers_from,
                                path_weights=cf.load_imageNet)
    elif cf.model_name == 'densenetFCN':
        model = build_densenetFCN(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  freeze_layers_from=cf.freeze_layers_from,
                                  path_weights=None)
    elif cf.model_name == 'lenet':
        model = build_lenet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'alexNet':
        model = build_alexNet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'vgg16':
        model = build_vgg(in_shape, cf.dataset.n_classes, 16, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'vgg19':
        model = build_vgg(in_shape, cf.dataset.n_classes, 19, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'resnet50':
        model = build_resnet50(in_shape, cf.dataset.n_classes,
                               cf.weight_decay,
                               load_pretrained=cf.load_imageNet,
                               freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'InceptionV3':
        model = build_inceptionV3(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  load_pretrained=cf.load_imageNet,
                                  freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=False)
    elif cf.model_name == 'tiny-yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=True)
    else:
        raise ValueError('Unknown model')

    # Load pretrained weights (by layer name, so architectures may differ)
    if cf.load_pretrained:
        print(' loading model weights from: ' + cf.weights_file + '...')
        model.load_weights(cf.weights_file, by_name=True)

    # Compile model
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)

    # Show model structure
    if cf.show_model:
        model.summary()
        plot(model, to_file=os.path.join(cf.savepath, 'model.png'))

    # Output the model
    print (' Model: ' + cf.model_name)

    # model is a keras model, Model is a class wrapper so that we can have
    # other models (like GANs) made of a pair of keras models, with their
    # own ways to train, test and predict
    return One_Net_Model(model, cf, optimizer)
def make_one_net_model(self, cf, in_shape, loss, metrics, optimizer):
    """Build, optionally warm-start, and compile a single-network model.

    Dispatches on ``cf.model_name`` to the matching builder, optionally
    loads pretrained weights from ``cf.weights_file`` (by layer name),
    compiles the Keras model and wraps it in a ``One_Net_Model``.

    For tiramisu variants the input spatial size must be a multiple of 32
    (2**5, one factor per transition block).

    Fix vs. previous revision: a second, unreachable
    ``elif cf.model_name == 'ssd300'`` branch (dead code duplicating the
    first one) was removed.

    Raises:
        ValueError: for an unknown model name or an invalid tiramisu
            input size.
    """
    # Assertions: tiramisu downsamples 5 times, so rows/cols must divide by 32
    if 'tiramisu' in cf.model_name:
        input_rows, input_cols = cf.target_size_train[0], cf.target_size_train[1]
        multiple = 2 ** 5  # 5 transition blocks
        if input_rows is not None:
            if input_rows % multiple != 0:
                raise ValueError('The number of rows of the input data must '
                                 'be a multiple of {}'.format(multiple))
        if input_cols is not None:
            if input_cols % multiple != 0:
                raise ValueError('The number of columns of the input data '
                                 'must be a multiple of {}'.format(multiple))

    # Create the *Keras* model
    if cf.model_name == 'fcn8':
        model = build_fcn8(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           # path_weights='weights/pascal-fcn8s-dag.mat')
                           path_weights=None)
    elif cf.model_name == 'dilation':
        model = build_dilation(in_shape, cf.dataset.n_classes,
                               cf.weight_decay,
                               freeze_layers_from=cf.freeze_layers_from,
                               # path_weights='weights/pascal-fcn8s-dag.mat')
                               path_weights=None)
    elif cf.model_name == 'segnet_basic':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=True)
    elif cf.model_name == 'segnet_vgg':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=False)
    elif cf.model_name == 'densenetFCN':
        model = build_densenetFCN(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'vgg16':
        model = build_vgg(in_shape, cf.dataset.n_classes, 16, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'vgg19':
        model = build_vgg(in_shape, cf.dataset.n_classes, 19, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'resnet50':
        model = build_resnet50(in_shape, cf.dataset.n_classes,
                               cf.weight_decay,
                               load_pretrained=cf.load_imageNet,
                               freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=False)
    elif cf.model_name == 'tiny-yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=True)
    elif cf.model_name == 'ssd300':
        # +1 class for the SSD background class
        model = build_ssd300(in_shape, cf.dataset.n_classes + 1,
                             cf.weight_decay,
                             load_pretrained=cf.load_imageNet,
                             freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'deeplabV2':
        model = build_deeplabv2(in_shape, nclasses=cf.dataset.n_classes,
                                load_pretrained=cf.load_imageNet,
                                freeze_layers_from=cf.freeze_layers_from,
                                weight_decay=cf.weight_decay)
    elif cf.model_name == 'tiramisu_fc56':
        model = build_tiramisu_fc56(in_shape, cf.dataset.n_classes,
                                    cf.weight_decay, compression=0,
                                    dropout=0.2, nb_filter=48,
                                    freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'tiramisu_fc67':
        model = build_tiramisu_fc67(in_shape, cf.dataset.n_classes,
                                    cf.weight_decay, compression=0,
                                    dropout=0.2, nb_filter=48,
                                    freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'tiramisu_fc103':
        model = build_tiramisu_fc103(in_shape, cf.dataset.n_classes,
                                     cf.weight_decay, compression=0,
                                     dropout=0.2, nb_filter=48,
                                     freeze_layers_from=cf.freeze_layers_from)
    else:
        raise ValueError('Unknown model')

    # Load pretrained weights (by layer name, so architectures may differ)
    if cf.load_pretrained:
        print(' loading model weights from: ' + cf.weights_file + '...')
        model.load_weights(cf.weights_file, by_name=True)

    # Compile model
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)

    # Show model structure
    if cf.show_model:
        model.summary()
        plot(model, to_file=os.path.join(cf.savepath, 'model.png'))

    # Output the model
    print (' Model: ' + cf.model_name)

    # model is a keras model, Model is a class wrapper so that we can have
    # other models (like GANs) made of a pair of keras models, with their
    # own ways to train, test and predict
    return One_Net_Model(model, cf, optimizer)
def make_one_net_model(self, cf, in_shape, loss, metrics, optimizer):
    """Build, optionally warm-start, and compile a single-network model.

    Dispatches on ``cf.model_name`` to the matching builder.  When loading
    pretrained weights from a *different* dataset
    (``cf.different_datasets``), only the frozen layers receive weights:
    a throw-away copy of the model is truncated to the frozen prefix,
    weights are loaded into it by name, and the resulting weight list is
    copied back into the full model.

    Raises:
        ValueError: if ``cf.model_name`` is not a known model.
        TypeError: if ``cf.freeze_layers_from`` is ``'base_model'`` while
            loading weights from a different dataset (a numeric layer id
            is required to know where to cut).
    """
    # Create the *Keras* model
    if cf.model_name == 'fcn8':
        model = build_fcn8(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           path_weights=cf.load_imageNet)
    elif cf.model_name == 'unet':
        model = build_unet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           path_weights=None)
    elif cf.model_name == 'segnet_basic':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             basic=True)
    elif cf.model_name == 'segnet_vgg':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             basic=False)
    elif cf.model_name == 'resnetFCN':
        model = build_resnetFCN(in_shape, cf.dataset.n_classes,
                                cf.weight_decay,
                                freeze_layers_from=cf.freeze_layers_from,
                                path_weights=None)
    elif cf.model_name == 'inceptionFCN':
        model = build_inceptionFCN(in_shape, cf.dataset.n_classes,
                                   cf.weight_decay,
                                   freeze_layers_from=cf.freeze_layers_from,
                                   path_weights=None)
    elif cf.model_name == 'densenet':
        model = build_densenet(in_shape, cf.dataset.n_classes,
                               cf.weight_decay,
                               freeze_layers_from=cf.freeze_layers_from,
                               path_weights=None)
    elif cf.model_name == 'lenet':
        model = build_lenet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'alexNet':
        model = build_alexNet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'vgg16':
        model = build_vgg(in_shape, cf.dataset.n_classes, 16, cf.weight_decay,
                          load_imageNet=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'vgg19':
        model = build_vgg(in_shape, cf.dataset.n_classes, 19, cf.weight_decay,
                          load_imageNet=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'resnet50':
        model = build_resnet50(in_shape, cf.dataset.n_classes,
                               cf.weight_decay,
                               load_imageNet=cf.load_imageNet,
                               freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'InceptionV3':
        model = build_inceptionV3(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  load_imageNet=cf.load_imageNet,
                                  freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_imageNet=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=False)
    elif cf.model_name == 'tiny-yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_imageNet=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=True)
    elif cf.model_name == 'ssd':
        # +1 class for the SSD background class
        model = build_ssd(in_shape, cf.dataset.n_classes+1,
                          cf.dataset.n_priors,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'densenet_segmentation':
        model = build_densenet_segmentation(in_shape, cf.dataset.n_classes,
                                            weight_decay=cf.weight_decay,
                                            freeze_layers_from=cf.freeze_layers_from,
                                            path_weights=cf.load_imageNet)
    else:
        raise ValueError('Unknown model')

    # Load pretrained weights
    if cf.load_pretrained:
        print(' loading model weights from: ' + cf.weights_file + '...')
        # If the weights are from different datasets, load them only into
        # the frozen prefix of the network
        if cf.different_datasets:
            if cf.freeze_layers_from == 'base_model':
                raise TypeError('Please, enter the layer id instead of "base_model"'
                                ' for the freeze_layers_from config parameter')
            # Work on a structural copy so the real model is untouched
            croppedmodel = model_from_json(model.to_json())
            # Remove not frozen layers
            for i in range(len(model.layers[cf.freeze_layers_from:])):
                croppedmodel.layers.pop()
            # Load weights only for the frozen layers
            croppedmodel.load_weights(cf.weights_file, by_name=True)
            model.set_weights(croppedmodel.get_weights())
        else:
            model.load_weights(cf.weights_file, by_name=True)

    # Compile model
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)

    # Show model structure
    if cf.show_model:
        model.summary()
        plot(model, to_file=os.path.join(cf.savepath, 'model.png'))

    # Output the model
    print (' Model: ' + cf.model_name)

    # model is a keras model, Model is a class wrapper so that we can have
    # other models (like GANs) made of a pair of keras models, with their
    # own ways to train, test and predict
    return One_Net_Model(model, cf, optimizer)
def make_one_net_model(self, cf, in_shape, loss, metrics, optimizer):
    """Build, optionally warm-start, and compile a single-network model.

    Dispatches on ``cf.model_name`` to the matching builder function,
    optionally loads pretrained weights from ``cf.weights_file`` (by layer
    name), compiles the Keras model and wraps it in a ``One_Net_Model``.

    Raises:
        ValueError: if ``cf.model_name`` is not a known model.
    """
    # Create the *Keras* model
    if cf.model_name == 'fcn8':
        model = build_fcn8(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           # path_weights='weights/pascal-fcn8s-dag.mat')
                           path_weights=None)
    elif cf.model_name == 'unet':
        model = build_unet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           path_weights=None)
    elif cf.model_name == 'segnet_basic':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=True)
    elif cf.model_name == 'segnet_vgg':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=False)
    elif cf.model_name == 'resnetFCN':
        model = build_resnetFCN(in_shape, cf.dataset.n_classes,
                                cf.weight_decay,
                                freeze_layers_from=cf.freeze_layers_from,
                                path_weights=None)
    elif cf.model_name == 'densenetFCN':
        model = build_densenetFCN(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  freeze_layers_from=cf.freeze_layers_from,
                                  path_weights=None)
    elif cf.model_name == 'lenet':
        model = build_lenet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'alexNet':
        model = build_alexNet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'vgg16':
        model = build_vgg(in_shape, cf.dataset.n_classes, 16, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'vgg19':
        model = build_vgg(in_shape, cf.dataset.n_classes, 19, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'resnet50Keras':
        model = build_resnet50(in_shape, cf.dataset.n_classes,
                               cf.weight_decay,
                               load_pretrained=cf.load_imageNet,
                               freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'resnet18':
        model = ResnetBuilder.build_resnet_18(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'resnet34':
        model = ResnetBuilder.build_resnet_34(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'resnet50':
        model = ResnetBuilder.build_resnet_50(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'resnet101':
        model = ResnetBuilder.build_resnet_101(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'resnet152':
        model = ResnetBuilder.build_resnet_152(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'InceptionV3':
        model = build_inceptionV3(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  load_pretrained=cf.load_imageNet,
                                  freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'densenet':
        model = build_densenet(in_shape, cf.dataset.n_classes,
                               cf.weight_decay)
    elif cf.model_name == 'yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=False)
    elif cf.model_name == 'tiny-yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=True)
    elif cf.model_name == 'ssd':
        model = build_SSD300(in_shape, cf.dataset.n_classes)
        if cf.load_imageNet:
            # Rename last layer to not load pretrained weights
            model.layers[-1].name += '_new'
            model.load_weights('weights/weights_SSD300.hdf5', by_name=True)
    else:
        raise ValueError('Unknown model')

    # Load pretrained weights (by layer name, so architectures may differ)
    if cf.load_pretrained:
        print(' loading model weights from: ' + cf.weights_file)
        #old_name=model.layers[-2].name
        #model.layers[-2].name=model.layers[-2].name+'_replaced'
        model.load_weights(cf.weights_file, by_name=True)
        #model.layers[-2].name=old_name

    # Compile model
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)

    # Show model structure
    if cf.show_model:
        model.summary()
        plot(model, to_file=os.path.join(cf.savepath, 'model.png'))

    # Output the model
    print(' Model: ' + cf.model_name)

    # model is a keras model, Model is a class wrapper so that we can have
    # other models (like GANs) made of a pair of keras models, with their
    # own ways to train, test and predict
    return One_Net_Model(model, cf, optimizer)
def make_one_net_model(self, cf, in_shape, loss, metrics, optimizer):
    """Build, optionally warm-start, and compile a single-network model.

    Dispatches on ``cf.model_name`` to the matching builder.  Weights are
    then loaded from ``cf.weights_file`` either directly
    (``cf.load_pretrained``) or via transfer learning
    (``cf.load_transferlearning``), where the next-to-last layer is
    temporarily renamed so its weights are *not* loaded by name.

    Fix vs. previous revision: the transfer-learning branch was wrapped in
    a bare ``except: pass`` whose only purpose was to tolerate a missing
    ``load_transferlearning`` config attribute — but it also silently
    swallowed genuine weight-loading errors.  The attribute check is now an
    explicit ``getattr`` with a ``False`` default, so real failures surface.

    Raises:
        ValueError: if ``cf.model_name`` is not a known model.
    """
    # Create the *Keras* model.  model_name may be cosmetically overridden
    # (Tiny-YOLT) for the summary print below.
    model_name = cf.model_name
    if cf.model_name == 'fcn8':
        model = build_fcn8(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           path_weights=cf.load_imageNet)
    elif cf.model_name == 'unet':
        model = build_unet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           path_weights=None)
    elif cf.model_name == 'segnet_basic':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=True)
    elif cf.model_name == 'segnet_vgg':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=False)
    elif cf.model_name == 'resnetFCN':
        model = build_resnetFCN(in_shape, cf.dataset.n_classes,
                                cf.weight_decay,
                                freeze_layers_from=cf.freeze_layers_from,
                                path_weights=None)
    elif cf.model_name == 'densenetFCN':
        model = build_densenetFCN(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  freeze_layers_from=cf.freeze_layers_from,
                                  path_weights=None)
    elif cf.model_name == 'densenet_fc':
        # NOTE(review): input shape is hard-coded to (224, 224, 3) here,
        # ignoring in_shape — confirm whether intended.
        model = DenseNetFCN((224, 224, 3), nb_dense_block=5, growth_rate=16,
                            nb_layers_per_block=4,
                            upsampling_type='upsampling',
                            classes=cf.dataset.n_classes)
    elif cf.model_name == 'lenet':
        model = build_lenet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'alexNet':
        model = build_alexNet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'vgg16':
        model = build_vgg(in_shape, cf.dataset.n_classes, 16, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'vgg19':
        model = build_vgg(in_shape, cf.dataset.n_classes, 19, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'resnet50Keras':
        model = build_resnet50(in_shape, cf.dataset.n_classes,
                               cf.weight_decay,
                               load_pretrained=cf.load_imageNet,
                               freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'resnet18':
        model = ResnetBuilder.build_resnet_18(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'resnet34':
        model = ResnetBuilder.build_resnet_34(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'resnet50':
        model = ResnetBuilder.build_resnet_50(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'resnet101':
        model = ResnetBuilder.build_resnet_101(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'resnet152':
        model = ResnetBuilder.build_resnet_152(in_shape, cf.dataset.n_classes)
    elif cf.model_name == 'InceptionV3':
        model = build_inceptionV3(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  load_pretrained=cf.load_imageNet,
                                  freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'densenet':
        model = build_densenet(in_shape, cf.dataset.n_classes,
                               cf.weight_decay)
    elif cf.model_name == 'yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           typeNet='Regular')
    elif cf.model_name == 'tiny-yolo':
        # Optional "look twice" (YOLT) flavor of tiny-yolo
        if hasattr(cf, 'lookTwice'):
            yolt = cf.lookTwice
            if yolt:
                model_name = 'Tiny-YOLT'
        else:
            yolt = False
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           typeNet='Tiny', lookTwice=yolt)
    elif cf.model_name == 'yolt':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           typeNet='YOLT')
    elif cf.model_name == 'ssd':
        # +1 class for the SSD background class
        model = Build_SSD(in_shape, cf.dataset.n_classes + 1,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    else:
        raise ValueError('Unknown model')

    # Load pretrained weights (by layer name, so architectures may differ)
    if cf.load_pretrained:
        print(' loading model weights from: ' + cf.weights_file)
        model.load_weights(cf.weights_file, by_name=True)
    elif getattr(cf, 'load_transferlearning', False):
        # Transfer learning: temporarily rename the next-to-last layer so
        # its weights are skipped by the by-name load, then restore it.
        print(' loading model weights from: ' + cf.weights_file)
        old_name = model.layers[-2].name
        model.layers[-2].name = model.layers[-2].name + '_replaced'
        model.load_weights(cf.weights_file, by_name=True)
        model.layers[-2].name = old_name

    # Compile model
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)

    # Show model structure
    if cf.show_model:
        model.summary()
        plot(model, to_file=os.path.join(cf.savepath, 'model.png'))

    # Output the model
    print(' Model: ' + model_name)

    # model is a keras model, Model is a class wrapper so that we can have
    # other models (like GANs) made of a pair of keras models, with their
    # own ways to train, test and predict
    return One_Net_Model(model, cf, optimizer)
def make_one_net_model(self, cf, in_shape, loss, metrics, optimizer):
    """Build, optionally warm-start, and compile a single-network model.

    Dispatches on ``cf.model_name`` to the matching builder function,
    optionally loads pretrained weights from ``cf.weights_file`` (by layer
    name), compiles the Keras model and wraps it in a ``One_Net_Model``.
    SSD variants additionally run ``ssd_utils.initialize_module`` to set up
    prior-box matching and non-maximum suppression.

    Raises:
        ValueError: if ``cf.model_name`` is not a known model.
    """
    # Create the *Keras* model
    if cf.model_name == 'fcn8':
        model = build_fcn8(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           path_weights=('weights/pascal-fcn8s-dag.mat'
                                         if cf.load_pascalVOC else None))
    elif cf.model_name == 'unet':
        model = build_unet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                           freeze_layers_from=cf.freeze_layers_from,
                           path_weights=None)
    elif cf.model_name == 'segnet_basic':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=True)
    elif cf.model_name == 'segnet_vgg':
        model = build_segnet(in_shape, cf.dataset.n_classes, cf.weight_decay,
                             freeze_layers_from=cf.freeze_layers_from,
                             path_weights=None, basic=False)
    elif cf.model_name == 'resnetFCN':
        model = build_resnetFCN(in_shape, cf.dataset.n_classes,
                                cf.weight_decay,
                                freeze_layers_from=cf.freeze_layers_from,
                                path_weights=None)
    elif cf.model_name == 'densenetFCN':
        model = build_densenetFCN(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  freeze_layers_from=cf.freeze_layers_from,
                                  path_weights=None)
    elif cf.model_name == 'lenet':
        model = build_lenet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'alexNet':
        model = build_alexNet(in_shape, cf.dataset.n_classes, cf.weight_decay)
    elif cf.model_name == 'vgg16':
        model = build_vgg(in_shape, cf.dataset.n_classes, 16, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'vgg19':
        model = build_vgg(in_shape, cf.dataset.n_classes, 19, cf.weight_decay,
                          load_pretrained=cf.load_imageNet,
                          freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'resnet50':
        model = build_resnet50(in_shape, cf.dataset.n_classes,
                               cf.weight_decay,
                               load_pretrained=cf.load_imageNet,
                               freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'InceptionV3':
        model = build_inceptionV3(in_shape, cf.dataset.n_classes,
                                  cf.weight_decay,
                                  load_pretrained=cf.load_imageNet,
                                  freeze_layers_from=cf.freeze_layers_from)
    elif cf.model_name == 'densenet':
        model = build_densenet(
            in_shape, cf.dataset.n_classes,
            layers_in_dense_block=cf.layers_in_dense_block,
            initial_filters=cf.initial_filters,
            growth_rate=cf.growth_rate,
            n_bottleneck=cf.n_bottleneck,
            compression=cf.compression,
            dropout=cf.dropout,
            weight_decay=cf.weight_decay)
    elif cf.model_name == 'yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=False)
    elif cf.model_name == 'tiny-yolo':
        model = build_yolo(in_shape, cf.dataset.n_classes,
                           cf.dataset.n_priors,
                           load_pretrained=cf.load_imageNet,
                           freeze_layers_from=cf.freeze_layers_from,
                           tiny=True)
    elif cf.model_name == 'ssd300':
        # +1 class for the SSD background class
        model = build_ssd300(in_shape, cf.dataset.n_classes + 1)
        # TODO: find best parameters
        ssd_utils.initialize_module(model, in_shape,
                                    cf.dataset.n_classes + 1,
                                    overlap_threshold=0.5, nms_thresh=0.45,
                                    top_k=400)
    elif cf.model_name == 'tiramisu':
        model = build_tiramisu(
            in_shape, cf.dataset.n_classes,
            layers_in_dense_block=cf.layers_in_dense_block,
            initial_filters=cf.initial_filters,
            growth_rate=cf.growth_rate,
            n_bottleneck=cf.n_bottleneck,
            compression=cf.compression,
            dropout=cf.dropout,
            weight_decay=cf.weight_decay)
    elif cf.model_name == 'ssd300_pretrained':
        model = build_ssd300_pretrained(in_shape, cf.dataset.n_classes + 1)
        # TODO: find best parameters
        ssd_utils.initialize_module(model, in_shape,
                                    cf.dataset.n_classes + 1,
                                    overlap_threshold=0.5, nms_thresh=0.45,
                                    top_k=400)
    elif cf.model_name == 'ssd_resnet50':
        model = build_ssd_resnet50(in_shape, cf.dataset.n_classes + 1)
        # TODO: find best parameters
        ssd_utils.initialize_module(model, in_shape,
                                    cf.dataset.n_classes + 1,
                                    overlap_threshold=0.5, nms_thresh=0.45,
                                    top_k=400)
    else:
        raise ValueError('Unknown model')

    # Load pretrained weights (by layer name, so architectures may differ)
    if cf.load_pretrained:
        print(' loading model weights from: ' + cf.weights_file + '...')
        model.load_weights(cf.weights_file, by_name=True)

    # Compile model
    model.compile(loss=loss, metrics=metrics, optimizer=optimizer)

    # Show model structure
    if cf.show_model:
        model.summary()
        plot(model, to_file=os.path.join(cf.savepath, 'model.png'))

    # Output the model
    print(' Model: ' + cf.model_name)

    # model is a keras model, Model is a class wrapper so that we can have
    # other models (like GANs) made of a pair of keras models, with their
    # own ways to train, test and predict
    return One_Net_Model(model, cf, optimizer)