def basic_model_properties(self, cf, variable_input_size):
    """Define the network input shape, training loss and metric list.

    Dispatches on ``cf.dataset.class_mode``:

    * ``'categorical'``  -- classification (cross-entropy + accuracy).
    * ``'detection'``    -- yolo/tiny-yolo (YOLOLoss) or ssd300 (MultiboxLoss).
    * ``'segmentation'`` -- cce_flatt loss + IoU metric.

    Args:
        cf: experiment configuration; presumably the framework's config
            object -- must expose ``dataset`` (``class_mode``, ``n_channels``,
            ``n_classes``, ``priors``, ``void_class``, ``cb_weights``),
            ``target_size_train`` and ``model_name``.
        variable_input_size: segmentation only -- when truthy the spatial
            dimensions of the input shape are left as ``None``.

    Returns:
        Tuple ``(in_shape, loss, metrics)``.

    Raises:
        NotImplementedError: detection model other than yolo/tiny-yolo/ssd300.
        ValueError: unknown ``cf.dataset.class_mode``.
    """
    mode = cf.dataset.class_mode

    if mode == 'categorical':
        size = cf.target_size_train
        # Channel axis position depends on the backend dim ordering.
        if K.image_dim_ordering() == 'th':
            in_shape = (cf.dataset.n_channels, size[0], size[1])
        else:
            in_shape = (size[0], size[1], cf.dataset.n_channels)
        return in_shape, 'categorical_crossentropy', ['accuracy']

    if mode == 'detection':
        # Different detection nets ship their own losses and metrics.
        if cf.model_name in ('yolo', 'tiny-yolo'):
            in_shape = (cf.dataset.n_channels,
                        cf.target_size_train[0],
                        cf.target_size_train[1])
            loss = YOLOLoss(in_shape, cf.dataset.n_classes, cf.dataset.priors)
            metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes,
                                   cf.dataset.priors)]
            return in_shape, loss, metrics
        if cf.model_name == 'ssd300':
            in_shape = (cf.target_size_train[0],
                        cf.target_size_train[1],
                        cf.dataset.n_channels)
            loss = MultiboxLoss(cf.dataset.n_classes,
                                neg_pos_ratio=2.0).compute_loss
            metrics = None  # TODO: Add metrics for SSD
            return in_shape, loss, metrics
        raise NotImplementedError

    if mode == 'segmentation':
        # Spatial dims may be left open so the net accepts any input size.
        if variable_input_size:
            spatial = (None, None)
        else:
            spatial = (cf.target_size_train[0], cf.target_size_train[1])
        if K.image_dim_ordering() == 'th':
            in_shape = (cf.dataset.n_channels,) + spatial
        else:
            in_shape = spatial + (cf.dataset.n_channels,)
        loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
        metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]
        return in_shape, loss, metrics

    raise ValueError('Unknown problem type')
def basic_model_properties(self, cf, variable_input_size):
    """Define the input size, loss and metrics for the configured problem.

    Dispatches on ``cf.dataset.class_mode`` ('categorical', 'detection',
    'segmentation') and, for detection, on ``cf.model_name``.

    Args:
        cf: experiment configuration; must expose ``dataset``
            (``class_mode``, ``n_channels``, ``n_classes``, ``priors``,
            ``void_class``, ``cb_weights``), ``target_size_train`` and
            ``model_name``.
        variable_input_size: segmentation only -- when truthy the spatial
            dims of the input shape are left as ``None``.

    Returns:
        Tuple ``(in_shape, loss, metrics)``.

    Raises:
        ValueError: unknown problem type, or unknown detection model name.
    """
    if cf.dataset.class_mode == 'categorical':
        if K.image_dim_ordering() == 'th':
            in_shape = (cf.dataset.n_channels,
                        cf.target_size_train[0], cf.target_size_train[1])
        else:
            in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                        cf.dataset.n_channels)
        loss = 'categorical_crossentropy'
        metrics = ['accuracy']
    elif cf.dataset.class_mode == 'detection':
        if cf.model_name in ['yolo', 'tiny-yolo']:
            in_shape = (cf.dataset.n_channels,
                        cf.target_size_train[0], cf.target_size_train[1])
            loss = YOLOLoss(in_shape, cf.dataset.n_classes, cf.dataset.priors)
            metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes,
                                   cf.dataset.priors)]
        elif cf.model_name in ['ssd300', 'ssd300_pretrained', 'ssd_resnet50']:
            # TODO: in_shape ok for ssd?
            in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                        cf.dataset.n_channels)
            # n_classes + 1: background gets its own label (id 0).
            # TODO: extract config parameters from MultiboxLoss
            mboxloss = MultiboxLoss(cf.dataset.n_classes + 1,
                                    alpha=1.0,
                                    neg_pos_ratio=2.0,
                                    background_label_id=0,
                                    negatives_for_hard=100.0)
            loss = mboxloss.compute_loss
            metrics = []  # TODO: add mAP metric
        else:
            # BUGFIX: previously an unrecognised detection model fell
            # through with in_shape/loss/metrics unbound, so the final
            # return raised UnboundLocalError instead of a clear error.
            raise ValueError('Unknown detection model: ' + str(cf.model_name))
    elif cf.dataset.class_mode == 'segmentation':
        if K.image_dim_ordering() == 'th':
            if variable_input_size:
                in_shape = (cf.dataset.n_channels, None, None)
            else:
                in_shape = (cf.dataset.n_channels,
                            cf.target_size_train[0], cf.target_size_train[1])
        else:
            if variable_input_size:
                in_shape = (None, None, cf.dataset.n_channels)
            else:
                in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                            cf.dataset.n_channels)
        loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
        metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]
    else:
        raise ValueError('Unknown problem type')
    return in_shape, loss, metrics
def basic_model_properties(self, cf, variable_input_size):
    """Return ``(in_shape, loss, metrics)`` for the configured problem.

    Dispatches on ``cf.dataset.class_mode`` ('categorical', 'detection',
    'segmentation') and, for detection, on ``cf.model_name``
    ('yolo'/'tiny-yolo' or 'ssd').

    Args:
        cf: experiment configuration object (exposes ``dataset``,
            ``target_size_train`` and ``model_name``).
        variable_input_size: segmentation only -- when truthy the spatial
            dims of the input shape are left as ``None``.

    Raises:
        ValueError: unknown detection model name, or unknown problem type.
    """
    mode = cf.dataset.class_mode

    if mode == 'categorical':
        size = cf.target_size_train
        if K.image_dim_ordering() == 'th':
            in_shape = (cf.dataset.n_channels, size[0], size[1])
        else:
            in_shape = (size[0], size[1], cf.dataset.n_channels)
        return in_shape, 'categorical_crossentropy', ['accuracy']

    if mode == 'detection':
        if cf.model_name in ('yolo', 'tiny-yolo'):
            in_shape = (cf.dataset.n_channels,
                        cf.target_size_train[0], cf.target_size_train[1])
            loss = YOLOLoss(in_shape, cf.dataset.n_classes, cf.dataset.priors)
            metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes,
                                   cf.dataset.priors)]
            return in_shape, loss, metrics
        if cf.model_name == 'ssd':
            size = cf.target_size_train
            if K.image_dim_ordering() == 'th':
                in_shape = (cf.dataset.n_channels, size[0], size[1])
            else:
                in_shape = (size[0], size[1], cf.dataset.n_channels)
            # n_classes + 1 to include the background class.
            loss = SSDLoss(in_shape, cf.dataset.n_classes + 1,
                           cf.dataset.priors)
            return in_shape, loss, []  # no SSD metrics wired up yet
        raise ValueError('Unknown model')

    if mode == 'segmentation':
        if variable_input_size:
            spatial = (None, None)
        else:
            spatial = (cf.target_size_train[0], cf.target_size_train[1])
        if K.image_dim_ordering() == 'th':
            in_shape = (cf.dataset.n_channels,) + spatial
        else:
            in_shape = spatial + (cf.dataset.n_channels,)
        loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
        return in_shape, loss, [IoU(cf.dataset.n_classes,
                                    cf.dataset.void_class)]

    raise ValueError('Unknown problem type')
def basic_model_properties(self, cf, variable_input_size):
    """Define the input size, loss and metrics for the configured problem.

    Detection models are matched by substring ("yolo" or "ssd" in
    ``cf.model_name``), so variants such as 'tiny-yolo' are covered.

    Args:
        cf: experiment configuration; must expose ``dataset``
            (``class_mode``, ``n_channels``, ``n_classes``, ``priors``,
            ``void_class``, ``cb_weights``), ``target_size_train`` and
            ``model_name``.
        variable_input_size: segmentation only -- when truthy the spatial
            dims of the input shape are left as ``None``.

    Returns:
        Tuple ``(in_shape, loss, metrics)``.

    Raises:
        ValueError: unknown detection model name, or unknown problem type.
    """
    if cf.dataset.class_mode == 'categorical':
        if K.image_dim_ordering() == 'th':
            in_shape = (cf.dataset.n_channels,
                        cf.target_size_train[0], cf.target_size_train[1])
        else:
            in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                        cf.dataset.n_channels)
        loss = 'categorical_crossentropy'
        metrics = ['accuracy']
    elif cf.dataset.class_mode == 'detection':
        # TODO detection: check model, different detection nets may have
        # different losses and metrics.
        if "yolo" in cf.model_name:
            in_shape = (cf.dataset.n_channels,
                        cf.target_size_train[0], cf.target_size_train[1])
            loss = YOLOLoss(in_shape, cf.dataset.n_classes, cf.dataset.priors)
            metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes,
                                   cf.dataset.priors)]
        elif "ssd" in cf.model_name:
            in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                        cf.dataset.n_channels,)
            loss = SSDLoss(cf.dataset.n_classes)
            metrics = [SSDMetrics()]
        else:
            # BUGFIX: without this branch an unrecognised detection model
            # left in_shape/loss/metrics unbound and the final return
            # raised UnboundLocalError instead of a clear error.
            raise ValueError('Unknown detection model: ' + str(cf.model_name))
    elif cf.dataset.class_mode == 'segmentation':
        if K.image_dim_ordering() == 'th':
            if variable_input_size:
                in_shape = (cf.dataset.n_channels, None, None)
            else:
                in_shape = (cf.dataset.n_channels,
                            cf.target_size_train[0], cf.target_size_train[1])
        else:
            if variable_input_size:
                in_shape = (None, None, cf.dataset.n_channels)
            else:
                in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                            cf.dataset.n_channels)
        loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
        metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]
    else:
        raise ValueError('Unknown problem type')
    return in_shape, loss, metrics
def basic_model_properties(self, cf, variable_input_size):
    """Pick input shape, loss and metrics for the configured problem type.

    For detection, ``cf.model_name == 'ssd'`` selects the MultiboxLoss
    setup; every other detection model is treated as a YOLO variant.

    Args:
        cf: experiment configuration object (exposes ``dataset``,
            ``target_size_train`` and ``model_name``).
        variable_input_size: segmentation only -- when truthy the spatial
            dims of the input shape are left as ``None``.

    Returns:
        Tuple ``(in_shape, loss, metrics)``.

    Raises:
        ValueError: unknown ``cf.dataset.class_mode``.
    """
    mode = cf.dataset.class_mode

    if mode == 'categorical':
        size = cf.target_size_train
        if K.image_dim_ordering() == 'th':
            in_shape = (cf.dataset.n_channels, size[0], size[1])
        else:
            in_shape = (size[0], size[1], cf.dataset.n_channels)
        return in_shape, 'categorical_crossentropy', ['accuracy']

    if mode == 'detection':
        if cf.model_name == 'ssd':
            in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                        cf.dataset.n_channels)
            loss = MultiboxLoss(cf.dataset.n_classes,
                                neg_pos_ratio=2.0).compute_loss
            metrics = None  # no metrics wired up for SSD
        else:
            # Anything that is not 'ssd' is treated as a YOLO variant.
            in_shape = (cf.dataset.n_channels,
                        cf.target_size_train[0], cf.target_size_train[1])
            loss = YOLOLoss(in_shape, cf.dataset.n_classes, cf.dataset.priors)
            metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes,
                                   cf.dataset.priors)]
        return in_shape, loss, metrics

    if mode == 'segmentation':
        if variable_input_size:
            spatial = (None, None)
        else:
            spatial = (cf.target_size_train[0], cf.target_size_train[1])
        if K.image_dim_ordering() == 'th':
            in_shape = (cf.dataset.n_channels,) + spatial
        else:
            in_shape = spatial + (cf.dataset.n_channels,)
        loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
        return in_shape, loss, [IoU(cf.dataset.n_classes,
                                    cf.dataset.void_class)]

    raise ValueError('Unknown problem type')
def make_segmentor(self):
    """Build and compile the SegNet-based segmentation network.

    The architecture comes from ``build_segnet`` using ``self.img_shape``
    and ``self.n_classes``; class/void configuration is read from
    ``self.cf.dataset``.  The model is compiled with RMSprop, the
    flattened categorical cross-entropy loss and an IoU metric.

    Returns:
        The compiled segmentation model.
    """
    learning_rate = 1e-04
    # Fixed RMSprop hyper-parameters; clipnorm caps the gradient norm.
    opt = RMSprop(lr=learning_rate, rho=0.9, epsilon=1e-8, clipnorm=10)
    print(' Optimizer segmentor: rmsprop. Lr: {}. Rho: 0.9, epsilon=1e-8, '
          'clipnorm=10'.format(learning_rate))

    net = build_segnet(self.img_shape, self.n_classes, l2_reg=0.,
                       init='glorot_uniform', path_weights=None,
                       freeze_layers_from=None, use_unpool=False,
                       basic=False)

    dataset_cfg = self.cf.dataset
    net.compile(loss=cce_flatt(dataset_cfg.void_class,
                               dataset_cfg.cb_weights),
                metrics=[IoU(dataset_cfg.n_classes, dataset_cfg.void_class)],
                optimizer=opt)
    return net
x_test -= x_train_mean x_train /= (x_train_std + 1e-7) x_test /= (x_train_std + 1e-7) # plot data if(not args.nodisplay): for idx in range(25): plt.subplot(5,10,2*idx+1) plt.imshow(x_train[idx,:,:,0]) plt.subplot(5,10,2*idx+2) plt.imshow(y_train[idx,:,:,0]) plt.show() if(not args.nomodel): loss = cce_flatt(void_class, None) metrics = [IoU(n_classes, void_class)] #opt = RMSprop(lr=0.001, clipnorm=10) opt = Nadam(lr=0.002) model = build_fcn8(in_shape, n_classes, 0.) model.compile(loss=loss, metrics=metrics, optimizer=opt) cb = [EarlyStopping(monitor='val_loss', min_delta = 0.0001, patience=2)] model.fit(x_train, y_train, epochs=1000, batch_size=16, callbacks=cb, validation_data=(x_valid,y_valid)) score = model.evaluate(x_test, y_test) #, batch_size=128) y_pred = model.predict(x_test) print(score) for sample in range(y_test.shape[0]):
def basic_model_properties(self, cf, variable_input_size):
    """Define the input size, loss and metrics for the configured problem.

    Detection supports YOLO variants (matched by "yolo" in
    ``cf.model_name``) and ``'ssd'``; segmentation uses the flattened
    cross-entropy loss with an IoU metric.

    Args:
        cf: experiment configuration; must expose ``dataset``
            (``class_mode``, ``n_channels``, ``n_classes``, ``priors``,
            ``void_class``, ``cb_weights``), ``target_size_train`` and
            ``model_name``.
        variable_input_size: segmentation only -- when truthy the spatial
            dims of the input shape are left as ``None``.

    Returns:
        Tuple ``(in_shape, loss, metrics)``.

    Raises:
        ValueError: unknown detection model name, or unknown problem type.
    """
    if cf.dataset.class_mode == 'categorical':
        if K.image_dim_ordering() == 'th':
            in_shape = (cf.dataset.n_channels,
                        cf.target_size_train[0], cf.target_size_train[1])
        else:
            in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                        cf.dataset.n_channels)
        loss = 'categorical_crossentropy'
        metrics = ['accuracy']
    elif cf.dataset.class_mode == 'detection':
        if 'yolo' in cf.model_name:
            in_shape = (cf.dataset.n_channels,
                        cf.target_size_train[0], cf.target_size_train[1])
            loss = YOLOLoss(in_shape, cf.dataset.n_classes, cf.dataset.priors)
            metrics = [YOLOMetrics(in_shape, cf.dataset.n_classes,
                                   cf.dataset.priors)]
        elif cf.model_name == 'ssd':
            in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                        cf.dataset.n_channels)
            loss = MultiboxLoss(cf.dataset.n_classes,
                                neg_pos_ratio=2.0).compute_loss
            metrics = []  # TODO: There is no metrics for the ssd model
        else:
            # BUGFIX: message previously read "Uknown" and the implicitly
            # concatenated literals were missing a separating space
            # ("problem type.Only is implemented...").
            raise ValueError('Unknown "' + cf.model_name + '" name for the ' +
                             cf.dataset.class_mode + ' problem type. '
                             'Only implemented for: {yolo, tiny-yolo, ssd}')
    elif cf.dataset.class_mode == 'segmentation':
        if K.image_dim_ordering() == 'th':
            if variable_input_size:
                in_shape = (cf.dataset.n_channels, None, None)
            else:
                in_shape = (cf.dataset.n_channels,
                            cf.target_size_train[0], cf.target_size_train[1])
        else:
            if variable_input_size:
                in_shape = (None, None, cf.dataset.n_channels)
            else:
                in_shape = (cf.target_size_train[0], cf.target_size_train[1],
                            cf.dataset.n_channels)
        loss = cce_flatt(cf.dataset.void_class, cf.dataset.cb_weights)
        metrics = [IoU(cf.dataset.n_classes, cf.dataset.void_class)]
    else:
        raise ValueError('Unknown problem type')
    return in_shape, loss, metrics