# Example 1: two-pass ("cascaded") training setup.
# Builds validation/train generators, a best-checkpoint callback, and a
# model whose first-pass output masks the input before a second pass
# through the same network.

# validation dataset
dataset_val = u.get_dataset(coco_path, 'val')
gen_val = prepare(dataset_val, epochs, batch_size, input_shape, output_shape)

# train dataset
dataset_train = u.get_dataset(coco_path, 'train')
gen_train = prepare(dataset_train, epochs, batch_size, input_shape,
                    output_shape)

# Keep only the best checkpoint by average accuracy, gated on the
# 'fmeasure' validation metric.
callback = ModelSaveBestAvgAcc(filepath="model-{epoch:02d}-{avgacc:.2f}.hdf5",
                               verbose=True,
                               cond=filter_val('fmeasure'))

# One focal loss per model output (single output here); a list literal
# replaces the original one-iteration append loop.
losses = [binary_focal_loss(gamma=2.)]

input_tensor = layers.Input(shape=input_shape)
model = get_model(input_tensor=input_tensor)

# Second pass: element-wise mask the input with the first-pass prediction,
# then run the same (shared-weight) model on the masked input.
x = layers.Multiply()([input_tensor, model.output])
x = model(x)

model = models.Model(inputs=input_tensor, outputs=x)

# NOTE(review): the original source was truncated mid-call here
# ("model.compile(optimizer=opt.Adam(lr=1e-4),"). Completed with the loss
# list built above and a minimal metrics list. filter_val('fmeasure')
# implies an fmeasure metric was logged in the original -- confirm the
# intended metrics against the upstream script.
model.compile(optimizer=opt.Adam(lr=1e-4),
              loss=losses,
              metrics=['accuracy'])
# --------------------------------------------------------------------------
# Example #2
# --------------------------------------------------------------------------
# Example 2: single-pass training setup using a plain MSE loss and an
# 'evaluate'-gated best-checkpoint callback.
#
# Removed from the original: a stray debug probe (`fuu = next(gen_val)`)
# and a live `import ipdb; ipdb.set_trace()` breakpoint, which would halt
# any non-interactive run, plus a commented-out `load_model` resume block.

# train dataset
dataset_train = u.get_dataset(coco_path, 'train')
gen_train = prepare(dataset_train, epochs, batch_size, input_shape,
                    os.path.join(coco_path, 'train_output'))

# Keep only the best checkpoint by average accuracy, gated on the
# 'evaluate' validation metric.
callback = ModelSaveBestAvgAcc(filepath="model-{epoch:02d}-{avgacc:.2f}.hdf5",
                               verbose=True,
                               cond=filter_val('evaluate'))

# Single-output model trained with plain MSE.
losses = [mse]

# Number of classes is taken from the validation dataset built earlier in
# this file.
model = get_model(input_shape, dataset_val.num_classes)

model.compile(optimizer=opt.Adam(lr=1e-4),
              loss=losses,
              metrics=['accuracy', precision, evaluate])

model.summary()