import gc

from keras import backend as K
from keras.optimizers import Adam

# config, DM (model definitions), callbacks and load_sets are this project's
# own modules, assumed to be imported at the top of the file.


def train_model_with_base(basenet, k_fold):
    import neptune
    neptune.init(
        'buco24/cancer-cnn',
        api_token='eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5haSIsImFwa'
                  'V91cmwiOiJodHRwczovL3VpLm5lcHR1bmUuYWkiLCJhcGlfa2V5IjoiNzY'
                  '5OTFmNDQtNjRkMS00NDgzLWJjYjUtYTc5Zjk1NzA0MDNhIn0=')

    PARAMS = {
        'batch_size': config.BATCH_SIZE,
        'epochs': config.EPOCHS,
        'augmentation': config.AUGMENTATION
    }
    neptune.create_experiment(name=f"{basenet}-{k_fold}-fold", params=PARAMS)

    ################ INITIALIZATION ################
    trainGen, valGen, totalTrain, totalVal = load_sets(config.TRAIN_SET_PATH,
                                                       config.VALID_SET_PATH)

    if basenet == 'vgg-like':
        model_base = DM.create_model_vgg_like()
    elif basenet == 'resnet-like':
        model_base = DM.resnet_like(20)
    else:
        model_base, _, _ = DM.create_with_pretrained_model(basenet)

    # model = multi_gpu_model(model_base, gpus=2)
    model = model_base
    callbacks_train = callbacks.get_callbacks(config.SAVE_MODEL_PATH)

    print("[INFO] compiling model...")
    opt = Adam(lr=1e-4)
    model.compile(loss="categorical_crossentropy", optimizer=opt,
                  metrics=["acc"])

    print("[INFO] training...")
    history = model.fit_generator(
        trainGen,
        steps_per_epoch=totalTrain // config.BATCH_SIZE,
        validation_data=valGen,
        validation_steps=totalVal // config.BATCH_SIZE,
        epochs=config.EPOCHS,
        callbacks=callbacks_train,
        use_multiprocessing=True,
        workers=8)

    # release GPU memory between runs
    K.clear_session()
    del model
    gc.collect()
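# Minimal usage sketch (an assumption, not part of the original script): the
# backbone names below match the branches above, k_fold=5 is an illustrative
# value that only labels the Neptune experiment, and config must already
# define the paths and hyperparameters read inside train_model_with_base.
if __name__ == "__main__":
    for backbone in ("vgg-like", "resnet-like"):
        train_model_with_base(backbone, k_fold=5)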
from math import ceil

from keras.utils import plot_model

# `model`, getLoss, get_optimizer, get_generators, get_callbacks and config
# are defined earlier in this script.

model.summary()

# compile the model with its loss function and optimizer
model.compile(loss=getLoss(),
              optimizer=get_optimizer(config.base_lr),
              metrics=['accuracy'])

if config.model_image:
    plot_model(model, to_file='model_image.jpg')

# prepare the image generators for training and validation
train_generator, val_generator, n_train_samples, n_val_samples = \
    get_generators(config.images_path,
                   config.annotations_path,
                   config.train_val_split,
                   config.batch_size,
                   config.classes,
                   transform=config.augmentation,
                   debug=False)

# prepare the callbacks
callbacks = get_callbacks(config)

# run the training
model.fit_generator(generator=train_generator,
                    steps_per_epoch=ceil(n_train_samples / config.batch_size),
                    epochs=config.epochs,
                    callbacks=callbacks,
                    validation_data=val_generator,
                    validation_steps=ceil(n_val_samples / config.batch_size))

# save the trained weights
model.save_weights(config.trained_weights_path)
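# Worked example of the steps_per_epoch arithmetic above (illustrative
# numbers): math.ceil keeps the final partial batch, while the floor division
# used in train_model_with_base silently drops it each epoch.
n_samples, batch = 1000, 32
assert ceil(n_samples / batch) == 32   # 31 full batches + 1 partial batch
assert n_samples // batch == 31        # partial batch never seen this epoch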
def train(self,
          train_imgs,          # list of training images
          valid_imgs,          # list of validation images
          config,
          result_weights_name,
          augmentation,
          train_times=1,       # times to repeat the training set (for small datasets)
          valid_times=1,       # times to repeat the validation set (for small datasets)
          nb_epoch=100,        # number of epochs
          learning_rate=1e-3,  # training learning rate
          batch_size=32,       # batch size
          warmup_epochs=3,     # initial "warm-up" epochs that let the model get familiar with the dataset
          object_scale=5.0,
          no_object_scale=1.0,
          coord_scale=1.0,
          class_scale=1.0,
          debug=False):

    self.batch_size = batch_size
    self.object_scale = object_scale
    self.no_object_scale = no_object_scale
    self.coord_scale = coord_scale
    self.class_scale = class_scale
    self.debug = debug

    # compile the model
    optimizer = Adam(lr=learning_rate, beta_1=0.9, beta_2=0.999,
                     epsilon=1e-08, decay=0.0)
    self.model.compile(loss=self.custom_loss, optimizer=optimizer,
                       metrics=['accuracy'])

    # prepare the training and validation generators
    generator_config = {
        'IMAGE_H': self.input_size,
        'IMAGE_W': self.input_size,
        'GRID_H': self.grid_h,
        'GRID_W': self.grid_w,
        'BOX': self.nb_box,
        'LABELS': self.labels,
        'CLASS': len(self.labels),
        'ANCHORS': self.anchors,
        'BATCH_SIZE': self.batch_size,
        'TRUE_BOX_BUFFER': self.max_box_per_image,
    }

    train_batch = BatchGenerator(train_imgs,
                                 generator_config,
                                 jitter=augmentation,
                                 augmentation=augmentation,
                                 norm=self.feature_extractor.normalize)
    valid_batch = BatchGenerator(valid_imgs,
                                 generator_config,
                                 norm=self.feature_extractor.normalize,
                                 jitter=False,
                                 augmentation=False)

    # prepare the training callbacks
    callbacks = get_callbacks(config)

    if warmup_epochs > 0:
        print("WARMUP...")
        self.warmup_batches = warmup_epochs * (train_times * len(train_batch)
                                               + valid_times * len(valid_batch))

        # run the warm-up training pass
        self.model.fit_generator(generator=train_batch,
                                 steps_per_epoch=len(train_batch) * train_times,
                                 epochs=warmup_epochs,
                                 verbose=1,
                                 validation_data=valid_batch,
                                 validation_steps=len(valid_batch) * valid_times,
                                 callbacks=[],
                                 workers=3)

    print("Training...")
    self.warmup_batches = 0

    # run the main training pass
    self.model.fit_generator(generator=train_batch,
                             steps_per_epoch=len(train_batch) * train_times,
                             epochs=nb_epoch,
                             verbose=1,
                             validation_data=valid_batch,
                             validation_steps=len(valid_batch) * valid_times,
                             callbacks=callbacks,
                             workers=3)

    # save the resulting weights
    self.model.save_weights(result_weights_name)
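# Worked example of the warm-up budget computed above (illustrative batch
# counts, defaults for everything else): with 100 training batches and 20
# validation batches, the custom loss stays in warm-up mode for the first
# 360 batches before normal training resets warmup_batches to 0.
warmup_epochs, train_times, valid_times = 3, 1, 1
n_train_batches, n_valid_batches = 100, 20
warmup_batches = warmup_epochs * (train_times * n_train_batches
                                  + valid_times * n_valid_batches)
assert warmup_batches == 360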