Example #1
 def __init__(self, *args, **kws):
     KerasModelCheckpoint.__init__(self, *args, **kws)
     if self.filepath.startswith("gs://"):
         self.on_epoch_end = self._gcp_on_epoch_end
         self._original_filepath = self.filepath
         self._temp_file = tempfile.NamedTemporaryFile()
         self.filepath = self._temp_file.name
Example #2
    def __init__(self, filepath, save_best_only=True, training_set=(None, None), testing_set=(None, None), folder=None, cost_string="log_loss", save_training_dataset=False, verbose=1):
        ModelCheckpoint.__init__(self, filepath=filepath, save_best_only=save_best_only, verbose=verbose)

        self.training_x, self.training_y = training_set
        self.testing_x, self.testing_id = testing_set
        self.folder = folder
        self.save_training_dataset = save_training_dataset

        if cost_string == "log_loss":
            self.cost_function = log_loss
        elif cost_string == "auc":
            self.cost_function = roc_auc_score
        else:
            log("Found undefined cost function - {}".format(cost_string), ERROR)
            raise NotImplementedError
Example #3
    def _gcp_on_epoch_end(self, epoch, logs=None):
        # Call original checkpoint to temporary file
        KerasModelCheckpoint.on_epoch_end(self, epoch, logs=logs)

        logs = logs or {}

        # Check if file exists and not empty
        if not os.path.exists(self.filepath):
            log.warning("Checkpoint file does not seem to exists. Ignoring")
            return

        if os.path.getsize(self.filepath) == 0:
            log.warning("File empty, no checkpoint has been saved")
            return

        final_path = self._original_filepath.format(epoch=epoch + 1, **logs)

        with file_io.FileIO(self.filepath, mode='rb') as input_f:
            with file_io.FileIO(final_path, mode='w+b') as output_f:
                output_f.write(input_f.read())

        # Remove local model
        os.remove(self.filepath)
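# Usage sketch (not from the original snippet): assuming the __init__ in Example #1 and the
# _gcp_on_epoch_end above belong to the same KerasModelCheckpoint subclass -- called
# GCSModelCheckpoint here purely for illustration -- it is used like any other checkpoint,
# just with a gs:// destination:
#
#   callbacks = [GCSModelCheckpoint('gs://my-bucket/ckpt-{epoch:02d}.h5', save_best_only=True)]
#   model.fit(x_train, y_train, epochs=10, callbacks=callbacks)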
Example #4
model.summary()  # only the last two layers, with 30735 and 64 parameters, are trainable


# In[15]:

from keras.callbacks import EarlyStopping, ModelCheckpoint
model_path = 'transfer_model.h5'
callbacks = [
        EarlyStopping(
            monitor='val_categorical_crossentropy', 
            patience=3,  # stop after 3 epochs without improvement
            verbose=0),
        
        ModelCheckpoint(
            model_path , 
            monitor='val_categorical_crossentropy', 
            save_best_only=True, 
            verbose=0)
    ]


# In[16]:

X_tr ,y_tr = X[0::2,], y[0::2,]
X_val , y_val = X[1::2,],y[1::2,]
del X ,y


# In[17]:

import gc
Example #5
model.compile(loss='categorical_crossentropy',
              optimizer=Adam(lr=lr_schedule(0)),
              metrics=['accuracy'])
model.summary()
print(model_type)

# Prepare model saving directory.
save_dir = os.path.join(os.getcwd(), 'saved_models')
model_name = 'cifar10_%s_model.{epoch:03d}.h5' % model_type
if not os.path.isdir(save_dir):
    os.makedirs(save_dir)
filepath = os.path.join(save_dir, model_name)

# Prepare callbacks for model saving and for learning rate adjustment.
checkpoint = ModelCheckpoint(filepath=filepath,
                             monitor='val_acc',
                             verbose=1,
                             save_best_only=True)

lr_scheduler = LearningRateScheduler(lr_schedule)

lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1),
                               cooldown=0,
                               patience=5,
                               min_lr=0.5e-6)
from keras.callbacks import TensorBoard
callbacks = [
    checkpoint, lr_reducer, lr_scheduler,
    TensorBoard(log_dir='./TB_logdir/BetterTrain_BS=256', write_images=False)
]

# # Train
model = Sequential()
model.add(Dense(input_dim=input_size, output_dim=2048, activation='relu'))
model.add(Dropout(0.6))
model.add(Dense(input_dim=2048, output_dim=1024, activation='relu'))
model.add(Dropout(0.4))
model.add(Dense(input_dim=1024, output_dim=512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(input_dim=512, output_dim=512, activation='relu'))
model.add(Dropout(0.2))
# model.add(Dense(input_dim=4096, output_dim=output_size, activation='sigmoid'))
model.add(Dense(input_dim=512, output_dim=output_size, activation='linear'))

# model.compile(optimizer=Adam(), loss=weighted_binary_crossentropy)
model.compile(optimizer=Adam(), loss='mse')

model_checkpoint = ModelCheckpoint('./models/' + 'weights_12_5_epoch_{epoch:02d}.h5', monitor='val_loss', save_best_only=True)

# model.fit(xt, yt, batch_size=64, nb_epoch=500, validation_data=(xs, ys), class_weight=W, verbose=0)
print "Start Training..."
#set early stopping monitor so the model stops training when it won't improve anymore
early_stopping_monitor = EarlyStopping(patience=5)

model.fit(train_pool5_img, description_train_vecs, batch_size=64, nb_epoch=300, verbose=1, shuffle=True,
            validation_split=0.1,
            callbacks=[model_checkpoint, early_stopping_monitor])

preds = model.predict(test_pool5_img)

output_name = "./models/nn_pool5_to_word2vec_300.pkl"
pickle.dump(preds, open(output_name, 'wb'), protocol=2)
initial_lrate=1e-1

def time_decay(epoch, initial_lrate):
    decay_rate = 0.1
    new_lrate = initial_lrate / (1 + decay_rate * epoch)
    return new_lrate
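# Worked example of the schedule above: with initial_lrate = 1e-1 (as set above) and
# decay_rate = 0.1, the formula 0.1 / (1 + 0.1 * epoch) gives 0.1 at epoch 0,
# roughly 0.0909 at epoch 1, and 0.05 at epoch 10.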


from keras.callbacks import LearningRateScheduler
lrate = LearningRateScheduler(time_decay,verbose=1)


# hyperparameter tuning

filepath=" DEC weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')

#early = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1e-4, patience=10)
reduce1 = keras.callbacks.ReduceLROnPlateau(monitor='val_accuracy', verbose=1,factor=0.9, patience=2,min_lr=1e-5 )


callbacks_list = [checkpoint,reduce1]

#callbacks_list = [checkpoint,reduce1,  lrate]
#callbacks_list = [checkpoint, reduce1,clr]

#callbacks_list = [checkpoint,clr]
#callbacks_list = [checkpoint,sched]


#callbacks_list = [checkpoint,lrate]
Example #8
    def train(self, model):
        datagen = ImageDataGenerator(rescale=(1. / 255))

        val_datagen = ImageDataGenerator(rescale=(1. / 255))
        os.system('gsutil -m cp -r ' + self.image_path + '/Train .')
        os.system('gsutil -m cp -r ' + self.image_path + '/Validation .')

        batch_size = 32

        def batch_generator(batch_size):
            for batch in datagen.flow_from_directory("Train",
                                                     target_size=(image_size,
                                                                  image_size),
                                                     class_mode="input",
                                                     batch_size=batch_size):
                lab = color.rgb2lab(batch[0])
                X = preprocess(lab)
                Y = lab[:, :, :, 1:] / 128
                yield ([X, Y])

        def val_batch_generator(batch_size):
            for batch in val_datagen.flow_from_directory(
                    "Validation",
                    target_size=(image_size, image_size),
                    class_mode="input",
                    batch_size=batch_size):
                lab = color.rgb2lab(batch[0])
                X = preprocess(lab)
                Y = lab[:, :, :, 1:] / 128
                yield ([X, Y])

        model.summary()

        class WeightsSaver(Callback):
            def __init__(self, N, output_path):
                self.N = N
                self.output_path = output_path
                self.batch = 0

            def on_batch_end(self, batch, logs={}):
                if self.batch % self.N == 0:
                    name = 'currentWeights.h5'
                    self.model.save_weights(name)
                    try:
                        os.system('gsutil cp ' + name + ' ' + self.output_path)
                    except Exception:
                        print("Could not upload current weights")
                self.batch += 1

        checkpoint = ModelCheckpoint("best.hdf5",
                                     monitor="accuracy",
                                     verbose=1,
                                     save_best_only=True,
                                     mode="max")

        every_20_batches = WeightsSaver(20, self.output_path)

        every_10 = ModelCheckpoint("latest.hdf5",
                                   monitor="accuracy",
                                   verbose=1,
                                   save_best_only=False,
                                   mode='auto',
                                   period=1)

        tensorboard = TensorBoard(log_dir=".")
        callbacks = [tensorboard, checkpoint, every_10, every_20_batches]
        model.compile(loss='mean_squared_error',
                      optimizer="adam",
                      metrics=['accuracy'])

        model.fit_generator(batch_generator(batch_size),
                            callbacks=callbacks,
                            epochs=3,
                            steps_per_epoch=4040,
                            validation_data=val_batch_generator(batch_size),
                            validation_steps=157)  #5132 steps per epoch

        # outputDate = now.strftime("%Y-%m-%d %Hh%Mm")
        # os.chdir("output")
        # os.mkdir(outputDate)
        # os.chdir(outputDate)
        try:
            model.save_weights("model_weights.h5")
            model.save("model.h5")
        # else:
        #     model.load_weights("/floyd/input/model/my_model_weights.h5")
        except Exception:
            print("Could not save")

        os.system('gsutil cp model_weights.h5 ' + self.output_path)
        os.system('gsutil cp model.h5 ' + self.output_path)
        os.system('gsutil cp best.hdf5 ' + self.output_path)
        os.system('gsutil cp latest.hdf5 ' + self.output_path)
Example #9
                  output_mode='error', return_sequences=True)

inputs = Input(shape=(nt,) + input_shape)
print((nt,) + input_shape)
errors = prednet(inputs)  # errors will be (batch_size, nt, nb_layers)
errors_by_time = TimeDistributed(Dense(1, weights=[layer_loss_weights, np.zeros(1)], trainable=False), trainable=False)(
    errors)  # calculate weighted error by layer
errors_by_time = Flatten()(errors_by_time)  # will be (batch_size, nt)
final_errors = Dense(1, weights=[time_loss_weights, np.zeros(1)], trainable=False)(
    errors_by_time)  # weight errors by time
model = Model(input=inputs, output=final_errors)
model.compile(loss='mean_absolute_error', optimizer='adam')

train_generator = RadarGenerator(train_file, nt, batch_size=batch_size, shuffle=True)
val_generator = RadarGenerator(val_file, nt, batch_size=batch_size, N_seq=N_seq_val)

lr_schedule = lambda \
        epoch: 0.001 if epoch < 75 else 0.0001  # start with lr of 0.001 and then drop to 0.0001 after 75 epochs
callbacks = [LearningRateScheduler(lr_schedule)]
if save_model:
    if not os.path.exists(WEIGHTS_DIR): os.mkdir(WEIGHTS_DIR)
    callbacks.append(ModelCheckpoint(filepath=weights_file, monitor='val_loss', save_best_only=True))

history = model.fit_generator(train_generator, samples_per_epoch, nb_epoch, callbacks=callbacks,
                              validation_data=val_generator, nb_val_samples=N_seq_val)

if save_model:
    json_string = model.to_json()
    with open(json_file, "w") as f:
        f.write(json_string)
up9 = Conv3DTranspose(32, (2, 2, 2), strides=(2, 2, 2), padding='same')(conv8)
conv9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(up9)
conv9 = Conv3D(32, (3, 3, 3), activation='relu', padding='same')(conv9)
conv10 = Conv3D(1, (1, 1, 1), activation='sigmoid')(conv9)
model = Model(inputs=[I1], outputs=[conv10, fc3])

# Training
model.compile(optimizer=Adam(lr=1e-4),
              loss=[dice_loss, ent_loss],
              loss_weights=[1, 0.3],
              metrics=[dice, 'accuracy'])

filepath = "/home/shas/projects/rrg-hamarneh/sponsored/shas/model11/weights-improvement-{epoch:02d}.hdf5"
my_checkpoint = ModelCheckpoint(filepath,
                                monitor='val_dice',
                                save_weights_only=True,
                                period=10,
                                verbose=1)

tbCallBack = TensorBoard(log_dir='./logs',
                         histogram_freq=0,
                         write_graph=True,
                         write_images=True)

# generating training and validation data
training_generator = generator10(h5path='/dev/shm/shas/SFU_scaled',
                                 indices=index_tr,
                                 batchSize=15,
                                 imagesize=64,
                                 channel=4,
                                 number_class=2)
Example #11
def make_checkpoint(filepath, monitor='val_mean_squared_error'):
    ckp = ModelCheckpoint(filepath, monitor=monitor, save_best_only=True)
    return ckp
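# Usage sketch (model, x_train, y_train and the filepath are placeholders, not part of the
# original): the monitored 'val_mean_squared_error' only appears in the logs if
# 'mean_squared_error' is among the compiled metrics and validation data is provided.
#
#   ckp = make_checkpoint('best_regressor.h5')
#   model.compile(optimizer='adam', loss='mse', metrics=['mean_squared_error'])
#   model.fit(x_train, y_train, validation_split=0.2, epochs=20, callbacks=[ckp])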
Example #12
validation_datas = validation_datas.reshape(nb_validation_samples, input_step_size,1)

# print validation_datas.shape, training_datas.shape, validation_labels.shape


# In[17]:


model = Sequential()
model.add(LSTM(10, input_shape=(input_step_size,1),return_sequences=False))
model.add(Dropout(0.2))
model.add(Dense(output_size))
model.add(Activation('sigmoid'))
model.summary()
# Adam = optimizers.Adam(lr=0.0001)
model.compile(loss='mse', optimizer='adam')
model.fit(training_datas, training_labels, batch_size=batch_size,validation_data=(validation_datas,validation_labels), epochs = epochs, callbacks=[CSVLogger('1layer.csv', append=True), ModelCheckpoint('weights/weights-improvement-{epoch:02d}-{val_loss:.5f}.hdf5', monitor='val_loss', verbose=1,mode='min')])
model.save('1layer.h5')

Example #13
train_samples, validation_samples = train_test_split(samples, test_size=0.2)

print('Train samples: {}'.format(len(train_samples)))
print('Validation samples: {}'.format(len(validation_samples)))

train_generator = BatchGenerator(train_samples, batch_size=256)
validation_generator = BatchGenerator(validation_samples, batch_size=256)
batch_size = 256
epochs = 5
verbose = 1
# Model creation
model = nVidiaModel()

checkpoint = ModelCheckpoint('TrainingCheckpoints/model-{epoch:03d}.h5',
                                 monitor='val_loss',
                                 verbose=0,
                                 save_best_only=True,
                                 mode='auto')

# Compiling and training the model
print('Compiling model...')
model.compile(loss='mean_squared_error', optimizer=Adam(lr=0.0001))


print('Training model...')
history_object = model.fit_generator(train_generator,
                                     steps_per_epoch=len(train_samples)/batch_size,
                                     validation_data=validation_generator,
                                     validation_steps=len(validation_samples)/batch_size,
                                     callbacks=[checkpoint],
                                     epochs=epochs, verbose=verbose)
Example #14
model.add(Activation('relu'))

model.add(Convolution2D(32,3,3, init='he_normal'))
model.add(BatchNormalization(mode=0, axis=1))
model.add(Activation('relu'))

model.add(Flatten())

model.add(BatchNormalization())
model.add(Dense(1,init='glorot_normal'))

model.add(Activation('sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', class_mode='binary')
model.summary()

file_name = 'network_B_1'
saveWeights = ModelCheckpoint(file_name+'_best_weights.h5', monitor='val_acc', verbose=1, save_best_only=True)

cllbcks = [saveWeights]



for mini_epoch in range(1000):
	print "Epoch ", mini_epoch

	X_train, Y_train = train_batches.next()

	model.fit(X_train, Y_train, batch_size=64, nb_epoch=1, callbacks = cllbcks,
			validation_data=(X_valid,Y_valid),show_accuracy=True, verbose=1)

Example #15
print('Descriptions: test=%d' % len(test_descriptions))
# photo features
test_features = LoadPhotoFeatures(
    '/project/ProjectData/Imagefeatures_backup.pkl', test)
print('Photos: test=%d' % len(test_features))
# prepare sequences
X1test, X2test, ytest = CreateSequence(tokenizer, maxLength, test_descriptions,
                                       test_features, vocabSize)

# define the model
model = DefineModel(vocabSize, maxLength)
# define checkpoint callback
filename = 'model-ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5'
checkpoint = ModelCheckpoint(filename,
                             monitor='val_loss',
                             verbose=1,
                             save_best_only=True,
                             mode='min')
# fit model
start = time.time()
hist = model.fit([X1train, X2train],
                 ytrain,
                 epochs=10,
                 verbose=2,
                 callbacks=[checkpoint],
                 validation_data=([X1test, X2test], ytest))
end = time.time()
print("TIME TOOK {:3.2f}MIN".format((end - start) / 60))

#Plot Graph
def train(model_name,
          fold_count,
          train_full_set=False,
          load_weights_path=None,
          ndsb3_holdout=0,
          manual_labels=True,
          local_patient_set=False):
    batch_size = 16
    train_files, holdout_files = get_train_holdout_files(
        train_percentage=80,
        ndsb3_holdout=ndsb3_holdout,
        manual_labels=manual_labels,
        full_luna_set=train_full_set,
        fold_count=fold_count,
        local_patient_set=local_patient_set)

    # train_files = train_files[:100]
    # holdout_files = train_files[:10]
    train_gen = data_generator(batch_size, train_files, True)
    holdout_gen = data_generator(batch_size, holdout_files, False)
    for i in range(0, 10):
        tmp = next(holdout_gen)
        cube_img = tmp[0][0].reshape(CUBE_SIZE, CUBE_SIZE, CUBE_SIZE, 1)
        cube_img = cube_img[:, :, :, 0]
        cube_img *= 255.
        cube_img += MEAN_PIXEL_VALUE
        # helpers.save_cube_img("c:/tmp/img_" + str(i) + ".png", cube_img, 4, 8)
        # logger.info(tmp)

    #input("Enter any key to continue...")
    history = LossHistory()
    logcallback = LoggingCallback(logger.info)

    learnrate_scheduler = LearningRateScheduler(step_decay)
    model = get_net(load_weight_path=load_weights_path)

    # Tensorboard setting
    if not os.path.exists(TENSORBOARD_LOG_DIR):
        os.makedirs(TENSORBOARD_LOG_DIR)

    tensorboard_callback = TensorBoard(
        log_dir=TENSORBOARD_LOG_DIR,
        histogram_freq=2,
        # write_images=True, # Enabling this line would require more than 5 GB at each `histogram_freq` epoch.
        write_graph=True
        # embeddings_freq=3,
        # embeddings_layer_names=list(embeddings_metadata.keys()),
        # embeddings_metadata=embeddings_metadata
    )
    tensorboard_callback.set_model(model)

    holdout_txt = "_h" + str(ndsb3_holdout) if manual_labels else ""
    if train_full_set:
        holdout_txt = "_fs" + holdout_txt
    checkpoint = ModelCheckpoint(settings.WORKING_DIR + "workdir/model_" +
                                 model_name + "_" + holdout_txt + "_e" +
                                 "{epoch:02d}-{val_loss:.4f}.hd5",
                                 monitor='val_loss',
                                 verbose=1,
                                 save_best_only=not train_full_set,
                                 save_weights_only=False,
                                 mode='auto',
                                 period=1)
    checkpoint_fixed_name = ModelCheckpoint(
        settings.WORKING_DIR + "workdir/model_" + model_name + "_" +
        holdout_txt + "_best.hd5",
        monitor='val_loss',
        verbose=1,
        save_best_only=True,
        save_weights_only=False,
        mode='auto',
        period=1)
    # train_history = model.fit_generator(train_gen, len(train_files) / 1, 12, validation_data=holdout_gen, nb_val_samples=len(holdout_files) / 1, callbacks=[checkpoint, checkpoint_fixed_name, learnrate_scheduler])
    train_history = model.fit_generator(
        train_gen,
        len(train_files) / batch_size,
        1,
        validation_data=holdout_gen,
        validation_steps=len(holdout_files) / batch_size,
        callbacks=[
            logcallback, tensorboard_callback, checkpoint_fixed_name,
            learnrate_scheduler
        ])
    logger.info("Model fit_generator finished.")
    model.save(settings.WORKING_DIR + "workdir/model_" + model_name + "_" +
               holdout_txt + "_end.hd5")

    logger.info("history keys: {0}".format(train_history.history.keys()))

    # numpy_loss_history = numpy.array(history.history)
    # numpy.savetxt("workdir/model_" + model_name + "_" + holdout_txt + "_loss_history.txt", numpy_loss_history, delimiter=",")
    pandas.DataFrame(
        train_history.history).to_csv(settings.WORKING_DIR + "workdir/model_" +
                                      model_name + "_" + holdout_txt +
                                      "history.csv")
Example #17
activation3 = 'softmax'
activation4 ='elu'
# model.add(Dense(1024, activation=activation, input_shape=(n_cols,)))
model.add(Dense(256, activation=activation4, input_shape=(n_cols,)))  # best so far
# model.add(Dense(128, activation=activation))
# model.add(Dense(32, activation=activation))
# model.add(Dense(64, activation=activation))
model.add(Dense(16, activation=activation4))  #
model.add(Dense(4, activation=activation3))
#
#                       'adam' 'sgd' optimizer
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

filepath = "keras_model_weights\\weights_best_classification.hdf5"
checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy',  # 'accuracy'
                             verbose=1,
                             save_best_only=True,
                             mode='max')    # min for loss metrics, max for accuracy-type metrics

early_stopping_monitor = EarlyStopping(patience=500)  # runs without improvement before stopping
callbacks_list = [checkpoint, early_stopping_monitor]  # list of tasks to do before next run
history = model.fit(features, target,
                    validation_split=.3,        # split data into .7 train, .3 test each run
                    epochs=2500,                # do this many runs unless stop early
                    callbacks=callbacks_list)   # after each run do this list of things, defined above


# ############################## Predictions vs Actual #######################################

best = pd.Series(history.history['val_accuracy']).idxmax()
print(np.array(history.history['val_accuracy']).max())
    def before_train(self):
        logger.info(f"args: {self.args}")
        logger.info(f"config:\n{self.config}")

        # compile model
        model = self.config.create_model(weight=self.args.weights)
        model_sum = []
        model.summary(print_fn=lambda l: model_sum.append(l))
        logger.info("Model summary:\n {}".format("\n".join(model_sum)))

        optimizer = self.config.get_optimizer()
        loss = self.config.get_loss()
        metrics = self.config.get_metrics()
        model.compile(optimizer, loss, metrics)

        lr_scheduler = self.config.get_lr_scheduler()
        if lr_scheduler is not None:
            self.callbacks.append(lr_scheduler)

        # save weights
        acc_name = [acc for acc in model.metrics_names if "acc" in acc]
        if len(acc_name):
            acc_name = acc_name[0]
            val_acc_name = "val_" + acc_name
            weight_path = os.path.join(self.record_dir, "Epoch_{epoch:04d}_{%s:.3f}_{%s:.3f}.h5" % (acc_name, val_acc_name))
            monitor = val_acc_name
            monitor_mode = "max"
        else:
            weight_path = os.path.join(self.record_dir, "Epoch_{epoch:04d}_{loss:.3f}_{val_loss:.3f}.h5")
            monitor = "val_loss"
            monitor_mode = "min"
        logger.info(f"Weight monitor: {monitor} Monitor mode: {monitor_mode}")
        best_weight_path = os.path.join(self.record_dir, "best_ckpt.h5")
        cb_save_each_weight = ModelCheckpoint(weight_path, monitor, save_weights_only=True)
        cb_save_best_weight = ModelCheckpoint(best_weight_path, monitor, save_best_only=True, mode=monitor_mode, save_weights_only=True)
        self.callbacks += [
            cb_save_each_weight,
            cb_save_best_weight
        ]

        # monitor training
        log_path = os.path.join(self.record_dir, "train_log.csv")
        cb_csv_log = CSVLogger(log_path, append=bool(self.ini_epoch))
        cb_tensorboard = TensorBoard(self.record_dir, write_graph=False)
        self.callbacks += [
            cb_csv_log,
            cb_tensorboard
        ]

        if self.ini_epoch != 0:
            logger.info(f"resume train from {self.ini_epoch}")

        # dataset init
        self.train_loader = self.config.get_train_loader()
        self.train_loader.batch_size = self.batch_size
        self.val_loader = self.config.get_validate_loader()
        self.val_loader.batch_size = self.batch_size

        self.model = model

        logger.info("Start training...")
Example #19
File: train.py  Project: yanmeen/afnn
# define loss


if __name__ == '__main__':
    # model selection
    AF_model = AFNN(filters=64, image_channels=1, use_bnorm=True)
    AF_model.summary()

    # load the last model in matconvnet style
    initial_epoch = find_LastCheckpoint(model_dir=save_dir)
    if initial_epoch > 0:
        print('resuming by loading epoch %03d' % initial_epoch)
        AF_model = load_model(os.path.join(
            save_dir, 'model_%03d.hdf5' % initial_epoch), compile=False)

    # compile the model
    AF_model.compile(optimizer=Adam(0.001), loss=losses.mean_squared_error)

    # use call back functions
    check_pointer = ModelCheckpoint(os.path.join(save_dir, 'model_{epoch:03d}.hdf5'),
                                    verbose=1, save_weights_only=False, period=args.save_step)
    csv_logger = CSVLogger(os.path.join(
        save_dir, 'log.csv'), append=True, separator=',')
    lr_scheduler = LearningRateScheduler(lr_schedule)

    history = AF_model.fit_generator(train_datagen(batch_size=args.batch_size),
                                     steps_per_epoch=200, epochs=args.epoch, verbose=1,
                                     initial_epoch=initial_epoch,
                                     callbacks=[check_pointer, csv_logger, lr_scheduler])
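# find_LastCheckpoint is referenced but not defined in this example; a minimal sketch,
# assuming checkpoints follow the 'model_{epoch:03d}.hdf5' naming used above:

import glob
import os
import re


def find_LastCheckpoint(model_dir):
    # Largest epoch number among model_*.hdf5 files in model_dir, or 0 if there are none.
    epochs = [int(m.group(1))
              for f in glob.glob(os.path.join(model_dir, 'model_*.hdf5'))
              for m in [re.search(r'model_(\d+)\.hdf5$', os.path.basename(f))] if m]
    return max(epochs) if epochs else 0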
Example #20
#################################

# print ('vocabulary', vocabulary, 'hidden_size', hidden_size, 'num_steps', num_steps)

optimizer = Adam()
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['categorical_accuracy'])

outputs = [layer.output for layer in model.layers]
for out in outputs:
    print(out)
print(model.summary())
assert (False)

checkpointer = ModelCheckpoint(filepath=data_path + '/model-{epoch:02d}.hdf5',
                               verbose=1)
num_epochs = 50
if args.run_opt == 1:
    model.fit_generator(train_data_generator.generate(),
                        len(train_data) // (batch_size * num_steps),
                        num_epochs,
                        validation_data=valid_data_generator.generate(),
                        validation_steps=len(valid_data) //
                        (batch_size * num_steps),
                        callbacks=[checkpointer])
    # model.fit_generator(train_data_generator.generate(), 2000, num_epochs,
    #                     validation_data=valid_data_generator.generate(),
    #                     validation_steps=10)
    model.save(data_path + "/final_model.hdf5")
elif args.run_opt == 2:
    model = load_model(data_path + "/model-40.hdf5")
Example #21
                    name='dense_final')(dropout_4)
dropout_5 = Dropout(drop)(dense_final)
softmax_final = Dense(units=len(categories),
                      activation='softmax',
                      name='softmax_final')(dropout_5)
# Compile model
model = Model(inputs=inputs, outputs=softmax_final)
model.compile(optimizer=optimizer,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Save checkpoints?
if save_checkpoints:
    filepath = path_to_dir + "weights-improvement-{epoch:02d}-{val_acc:.4f}.hdf5"  # https://machinelearningmastery.com/check-point-deep-learning-models-keras/
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='val_acc',
                                 verbose=verbose,
                                 save_best_only=True,
                                 mode='auto')
print("Training Model...")
if save_checkpoints:
    history = model.fit(Xtrain_encoded,
                        Ytrain_encoded,
                        batch_size=batch_size,
                        epochs=epochs,
                        verbose=verbose,
                        callbacks=[checkpoint],
                        validation_data=(Xtest_encoded,
                                         Ytest_encoded))  # starts training
else:
    history = model.fit(Xtrain_encoded,
                        Ytrain_encoded,
Example #22
dense_output2 = dense(dense_output1)
logistic_regression_output = Dense(1, activation='sigmoid',
                                   name='main_output')(dense_output2)

model = Model(inputs=[main_input_1, main_input_2],
              outputs=[logistic_regression_output])

model.compile(optimizer='RMSprop',
              loss={'main_output': 'mean_squared_error'},
              metrics=['mse', 'mae', 'mape', 'cosine'])

tensorboard = TensorBoard(log_dir="logs/{}".format(time()))

checkpoint = ModelCheckpoint("checkpoint",
                             monitor='val_acc',
                             verbose=1,
                             mode='max')

model.fit({
    'main_input_1': input1,
    'main_input_2': input2
}, {'main_output': main_output},
          epochs=epochs,
          verbose=1,
          batch_size=batch_size,
          callbacks=[tensorboard, checkpoint],
          validation_split=0.2)

print("-----Training Completed-----\n")

model_json = model.to_json()
Example #23
def train():
    print('-' * 30)
    print('Loading and preprocessing train data...')
    print('-' * 30)

    # mean = np.mean(imgs_train)  # mean for data centering
    # std = np.std(imgs_train)  # std for data normalization
    #
    # imgs_train -= mean
    # imgs_train /= std

    # imgs_train = imgs_train.astype(np.uint8)

    imgs_train, imgs_mask_train = load_train_data()
    imgs_mask_train = imgs_mask_train.astype('float32')
    imgs_train = imgs_train.astype('float32')
    imgs_mask_train /= 255.  # scale masks to [0, 1]
    imgs_train /= 255.  # scale images to [0, 1]

    #print(imgs_mask_train[10, 11, 100, :, 0])
    #print(imgs_train[10, 11, 100, :, 0])

    # imgs_mask_train = imgs_mask_train.astype(np.uint8)

    # np.set_printoptions(threshold=np.nan)
    # print('-'*30)
    # print(imgs_train[0][30])

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = get_unet()
    weight_dir = 'weights'
    if not os.path.exists(weight_dir):
        os.mkdir(weight_dir)
    model_checkpoint = ModelCheckpoint(os.path.join(weight_dir,
                                                    project_name + '.h5'),
                                       monitor='val_loss',
                                       save_best_only=True)

    log_dir = 'logs'
    if not os.path.exists(log_dir):
        os.mkdir(log_dir)
    csv_logger = CSVLogger(os.path.join(log_dir, project_name + '.txt'),
                           separator=',',
                           append=False)

    print('-' * 30)
    print('Fitting model...')
    print('-' * 30)
    # model.fit(imgs_train, imgs_mask_train, batch_size=32, epochs=20, verbose=1, shuffle=True,
    #           validation_split=0.15,
    #           callbacks=[model_checkpoint])

    model.fit(imgs_train,
              imgs_mask_train,
              batch_size=1,
              epochs=50,
              verbose=1,
              shuffle=True,
              validation_split=0.10,
              callbacks=[model_checkpoint, csv_logger])

    print('-' * 30)
    print('Training finished')
    print('-' * 30)

batch_size = 32
epochs = 16

X_tra, X_val, y_tra, y_val = train_test_split(
    x_train, y_train, train_size=0.95, random_state=233)
RocAuc = RocAucEvaluation(validation_data=(X_val, y_val), interval=1)

early_stopping = EarlyStopping(monitor='val_loss',
                               min_delta=0,
                               patience=2,
                               verbose=0, mode='auto')

bst_model_path = OUTPUT_DIR + MODEL_STAMP + '.h5'
model_checkpoint = ModelCheckpoint(
    bst_model_path, save_best_only=True, save_weights_only=True)

hist = model.fit(X_tra, y_tra, batch_size=batch_size, epochs=epochs,
                 validation_data=(X_val, y_val),
                 callbacks=[early_stopping, model_checkpoint, RocAuc],
                 verbose=2)
bst_val_score = min(hist.history['val_loss'])

model.load_weights(bst_model_path)

y_pred = model.predict(x_test, batch_size=1024)
submission[["toxic", "severe_toxic", "obscene",
            "threat", "insult", "identity_hate"]] = y_pred
submission.to_csv(OUTPUT_DIR+'submission_%.4f_gru_glove.csv' %
                  (bst_val_score), index=False)
def train(data_type, seq_length, model, saved_model=None,
          concat=False, class_limit=None, image_shape=None,
          load_to_memory=False):
    # Set variables.
    nb_epoch = 1000
    batch_size = 32

    # Helper: Save the model.
    checkpointer = ModelCheckpoint(
        filepath='./data/checkpoints/' + model + '-' + data_type + \
            '.{epoch:03d}-{val_loss:.3f}.hdf5',
        verbose=1,
        save_best_only=True)

    # Helper: TensorBoard
    tb = TensorBoard(log_dir='./data/logs')

    # Helper: Stop when we stop learning.
    early_stopper = EarlyStopping(patience=10)

    # Helper: Save results.
    timestamp = time.time()
    csv_logger = CSVLogger('./data/logs/' + model + '-' + 'training-' + \
        str(timestamp) + '.log')

    # Get the data and process it.
    if image_shape is None:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit
        )
    else:
        data = DataSet(
            seq_length=seq_length,
            class_limit=class_limit,
            image_shape=image_shape
        )

    # Get samples per epoch.
    # Multiply by 0.7 to attempt to guess how much of data.data is the train set.
    samples_per_epoch = ((len(data.data) * 0.7) // batch_size) * batch_size

    if load_to_memory:
        # Get data.
        X, y = data.get_all_sequences_in_memory(batch_size, 'train', data_type, concat)
        X_test, y_test = data.get_all_sequences_in_memory(batch_size, 'test', data_type, concat)
    else:
        # Get generators.
        generator = data.frame_generator(batch_size, 'train', data_type, concat)
        val_generator = data.frame_generator(batch_size, 'test', data_type, concat)

    # Get the model.
    rm = ResearchModels(len(data.classes), model, seq_length, saved_model)

    # Fit!
    if load_to_memory:
        # Use standard fit.
        rm.model.fit(
            X,
            y,
            batch_size=batch_size,
            validation_data=(X_test, y_test),
            verbose=1,
            callbacks=[checkpointer, tb, early_stopper, csv_logger],
            nb_epoch=nb_epoch,
            samples_per_epoch=samples_per_epoch)
    else:
        # Use fit generator.
        rm.model.fit_generator(
            generator=generator,
            samples_per_epoch=samples_per_epoch,
            nb_epoch=nb_epoch,
            verbose=1,
            callbacks=[checkpointer, tb, early_stopper, csv_logger],
            validation_data=val_generator,
            nb_val_samples=256)
Example #26
File: ESRGAN.py  Project: dkdldspd/Study
    def train_generator(self,
        epochs, batch_size,
        workers=1,
        dataname='doctor',
        datapath_train='./images/train_dir',
        datapath_validation='./images/val_dir',
        datapath_test='./images/val_dir',
        steps_per_epoch=1000,
        steps_per_validation=1000,
        crops_per_image=2,
        log_weight_path='./data/weights/',
        log_tensorboard_path='./data/logs/',
        log_tensorboard_name='SR-RRDB-D',
        log_tensorboard_update_freq=1,
        log_test_path="./images/samples-d/"
        ):
        """Trains the generator part of the network with MSE loss"""

        # Create data loaders
        train_loader = DataLoader(
            datapath_train, batch_size,
            self.height_hr, self.width_hr,
            self.upscaling_factor,
            crops_per_image
        )
        test_loader = None
        if datapath_validation is not None:
            test_loader = DataLoader(
                datapath_validation, batch_size,
                self.height_hr, self.width_hr,
                self.upscaling_factor,
                crops_per_image
            )   
        
        self.gen_lr = 3.2e-5
        for step in range(epochs // 10):
            self.compile_generator(self.generator)
            # Callback: tensorboard
            callbacks = []
            if log_tensorboard_path:
                tensorboard = TensorBoard(
                    log_dir=os.path.join(log_tensorboard_path, log_tensorboard_name),
                    histogram_freq=0,
                    batch_size=batch_size,
                    write_graph=False,
                    write_grads=False,
                    update_freq=log_tensorboard_update_freq
                )
                callbacks.append(tensorboard)
            else:
                print(">> Not logging to tensorboard since no log_tensorboard_path is set")

            # Callback: save weights after each epoch
            modelcheckpoint = ModelCheckpoint(
                os.path.join(log_weight_path, dataname + '_{}X.h5'.format(self.upscaling_factor)),
                monitor='PSNR',
                save_best_only=True,
                save_weights_only=True
            )
            callbacks.append(modelcheckpoint)

            # Callback: test images plotting
            if datapath_test is not None:
                testplotting = LambdaCallback(
                    on_epoch_end=lambda epoch, logs: plot_test_images(
                        self,
                        test_loader,
                        datapath_test,
                        log_test_path,
                        epoch + step * 10,
                        name='RRDB-D'
                    )
                )
                callbacks.append(testplotting)

            # Fit the model
            self.generator.fit_generator(
                train_loader,
                steps_per_epoch=steps_per_epoch,
                epochs=10,
                validation_data=test_loader,
                validation_steps=steps_per_validation,
                callbacks=callbacks,
                use_multiprocessing=workers > 1,
                workers=workers
            )
            self.generator.save('./data/weights/Doctor_gan(Step %dK).h5' % (step * 10 + 10))
            self.gen_lr /= 1.149
            print(step, self.gen_lr)
Example #27
y_train = label[:-nb_validation_samples]
x_test = data[-nb_validation_samples:]
y_test = label[-nb_validation_samples:]
"""
文本数据表示完成时间
"""
mid_time = time.time()
print("初始时间=:", mid_time - start_time)
"""
模型的建立
"""
print('Build model...')
earlyStopping = EarlyStopping(monitor='val_loss', patience=1, verbose=0)
saveBestModel = ModelCheckpoint(save_best_model_file,
                                monitor='val_loss',
                                verbose=0,
                                save_best_only=True,
                                save_weights_only=True)

embeddings_layer = Embedding(len(word_index) + 1,
                             embedding_dim,
                             input_length=max_sequence_length,
                             trainable=True)

inputs = Input(shape=(max_sequence_length, ), dtype='int32', name='input')
embeddings_sequences = embeddings_layer(inputs)
output = LSTM(lstm_output_size, dropout=0.65,
              recurrent_dropout=0.0)(embeddings_sequences)
# output=Dense(64,activation='relu',name='dense1')(output)

print(output)
Example #28
                    self.current_idx = 0
                inn, out, _ = self.inn[self.current_idx]
                self.current_idx += 1
            tmp_x = np.swapaxes(np.array([n_str_to_int_list(inn, max_length)], dtype=int),1,2)
            tmp_y = np.array([str_to_int_list(out, max_length)], dtype=int).reshape((1, -1))
            #print(tmp_x.shape, tmp_y.shape)
            
            yield tmp_x, to_categorical(tmp_y, num_classes=max_output)


train_data_generator = KerasBatchGenerator(args.train_data_num, vocab)
valid_data_generator = KerasBatchGenerator(0, vocab)


print("starting")
checkpointer = ModelCheckpoint(filepath='checkpoints/model-{epoch:02d}.hdf5', verbose=1)

num_epochs = args.epochs

history = model.fit_generator(train_data_generator.generate(), args.epoch_size, num_epochs, validation_data=valid_data_generator.generate(), validation_steps=args.epoch_size / 10, callbacks=[checkpointer])

model.save(args.final_name+'.hdf5')
print(history.history.keys())

def list_to_string(prediction):
    s=""
    for i in range(prediction.shape[0]):
        s += vocab_rev[np.argmax(prediction[i])]
    return s
    
    
Example #29

x_test = test_data.astype('float32') / 255.

y_test = test_label.astype('float32') / 255.
x_test = np.reshape(x_test, (len(x_test), 512, 512, 3))  # adapt this if using `channels_first` image data format
y_test = np.reshape(y_test, (len(y_test), 512, 512, 1))  # adapt this if using `channels_first` image data format


from  SD_Unet import *
model=SD_UNet(input_size=(512,512,3),start_neurons=16,keep_prob=1,block_size=1)
weight="Model/Luna/SD_UNet.h5"

if os.path.isfile(weight): model.load_weights(weight)

model_checkpoint = ModelCheckpoint(weight, monitor='val_acc', verbose=1, save_best_only=True)

y_pred = model.predict(x_test)
y_pred_threshold = []
i=0
for y in y_pred:

    _, temp = cv2.threshold(y, 0.5, 1, cv2.THRESH_BINARY)
    y_pred_threshold.append(temp)
    y = y * 255
    cv2.imwrite('./Luna/test/result/%d.png' % i, y)
    i+=1
y_test = list(np.ravel(y_test))
y_pred_threshold = list(np.ravel(y_pred_threshold))

tn, fp, fn, tp = confusion_matrix(y_test, y_pred_threshold).ravel()
Example #30
def _main():
    annotation_path = 'dataset/chanelset/train.txt'  # Update to your feeding list file
    log_dir = 'logs/001/'
    classes_path = 'model_data/yolo_classes.txt'
    anchors_path = 'model_data/yolo_anchors.txt'
    class_names = get_classes(classes_path)
    num_classes = len(class_names)
    anchors = get_anchors(anchors_path)

    input_shape = (608, 608)  # multiple of 32, hw

    is_tiny_version = len(anchors) == 6  # default setting
    if is_tiny_version:
        model = create_tiny_model(
            input_shape,
            anchors,
            num_classes,
            freeze_body=2,
            weights_path='model_data/tiny_yolo_weights.h5')
    else:
        model = create_model(input_shape,
                             anchors,
                             num_classes,
                             freeze_body=2,
                             weights_path='model_data/yolo_weights.h5'
                             )  # make sure you know what you freeze

    logging = TensorBoard(log_dir=log_dir)
    checkpoint = ModelCheckpoint(
        log_dir + 'ep{epoch:03d}-loss{loss:.3f}-val_loss{val_loss:.3f}.h5',
        monitor='val_loss',
        save_weights_only=True,
        save_best_only=True,
        period=3)
    reduce_lr = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.1,
                                  patience=3,
                                  verbose=1)
    early_stopping = EarlyStopping(monitor='val_loss',
                                   min_delta=0,
                                   patience=10,
                                   verbose=1)

    val_split = 0.1
    with open(annotation_path) as f:
        lines = f.readlines()
    np.random.seed(10101)
    np.random.shuffle(lines)
    np.random.seed(None)
    num_val = int(len(lines) * val_split)
    num_train = len(lines) - num_val

    # Train with frozen layers first, to get a stable loss.
    # Adjust the number of epochs to your dataset. This step is enough to obtain a reasonably good model.
    if True:
        model.compile(
            optimizer=Adam(lr=1e-3),
            loss={
                # use custom yolo_loss Lambda layer.
                'yolo_loss': lambda y_true, y_pred: y_pred
            })

        batch_size = 32
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(data_generator_wrapper(lines[:num_train],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
                            steps_per_epoch=max(1, num_train // batch_size),
                            validation_data=data_generator_wrapper(
                                lines[num_train:], batch_size, input_shape,
                                anchors, num_classes),
                            validation_steps=max(1, num_val // batch_size),
                            epochs=50,
                            initial_epoch=0,
                            callbacks=[logging, checkpoint])
        model.save_weights(log_dir + 'trained_weights_stage_1.h5')

    # Unfreeze and continue training, to fine-tune.
    # Train longer if the result is not good.
    if True:
        for i in range(len(model.layers)):
            model.layers[i].trainable = True
        model.compile(optimizer=Adam(lr=1e-4),
                      loss={
                          'yolo_loss': lambda y_true, y_pred: y_pred
                      })  # recompile to apply the change
        print('Unfreeze all of the layers.')

        batch_size = 32  # note that more GPU memory is required after unfreezing the body
        print('Train on {} samples, val on {} samples, with batch size {}.'.
              format(num_train, num_val, batch_size))
        model.fit_generator(
            data_generator_wrapper(lines[:num_train], batch_size, input_shape,
                                   anchors, num_classes),
            steps_per_epoch=max(1, num_train // batch_size),
            validation_data=data_generator_wrapper(lines[num_train:],
                                                   batch_size, input_shape,
                                                   anchors, num_classes),
            validation_steps=max(1, num_val // batch_size),
            epochs=50,
            initial_epoch=30,
            callbacks=[logging, checkpoint, reduce_lr, early_stopping])
        model.save_weights(log_dir + 'trained_weights_final.h5')
Example #31
    def fit(self, model, data, new_data):
        import time
        import keras
        import numpy
        from keras.callbacks import ModelCheckpoint, EarlyStopping
        from sklearn.model_selection import StratifiedKFold

        params = self.get_params()

        X = data['X']
        Y = data['Y']

        if model.get_name().lower() == 'vgg16':
            X = self.prepare_data(X, True)
        elif model.get_name().lower() == 'mobile_net' or model.get_name(
        ).lower() == 'resnet' or model.get_name().lower() == 'unet':
            X = self.prepare_data(X, False)

        if params['use_new_data']:
            new_X = new_data['X']
            new_Y = new_data['Y']
            if model.get_name().lower() == 'vgg16':
                new_X = self.prepare_data(new_X, True)
            elif model.get_name().lower() == 'mobile_net' or model.get_name(
            ).lower() == 'resnet' or model.get_name().lower() == 'unet':
                new_X = self.prepare_data(new_X, False)
            if (self.get_params()['problem_type'] == 'classification'):
                new_Y = keras.utils.to_categorical(new_Y,
                                                   num_classes=len(
                                                       numpy.unique(Y)))

        title = model.get_name() + '_' + time.strftime("%Y_%m_%d_%H_%M_%S")

        repetitions_scores = []
        repetitions_scores_new_data = []
        matrix_orig = []
        matrix_predicted = []
        for i in range(params['repetitions']):
            test_results = []
            test_orig = []

            test_results_new_data = []
            test_orig_new_data = []

            import random
            seed = random.randint(1, 1000)

            kfold = StratifiedKFold(n_splits=params['folds'],
                                    shuffle=True,
                                    random_state=seed)

            if self.get_params()['verbose']:
                print('Repetition: ' + str(i + 1) + '\n')

            count = 1

            cvscores = []
            cvscores_new_data = []
            for train, test in kfold.split(X, Y):

                architecture = model.get_model()

                if self.get_params()['verbose']:
                    print('Fold: ' + str(count))

                X_train, X_test = X[train], X[test]

                Y_train, Y_test = Y[train], Y[test]

                validation_percentage = params['validation_split']

                # Class weight if there are unbalanced classes
                from sklearn.utils import class_weight
                # class_weight = class_weight.compute_class_weight('balanced',numpy.unique(Y), Y)
                sample_weight = class_weight.compute_sample_weight(
                    class_weight='balanced', y=Y_train)

                if (
                        self.get_params()['problem_type'] == 'classification'
                ):  # and not(params['use_distillery'] and model.get_name().lower() != 'distillery_network'):
                    Y_train = keras.utils.to_categorical(Y_train,
                                                         num_classes=len(
                                                             numpy.unique(Y)))
                    Y_test = keras.utils.to_categorical(Y_test,
                                                        num_classes=len(
                                                            numpy.unique(Y)))

                callbacks_list = []
                callbacks_list.append(
                    ModelCheckpoint(title + "_kfold_weights_improvement.hdf5",
                                    monitor='val_loss',
                                    verbose=params['verbose'],
                                    save_best_only=True,
                                    mode='min'))
                callbacks_list.append(
                    EarlyStopping(monitor='val_loss',
                                  min_delta=params['min_delta'],
                                  patience=params['patience'],
                                  verbose=params['verbose'],
                                  mode='min'))

                history = LossHistory((X_train, Y_train))
                callbacks_list.append(history)

                # Fit the architecture
                architecture.fit(X_train,
                                 Y_train,
                                 epochs=params['epochs'],
                                 batch_size=params['batch'],
                                 validation_split=validation_percentage,
                                 callbacks=callbacks_list,
                                 verbose=params['verbose'],
                                 sample_weight=sample_weight)

                import matplotlib.pyplot as plt
                plt.plot(history.losses[:])
                plt.plot(history.accuracies[:])
                plt.title('model loss and acc')
                plt.ylabel('value')
                plt.xlabel('batch')
                plt.legend(['loss', 'acc'], loc='upper left')
                plt.show()

                # Data Augmentation
                if params['augmentation']:
                    from keras.preprocessing.image import ImageDataGenerator

                    # Initialize Generator
                    datagen = ImageDataGenerator(vertical_flip=True)

                    # Fit parameters from data
                    datagen.fit(X_train)

                    # Fit new data
                    architecture.fit_generator(
                        datagen.flow(X_train,
                                     Y_train,
                                     batch_size=params['batch']))

                model_json = architecture.to_json()
                if count == 1:
                    with open(title + '_model.json', "w") as json_file:
                        json_file.write(model_json)
                    architecture.save_weights(
                        title + '_weights.h5')  # TODO: Check what weights save
                    file = open(title + '_seed.txt', 'a+')
                    file.write('Repetition: ' + str(i + 1) + ' , seed: ' +
                               str(seed) + '\n')
                    file.close()

                # Evaluate the architecture
                print('Evaluation metrics\n')
                scores = architecture.evaluate(X_test,
                                               Y_test,
                                               verbose=params['verbose'])

                Y_predicted = numpy.argmax(architecture.predict(
                    X_test, batch_size=params['batch']),
                                           axis=1)
                test_results += Y_predicted.tolist()
                test_orig += numpy.argmax(Y_test, axis=1).tolist()

                cvscores.append(scores)

                if params['use_new_data']:
                    scores_new_data = architecture.evaluate(
                        new_X, new_Y, verbose=params['verbose'])
                    new_Y_predicted = numpy.argmax(architecture.predict(
                        new_X, batch_size=params['batch']),
                                                   axis=1)
                    test_results_new_data += new_Y_predicted.tolist()
                    test_orig_new_data += numpy.argmax(new_Y, axis=1).tolist()

                    cvscores_new_data.append(scores_new_data)

                matrix_orig.extend(test_orig)
                matrix_predicted.extend(Y_predicted)

                count += 1

            if params['problem_type'].lower() == 'classification':
                scores2 = self.classification_metrics(
                    numpy.array(test_results), test_orig, numpy.unique(Y))
            else:
                scores2 = self.regression_metrics(numpy.array(test_results),
                                                  test_orig)

            import csv

            with open(title + '_test_output.csv', 'w+') as file:
                wr = csv.writer(file)
                wr.writerow(test_results)
            file.close()

            repetitions_scores.append(
                numpy.mean(cvscores, axis=0).tolist() + scores2)

            if params['use_new_data']:
                if params['problem_type'].lower() == 'classification':
                    scores2_new_data = self.classification_metrics(
                        numpy.array(test_results_new_data), test_orig_new_data,
                        numpy.unique(Y))
                else:
                    scores2_new_data = self.regression_metrics(
                        numpy.array(test_results_new_data), test_orig_new_data)

                repetitions_scores_new_data.append(
                    numpy.mean(cvscores_new_data, axis=0).tolist() +
                    scores2_new_data)

        global_score_mean = numpy.mean(repetitions_scores, axis=0)
        global_score_std = numpy.std(repetitions_scores, axis=0)

        if params['problem_type'].lower() == 'classification':
            architecture.metrics_names += [
                "sklearn_acc", "tn", "fp", "fn", "tp", "precision", "recall",
                "specificity", "f1", "auc_roc", "k"
            ]
        else:
            architecture.metrics_names += ["mae", "r2"]

        file = open(title + '_results.txt', 'w')
        for count in range(len(architecture.metrics_names)):
            file.write(
                str(architecture.metrics_names[count]) + ": " +
                str(numpy.around(global_score_mean[count], decimals=4)) +
                chr(177) +
                str(numpy.around(global_score_std[count], decimals=4)) + '\n')
        file.close()

        matrix = self.compute_confusion_matrix(matrix_orig, matrix_predicted)
        numpy.savetxt(title + '_total_confusion_matrix.txt',
                      matrix,
                      fmt='% 4d')
        if params['problem_type'].lower() == 'classification':
            total_metrics_names = [
                "acc", "tn", "fp", "fn", "tp", "recall", "specificity",
                "precision", "f1", "Negative predictive value",
                "False positive rate", "False negative rate",
                "False discovery rate"
            ]
            total_metrics_scores = self.compute_total_classification_metrics(
                matrix)
            file = open(title + '_total_results.txt', 'w')
            for i in range(len(total_metrics_names)):
                file.write(
                    str(total_metrics_names[i]) + ": " +
                    str(numpy.around(total_metrics_scores[i], decimals=4)) +
                    '\n')
            file.close()

        if params['use_new_data']:
            global_score_mean_new_data = numpy.mean(
                repetitions_scores_new_data, axis=0)
            global_score_std_new_data = numpy.std(repetitions_scores_new_data,
                                                  axis=0)

            file = open(title + '_results_new_data.txt', 'w')
            for count in range(len(architecture.metrics_names)):
                file.write(
                    str(architecture.metrics_names[count]) + ": " + str(
                        numpy.around(global_score_mean_new_data[count],
                                     decimals=4)) + chr(177) +
                    str(
                        numpy.around(global_score_std_new_data[count],
                                     decimals=4)) + '\n')
            file.close()
seg_model.summary()
import keras.backend as K
from keras.optimizers import Adam
from keras.losses import binary_crossentropy

## intersection over union
def IoU(y_true, y_pred, eps=1e-6):
    if np.max(y_true) == 0.0:
        return IoU(1-y_true, 1-y_pred) ## empty image; calc IoU of zeros
    intersection = K.sum(y_true * y_pred, axis=[1,2,3])
    union = K.sum(y_true, axis=[1,2,3]) + K.sum(y_pred, axis=[1,2,3]) - intersection
    return -K.mean( (intersection + eps) / (union + eps), axis=0)
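## Note: IoU returns the *negative* mean IoU so it can be minimized directly as a loss
## (seg_model.compile(..., loss=IoU) in fit() below relies on this).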
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, EarlyStopping, ReduceLROnPlateau
weight_path="{}_weights.best.hdf5".format('seg_model')

checkpoint = ModelCheckpoint(weight_path, monitor='val_loss', verbose=1, save_best_only=True, mode='min', save_weights_only=True)

reduceLROnPlat = ReduceLROnPlateau(monitor='val_loss', factor=0.33,
                                   patience=1, verbose=1, mode='min',
                                   min_delta=0.0001, cooldown=0, min_lr=1e-8)

early = EarlyStopping(monitor="val_loss", mode="min", verbose=2,
                      patience=20) # probably needs to be more patient, but kaggle time is limited

callbacks_list = [checkpoint, early, reduceLROnPlat]
def fit():
    seg_model.compile(optimizer=Adam(1e-3, decay=1e-6), loss=IoU, metrics=['binary_accuracy'])
    
    step_count = min(MAX_TRAIN_STEPS, train_df.shape[0]//BATCH_SIZE)
    aug_gen = create_aug_gen(make_image_gen(train_df))
    loss_history = [seg_model.fit_generator(aug_gen,
Example #33
    p.add_argument('--history')
    
    p.add_argument('--batch_size', default=4, type=int)
    p.add_argument('--epochs', default=64, type=int)
    
    p.add_argument('--cont', action='store_true')
    args = p.parse_args()
    
    train_generator, val_generator, train_batches, val_batches = \
            create_generators(args.data_directory, \
                              args.density_scale, args.sigma, args.batch_size)
    
    # Setup callbacks
    early_stopping = EarlyStopping(patience=5, verbose=2)
    model_checkpoint = ModelCheckpoint(args.model, save_best_only=True, verbose=2)
    callbacks = [early_stopping, model_checkpoint]
    if args.history is not None:
        csv_logger = CSVLogger(args.history, append=True)
        callbacks.append(csv_logger)
    
    # Load model
    custom_objects = dict(inspect.getmembers(losses, inspect.isfunction))
    model = models.load_model(args.model, custom_objects=custom_objects)
    model.summary()

    # Get score
    if args.cont:
        losses = model.evaluate_generator(val_generator, val_batches)
        val_loss_idx = model.metrics_names.index('loss')
        print('Loaded model with: %s' % ' - '.join( \
        
    def on_epoch_end(self, batch, logs={}):
        self.losses.append(logs.get('loss'))
        self.lr.append(step_decay(len(self.losses)))
        print('lr:', step_decay(len(self.losses)))

def step_decay(epoch):
    initial_lrate = LR
    drop = DROP
    epochs_drop = EPOCHS_DROP
    lrate = initial_lrate * math.pow(drop, math.floor((epoch)/epochs_drop))
    return lrate
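# Worked example (LR, DROP and EPOCHS_DROP are module constants not shown here; values below
# are illustrative): with LR=0.01, DROP=0.5, EPOCHS_DROP=10, step_decay returns 0.01 for
# epochs 0-9, 0.005 for epochs 10-19, 0.0025 for epochs 20-29, and so on.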

checkpointer = ModelCheckpoint(
    filepath='./best_weights.hdf5',
    monitor="val_categorical_accuracy",
    save_best_only=True,
    save_weights_only=False,
    verbose=2)

earlyStoper = EarlyStopping(
    monitor='val_loss', 
    mode='min', 
    restore_best_weights=True, 
    min_delta=0.75,
    # patience = 5,
    verbose=2)

# learning schedule callback
loss_history = LossHistory()
lrate = LearningRateScheduler(step_decay)
callbacks_list = [loss_history, lrate, checkpointer]