Example #1
    if config.l2 > 0:
        model = apply_kernel_regularizer(model,
                                         tf.keras.regularizers.l2(config.l2))
    model.compile(optimizer=opt, loss=custom_loss, metrics=[d_total, cos_sim])
    model.summary()
    """ DATA """
    train_set = make_dataset(config, training=True)
    test_set = make_dataset(config, training=False)
    """ TRAINING """
    callbacks = [
        CSVLogger(NAME.replace('.h5', '.log'), append=True),
        LearningRateScheduler(custom_scheduler(4096, TOTAL_EPOCH / 12)),
        SWA(start_epoch=TOTAL_EPOCH // 2, swa_freq=2),
        ModelCheckpoint(NAME,
                        monitor='val_d_total',
                        save_best_only=True,
                        verbose=1),
        TerminateOnNaN()
    ]

    model.fit(train_set,
              epochs=TOTAL_EPOCH,
              batch_size=BATCH_SIZE,
              steps_per_epoch=config.steps_per_epoch,
              validation_data=test_set,
              validation_steps=12,
              callbacks=callbacks)

    model.save(NAME.replace('.h5', '_SWA.h5'))
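`custom_scheduler` is project code rather than a Keras built-in. Judging only from how it is called here (a dimension-like constant and a warmup length in epochs), a plausible minimal sketch is a Transformer-style warmup/decay schedule; the signature and formula below are assumptions, not the project's actual implementation.

def custom_scheduler(d_model, warmup_epochs):
    # Assumed: Transformer-style warmup followed by inverse-sqrt decay,
    # returned as a function usable with tf.keras.callbacks.LearningRateScheduler.
    def schedule(epoch, lr=None):
        step = epoch + 1  # avoid division by zero at epoch 0
        return d_model ** -0.5 * min(step ** -0.5, step * warmup_epochs ** -1.5)
    return schedule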
Example #2
    test_set = make_dataset(config, training=False)
    print(train_set)
    for x, y in train_set.take(1):
        print(tf.shape(x), tf.shape(y))
    """ TRAINING """
    from train_frame import custom_scheduler
    callbacks = [
        CSVLogger(NAME.replace('.h5', '.log'), append=True),
        # LearningRateScheduler(custom_scheduler(config.n_dim*8, config.epochs/10)),
        ReduceLROnPlateau(monitor='val_loss',
                          factor=config.lr_factor,
                          patience=config.lr_patience),
        SWA(start_epoch=config.epochs // 2, swa_freq=2),
        ModelCheckpoint(NAME,
                        monitor='val_d_total',
                        mode='min',
                        save_best_only=True),
        TerminateOnNaN()
    ]

    model.fit(train_set,
              epochs=config.epochs,
              batch_size=config.batch_size,
              steps_per_epoch=config.steps_per_epoch,
              validation_data=test_set,
              validation_steps=16,
              callbacks=callbacks)

    # TODO : BN
    model.save(NAME.replace('.h5', '_SWA.h5'))
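Both examples build their input pipelines with a project-specific `make_dataset(config, training=...)` helper. Below is a minimal tf.data sketch of what such a helper might look like; the `.npy` paths and the `config` fields it reads are assumptions made for illustration.

import numpy as np
import tensorflow as tf

def make_dataset(config, training=True):
    # Hypothetical: config.data_dir and config.batch_size are assumed fields,
    # and the *_x.npy / *_y.npy file layout is assumed as well.
    split = 'train' if training else 'test'
    x = np.load(f'{config.data_dir}/{split}_x.npy')
    y = np.load(f'{config.data_dir}/{split}_y.npy')
    dataset = tf.data.Dataset.from_tensor_slices((x, y))
    if training:
        # repeat() so that steps_per_epoch, not the array size, defines an epoch
        dataset = dataset.shuffle(len(x)).repeat()
    return dataset.batch(config.batch_size).prefetch(tf.data.AUTOTUNE)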
Example #3
            #                   mode='max'),
            SWA(start_epoch=TOTAL_EPOCH // 2, swa_freq=2),
            ModelCheckpoint(NAME,
                            monitor='val_auc',
                            mode='max',
                            save_best_only=True),
            TensorBoard(log_dir=f'./logs/{NAME.replace(".h5", "")}',
                        histogram_freq=0,
                        profile_batch=2),
            TerminateOnNaN()
        ]

        model.fit(train_dataset,
                  epochs=TOTAL_EPOCH,
                  batch_size=BATCH_SIZE,
                  validation_data=(test_x, test_y),
                  steps_per_epoch=config.steps_per_epoch,
                  class_weight=class_weight,
                  callbacks=callbacks)

        result = model.evaluate(test_x, test_y, verbose=1)
        with open(NAME.replace('.h5', '.log'), 'a') as f:
            f.write(f'\n{result}\n')

    # SWA: with a zero learning rate the weights stay fixed, so this fit only
    # re-estimates the BatchNorm moving statistics for the averaged weights
    model.compile(tf.keras.optimizers.SGD(0.),
                  'sparse_categorical_crossentropy')
    model.fit(test_x, test_y, batch_size=BATCH_SIZE)

    result = model.evaluate(test_x, test_y, verbose=1)
    with open(NAME.replace('.h5', '.log'), 'a') as f:
        f.write(f'\n{result}\n')
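The `SWA(start_epoch=..., swa_freq=...)` callback used throughout these examples is not a stock Keras callback. Below is a minimal sketch of stochastic weight averaging (Izmailov et al., 2018) with that interface; the project's real implementation may differ in its details.

import tensorflow as tf

class SWA(tf.keras.callbacks.Callback):
    # Minimal sketch: keep a running average of the weights from start_epoch on,
    # sampling every swa_freq epochs, and swap the average in when training ends.
    def __init__(self, start_epoch, swa_freq=1):
        super().__init__()
        self.start_epoch = start_epoch
        self.swa_freq = swa_freq
        self.swa_weights = None
        self.n_averaged = 0

    def on_epoch_end(self, epoch, logs=None):
        if epoch < self.start_epoch or (epoch - self.start_epoch) % self.swa_freq:
            return
        weights = self.model.get_weights()
        if self.swa_weights is None:
            self.swa_weights = weights
        else:
            self.swa_weights = [(avg * self.n_averaged + w) / (self.n_averaged + 1)
                                for avg, w in zip(self.swa_weights, weights)]
        self.n_averaged += 1

    def on_train_end(self, logs=None):
        if self.swa_weights is not None:
            # Averaged weights invalidate the BatchNorm moving statistics,
            # hence the zero-LR fit above (or the forward-pass loop in Example #4).
            self.model.set_weights(self.swa_weights)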
Example #4
                              mode='max'),
            SWA(start_epoch=TOTAL_EPOCH//2, swa_freq=2),
            ModelCheckpoint(NAME,
                            monitor='val_auc',
                            mode='max',
                            save_best_only=True),
            TensorBoard(log_dir=f'./logs/{NAME.replace(".h5", "")}',
                        histogram_freq=0,
                        profile_batch=2),
            TerminateOnNaN()
        ]

        model.fit(train_dataset,
                  epochs=TOTAL_EPOCH,
                  batch_size=BATCH_SIZE,
                  validation_data=(val_x, val_y),
                  steps_per_epoch=x.shape[0]//BATCH_SIZE,
                  class_weight=class_weight,
                  callbacks=callbacks)

        result = model.evaluate(test_x, test_y, verbose=1)
        with open(NAME.replace('.h5', '.log'), 'a') as f:
            f.write(f'\n{result}\n')

    # For SWA: forward passes only, to refresh the BatchNorm statistics for the averaged weights
    repeat = x.shape[0] // BATCH_SIZE
    for x, y in train_dataset:
        model(x, training=True)
        repeat -= 1
        if repeat <= 0:
            break
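The loop above discards the model outputs; with no optimizer step involved, the `training=True` forward passes update only the BatchNorm moving mean and variance, achieving the same re-estimation for the averaged weights as the zero-learning-rate fit in Example #3.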
Example #5
x_test = x_test[:5138,:,:,:]
print(x_train.shape)
print(y_train.shape)

print(x_test.shape)
print(y_test.shape)
print('done')

# 2. Model
model = Sequential()
model.add(EfficientNetB0(include_top=False, pooling='avg'))  # pooling='avg' so the Dense head receives a 2D tensor
model.add(Dense(20, activation='softmax', name='s1'))
model.summary()
# 3. Training
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['acc'])
model.fit(x_train, y_train, batch_size=12, epochs=2, validation_split=0.1)


# 4. Evaluation, prediction

loss, acc = model.evaluate(x_train, y_train)

y_predict = model.predict(x_test)
print("loss : ", loss)
print("acc : ", acc)

print(y_predict)
print('really done')
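Example #5 starts mid-script, so its imports are not shown. The standard tf.keras imports below would make the model-building part runnable, and an explicit pooling layer is an equivalent alternative to the `pooling='avg'` argument used above; the head is otherwise unchanged.

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
from tensorflow.keras.applications import EfficientNetB0

model = Sequential()
model.add(EfficientNetB0(include_top=False))
model.add(GlobalAveragePooling2D())  # equivalent to pooling='avg' in the backbone
model.add(Dense(20, activation='softmax', name='s1'))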