Example #1
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.optimizers import RMSprop

channels = 1  # add a single channel axis so the spectrograms fit MobileNet's 4-D input
x_train = x_train.reshape(x_train.shape[0], x_train.shape[1], x_train.shape[2],
                          channels)
x_test = x_test.reshape(x_test.shape[0], x_test.shape[1], x_test.shape[2], channels)
print(x_train.shape, y_train.shape)  # (3628, 128, 862, 1) (3628,)
print(x_test.shape, y_test.shape)  # (908, 128, 862, 1) (908,)

model = MobileNet(
    include_top=True,
    input_shape=(128, 862, 1),
    classes=2,
    pooling=None,
    weights=None,
)
model.summary()
# model.trainable = False
# model.save('C:/nmb/nmb_data/h5/5s/mobilenet_rmsprop_1.h5')

# Compile and train
op = RMSprop(learning_rate=1e-3)
batch_size = 8
es = EarlyStopping(monitor='val_loss',
                   patience=20,
                   restore_best_weights=True,
                   verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
path = 'C:/nmb/nmb_data/h5/5s/mobilenet/mobilenet_rmsprop_1.h5'
mc = ModelCheckpoint(path, monitor='val_loss', verbose=1, save_best_only=True)
model.compile(optimizer=op,
              loss="sparse_categorical_crossentropy",
              metrics=["acc"])
Example #2
import datetime

import tensorflow as tf
from tensorflow.keras import Model, Sequential
from tensorflow.keras.applications import MobileNet as MOBILENET  # alias assumed from usage below
from tensorflow.keras.layers import Dense, Dropout
from tensorflow.keras.losses import SparseCategoricalCrossentropy
from tensorflow.keras.metrics import SparseCategoricalAccuracy
from tensorflow.keras.models import load_model
from tensorflow.keras.optimizers import Adam

import utility  # project helper providing dir_empty()


def train(input_params, train_ds, test_ds, valid_ds, class_cnt):
    current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")

    # tensorboard
    train_log_dir = 'logs/gradient_tape/' + current_time + '/train'
    valid_log_dir = 'logs/gradient_tape/' + current_time + '/valid'
    test_log_dir = 'logs/gradient_tape/' + current_time + '/test'
    train_summary_writer = tf.summary.create_file_writer(train_log_dir)
    valid_summary_writer = tf.summary.create_file_writer(valid_log_dir)
    # test_summary_writer = tf.summary.create_file_writer(test_log_dir)

    # TODO: create model with hyperparams, with model_dir = '../data/models/params/current_time/'
    model_dir = '../data/models/model-' + current_time
    # Instantiate an optimizer.
    optimizer = Adam(learning_rate=0.001)
    # Instantiate a loss function.
    loss_fn = SparseCategoricalCrossentropy(from_logits=True)
    train_step = val_step = 0

    # Prepare the metrics.
    # TODO: use the same variable for all the acc metrics.
    acc_metric = SparseCategoricalAccuracy()

    if utility.dir_empty(model_dir):
        # model definition
        mobilenet = MOBILENET(include_top=False,
                              input_shape=(224, 224, 3),
                              weights='imagenet',
                              pooling='avg',
                              dropout=0.001)
        mobilenet.summary()
        # select till which layer use mobilenet.
        base_model = Model(inputs=mobilenet.input, outputs=mobilenet.output)
        base_model.summary()

        model = Sequential([
            base_model,
            Dropout(0.2),
            Dense(units=class_cnt, activation='softmax'),
        ])
        model.summary()

        epochs = 200
        for epoch in range(epochs):
            print("\nStart of epoch %d" % (epoch,))
            for batch_idx, (x_batch_train, y_batch_train) in enumerate(train_ds):
                with tf.GradientTape() as tape:
                    # forward pass
                    logits = model(x_batch_train, training=True)

                    # compute loss for mini batch
                    loss_value = loss_fn(y_batch_train, logits)

                grads = tape.gradient(loss_value, model.trainable_weights)

                optimizer.apply_gradients(zip(grads, model.trainable_weights))

                # Update training metric.
                acc_metric.update_state(y_batch_train, logits)

                with train_summary_writer.as_default():
                    # TODO: add the metrics for the test set too.
                    # TODO: log the mean loss over the epoch;
                    #       loss_value is only the last batch's loss.
                    tf.summary.scalar('loss', loss_value, step=train_step)
                    tf.summary.scalar('accuracy', acc_metric.result(), step=train_step)
                    train_step += 1

                if batch_idx % 10 == 0:
                    print("Training loss for one batch at step %d: %.4f" % (batch_idx, float(loss_value)))

            # Display metrics at the end of each epoch.
            print("Training acc over epoch: %.4f" % (float(acc_metric.result()),))

            # Reset training metrics at the end of each epoch
            acc_metric.reset_states()


            # Iterate over the validation set.
            for batch_idx, (x_batch_val, y_batch_val) in enumerate(valid_ds):
                # val_logits: y_pred of the validation. 
                val_logits = model(x_batch_val, training=False)
                loss = loss_fn(y_batch_val, val_logits)
                # Update val metrics
                acc_metric.update_state(y_batch_val, val_logits)

                with valid_summary_writer.as_default():
                    tf.summary.scalar('loss', loss, step=val_step)
                    tf.summary.scalar('accuracy', acc_metric.result(), step=val_step)
                    val_step += 1
                
            print("Validation acc: %.4f" % (float(acc_metric.result()),))
            # print(classification_report(y_batch_val, val_logits, target_names=labels))
            acc_metric.reset_states()

        model.save(model_dir + 'model')
        
    else:  # model_dir is not empty
        print("Model already exists. Loading model...")
        model = load_model(model_dir + 'model')
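
A hedged usage sketch: the datasets here are placeholder tf.data pipelines of (image, label) batches resized to 224x224, and the names, shapes, and class count are illustrative, not from the original.

# Hypothetical inputs for illustration only.
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(32)
valid_ds = tf.data.Dataset.from_tensor_slices((x_valid, y_valid)).batch(32)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(32)
train(input_params={}, train_ds=train_ds, test_ds=test_ds, valid_ds=valid_ds,
      class_cnt=5)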
Example #3
    """
    return np.mean([apk(a, p, k) for a, p in zip(actual, predicted)])

def preds2catids(predictions):
    return pd.DataFrame(np.argsort(-predictions, axis=1)[:, :3], columns=['a', 'b', 'c'])

def top_3_accuracy(y_true, y_pred):
    return top_k_categorical_accuracy(y_true, y_pred, k=3)
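
A quick sanity check of preds2catids on a made-up score matrix (the values are illustrative only):

# Two samples, four classes; scores are invented for the demo.
demo_preds = np.array([[0.1, 0.5, 0.3, 0.1],
                       [0.6, 0.1, 0.2, 0.1]])
print(preds2catids(demo_preds))  # columns a, b, c hold each row's top-3 class ids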
STEPS = 800
EPOCHS = 16
size = 64
batchsize = 680
model = MobileNet(input_shape=(size, size, 1), alpha=1., weights=None, classes=NCATS)
model.compile(optimizer=Adam(learning_rate=0.002), loss='categorical_crossentropy',
              metrics=[categorical_crossentropy, categorical_accuracy, top_3_accuracy])
model.summary()

def draw_cv2(raw_strokes, size=256, lw=6, time_color=True):
    img = np.zeros((BASE_SIZE, BASE_SIZE), np.uint8)
    for t, stroke in enumerate(raw_strokes):
        for i in range(len(stroke[0]) - 1):
            color = 255 - min(t, 10) * 13 if time_color else 255
            _ = cv2.line(img, (stroke[0][i], stroke[1][i]),
                         (stroke[0][i + 1], stroke[1][i + 1]), color, lw)
    if size != BASE_SIZE:
        return cv2.resize(img, (size, size))
    else:
        return img
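
For reference, draw_cv2 expects each stroke as a pair of coordinate lists ([xs, ys]); a minimal sketch with made-up coordinates, assuming the module-level BASE_SIZE matches the function's default canvas size:

BASE_SIZE = 256  # assumption: the canvas size the original module defines
strokes = [[[0, 50, 100], [0, 80, 20]]]  # hypothetical single-stroke drawing
img = draw_cv2(strokes, size=64)
print(img.shape)  # (64, 64)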

def image_generator_xd(size, batchsize, ks, lw=6, time_color=True):
    while True:
        for k in np.random.permutation(ks):
            # Assumed completion from here on (the original excerpt ends at this
            # line): read the k-th shuffled CSV shard in batch-sized chunks,
            # rasterize each drawing, and yield (x, y) batches. DP_DIR and NCATS
            # are assumed module-level settings.
            filename = os.path.join(DP_DIR, 'train_k{}.csv.gz'.format(k))
            for df in pd.read_csv(filename, chunksize=batchsize):
                df['drawing'] = df['drawing'].apply(json.loads)
                x = np.zeros((len(df), size, size, 1))
                for i, raw_strokes in enumerate(df.drawing.values):
                    x[i, :, :, 0] = draw_cv2(raw_strokes, size=size, lw=lw,
                                             time_color=time_color)
                x = x / 255.  # simple scaling; the original preprocessing is unknown
                y = to_categorical(df.y, num_classes=NCATS)
                yield x, y
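
A hedged usage sketch wiring the generator into training (NCSVS, the number of CSV shards, is an assumed module-level setting, as is the shard layout above):

# Hypothetical: stream batches from shards 0..NCSVS-2.
train_datagen = image_generator_xd(size=size, batchsize=batchsize, ks=range(NCSVS - 1))
model.fit(train_datagen, steps_per_epoch=STEPS, epochs=EPOCHS, verbose=1)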
Example #4
from tensorflow.keras.applications import MobileNet
from tensorflow.keras.layers import Conv2D, Conv2DTranspose, concatenate
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam

# weighted_bce_dice_loss, dice_coef_loss, and my_IoU are project-defined
# loss/metric helpers referenced below.


def mobilenet_seg_model(pretrained_weights=None, input_size=(256, 256, 3)):
    model = MobileNet(input_shape=input_size,
                      weights='imagenet',
                      include_top=False)

    x = model.output

    encoder = Model(inputs=model.input, outputs=x)

    skip_layer_1 = encoder.get_layer('conv_pw_1_relu').output
    skip_layer_2 = encoder.get_layer('conv_pw_3_relu').output
    skip_layer_3 = encoder.get_layer('conv_pw_5_relu').output
    skip_layer_4 = encoder.get_layer('conv_pw_11_relu').output

    up6 = Conv2D(
        512,
        2,
        activation='relu',
        padding='same',
        kernel_initializer='he_normal')(
            x)  #(UpSampling2D(size = (2,2), interpolation='bilinear')(x))
    up6 = Conv2DTranspose(512,
                          2,
                          activation='relu',
                          padding='same',
                          strides=2,
                          kernel_initializer='he_normal')(up6)
    merge6 = concatenate([skip_layer_4, up6], axis=3)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge6)
    conv6 = Conv2D(512,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv6)

    up7 = Conv2D(
        256,
        2,
        activation='relu',
        padding='same',
        kernel_initializer='he_normal'
    )(conv6)  #(UpSampling2D(size = (2,2), interpolation='bilinear')(conv6))
    up7 = Conv2DTranspose(256,
                          2,
                          activation='relu',
                          padding='same',
                          strides=2,
                          kernel_initializer='he_normal')(up7)
    merge7 = concatenate([skip_layer_3, up7], axis=3)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge7)
    conv7 = Conv2D(256,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv7)

    up8 = Conv2D(
        128,
        2,
        activation='relu',
        padding='same',
        kernel_initializer='he_normal'
    )(conv7)  #(UpSampling2D(size = (2,2), interpolation='bilinear')(conv7))
    up8 = Conv2DTranspose(128,
                          2,
                          activation='relu',
                          padding='same',
                          strides=2,
                          kernel_initializer='he_normal')(up8)
    merge8 = concatenate([skip_layer_2, up8], axis=3)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge8)
    conv8 = Conv2D(128,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv8)

    up9 = Conv2D(
        64,
        2,
        activation='relu',
        padding='same',
        kernel_initializer='he_normal'
    )(conv8)  #(UpSampling2D(size = (2,2), interpolation='bilinear')(conv8))
    up9 = Conv2DTranspose(64,
                          2,
                          activation='relu',
                          padding='same',
                          strides=2,
                          kernel_initializer='he_normal')(up9)
    merge9 = concatenate([skip_layer_1, up9], axis=3)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(merge9)
    conv9 = Conv2D(64,
                   3,
                   activation='relu',
                   padding='same',
                   kernel_initializer='he_normal')(conv9)

    up10 = Conv2D(
        32,
        2,
        activation='relu',
        padding='same',
        kernel_initializer='he_normal'
    )(conv9)  #(UpSampling2D(size = (2,2), interpolation='bilinear')(conv9))
    up10 = Conv2DTranspose(32,
                           2,
                           activation='relu',
                           padding='same',
                           strides=2,
                           kernel_initializer='he_normal')(up10)
    #merge9 = concatenate([conv1,up9], axis = 3)
    conv10 = Conv2D(32,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(up10)
    conv10 = Conv2D(32,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(conv10)

    conv10 = Conv2D(16,
                    3,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(conv10)
    conv10 = Conv2D(1, 1, activation='sigmoid')(conv10)

    model = Model(inputs=model.input, outputs=conv10)

    # model.compile(optimizer=Adam(learning_rate=1e-4), loss='binary_crossentropy', metrics=[my_IoU])
    # model.compile(optimizer=Adam(learning_rate=1e-4), loss=dice_coef_loss, metrics=[my_IoU])
    model.compile(optimizer=Adam(learning_rate=1e-4),
                  loss=weighted_bce_dice_loss,
                  metrics=[my_IoU])

    model.summary()

    if pretrained_weights is not None:
        model.load_weights(pretrained_weights)

    return model
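
A hedged usage sketch: train_images and train_masks are placeholder arrays of shape (N, 256, 256, 3) and (N, 256, 256, 1), and the batch size and epoch count are illustrative, not from the original.

# Hypothetical training data for illustration only.
model = mobilenet_seg_model(input_size=(256, 256, 3))
model.fit(train_images, train_masks,
          batch_size=8,
          epochs=10,
          validation_split=0.1)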