def init():
    """Create the TF session, build the doodle classifier and load its weights.

    Returns a (model, session, graph) triple so callers (e.g. request
    handlers) can re-enter the same TF1 graph/session later.
    """
    log.info('init called')
    session = tf.InteractiveSession()
    model = MobileNet(
        input_shape=(size, size, 1),
        alpha=1.,
        weights=None,
        classes=NCATS,
    )
    model.load_weights('./doodle_classification/model/model.h5')
    model.compile(
        optimizer=Adam(lr=0.002),
        loss='categorical_crossentropy',
        metrics=[categorical_crossentropy, categorical_accuracy, top_3_accuracy],
    )
    # log.info(model.summary())
    graph = tf.get_default_graph()
    return model, session, graph
Exemplo n.º 2
0
    def get_model():
        """Build a MobileNet, load local weights, and produce seeded test data.

        Returns (mod, params, inputs, data, ref_outputs) where `inputs` maps
        the model's input name to an NCHW (shape, dtype) spec and `data`
        holds deterministic random arrays matching that spec.
        """
        from tensorflow.keras.applications import MobileNet

        net = MobileNet(include_top=True,
                        weights=None,
                        input_shape=(224, 224, 3),
                        classes=1000)
        net.load_weights("mobilenet_1_0_224_tf.h5")

        # NCHW input spec keyed by the model's input tensor name.
        inputs = {net.input_names[0]: ((1, 3, 224, 224), "float32")}

        np.random.seed(0)
        data = {}
        for name, (shape, dtype) in inputs.items():
            # uint8 inputs are sampled in [0, 1); everything else in [-1, 1).
            low, high = (0, 1) if dtype == "uint8" else (-1, 1)
            data[name] = np.random.uniform(low, high, shape).astype(dtype)

        mod, params, ref_outputs = _get_keras_model(net, inputs, data)
        return mod, params, inputs, data, ref_outputs
Exemplo n.º 3
0
# Checkpoint path for the best weights (lowest validation loss).
path = 'C:/nmb/nmb_data/h5/5s/mobilenet/mobilenet_rmsprop_1.h5'
mc = ModelCheckpoint(path, monitor='val_loss', verbose=1, save_best_only=True)

# NOTE(review): `model`, `op` (optimizer), `es` and `lr` (callbacks) are
# assumed to be defined earlier in the file — confirm against full source.
model.compile(optimizer=op,
              loss="sparse_categorical_crossentropy",
              metrics=['acc', f1_m, recall_m, precision_m])
history = model.fit(x_train,
                    y_train,
                    epochs=1000,
                    batch_size=batch_size,
                    validation_split=0.2,
                    callbacks=[es, lr, mc])

# Evaluation and prediction
# model = load_model('C:/nmb/nmb_data/h5/5s/mobilenet/mobilenet_rmsprop_1.h5')
# Restore the best checkpoint saved by ModelCheckpoint before evaluating.
model.load_weights('C:/nmb/nmb_data/h5/5s/mobilenet/mobilenet_rmsprop_1.h5')
result = model.evaluate(x_test, y_test, batch_size=8)
# `result` order follows compile(): loss first, then metrics in listed order.
print("loss : {:.5f}".format(result[0]))
print("acc : {:.5f}".format(result[1]))
print("f1_score : {:.5f}".format(result[2]))
print("recall_m : {:.5f}".format(result[3]))
print("precision_m : {:.5f}".format(result[4]))

############################################ PREDICT ####################################

# Directories of prediction audio samples; presumably F = female, M = male.
pred = ['C:/nmb/nmb_data/predict_04_26/F', 'C:/nmb/nmb_data/predict_04_26/M']

# Per-class prediction counters used by the loop below.
count_f = 0
count_m = 0

for pred_pathAudio in pred:
Exemplo n.º 4
0
# Checkpoint path for the best weights (lowest validation loss).
path = 'C:/nmb/nmb_data/h5/5s/mobilenet/mobilenet_nadam_1.h5'
mc = ModelCheckpoint(path, monitor='val_loss', verbose=1, save_best_only=True)

# NOTE(review): `model`, `op` (optimizer), `es` and `lr` (callbacks) are
# assumed to be defined earlier in the file — confirm against full source.
model.compile(optimizer=op,
              loss="sparse_categorical_crossentropy",
              metrics=['acc'])
history = model.fit(x_train,
                    y_train,
                    epochs=1000,
                    batch_size=batch_size,
                    validation_split=0.2,
                    callbacks=[es, lr, mc])

# Evaluation and prediction
# model = load_model('C:/nmb/nmb_data/h5/5s/mobilenet/mobilenet_nadam_1.h5')
# Restore the best checkpoint saved by ModelCheckpoint before evaluating.
model.load_weights('C:/nmb/nmb_data/h5/5s/mobilenet/mobilenet_nadam_1.h5')
result = model.evaluate(x_test, y_test, batch_size=8)
# `result` order follows compile(): loss first, then the 'acc' metric.
print("loss : {:.5f}".format(result[0]))
print("acc : {:.5f}".format(result[1]))

############################################ PREDICT ####################################

# Directories of prediction audio samples; presumably F = female, M = male.
pred = ['C:/nmb/nmb_data/predict_04_26/F', 'C:/nmb/nmb_data/predict_04_26/M']

# Per-class prediction counters used by the loop below.
count_f = 0
count_m = 0

for pred_pathAudio in pred:
    files = librosa.util.find_files(pred_pathAudio, ext=['wav'])
    files = np.asarray(files)
    for file in files:
Exemplo n.º 5
0

# Classifier configuration.
SIZE = 64        # side length of the model's square grayscale input
BASE_SIZE = 256  # canvas size raw strokes are rendered on (see draw below)
N_LABELS = 340   # number of output classes

# TF1-style explicit session/graph setup.
# NOTE(review): capturing the default graph alongside set_session suggests the
# model is later used from other contexts (e.g. request threads) — confirm.
sess = tf.Session()
graph = tf.get_default_graph()

set_session(sess)

model = MobileNet(input_shape=(SIZE, SIZE, 1),
                  alpha=1.,
                  weights=None,
                  classes=N_LABELS)
model.load_weights('model.h5')

# Pickled class-label list/mapping matching the model's output indices.
with open('labels', 'rb') as fp:
    labels = pickle.load(fp)

def draw(raw_strokes, size=256, lw=6, time_color=True):
    img = np.zeros((BASE_SIZE, BASE_SIZE), np.uint8)
    for t, stroke in enumerate(raw_strokes):
        for i in range(len(stroke[0]) - 1):
            color = 255 - min(t, 10) * 13 if time_color else 255
            _ = cv2.line(img, (stroke[0][i], stroke[1][i]),
                         (stroke[0][i + 1], stroke[1][i + 1]), color, lw)
    if size != BASE_SIZE:
        return cv2.resize(img, (size, size))
    else:
Exemplo n.º 6
0
def mobilenet_seg_model(pretrained_weights=None, input_size=(256, 256, 3)):
    """Build a U-Net-style binary segmentation model on a MobileNet encoder.

    An ImageNet-pretrained MobileNet (headless) encodes the input; a decoder
    of Conv2DTranspose upsampling stages with skip connections from
    intermediate MobileNet activations (conv_pw_{1,3,5,11}_relu) produces a
    single-channel sigmoid mask. The model is compiled with Adam(1e-4),
    `weighted_bce_dice_loss` and the `my_IoU` metric (defined elsewhere in
    this file).

    Args:
        pretrained_weights: optional path to a weights file loaded into the
            assembled model after compilation.
        input_size: input image shape; default (256, 256, 3).

    Returns:
        The compiled Keras Model.
    """

    def _decoder_block(inputs, skip, filters):
        # One decoder stage: 2x2 conv, 2x-strided transpose-conv upsample,
        # concatenate with the encoder skip feature map, then two 3x3 convs.
        up = Conv2D(filters,
                    2,
                    activation='relu',
                    padding='same',
                    kernel_initializer='he_normal')(inputs)
        up = Conv2DTranspose(filters,
                             2,
                             activation='relu',
                             padding='same',
                             strides=2,
                             kernel_initializer='he_normal')(up)
        merged = concatenate([skip, up], axis=3)
        conv = Conv2D(filters,
                      3,
                      activation='relu',
                      padding='same',
                      kernel_initializer='he_normal')(merged)
        conv = Conv2D(filters,
                      3,
                      activation='relu',
                      padding='same',
                      kernel_initializer='he_normal')(conv)
        return conv

    backbone = MobileNet(input_shape=input_size,
                         weights='imagenet',
                         include_top=False)

    x = backbone.output

    encoder = Model(inputs=backbone.input, outputs=x)

    # Encoder activations reused as skip connections (deepest used first).
    skip_layer_1 = encoder.get_layer('conv_pw_1_relu').output
    skip_layer_2 = encoder.get_layer('conv_pw_3_relu').output
    skip_layer_3 = encoder.get_layer('conv_pw_5_relu').output
    skip_layer_4 = encoder.get_layer('conv_pw_11_relu').output

    d = _decoder_block(x, skip_layer_4, 512)
    d = _decoder_block(d, skip_layer_3, 256)
    d = _decoder_block(d, skip_layer_2, 128)
    d = _decoder_block(d, skip_layer_1, 64)

    # Final upsampling stage has no matching encoder skip connection.
    up = Conv2D(32,
                2,
                activation='relu',
                padding='same',
                kernel_initializer='he_normal')(d)
    up = Conv2DTranspose(32,
                         2,
                         activation='relu',
                         padding='same',
                         strides=2,
                         kernel_initializer='he_normal')(up)
    out = Conv2D(32,
                 3,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(up)
    out = Conv2D(32,
                 3,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(out)
    out = Conv2D(16,
                 3,
                 activation='relu',
                 padding='same',
                 kernel_initializer='he_normal')(out)
    # 1x1 sigmoid head -> single-channel mask.
    out = Conv2D(1, 1, activation='sigmoid')(out)

    model = Model(inputs=backbone.input, outputs=out)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss=weighted_bce_dice_loss,
                  metrics=[my_IoU])

    model.summary()

    if pretrained_weights is not None:
        model.load_weights(pretrained_weights)

    return model
Exemplo n.º 7
0
# QuickDraw-style training configuration.
NCATS = 340              # number of doodle categories
DP_DIR = 'shuffled_csv/' # directory holding the shuffled training CSV shards
np.random.seed(seed=1987)
tf.set_random_seed(seed=1987)

STEPS = 1000    # steps per epoch
EPOCHS = 100
size = 128      # input image side length
batchsize = 512

print("Setting up MobileNet")
model = MobileNet(input_shape=(size, size, 1),
                  alpha=1.,
                  weights=None,
                  classes=NCATS)
# Resume from the best checkpoint of a previous run.
model.load_weights('models/mobilenet-best-run.h5')

model.compile(
    optimizer=Adam(lr=0.008),
    loss='categorical_crossentropy',
    metrics=[categorical_crossentropy, categorical_accuracy, top_3_accuracy])

# Validation set: the last CSV shard, held out from training.
# NOTE(review): `NCSVS` and `df_to_image_array_xd` are assumed to be defined
# earlier in the file — confirm against full source.
valid_df = pd.read_csv(os.path.join(DP_DIR,
                                    'train_k{}.csv.gz'.format(NCSVS - 1)),
                       nrows=35000)
x_valid = df_to_image_array_xd(valid_df, size)
y_valid = keras.utils.to_categorical(valid_df.y, num_classes=NCATS)
print(x_valid.shape, y_valid.shape)
print('Validation array memory {:.2f} GB'.format(x_valid.nbytes / 1024.**3))

train_datagen = image_generator_xd(size=size,
Exemplo n.º 8
0
# Halve the learning rate when val_loss plateaus for 10 epochs.
# FIX: the keyword was misspelled `vactor=0.5`, so the intended 0.5 reduction
# factor was never applied (depending on the Keras version, the bad kwarg is
# either rejected or silently ignored, leaving the default factor).
lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=10, verbose=1)
# Checkpoint path for the best weights (lowest validation loss).
path = 'C:/data/modelcheckpoint/mobilenet_rmsprop_10s.h5'
mc = ModelCheckpoint(path, monitor='val_loss', verbose=1, save_best_only=True)

# NOTE(review): `model`, `op` (optimizer) and `es` are assumed to be defined
# earlier in the file — confirm against full source.
model.compile(optimizer=op,
              loss="sparse_categorical_crossentropy",
              metrics=['acc', f1_m, recall_m, precision_m])
history = model.fit(x_train,
                    y_train,
                    epochs=1000,
                    batch_size=batch_size,
                    validation_split=0.2,
                    callbacks=[es, lr, mc])

# Evaluation and prediction
# Restore the best checkpoint saved by ModelCheckpoint before evaluating.
model.load_weights('C:/data/modelcheckpoint/mobilenet_rmsprop_10s.h5')
result = model.evaluate(x_test, y_test, batch_size=8)
# `result` order follows compile(): loss first, then metrics in listed order.
print("loss : {:.5f}".format(result[0]))
print("acc : {:.5f}".format(result[1]))
print("f1_score : {:.5f}".format(result[2]))
print("recall_m : {:.5f}".format(result[3]))
print("precision_m : {:.5f}".format(result[4]))

############################################ PREDICT ####################################

# Directories of prediction audio samples; presumably F = female, M = male.
pred = ['D:/nmb/0602_10s/predict_10/10s_F', 'D:/nmb/0602_10s/predict_10/10s_M']

# Per-class prediction counters used by the loop below.
count_f = 0
count_m = 0

for pred_pathAudio in pred: