import time

from keras.layers import Add, Conv2D, Conv2DTranspose, Dense, Input, MaxPooling2D
from keras.models import Model
from keras.optimizers import Adam

import build_data  # project-local helper for downloading/loading the KITTI data

# DATA_DIRECTORY, TRAINING_DATA_DIRECTORY, IMAGE_SHAPE, LEARNING_RATE,
# BATCH_SIZE and EPOCHS are module-level constants defined elsewhere in the project.


def run(model):
    # Download the KITTI road-segmentation dataset (if needed) and load the training pairs.
    build_data.maybe_download_training_img(DATA_DIRECTORY)
    x, y = build_data.get_data(TRAINING_DATA_DIRECTORY, IMAGE_SHAPE)

    if model is None:
        inputs = Input(shape=(IMAGE_SHAPE[0], IMAGE_SHAPE[1], 3))

        # Block 1 (VGG16-style encoder)
        block1_conv1 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(inputs)
        block1_conv2 = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(block1_conv1)
        block1_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(block1_conv2)

        # Block 2
        block2_conv1 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(block1_pool)
        block2_conv2 = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(block2_conv1)
        block2_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(block2_conv2)

        # Block 3
        block3_conv1 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(block2_pool)
        block3_conv2 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(block3_conv1)
        block3_conv3 = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(block3_conv2)
        block3_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(block3_conv3)

        # Block 4
        block4_conv1 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(block3_pool)
        block4_conv2 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(block4_conv1)
        block4_conv3 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(block4_conv2)
        block4_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(block4_conv3)

        # Block 5
        block5_conv1 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(block4_pool)
        block5_conv2 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(block5_conv1)
        block5_conv3 = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(block5_conv2)
        block5_pool = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(block5_conv3)

        # FCN-8s-style decoder: 1x1 convolutions reduce each feature map to 2 class
        # channels, transposed convolutions upsample, and Add() fuses the skip
        # connections from pool4 and pool3.
        pool5_conv1x1 = Conv2D(2, (1, 1), activation='relu', padding='same')(block5_pool)
        upsample_1 = Conv2DTranspose(2, kernel_size=(4, 4), strides=(2, 2), padding="same")(pool5_conv1x1)
        pool4_conv1x1 = Conv2D(2, (1, 1), activation='relu', padding='same')(block4_pool)
        add_1 = Add()([upsample_1, pool4_conv1x1])

        upsample_2 = Conv2DTranspose(2, kernel_size=(4, 4), strides=(2, 2), padding="same")(add_1)
        pool3_conv1x1 = Conv2D(2, (1, 1), activation='relu', padding='same')(block3_pool)
        add_2 = Add()([upsample_2, pool3_conv1x1])

        # Final 8x upsampling back to the input resolution, then a per-pixel
        # softmax over the two classes (road / not road).
        upsample_3 = Conv2DTranspose(2, kernel_size=(16, 16), strides=(8, 8), padding="same")(add_2)
        output = Dense(2, activation='softmax')(upsample_3)

        model = Model(inputs, output, name='multinet_seg')

    adam = Adam(lr=LEARNING_RATE)
    model.compile(loss='categorical_crossentropy', optimizer=adam, metrics=['accuracy'])
    model.fit(x, y, batch_size=BATCH_SIZE, epochs=EPOCHS)
    model.save('trained_model/trained_model' + str(time.time()) + '.h5')
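For reference, a minimal inference sketch against the saved segmentation model might look like the following. The function name, the PIL-based image loading, and the assumption that build_data.get_data applies no preprocessing beyond resizing are all illustrative, not part of the original script.

import numpy as np
from keras.models import load_model
from PIL import Image

def predict_road_mask(model_path, image_path, image_shape):
    """Sketch: load a saved .h5 model and return a per-pixel road / not-road mask."""
    model = load_model(model_path)
    # Resize the input to the (height, width) the network was trained on.
    img = Image.open(image_path).resize((image_shape[1], image_shape[0]))
    batch = np.expand_dims(np.array(img, dtype=np.float32), axis=0)
    # The model outputs per-pixel class probabilities; argmax over the two
    # channels gives a binary mask. Replicate any preprocessing that
    # build_data.get_data performed during training before trusting the result.
    probs = model.predict(batch)[0]
    return np.argmax(probs, axis=-1).astype(np.uint8)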
from keras.applications.vgg16 import VGG16
from keras.layers import Dense, Dropout, Flatten
from keras.models import Model, Sequential
from keras.optimizers import Adam
from keras.utils import np_utils

# X_train, X_test, y_train, y_test, NUM_CLASSES and IMAGE_SIZE are assumed to be
# prepared earlier in the script.

# One-hot encode the labels and scale pixel values to [0, 1].
y_train = np_utils.to_categorical(y_train, NUM_CLASSES)
y_test = np_utils.to_categorical(y_test, NUM_CLASSES)
X_train = X_train.astype("float") / 255.0
X_test = X_test.astype("float") / 255.0

# Model definition: VGG16 convolutional base (ImageNet weights, no classifier head)
# with a small fully connected classifier stacked on top.
model = VGG16(weights='imagenet', include_top=False, input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3))

top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(NUM_CLASSES, activation='softmax'))

model = Model(inputs=model.input, outputs=top_model(model.output))

# Freeze the first 15 layers of the VGG16 base (through block4_pool) so only
# block5 and the new classifier head are fine-tuned.
for layer in model.layers[:15]:
    layer.trainable = False

opt = Adam(lr=0.0001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=['accuracy'])

model.fit(X_train, y_train, batch_size=16, epochs=5)

results = model.evaluate(X_test, y_test, batch_size=16)
print("test loss, test acc:", results)

model.save("./AnimalJudgmentModel.h5")
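A prediction call against the saved classifier could look roughly like this; the sample file name, the reuse of IMAGE_SIZE, and the absence of a class-name list are assumptions for illustration only.

import numpy as np
from keras.models import load_model
from keras.preprocessing import image

model = load_model("./AnimalJudgmentModel.h5")
# Load and preprocess a single image with the same 0-1 scaling used in training.
img = image.load_img("sample.jpg", target_size=(IMAGE_SIZE, IMAGE_SIZE))
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)
probs = model.predict(x)[0]
print("predicted class index:", np.argmax(probs), "probabilities:", probs)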