Example #1
import pdb   # kept for the commented-out pdb.set_trace() call below

# Standard Keras 1.x imports used in this example.
from keras.applications.vgg16 import VGG16
from keras.layers import Input
from keras.models import load_model
# ImageDataGenerator and center_crop come from a customized image-preprocessing
# module (it adds config/set_pipeline/fit_generator and the read_formats
# argument to flow_from_directory); its import path is not shown in the
# original snippet.

# Rescale pixel values to [0, 1] and configure the custom preprocessing
# pipeline to center-crop every image to 224x224.
test_datagen = ImageDataGenerator(rescale=1. / 255)

test_datagen.config['center_crop_size'] = (224, 224)
test_datagen.set_pipeline([center_crop])

dgdx_val = test_datagen.flow_from_directory(
    '/home/nancy/mvmt_vid_dataset/test/',
    read_formats={'png'},
    # Resize the 256x192 frames so the short side becomes 224 (about 298x224);
    # the center-crop pipeline then keeps the central 224x224 region.
    target_size=(int(256 * (224 / 192.0)), int(192 * (224 / 192.0))),
    batch_size=32,
    shuffle=False,    # keep order fixed so predictions line up with filenames
    class_mode=None)  # no labels; this generator is only used for prediction

# Custom extension: fit the generator's preprocessing (e.g. normalization
# statistics) over 100 iterations of the validation data.
test_datagen.fit_generator(dgdx_val, nb_iter=100)

validation_generator = dgdx_val

# NOTE: base_model is built here but never used below; the trained model is
# loaded from disk instead.
base_model = VGG16(input_tensor=Input(shape=(224, 224, 3)),
                   weights='imagenet',
                   include_top=False)
#base_model = VGG16(input_tensor=(Input(shape=(224, 224, 3))), include_top=False)

#for layer in base_model.layers[:10]:
#    layer.trainable = False

# Load the previously trained model and run it over the validation generator.
model = load_model("my_model.h5")

# Keras 1.x predict_generator: the second argument is the number of samples
# to predict, here 5 batches of 32.
results = model.predict_generator(validation_generator, 32 * 5)
#pdb.set_trace()
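
As a usage note, here is a minimal sketch (not part of the original snippet) of pairing the raw predictions with the files they came from. It assumes the customized generator preserves the standard Keras DirectoryIterator attribute `filenames`; shuffle=False above keeps the ordering aligned.

# Hypothetical post-processing: print each prediction next to its source file.
for fname, pred in zip(validation_generator.filenames[:32 * 5], results):
    print("%s %s" % (fname, pred))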
Example #2
from keras.applications.vgg16 import VGG16
from keras.layers import Input, Flatten, Dense, BatchNormalization, Activation, Dropout

# train_datagen and test_datagen are customized ImageDataGenerator instances
# (created as in Example #1); their construction is not shown in this snippet.
dgdx = train_datagen.flow_from_directory(
        '/home/wangnxr/dataset/vid_offset_0/train/',
        read_formats={'png'},
        target_size=(300, 224),
        batch_size=32,
        class_mode='binary')

dgdx_val = test_datagen.flow_from_directory(
        '/home/wangnxr/dataset/vid_offset_0/test/',
        read_formats={'png'},
        target_size=(300, 224),
        batch_size=32,
        class_mode='binary')

# Custom extension: fit each generator's preprocessing over 96 iterations.
train_datagen.fit_generator(dgdx, nb_iter=96)
test_datagen.fit_generator(dgdx_val, nb_iter=96)

train_generator=dgdx
validation_generator=dgdx_val

# Note the channels-first input shape (3, 224, 224), i.e. Theano-style dim
# ordering, unlike the channels-last shape used in Example #1.
base_model = VGG16(input_tensor=Input(shape=(3, 224, 224)), weights='imagenet', include_top=False)
#base_model = VGG16(input_tensor=(Input(shape=(224, 224, 3))), include_top=False)

# Classification head on top of the VGG16 convolutional base.
x = base_model.output
x = Flatten(name='flatten')(x)
x = Dense(1024, name='fc1')(x)
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
x = Dense(256, name='fc2')(x)
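
The snippet ends in the middle of the classification head. Purely as a hedged sketch (not the original author's code), this is how such a binary head is typically finished and trained with the Keras 1.x generator API; the optimizer, epoch count, and sample counts below are illustrative assumptions.

from keras.models import Model
from keras.optimizers import SGD

# Finish the head following the same BatchNorm/ReLU/Dropout pattern as fc1.
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid', name='predictions')(x)

model = Model(input=base_model.input, output=predictions)  # Keras 1.x keyword args
model.compile(optimizer=SGD(lr=1e-4, momentum=0.9),
              loss='binary_crossentropy',
              metrics=['accuracy'])

# 96 batches of 32 per epoch mirrors the nb_iter=96 used above (an assumption).
model.fit_generator(train_generator,
                    samples_per_epoch=32 * 96,
                    nb_epoch=10,
                    validation_data=validation_generator,
                    nb_val_samples=32 * 96)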