# Esempio n. 1
def run(model):
    """Compile, train, and evaluate *model* on the global ``data`` mapping.

    Assumes a module-level ``data`` dict with keys 'train_img', 'train_lb',
    'test_img', 'test_lb' holding arrays — TODO confirm against caller.

    Parameters
    ----------
    model : a Keras model instance (uncompiled).

    Returns
    -------
    The result of ``model.evaluate`` (loss, plus accuracy per the metrics).
    """
    ## compile
    model.compile(optimizer="Adam",
                  loss="categorical_crossentropy",
                  metrics=['accuracy'])
    ## fit — Keras keyword is `epochs`; `epoch=10` raised a TypeError
    model.fit(data['train_img'], data['train_lb'], epochs=10, batch_size=10)
    ## evaluate — method is `evaluate`; `evalute` raised an AttributeError
    score = model.evaluate(data['test_img'], data['test_lb'], batch_size=10)
    # Return the score instead of discarding it (backward-compatible:
    # existing callers that ignore the return value are unaffected).
    return score
# Esempio n. 2
# Load CIFAR-10: 32x32 RGB images with integer labels in [0, 9].
(cifar10_x_train, cifar10_y_train), (cifar10_x_test, cifar10_y_test) = cifar10.load_data()

# Scale pixel values to [0, 1]. Fixed: the original scaled `x_train`/`x_test`
# before they existed (NameError) instead of the freshly loaded arrays.
x_train = cifar10_x_train.astype('float32') / 255
x_test = cifar10_x_test.astype('float32') / 255

# One-hot encode the integer labels (10 classes for CIFAR-10).
y_train = np_utils.to_categorical(cifar10_y_train)
y_test = np_utils.to_categorical(cifar10_y_test)

# Derive the class count from the encoded labels rather than hard-coding it.
num_classes = y_train.shape[1]

model = Sequential()
# `input_shape` is required on the first layer so the model is built and
# `model.summary()` can run before `fit()` (the original omitted it).
model.add(Conv2D(32, (3, 3), padding='same', input_shape=x_train.shape[1:]))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dense(323))
model.add(Dropout(0.5))
# Output width must match the one-hot targets: the original `Dense(221)`
# cannot train against 10-way labels under categorical_crossentropy.
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.summary()


model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['acc'])

model.fit(x_train, y_train, epochs=1, batch_size=256)

# evaluate() returns [loss, acc]; index 1 is the accuracy metric.
print("Score:", model.evaluate(x_test, y_test)[1])

# Esempio n. 3
from keras.layers import Input, Dense
# Fixed: the class is `Model` (capital M); `from keras.models import model`
# raised an ImportError, and `Model` was used below anyway.
from keras.models import Model

# This returns a tensor. Fixed: the original bound it to `input` (shadowing
# the builtin) but referenced it as `inputs` below — a NameError.
inputs = Input(shape=(784,))

# A layer instance is callable on a tensor, and returns a tensor.
x = Dense(64, activation='relu')(inputs)
y = Dense(64, activation='relu')(x)
# Fixed: chain through `y` so the second hidden layer is actually part of
# the graph (the original applied the softmax to `x`, leaving `y` dangling,
# contradicting the "three Dense layers" comment below).
predictions = Dense(10, activation='softmax')(y)

# This creates a model that includes
# the Input layer and three Dense layers
model = Model(inputs=inputs, outputs=predictions)
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# NOTE(review): `data` and `labels` are not defined in this file —
# presumably supplied elsewhere; confirm before running.
model.fit(data, labels)  # start training

# all models are callable, just like layers

'''
With the functional API it is easy to reuse a trained model:
models can be treated like layers, by calling them on a tensor.
Not only the architecture is reused — the weights are reused too.
'''

x = Input(shape=(784,))
# This works, and returns the 10-way softmax we defined above
y = model(x)
# Esempio n. 4
# Show one randomly chosen sample image.
# Fixed: `np.random.choise` is a typo — the function is `np.random.choice`.
plt.imshow(image.img_to_array(image.load_img(np.random.choice(image_files))))
plt.show()

# Pre-trained VGG16 as a frozen feature extractor (no top classifier).
vgg = VGG16(input_shape=IMAGE_SIZE + [3], weights='imagenet', include_top=False)

# Freeze the convolutional base so only the new head is trained.
for layer in vgg.layers:
    layer.trainable = False

x = Flatten()(vgg.output)

# One output unit per class directory.
prediction = Dense(len(folders), activation='softmax')(x)

model = Model(inputs=vgg.input, outputs=prediction)
model.summary()

# Fixed: the compile keyword is `metrics` (plural); `metric=...` was
# silently ignored, so no accuracy was ever reported.
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])

# Data augmentation pipeline; preprocess_input applies VGG's normalization.
gen = ImageDataGenerator(
  rotation_range=20,
  width_shift_range=0.1,
  height_shift_range=0.1,
  shear_range=0.1,
  zoom_range=0.2,
  horizontal_flip=True,
  vertical_flip=True,
  preprocessing_function=preprocess_input
)

# NOTE(review): shuffling the test generator makes per-sample comparison
# against predictions harder; consider shuffle=False for evaluation.
train_gen = gen.flow_from_directory(train_path, target_size=IMAGE_SIZE, shuffle=True, batch_size=batch_size)
test_gen = gen.flow_from_directory(test_path, target_size=IMAGE_SIZE, shuffle=True, batch_size=batch_size)