def main():
    (waves, labels), (x_validate, y_validate) = load_data()

    x_mean = 4.3854903e-05  # np.concatenate(waves).mean()
    x_std = 0.042366702     # np.concatenate(waves).std()

    waves = tuple(map(lambda wave: (wave - x_mean) / x_std, waves))
    x_validate = (x_validate - x_mean) / x_std

    model = Model(*juxt(identity, computational_graph(max(y_validate) + 1))(
        Input(shape=x_validate.shape[1:])))
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    model.summary()

    batch_size = 50
    epoch_size = 500

    results = model.fit_generator(
        data_generator(waves, labels, batch_size),
        steps_per_epoch=8000 // batch_size,
        epochs=epoch_size,
        validation_data=(x_validate, y_validate),
        callbacks=[ReduceLROnPlateau(factor=0.5, patience=50, verbose=1)])

    with open('./results/history.pickle', 'wb') as f:
        pickle.dump(results.history, f)

    save_model(model, './results/model.h5')

    del model
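# Hedged sketch (added for illustration, not from the original file): the hard-coded
# x_mean / x_std above are the precomputed global statistics described by the inline
# comments. Recomputing them whenever the dataset changes could look like this
# (compute_normalization_stats is a hypothetical helper name):
import numpy as np

def compute_normalization_stats(waves):
    flat = np.concatenate(waves)  # waves is a sequence of 1-D sample arrays
    return flat.mean(), flat.std()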
def main():
    (x_train, y_train), (x_validation, y_validation) = load_data()

    model = Model(*juxt(identity, computational_graph(y_train.shape[1]))(
        Input(shape=x_train.shape[1:])))
    model.compile(loss='categorical_crossentropy',
                  optimizer=SGD(momentum=0.9),
                  metrics=['accuracy'])
    model.summary()
    # plot_model(model, to_file='./results/model.png')

    train_data = ImageDataGenerator(featurewise_center=True,
                                    featurewise_std_normalization=True,
                                    width_shift_range=0.125,
                                    height_shift_range=0.125,
                                    horizontal_flip=True)
    validation_data = ImageDataGenerator(featurewise_center=True,
                                         featurewise_std_normalization=True)

    for data in (train_data, validation_data):
        data.fit(x_train)  # For practical use, fitting the featurewise statistics on x_validation is not an option, so both generators are fitted on x_train.

    batch_size = 100
    epochs = 200

    results = model.fit_generator(
        train_data.flow(x_train, y_train, batch_size=batch_size),
        steps_per_epoch=x_train.shape[0] // batch_size,
        epochs=epochs,
        callbacks=[LearningRateScheduler(partial(getitem, tuple(take(epochs, concat(repeat(0.01, 1), repeat(0.1, 99), repeat(0.01, 50), repeat(0.001))))))],
        validation_data=validation_data.flow(x_validation, y_validation, batch_size=batch_size),
        validation_steps=x_validation.shape[0] // batch_size)

    with open('./results/history.pickle', 'wb') as f:
        pickle.dump(results.history, f)

    save_model(model, './results/model.h5')

    del model
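# Hedged illustration (added): the LearningRateScheduler callback above indexes into a
# precomputed tuple of per-epoch learning rates. Expanded, the schedule is
# epoch 0 -> 0.01 (warm-up), epochs 1-99 -> 0.1, epochs 100-149 -> 0.01,
# epochs 150-199 -> 0.001.
from functools import partial
from operator import getitem

from funcy import concat, repeat, take

epochs = 200
schedule = partial(getitem, tuple(take(epochs, concat(repeat(0.01, 1), repeat(0.1, 99), repeat(0.01, 50), repeat(0.001)))))
assert (schedule(0), schedule(50), schedule(120), schedule(199)) == (0.01, 0.1, 0.01, 0.001)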
def draw_wave(self, wave):
    min_ys, max_ys = zip(*map(juxt(np.min, np.max),
                              np.array_split(wave * 128 + 128, 256)))

    for object_id in self.wave_canvas.find_all():
        self.wave_canvas.delete(object_id)

    for x, min_y, max_y in zip(count(), min_ys, max_ys):
        self.wave_canvas.create_line(x, min_y, x, max_y)
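# Hedged illustration (added): draw_wave scales samples to pixel rows around 128,
# splits them into 256 horizontal buckets, and draws one vertical min-max line per
# bucket. The envelope computation on its own, with a stand-in waveform:
import numpy as np
from funcy import juxt

wave = np.sin(np.linspace(0, 20 * np.pi, 16000))   # hypothetical input signal
columns = np.array_split(wave * 128 + 128, 256)    # one chunk per canvas x coordinate
envelope = [tuple(juxt(np.min, np.max)(chunk)) for chunk in columns]
assert len(envelope) == 256 and all(lo <= hi for lo, hi in envelope)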
def ljuxt(*fs):
    return rcompose(juxt(*fs), list)
def ljuxt(*fs):
    # Keras seems to dislike being passed generators, and funcy on Python 3 uses
    # generators aggressively, so this is a juxt that returns a list instead.
    return rcompose(juxt(*fs), list)
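# Hedged usage example (added, assuming the funcy imports these snippets rely on and
# the ljuxt definition above): juxt(...)(x) yields lazily on Python 3, while ljuxt
# forces the results into a list that Keras and plain indexing can consume.
from funcy import juxt

assert ljuxt(min, max, sum)([3, 1, 2]) == [1, 3, 6]
assert list(juxt(min, max, sum)([3, 1, 2])) == [1, 3, 6]  # same values, but lazy without list()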
def get_min_max_coordinate():
    coordinates = tuple(mapcat(lambda shape: (shape.bb.left, shape.bb.top, shape.bb.right, shape.bb.bottom),
                               filter(lambda shape: shape.body.body_type == pymunk.Body.DYNAMIC,
                                      space.shapes)))

    return juxt(max, min)(coordinates)
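# Hedged illustration (added): juxt(max, min)(coordinates) evaluates both functions on
# the same flattened coordinate sequence, so the caller gets a (largest, smallest)
# pair covering every dynamic shape's bounding box. The numbers below are made up:
from funcy import juxt

assert tuple(juxt(max, min)((3.0, -1.5, 7.25, 0.0))) == (7.25, -1.5)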
def notnone_fn(*funcs):
    return lambda val: first(filter(notnone, juxt(*funcs)(val)))
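# Hedged usage example (added): notnone_fn chains lookups and returns the first result
# that is not None. The dictionaries below are made up for illustration, and the
# example assumes the notnone_fn definition above plus these funcy imports:
from funcy import first, juxt, notnone

overrides = {'timeout': None}
defaults = {'timeout': 30}

get_setting = notnone_fn(overrides.get, defaults.get)
assert get_setting('timeout') == 30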
def main():
    import os

    with tf.device("/cpu:0"):
        (x_train, y_train), (x_validation, y_validation) = load_data()

    batch_size = 32
    epochs = 200
    input_shape = Input(shape=x_train.shape[1:])

    model_file = './results/model.h5'
    if os.path.exists(model_file):
        model = load_model(model_file)
        # with tf.device("/cpu:0"):
        #     validation_data = ImageDataGenerator(featurewise_center=True, featurewise_std_normalization=True)
    else:
        model = Model(*juxt(identity, computational_graph(y_train.shape[1]))(input_shape))
        model.compile(loss='categorical_crossentropy',
                      optimizer=SGD(momentum=0.9),
                      metrics=['accuracy'])

        with tf.device("/cpu:0"):
            train_data = ImageDataGenerator(featurewise_center=True,
                                            featurewise_std_normalization=True,
                                            width_shift_range=0.125,
                                            height_shift_range=0.125,
                                            horizontal_flip=True)
            validation_data = ImageDataGenerator(featurewise_center=True,
                                                 featurewise_std_normalization=True)

            for data in (train_data, validation_data):
                data.fit(x_train)  # For practical use, fitting the featurewise statistics on x_validation is not an option, so both generators are fitted on x_train.

        results = model.fit_generator(
            train_data.flow(x_train, y_train, batch_size=batch_size),
            steps_per_epoch=x_train.shape[0] // batch_size,
            epochs=epochs,
            callbacks=[LearningRateScheduler(partial(getitem, tuple(take(epochs, concat(repeat(0.01, 1), repeat(0.1, 99), repeat(0.01, 50), repeat(0.001))))))],
            validation_data=validation_data.flow(x_validation, y_validation, batch_size=batch_size),
            validation_steps=x_validation.shape[0] // batch_size)

        with open('./results/history.pickle', 'wb') as f:
            pickle.dump(results.history, f)

        save_model(model, model_file)

    try:
        with tf.device("/cpu:0"):
            # model.summary()
            # print("=== AFTER POPPING THE LAST ===")
            model.layers.pop()
            # model.summary()
            # generate_confusion_matrix(model, x_validation, y_validation, batch_size)
            # plot_model(model, to_file='./results/model.png')
    except Exception as ex:
        print("plot_model failed with error:", repr(ex), "\nMoving on...")

    siamese(input_shape, model)
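# Hedged note (added): for a functional Keras Model, model.layers.pop() only edits the
# Python list of layers; it does not rewire model.outputs. If the intent of the block
# above is to reuse the network without its final classification layer (for example as
# a feature extractor for the siamese step), the usual pattern is to build a new Model
# on an intermediate output, roughly:
#
#     feature_extractor = Model(inputs=model.input, outputs=model.layers[-2].output)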
def __ljuxt(self, *fs):
    return rcompose(juxt(*fs), list)