import h5py
from keras.models import Sequential
from keras.layers import Dense

# Imports below assume the dlgo package layout used throughout this repo.
from dlgo.agent.predict import DeepLearningAgent
from dlgo.data.parallel_processor import GoDataProcessor
from dlgo.encoders.sevenplane import SevenPlaneEncoder
from dlgo.networks import large


def main():
    # tag::e2e_processor[]
    go_board_rows, go_board_cols = 19, 19
    nb_classes = go_board_rows * go_board_cols
    encoder = SevenPlaneEncoder((go_board_rows, go_board_cols))
    processor = GoDataProcessor(encoder=encoder.name())
    X, y = processor.load_go_data(num_samples=100)
    # end::e2e_processor[]

    # tag::e2e_model[]
    input_shape = (encoder.num_planes, go_board_rows, go_board_cols)
    model = Sequential()
    network_layers = large.layers(input_shape)
    for layer in network_layers:
        model.add(layer)
    model.add(Dense(nb_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    model.fit(X, y, batch_size=128, epochs=20, verbose=1)
    # end::e2e_model[]

    # tag::e2e_agent[]
    # Serialize the trained model together with its encoder as a bot.
    deep_learning_bot = DeepLearningAgent(model, encoder)
    model_file = h5py.File("../agents/deep_bot.h5", "w")
    deep_learning_bot.serialize(model_file)
    model_file.close()
    # end::e2e_agent[]


if __name__ == '__main__':
    main()
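# A minimal sketch (not part of the script above) of loading the serialized
# bot back and asking it for a move. It assumes the load_prediction_agent
# helper and the goboard module from the same dlgo package used elsewhere
# in this repo.
import h5py

from dlgo import goboard
from dlgo.agent.predict import load_prediction_agent

with h5py.File("../agents/deep_bot.h5", "r") as model_file:
    bot = load_prediction_agent(model_file)

game = goboard.GameState.new_game(19)
print(bot.select_move(game))  # predicted move for the empty 19x19 board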
import time

import h5py
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense

from dlgo.agent.predict import DeepLearningAgent, load_prediction_agent
from dlgo.data.parallel_processor import GoDataProcessor
from dlgo.httpfrontend import get_web_app
from dlgo.networks import large
# XPlaneEncoder is project-specific; point this import at wherever the
# encoder is actually defined in your tree.
from dlgo.encoders.xplane import XPlaneEncoder


def main():
    samp = 1000
    epo = 1
    # tag::e2e_processor[]
    timestr = time.strftime("%Y%m%d-%H%M%S")
    model_h5filename = ("./agents/deep_bot_" + timestr +
                        "_s" + str(samp) + "e" + str(epo) + ".h5")
    go_board_rows, go_board_cols = 19, 19
    nb_classes = go_board_rows * go_board_cols
    encoder = XPlaneEncoder((go_board_rows, go_board_cols))
    data_dir = "data/" + str(encoder.num_planes) + "-planes"
    processor = GoDataProcessor(encoder=encoder.name(),
                                data_directory=data_dir)
    X, y = processor.load_go_data(num_samples=samp)
    # end::e2e_processor[]

    # tag::e2e_model[]
    input_shape = (encoder.num_planes, go_board_rows, go_board_cols)
    model = Sequential()
    network_layers = large.layers(input_shape)
    for layer in network_layers:
        model.add(layer)
    try:
        # Pin the remaining work to the first GPU; tf.device raises
        # RuntimeError if that device is unavailable.
        with tf.device('/device:GPU:0'):
            model.add(Dense(nb_classes, activation='softmax'))
            model.compile(loss='categorical_crossentropy',
                          optimizer='adadelta',
                          metrics=['accuracy'])
            model.fit(X, y, batch_size=128, epochs=epo, verbose=1)
            # end::e2e_model[]

            # tag::e2e_agent[]
            deep_learning_bot = DeepLearningAgent(model, encoder)
            deep_learning_bot.serialize(h5py.File(model_h5filename, "w"))
            # end::e2e_agent[]

            # tag::e2e_load_agent[]
            model_file = h5py.File(model_h5filename, "r")
            bot_from_file = load_prediction_agent(model_file)
            web_app = get_web_app({'predict': bot_from_file})
            web_app.run()
            # end::e2e_load_agent[]
    except RuntimeError as e:
        print(e)


if __name__ == '__main__':
    main()
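# Quick sanity check (a sketch, assuming TensorFlow 2.x) to run before
# pinning work to '/device:GPU:0' as the script above does: an empty list
# means tf.device('/device:GPU:0') will raise the RuntimeError caught there.
import tensorflow as tf

print(tf.config.list_physical_devices('GPU'))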
from keras.models import Sequential
from keras.layers import Dense

from dlgo.data.parallel_processor import GoDataProcessor
from dlgo.encoders.sevenplane import SevenPlaneEncoder
from dlgo.networks import large

go_board_rows, go_board_cols = 19, 19
num_classes = go_board_rows * go_board_cols
num_games = 100

encoder = SevenPlaneEncoder((go_board_rows, go_board_cols))
processor = GoDataProcessor(encoder=encoder.name())

if __name__ == '__main__':
    generator = processor.load_go_data('train', num_games,
                                       use_generator=True)
    test_generator = processor.load_go_data('test', num_games,
                                            use_generator=True)

    input_shape = (encoder.num_planes, go_board_rows, go_board_cols)
    network_layers = large.layers(input_shape)
    model = Sequential()
    for layer in network_layers:
        model.add(layer)
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adagrad',
                  metrics=['accuracy'])

    epochs = 5
    batch_size = 128
    # steps_per_epoch counts batches, not samples, so divide by batch_size.
    model.fit_generator(
        generator=generator.generate(batch_size, num_classes),
        epochs=epochs,
        steps_per_epoch=generator.get_num_samples() / batch_size,
        validation_data=test_generator.generate(batch_size, num_classes),
        validation_steps=test_generator.get_num_samples() / batch_size)
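# Side note (a sketch, not from the original script): Keras expects integer
# step counts, so the float divisions above can trip newer Keras versions.
# Computing the steps explicitly avoids that; round up so the final partial
# batch is still consumed.
import math


def batches_per_epoch(num_samples, batch_size):
    return math.ceil(num_samples / batch_size)


print(batches_per_epoch(102400, 128))  # -> 800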
import math

import matplotlib.pyplot as plt
from keras.callbacks import (CSVLogger, EarlyStopping, LearningRateScheduler,
                             ModelCheckpoint, ReduceLROnPlateau)
from keras.layers import Dense
from keras.models import Sequential, load_model
from keras.optimizers import SGD, Adadelta, Adagrad

from dlgo.data.parallel_processor import GoDataProcessor
from dlgo.encoders.alphago import AlphaGoEncoder
from dlgo.networks import large
# alphago_predict (the AlphaGo-style model builder) and bot_save (serializes
# model + encoder to an .h5 bot) are project-specific helpers; adjust these
# imports to wherever they live in your tree.
from dlgo.networks import alphago as alphago_predict
from bot_utils import bot_save


def step_decay(epoch):
    # Assumed schedule (the original relied on an externally defined
    # step_decay): halve the learning rate every 10 epochs.
    initial_lrate = 0.1
    drop = 0.5
    epochs_drop = 10.0
    return initial_lrate * math.pow(drop,
                                    math.floor((1 + epoch) / epochs_drop))


def my_first_network(
        cont_train=True,
        num_games=100,
        num_samples=None,
        num_samples_test=None,
        epochs=10,
        batch_size=128,
        percent_validation=10,
        optimizer='adadelta',
        learning_rate=0.1,
        patience=5,
        where_save_model='../checkpoints/small_model_epoch_{epoch:3d}_{val_loss:.3f}_{val_accuracy:.3f}.h5',
        where_save_bot='../checkpoints/small_deep_bot.h5',
        pr_kgs='n',
        seed=1337,
        name_model='my_small',
        network_alphago='n',
        verb=1,
        num_layers=12,
        first_filter=192,
        num_filters=64,
        first_kernel_size=5,
        other_kernel_size=3):
    go_board_rows, go_board_cols = 19, 19
    num_classes = go_board_rows * go_board_cols
    model = Sequential()
    encoder = AlphaGoEncoder((go_board_rows, go_board_cols),
                             use_player_plane=True)
    processor = GoDataProcessor(encoder=encoder.name(),
                                data_directory='data')

    if pr_kgs == 'y':
        # Only prepare train and test data in the data directory, then exit.
        processor.load_go_data('train', num_games, use_generator=True,
                               seed=seed)
        processor.load_go_data('test', num_games, use_generator=True,
                               seed=seed)
        return
    generator = processor.load_go_data('train', num_games,
                                       use_generator=True, seed=0)
    test_generator = processor.load_go_data('test', num_games,
                                            use_generator=True, seed=0)

    input_shape = (encoder.num_planes, go_board_rows, go_board_cols)
    if network_alphago != 'y':
        network_layers = large.layers(input_shape)

    train_log = ('training_' + name_model + '_' + str(num_games) +
                 '_epochs_' + str(epochs) + '_' + optimizer + '.csv')
    csv_logger = CSVLogger(train_log, append=True, separator=';')
    lrate = LearningRateScheduler(step_decay)
    r_patience = patience - 1 if patience > 2 else patience
    reduce_lr = ReduceLROnPlateau(monitor='val_accuracy', factor=0.1,
                                  patience=r_patience, verbose=1, mode='auto',
                                  min_delta=0.00001, cooldown=0, min_lr=0)
    # tensor_board_log_dir = '/home/nail/CODE_GO/checkpoints/my_log_dir'
    # tensorboard = TensorBoard(log_dir=tensor_board_log_dir,
    #                           histogram_freq=1, embeddings_freq=1,
    #                           write_graph=True)
    callback_list = [
        ModelCheckpoint(where_save_model, monitor='val_accuracy',
                        save_best_only=True),
        EarlyStopping(monitor='val_accuracy', mode='auto', verbose=verb,
                      patience=patience, min_delta=0,
                      restore_best_weights=True),
        csv_logger,
        reduce_lr,
    ]
    if optimizer == 'SGD':
        # Only SGD additionally gets the step-decay learning-rate schedule.
        callback_list.append(lrate)

    if cont_train is False:
        # Train from scratch, starting from random weights.
        if network_alphago != 'y':
            for layer in network_layers:
                model.add(layer)
        else:
            # AlphaGo-style network.
            model = alphago_predict.alphago_model(
                input_shape=input_shape,
                first_filter=first_filter,
                num_filters=num_filters,
                num_layers=num_layers,
                first_kernel_size=first_kernel_size,
                other_kernel_size=other_kernel_size)
        model.add(Dense(num_classes, activation='softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])
        model.summary()
    else:
        # Continue training from an already pre-trained model.
        model = load_model('../checkpoints/model_continue.h5')
        if optimizer == 'SGD':
            opt = SGD(learning_rate=learning_rate)
        elif optimizer == 'adagrad':
            opt = Adagrad(learning_rate=learning_rate)
        else:
            opt = Adadelta(learning_rate=learning_rate)
        model.compile(loss='categorical_crossentropy',
                      optimizer=opt,
                      metrics=['accuracy'])

    if num_samples is None:
        steps_per_epoch = generator.get_num_samples() / batch_size
        validation_steps = test_generator.get_num_samples() / batch_size
    else:
        steps_per_epoch = int(num_samples / batch_size)
        # Validate on percent_validation% of all test samples.
        validation_steps = int(percent_validation / 100 *
                               num_samples_test / batch_size)
    history = model.fit_generator(
        generator=generator.generate(batch_size, num_classes),
        epochs=epochs,
        steps_per_epoch=steps_per_epoch,
        validation_data=test_generator.generate(batch_size, num_classes),
        validation_steps=validation_steps,
        verbose=verb,
        callbacks=callback_list)

    if num_samples_test is None:
        eval_steps = test_generator.get_num_samples() / batch_size
    else:
        eval_steps = num_samples_test / batch_size
    score = model.evaluate_generator(
        generator=test_generator.generate(batch_size, num_classes),
        steps=eval_steps)

    bot_save(model, encoder, where_save_bot)

    print("Test score: ", score[0])
    print("Test accuracy: ", score[1])
    # Finally, print the model architecture.
    model.summary()

    # Plot the training results.
    plt.subplot(211)
    plt.title("Accuracy")
    plt.plot(history.history["accuracy"], color="g", label="Train")
    plt.plot(history.history["val_accuracy"], color="b", label="Validation")
    plt.legend(loc="best")
    plt.subplot(212)
    plt.title("Loss")
    plt.plot(history.history["loss"], color="g", label="Train")
    plt.plot(history.history["val_loss"], color="b", label="Validation")
    plt.legend(loc="best")
    plt.tight_layout()
    plt.show()
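# Example invocation (a sketch; the parameter values here are illustrative,
# and the remaining arguments keep the defaults from the signature above).
# Running with pr_kgs='y' instead would only download and encode the KGS
# data, then return without training.
if __name__ == '__main__':
    my_first_network(cont_train=False,
                     num_games=1000,
                     epochs=20,
                     optimizer='adagrad',
                     verb=1)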