def predict(wavfile, modelfile):
    spl, wav = readWav(wavfile)
    wav = wav.reshape([1, -1, 1, 1])
    modelA = KerasModel()
    modelA.load_weights(modelfile)
    result = modelA.predict(wav, 1)
    return numpy.argmax(result, 1)[0]

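# A hedged usage sketch for predict(): the paths below are illustrative only
# (the checkpoint name follows the save_<n>.h5 pattern used by the training
# code elsewhere in this repo).
label = predict('./data/test/example.wav', './models/save_64.h5')
print('predicted class index:', label)
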
def train(sp=-1):
    model = KerasModel()
    train = DataGenerator('train')
    test = DataGenerator('test')
    if sp != -1:
        chkp = '.' + os.sep + 'models' + os.sep + 'save_' + str(sp) + '.h5'
        model.load_weights(chkp)
        print('start point: %d' % sp)
    print("Starting training:")
    model.fit_generator(
        generator=train,
        # steps_per_epoch: total number of steps drawn from the generator
        # before declaring one epoch finished and starting the next. It should
        # normally equal the number of samples in the dataset divided by the
        # batch size. For a Sequence it is optional; if unspecified,
        # len(generator) is used as the step count.
        # In theory we would follow that rule, but because our dataset is
        # small we run more steps per epoch. During testing I used 14, which
        # follows the theory exactly; if the current value fails to run,
        # change it back to 14 and increase the number of epochs instead.
        steps_per_epoch=3000,
        # epochs: number of epochs to train for
        epochs=epochs_time,
        verbose=1,
        validation_data=test,
        # validation_steps: number of validation batches drawn per epoch
        validation_steps=100
        # For more fit_generator parameters, see:
        # https://blog.csdn.net/qq_32951799/article/details/82918098
    )
    # Save the model weights once training completes.
    model.save_weights('.' + os.sep + 'models' + os.sep + 'save_' +
                       str(epochs_time) + '.h5')

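# The DataGenerator used above is not defined in these snippets. A minimal
# Sequence-backed sketch, assuming the preprocess() and generate_label()
# helpers that appear elsewhere in this repo; the class name, batch size and
# directory layout are assumptions.
import os

import numpy as np
from tensorflow.keras.utils import Sequence


class DataGeneratorSketch(Sequence):
    def __init__(self, split, batch_size=64):
        self.files = [f for f in os.listdir('./data/' + split)
                      if f.endswith('.wav')]
        self.batch_size = batch_size

    def __len__(self):
        # Batches per epoch; fit_generator falls back to len(generator)
        # when steps_per_epoch is not given.
        return max(1, len(self.files) // self.batch_size)

    def __getitem__(self, idx):
        batch = self.files[idx * self.batch_size:(idx + 1) * self.batch_size]
        x = np.array([preprocess(f) for f in batch], dtype='float32')
        x = x.reshape((x.shape[0], x.shape[1], 1, 1))
        y = np.array([generate_label(f) for f in batch], dtype='int8')
        return x, y
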
def train(sp=-1):
    model = KerasModel()
    wave_file_list = [
        item for item in os.listdir('./data/train') if item[-3:] == 'wav'
    ]
    wave_train_data = list(map(preprocess, wave_file_list))
    wave_train_data = np.array(wave_train_data, dtype='float32')
    wave_train_data.resize(
        (wave_train_data.shape[0], wave_train_data.shape[1], 1, 1))
    wave_train_label = list(map(generate_label, wave_file_list))
    wave_train_label = np.array(wave_train_label, dtype='int8')
    cb = MyCallback()
    callbacks = [
        cb,
        EarlyStopping(monitor='val_acc', patience=4, verbose=1, mode='auto')
    ]
    model.fit(wave_train_data,
              wave_train_label,
              batch_size=64,
              epochs=50,
              verbose=1,
              validation_split=0.1,
              callbacks=callbacks)

def train(sp=-1):
    model = KerasModel()
    train = DataGenerator('train')
    test = DataGenerator('test')
    if sp != -1:
        chkp = '.' + os.sep + 'models' + os.sep + 'save_' + str(sp) + '.h5'
        model.load_weights(chkp)
        print('start point: %d' % sp)
    for i in range(sp + 1, 100):
        model.fit_generator(generator=train,
                            steps_per_epoch=3000,
                            epochs=1,
                            validation_data=test,
                            validation_steps=100)
        # Checkpoint the weights after every epoch.
        model.save_weights('.' + os.sep + 'models' + os.sep + 'save_' +
                           str(i) + '.h5')

from model import KerasModel
from tensorflow.python.framework import graph_io
from tensorflow.python.training import saver
from keras import backend as K
import os
import sys

model = KerasModel()
if len(sys.argv) < 2:
    sp = 64
else:
    sp = int(sys.argv[1])
chkp = '.' + os.sep + 'models' + os.sep + 'save_' + str(sp) + '.h5'
sess = K.get_session()
model.load_weights(chkp)
ckpt_path = saver.Saver().save(sess, "models/model.ckpt")
graph_io.write_graph(sess.graph, './models', 'model.pb')
command = '''python -m tensorflow.python.tools.freeze_graph \
--input_graph=%s \
--input_checkpoint=%s \
--output_graph=%s \
--output_node_names=%s \
''' % (os.path.join('.', 'models', 'model.pb'),
       os.path.join('.', 'models', 'model.ckpt'),
       os.path.join('.', 'asrModel.pb'),
       '"dense_2/Softmax"')
os.system(command)

class Bot:
    def __init__(self, name):
        if "-" in name:
            self.name = name.split("-")[0]
        else:
            self.name = name
        current_directory = os.path.dirname(os.path.abspath(__file__))
        model_name = 'weights-{}.hdf5'.format(self.name.lower())
        model_location = os.path.join(current_directory, model_name)
        self._kmodel = KerasModel(name=self.name.lower(),
                                  load_weights=True,
                                  location=model_location)
        # Warm up the model and sanity-check the output shape.
        random_input_data = np.random.rand(PLANET_MAX_NUM,
                                           PER_PLANET_FEATURES)
        predictions = self._kmodel.predict(random_input_data)
        self._turns = 0
        assert len(predictions) == PLANET_MAX_NUM

    def play(self):
        game = hlt.Game(self.name)
        turns = 0
        while True:
            turns += 1
            start = time.time()
            game_map = game.update_map()
            features = self.produce_features(game_map)
            predictions = self._kmodel.predict(features)
            assignments = self.produce_ships_to_planets_assignment(
                game_map, predictions)
            cmd_queue = self.play_game(game_map, assignments, turns, 0, [],
                                       start, training=False)
            game.send_command_queue(cmd_queue)

    def produce_features(self, game_map):
        """
        For each planet produce a set of features that we will feed to the
        neural net. We always return an array with PLANET_MAX_NUM rows - if a
        planet is not present in the game, we set all its features to 0.

        :param game_map: game map
        :return: 2-D array where the i-th row is the feature set of the i-th planet
        """
        feature_matrix = [[0 for _ in range(PER_PLANET_FEATURES)]
                          for _ in range(PLANET_MAX_NUM)]
        all_planets = (p for p in game_map.all_planets())
        for planet in all_planets:
            # Compute the "ownership" feature - 0 if the planet is not
            # occupied, 1 if occupied by us, -1 if occupied by the enemy.
            if planet.owner == game_map.get_me():
                ownership = 1
            elif planet.owner is None:
                ownership = 0
            else:  # owned by enemy
                ownership = -1
            my_best_distance = 10000
            enemy_best_distance = 10000
            gravity = 0
            health_weighted_ship_distance = 0
            sum_of_health = 0
            all_players = (p for p in game_map.all_players())
            me = game_map.get_me()
            for player in all_players:
                for ship in player.all_ships():
                    d = ship.calculate_distance_between(planet)
                    if player == me:
                        my_best_distance = min(my_best_distance, d)
                        sum_of_health += ship.health
                        health_weighted_ship_distance += d * ship.health
                        gravity += ship.health / (d * d)
                    else:
                        enemy_best_distance = min(enemy_best_distance, d)
                        gravity -= ship.health / (d * d)
            distance_from_center = distance(planet.x, planet.y,
                                            game_map.width / 2,
                                            game_map.height / 2)
            health_weighted_ship_distance = (health_weighted_ship_distance /
                                             sum_of_health)
            remaining_docking_spots = planet.num_docking_spots - len(
                planet.all_docked_ships())
            signed_current_production = planet.current_production * ownership
            is_active = remaining_docking_spots > 0 or ownership != 1
            feature_matrix[planet.id] = [
                planet.health, remaining_docking_spots,
                planet.remaining_resources, signed_current_production,
                gravity, my_best_distance, enemy_best_distance, ownership,
                distance_from_center, health_weighted_ship_distance, is_active
            ]
        return feature_matrix

    def produce_ships_to_planets_assignment(self, game_map, predictions):
        """
        Given the predictions from the neural net, create an assignment
        (undocked ship -> planet) deciding which planet each ship should go
        to. Note that we already know how many ships are going to each planet
        (from the neural net), we just don't know which ones.

        :param game_map: game map
        :param predictions: probability distribution describing where the ships should be sent
        :return: list of pairs (ship, planet)
        """
        undocked_ships = list(
            filter(
                lambda ship: ship.docking_status == ship.DockingStatus.UNDOCKED,
                game_map.get_me().all_ships()))

        # Greedy assignment
        assignment = []
        number_of_ships_to_assign = len(undocked_ships)
        if number_of_ships_to_assign == 0:
            return assignment

        planet_heap = []
        ship_heaps = [[] for _ in range(PLANET_MAX_NUM)]

        # Create heaps for greedy ship assignment.
        all_planets = (p for p in game_map.all_planets())
        me = game_map.get_me()
        for planet in all_planets:
            # We insert the negated number of ships as the key, since we want
            # a max heap here.
            heapq.heappush(
                planet_heap,
                (-predictions[planet.id] * number_of_ships_to_assign,
                 planet.id))
            h = []
            for ship in undocked_ships:
                d = ship.calculate_distance_between(planet)
                heapq.heappush(h, (d, ship.id))
            ship_heaps[planet.id] = h

        # Create the greedy assignment.
        already_assigned_ships = set()
        while number_of_ships_to_assign > len(already_assigned_ships):
            # Remove the best planet from the heap and put it back in with an
            # adjustment. (Account for the fact that the distribution values
            # are stored as negative numbers on the heap.)
            ships_to_send, best_planet_id = heapq.heappop(planet_heap)
            ships_to_send = -(-ships_to_send - 1)
            heapq.heappush(planet_heap, (ships_to_send, best_planet_id))

            # Find the closest unused ship to the best planet.
            _, best_ship_id = heapq.heappop(ship_heaps[best_planet_id])
            while best_ship_id in already_assigned_ships:
                _, best_ship_id = heapq.heappop(ship_heaps[best_planet_id])

            # Assign the best ship to the best planet.
            assignment.append((me.get_ship(best_ship_id),
                               game_map.get_planet(best_planet_id)))
            already_assigned_ships.add(best_ship_id)

        return assignment

    def navigate(self, game_map, start_of_round, ship, destination, speed):
        """
        Send a ship to its destination. Because the "navigate" method in the
        Halite API is expensive, we use it only if we haven't used too much
        time yet.

        :param game_map: game map
        :param start_of_round: time (in seconds) between the Epoch and the start of this round
        :param ship: ship we want to send
        :param destination: destination to which we want to send the ship
        :param speed: speed with which we would like to send the ship to its destination
        :return:
        """
        current_time = time.time()
        have_time = current_time - start_of_round < 1.2
        navigate_command = None
        if have_time:
            navigate_command = ship.navigate(destination,
                                             game_map,
                                             speed=speed,
                                             max_corrections=100,
                                             angular_step=2)
        if navigate_command is None:
            # ship.navigate may return None if it cannot find a path.
            # In such a case we just thrust.
            dist = ship.calculate_distance_between(destination)
            speed = speed if (dist >= speed) else dist
            navigate_command = ship.thrust(
                speed, ship.calculate_angle_between(destination))
        return navigate_command

    def check_enemies(self, ship, others, game_map):
        me = game_map.get_me()
        danger = [s for s in others
                  if ship.calculate_distance_between(s) < 15]
        return danger

    def handle_defense(self, ship, others, game_map, cmd_q, planet=None):
        # others = list from check_enemies
        me = game_map.get_me()
        eothers = others
        if planet:
            mi = eothers.index(
                min(eothers,
                    key=lambda s: distance(s.x, s.y, planet.x, planet.y)))
        else:
            mi = eothers.index(
                min(eothers,
                    key=lambda s: distance(s.x, s.y, ship.x, ship.y)))
        t = eothers[mi]
        speed = 7
        status = ship.docking_status.value
        if status == UNDOCKED:
            cmd_q.append(
                ship.navigate(ship.closest_point_to(t, min_distance=1),
                              game_map, speed))
        elif status == UNDOCKING or status == DOCKING:
            cmd_q.append("")
        elif status == DOCKED:
            nearby = others
            if len(nearby) > 0 and len(ship.planet.all_docked_ships()) > 1:
                cmd_q.append(ship.undock())
            else:
                cmd_q.append("")

    def handle_dock(self, ship, planet, game_map, command_queue):
        command_queue.append(ship.dock(planet))

    def handle_out_of_range(self, ship, planet, game_map, command_queue,
                            angle, dist=None):
        if dist is None:
            dist = ship.calculate_distance_between(planet)
        point = ship.closest_point_to(planet, min_distance=1)
        command_queue.append(ship.navigate(point, game_map, 7))

    def play_game(self, game_map, assignments, turns, i, command_queue,
                  start, training=False, graph=False):
        my_ships = {}
        taken_dmg = []
        for ship in game_map.get_me().all_ships():
            my_ships[int(ship.x), int(ship.y)] = ship
            if ship.health < 255:
                taken_dmg.append(ship.id)
        me = game_map.get_me()
        attack_mode = False
        speed = hlt.constants.MAX_SPEED

        # Enable attack mode based on ownership.
        if turns > 10:
            my_planets = [p for p in game_map.all_planets() if p.owner == me]
            full_planets = [p for p in my_planets if p.is_full()]
            num_p = len(game_map.all_planets())
            # Percentage of planets not owned by me,
            # e.g. (10 - 5) / 10 * 100 = 50%.
            perc_p = ((num_p - len(my_planets)) / num_p) * 100
            if len(full_planets) == len(my_planets) and perc_p > 50:
                # All my planets have been filled.
                attack_mode = True

        aothers = [s for s in game_map._all_ships() if s.owner != me]
        for ship, planet in assignments:
            others = [s for s in aothers if s.id != ship.id]
            # Rush defense
            if ship.id in taken_dmg:
                danger = self.check_enemies(ship, others, game_map)
                if len(danger) > 0:
                    self.handle_defense(ship, danger, game_map, command_queue)
                    continue
            # Check if docking/undocking
            if (ship.docking_status.value == DOCKING
                    or ship.docking_status.value == UNDOCKING):
                command_queue.append("")
                continue
            # Ship already docked
            elif ship.docking_status.value == DOCKED:
                danger = self.check_enemies(ship, others, game_map)
                if len(danger) > 0:
                    self.handle_defense(ship, danger, game_map, command_queue)
                    continue
                else:
                    command_queue.append("")
                    continue
            is_planet_friendly = not planet.is_owned() or planet.owner == me
            # Unowned or mine
            if is_planet_friendly:
                # In range to dock and has space
                if ship.can_dock(planet) and not planet.is_full():
                    self.handle_dock(ship, planet, game_map, command_queue)
                    continue
                # Not in range, or in range and no space
                else:
                    danger = self.check_enemies(ship, others, game_map)
                    if len(danger) > 0:
                        # handle_defense appends to the queue itself.
                        self.handle_defense(ship, others, game_map,
                                            command_queue, planet)
                    else:
                        command_queue.append(
                            self.navigate(game_map, start, ship,
                                          ship.closest_point_to(planet),
                                          speed))
                    continue
            # Enemy
            else:
                # Check for enemies
                danger = self.check_enemies(ship, others, game_map)
                # Enemy planet attack mode or nearby enemies
                if attack_mode or len(danger) > 0:
                    weakest = planet.weakest_ship()
                    command_queue.append(
                        ship.navigate(
                            ship.closest_point_to(weakest, min_distance=1),
                            game_map, 7))
                    continue
                else:
                    angle = ship.calculate_angle_between(planet)
                    dist = ship.calculate_distance_between(planet)
                    if dist < 3:
                        weakest = planet.weakest_ship()
                        command_queue.append(
                            ship.navigate(
                                ship.closest_point_to(weakest,
                                                      min_distance=1),
                                game_map, 7))
                        continue
                    else:
                        self.handle_out_of_range(ship, planet, game_map,
                                                 command_queue, angle,
                                                 dist=dist)
                        continue
        q = [c for c in command_queue if c is not None]
        return q

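# To see the greedy heap logic from produce_ships_to_planets_assignment in
# isolation, here is a self-contained toy version with plain numbers and no
# hlt dependency (the function name and inputs are hypothetical). Planet
# "demand" keys are stored negated so Python's min-heap behaves as a max
# heap, exactly as in the method above.
import heapq


def greedy_assign(predictions, distances):
    # predictions[p]: fraction of the fleet planet p should receive.
    # distances[p][s]: distance from ship s to planet p.
    n_ships = len(distances[0])
    planet_heap = [(-pred * n_ships, p) for p, pred in enumerate(predictions)]
    heapq.heapify(planet_heap)
    # A sorted list already satisfies the heap invariant.
    ship_heaps = [sorted((d, s) for s, d in enumerate(row))
                  for row in distances]
    assigned, result = set(), []
    while len(assigned) < n_ships:
        demand, planet = heapq.heappop(planet_heap)
        # One unit of demand consumed (keys are negative).
        heapq.heappush(planet_heap, (demand + 1, planet))
        _, ship = heapq.heappop(ship_heaps[planet])
        while ship in assigned:
            _, ship = heapq.heappop(ship_heaps[planet])
        assigned.add(ship)
        result.append((ship, planet))
    return result


# Two planets, three ships: planet 0 "wants" about two thirds of the fleet,
# so it receives its two closest ships and planet 1 gets the remaining one.
print(greedy_assign([0.67, 0.33], [[1.0, 5.0, 2.0], [4.0, 1.5, 3.0]]))
# -> [(0, 0), (2, 0), (1, 1)]
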
def main():
    sess = tf.Session()
    with sess.as_default():
        parser = argparse.ArgumentParser(description="Halite 2 ML Training")
        parser.add_argument("--model_name", help="Name of the model",
                            default="keras-model.h5")
        parser.add_argument("--minibatch_size", help="Size of the minibatch",
                            default=100, type=int)
        parser.add_argument("--steps", help="Number of steps",
                            default=1000, type=int)
        parser.add_argument("--games_limit", help="Number of games",
                            default=1000, type=int)
        parser.add_argument("--data", help="Location of Replays",
                            default="data/sample/")
        parser.add_argument("--cache", help="Model to Load", default=None)
        parser.add_argument("--load_data", help="Load Features from file",
                            default=True, type=bool)
        parser.add_argument("--pack", help="Which replay pack to use",
                            default="all")
        parser.add_argument("--load_weights", help="Load weights",
                            default=False, type=bool)
        parser.add_argument("--lr", help="Learning Rate",
                            default=1e-3, type=float)
        parser.add_argument("--evo", help="use Genetic evolution",
                            default=False, type=bool)
        # Referenced below; the defaults here are assumptions.
        parser.add_argument("--seed", help="Random seed",
                            default=None, type=int)
        parser.add_argument("--dump_features_location",
                            help="Where to dump parsed features",
                            default=None)
        args = parser.parse_args()

        if not args.load_data:
            if args.seed:
                np.random.seed(args.seed)
            if args.data.endswith('.zip'):
                raw_data = fetch_data_zip(args.data, args.games_limit)
            else:
                raw_data = fetch_data_dir(args.data, args.games_limit)
            data_input, data_output = parse(raw_data, None,
                                            args.dump_features_location)
        else:
            data_input, data_output = load_data(pack=args.pack)

        data_size = len(data_input)
        training_input, training_output = data_input, data_output
        training_data_size = len(training_input)

        # Randomly permute the data.
        permutation = np.random.permutation(training_data_size)
        training_input, training_output = (training_input[permutation],
                                           training_output[permutation])

        if not args.evo:
            kmodel = KerasModel(args.model_name, args.load_weights,
                                training=True,
                                batch_size=args.minibatch_size, lr=args.lr)
            model = kmodel.model
            model.summary()
            eval_input = kmodel.normalize_input(training_input)
            for i in range(10):
                preds = kmodel.predict(training_input[i])
                print("Pred {}".format(preds))
                count = 0
                true_count = 0
                for i, v in enumerate(preds):
                    count += 1
                    as_perc = round(v, 3) * 100
                    t_as_perc = round(training_output[0][i], 3) * 100
                    if as_perc == t_as_perc:
                        true_count += 1
                    print("{0:.2f} vs {1:.2f} | {2}".format(
                        as_perc, t_as_perc, as_perc == t_as_perc))
                print("{0}/{1} = {2:.2f}%".format(
                    true_count, count, true_count / count * 100))
            score = model.evaluate(eval_input, training_output, verbose=1)
            print("\nInitial: loss: {0:.2f}, acc: {1:.2f}%".format(
                score[0], score[1] * 100))
            print("Metrics: {}".format(model.metrics_names))
            history = kmodel.fit(training_input, training_output,
                                 batch_size=args.minibatch_size,
                                 epochs=args.steps)
            current_directory = os.path.dirname(os.path.abspath(__file__))
            model_path = os.path.join(current_directory, os.path.pardir,
                                      "models/")
            kmodel.save(model_path)
            summary(history, model_path)
        else:
            max_conv_layers = 2
            max_dense_layers = 4
            max_conv_kernels = 128
            max_dense_nodes = 512
            input_shape = data_input.shape[1:]
            num_classes = 28
            genome_handler = BotGenomeHandler(max_conv_layers,
                                              max_dense_layers,
                                              max_conv_kernels,
                                              max_dense_nodes,
                                              input_shape, num_classes)
            num_generations = 20
            population_size = 30
            num_epochs = 1
            devol = DEvol(genome_handler)
            perc = int(training_data_size * .8)
            x_train, x_test = training_input[perc:], training_input[:perc]
            y_train, y_test = training_output[perc:], training_output[:perc]
            dataset = ((x_train, y_train), (x_test, y_test))
            model, accuracy, loss = devol.run(dataset, num_generations,
                                              population_size, num_epochs)
            model.summary()
            print("Accuracy: {}\tLoss: {}".format(accuracy, loss))

import io
import json

import flask

from model import KerasModel

# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
model = None


@app.route("/predict", methods=["POST"])
def predict():
    # initialize the data dictionary that will be returned from the view
    data = {"value": None}
    if flask.request.method == "POST":
        tx = flask.request.json
        input_from_transaction = json.loads(tx)['input']
        output = model.get_model_output(input_from_transaction)
        data["value"] = int(output)
    return flask.jsonify(data)


# if this is the main thread of execution, first load the model and
# then start the server
if __name__ == "__main__":
    print(("* Loading Keras model and Flask starting server..."
           "please wait until server has fully started"))
    model = KerasModel()
    app.run()

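# A hedged example of calling the endpoint above. Note that the handler
# json.loads() the request body again, so the transaction is posted as a
# JSON *string*; the payload values here are purely illustrative.
import json

import requests

tx = json.dumps({"input": [[0.1, 0.2, 0.3]]})
resp = requests.post("http://127.0.0.1:5000/predict", json=tx)
print(resp.json())  # e.g. {"value": 3}
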
class Server():
    def __init__(self):
        self.optimizer = tf.keras.optimizers.Adam()
        self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
        self.loss_metrics = tf.keras.metrics.Mean(name='loss')
        self.acc_metrics = tf.keras.metrics.SparseCategoricalAccuracy(
            name='acc')
        log_dir = "logs/{}".format(args.name)
        self.summary_writer = tf.summary.create_file_writer(logdir=log_dir)
        self.summary_writer.set_as_default()
        weights_dir_path = os.path.join("temp", args.name)
        os.makedirs(weights_dir_path, exist_ok=True)
        # Generate the Keras model (build it by calling it once on dummy data)
        dummy_data = load_dummy(args.datasetname)
        self.model = KerasModel()
        self.model(dummy_data)
        self.current_iteration = 1
        self.total_iterations = args.iterations
        self.client_count = args.total
        self.clients_per_round = args.clients
        self.client_history = list()
        self.test_data = load_test_dataset("cifar/test_data.h5")

    def iterate(self, iteration: int):
        weights_file_path = os.path.join("temp", args.name,
                                         "weights_server.h5")
        # Output weights
        self.model.save_weights(weights_file_path)
        # Choose clients
        chosen_clients = sample(range(self.client_count),
                                self.clients_per_round)
        # Inform the simulator to train the chosen clients
        message = {
            "type": "train",
            "clients": chosen_clients,
            "weights_file_path": weights_file_path,
            "step": iteration
        }
        print(json.dumps(message), flush=True)
        # Get the clients that trained successfully
        response = json.loads(self.readline())["ids"]
        self.client_history.append(response)
        # Apply client updates
        self.update_weights(iteration, response)
        self.test(iteration)

    def update_weights(self, iteration: int, clients: List[int]):
        gradients = load_gradients(args.name, iteration, clients)
        self.optimizer.apply_gradients(
            zip(gradients, self.model.trainable_variables))

    def test(self, iteration: int):
        for batch in iter(self.test_data):
            test_predictions = self.model(batch["x"])
            loss = self.loss(batch["y"], test_predictions)
            self.loss_metrics(loss)
            self.acc_metrics(batch["y"], test_predictions)
        average_loss = self.loss_metrics.result()
        average_acc = self.acc_metrics.result()
        print(json.dumps({
            "type": "update",
            "message": ("Test Loss: {:.5f}, Test Accuracy: {:.3f}%".format(
                average_loss, average_acc * 100)),
            "step": iteration
        }), flush=True)
        tf.summary.scalar("loss", average_loss, step=iteration)
        tf.summary.scalar("accuracy", average_acc, step=iteration)
        self.loss_metrics.reset_states()
        self.acc_metrics.reset_states()

    def listen(self):
        command = self.readline()
        while command != "exit":
            if command == "start":
                self.train()
            command = self.readline()
        sys.exit(0)

    def readline(self):
        return sys.stdin.readline().strip()

    def train(self):
        for i in range(1, self.total_iterations + 1):
            self.iterate(i)
            self.current_iteration += 1

    help='whether to add another matrix for modeling time decay')
parser.add_argument('--shuffle',
                    dest='shuffle',
                    type=bool,
                    default=True,
                    help='whether to shuffle the data')
parser.add_argument('--set_batch',
                    dest='set_batch',
                    type=bool,
                    default=False,
                    help='whether to set the batch size')
parser.add_argument(
    '--tag_format',
    dest='tag_format',
    type=str,
    default='conlleval',
    help='defined tag format, conlleval/normal (default is for conlleval '
    'usage; normal outputs a tag sequence for one sentence per line)')
parser.add_argument(
    '--e2e',
    dest='e2e_flag',
    type=bool,
    default=False,
    help='whether to use only the last turn (end-to-end training)')
args = parser.parse_args()
argparams = vars(args)
KerasModel(argparams).run()

class Client():
    def __init__(self):
        self.id = args.id
        self.optimizer = tf.keras.optimizers.Adam()
        self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
        self.loss_metrics = tf.keras.metrics.Mean(name='loss')
        self.acc_metrics = tf.keras.metrics.SparseCategoricalAccuracy(
            name='acc')
        # Generate the Keras model and load the server weights
        dummy_data = load_dummy(args.datasetname)
        self.model = KerasModel()
        self.model(dummy_data)
        self.model.load_weights(args.weights_file)
        self.dataset = load_dataset(directory=args.datasetname,
                                    client_id=self.id,
                                    batch_size=args.batch,
                                    epochs=args.epochs)
        self.datagen = iter(self.dataset)
        self.acc_gradient = None

    def iterate(self):
        # Iterate through all batches
        for batch in self.datagen:
            self.train_step(batch)
        print("Client {} results: Loss - {:.5f}, Acc - {:.3f}%".format(
            self.id, self.loss_metrics.result(),
            self.acc_metrics.result() * 100))
        sys.stdout.flush()
        self.save_gradients()
        sys.stdout.write(json.dumps({"id": self.id}))
        sys.exit(0)

    def train_step(self, batch):
        # Calculate the outcome for one batch
        with tf.GradientTape() as tape:
            predictions = self.model(batch["x"], training=True)
            loss = self.loss(batch["y"], predictions)
        grads = tape.gradient(loss, self.model.trainable_variables)
        # Accumulate gradients
        self.accumulate_gradients(grads)
        # Apply gradients to the model
        self.optimizer.apply_gradients(
            zip(grads, self.model.trainable_variables))
        self.loss_metrics(loss)
        self.acc_metrics(batch["y"], predictions)

    def accumulate_gradients(self, gradient: list):
        if self.acc_gradient is None:
            self.acc_gradient = gradient
        else:
            self.acc_gradient = [
                tf.add(old_grad, new_grad)
                for old_grad, new_grad in zip(self.acc_gradient, gradient)
            ]

    def save_gradients(self):
        gradient_path = os.path.join("temp", args.name,
                                     "gradient_client_{}.h5".format(self.id))
        gradient_np = [value.numpy() for value in self.acc_gradient]
        with open(gradient_path, "wb") as gradient_file:
            pickle.dump(gradient_np, gradient_file)

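# The server references a load_gradients() helper that is not shown in these
# snippets. A minimal sketch consistent with Client.save_gradients() above:
# it reads each chosen client's pickled gradient list and averages the lists
# elementwise. The iteration argument is accepted only to match the call
# site, since clients do not encode it in the file path; averaging (rather
# than summing) is an assumption.
import os
import pickle

import numpy as np


def load_gradients(name, iteration, clients):
    total = None
    for client_id in clients:
        path = os.path.join("temp", name,
                            "gradient_client_{}.h5".format(client_id))
        with open(path, "rb") as gradient_file:
            grads = pickle.load(gradient_file)
        if total is None:
            total = [np.array(g) for g in grads]
        else:
            total = [t + g for t, g in zip(total, grads)]
    return [g / len(clients) for g in total]
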
    'ner': ('O', 'I-LOC', 'B-PER', 'I-PER', 'I-ORG', 'I-MISC', 'B-MISC',
            'B-LOC', 'B-ORG'),
}
tagset = eval_dataset.generate_y(
    tag_type=args.predict_tag,
    tagset=('<PAD>', ) + default_tagsets[args.predict_tag]
    if args.use_default_tagset else None,
    to_categorical=True)
train_dataset.generate_y(tag_type=args.predict_tag,
                         tagset=tagset,
                         to_categorical=True)

# TODO: put original pytorch model on top to recreate original performance
logger.info('Build model...')
model_keras = KerasModel(n_classes=len(tagset),
                         input_dims=bert_output_shape[-1],
                         lr=args.lr,
                         top_rnns=args.top_rnns)

logger.info('Train with batch_size=%i...' % args.batch_size)
train_metrics = model_keras.fit(
    train_dataset,
    batch_size=args.batch_size,
    n_epochs=args.n_epochs,
    eval_dataset=eval_dataset,
)
for _data, _metrics in train_metrics.items():
    logger.info(format_metrics(metrics=_metrics, prefix=_data))
    final_metrics = counts_to_metrics(**_metrics)
    logger.info(format_metrics(metrics=final_metrics, prefix=_data))

class State(object):
    """ Talks directly to cold storage and the merkle only """

    def __init__(self, db, size, height, apphash):
        self.db = db
        self.size = size
        self.height = height
        self.apphash = apphash
        # TODO pass the keras model also as init parameter
        self.keras_model = KerasModel()

    @classmethod
    def load_state(cls, dbfile=None):
        """ Create or load State.
        returns: State
        """
        if not dbfile:
            return cls(MemoryDB(), 0, 0, BLANK_ROOT_HASH)

    def save(self):
        # Save to storage
        meta = StateMetaData(self.size, self.height, self.apphash)
        serial = rlp.encode(meta, sedes=StateMetaData)
        self.db.set(STATE_KEY, serial)
        return self.apphash

    def get_model_output(self, tx):
        """Takes a transaction as input and
        returns the output of the model.
        """
        input_from_transaction = json.loads(tx)['input']
        model_from_transaction = json.loads(tx)['model']
        try:
            return self.keras_model.get_model_output(input_from_transaction,
                                                     model_from_transaction)
        except Exception as e:
            logger.warning('Error while using Keras model (%s): %s',
                           type(e).__name__, e)
            return None

    def get_transaction_hash(self, tx, mode):
        """Takes a raw hex transaction as input and
        returns the hash of the transaction's input field.
        """
        logger.debug(tx)
        logger.debug(mode)
        if mode == method_query:
            input_from_transaction = json.loads(tx)['input']
            input_from_transaction_as_np = np.asarray(input_from_transaction)
            hash_of_transaction = calculate_hash(input_from_transaction_as_np)
            return hash_of_transaction
        elif mode == method_upload:
            model_name_from_transaction = json.loads(tx)['name']
            model_hash_from_transaction = json.loads(tx)['model']
            model_url_from_transaction = json.loads(tx)['url']
            return (model_name_from_transaction, model_hash_from_transaction,
                    model_url_from_transaction)
        else:
            raise ValueError

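# get_transaction_hash() relies on a calculate_hash() helper that is not
# defined in this snippet. A plausible sketch, assuming a SHA-256 hex digest
# over the array's raw bytes (the real helper may differ):
import hashlib

import numpy as np


def calculate_hash(arr):
    # tobytes() makes the digest depend on dtype and memory layout, so
    # callers should normalize the array (e.g. dtype and order) first.
    return hashlib.sha256(np.ascontiguousarray(arr).tobytes()).hexdigest()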