def main(self):
    """Evaluate the model on the configured chunk directories.

    Loads the model definition from ``self.model_path``, restores the
    weights from ``self.weights_path``, evaluates on ``self.chunk_dirs``,
    and stores the metrics on ``self.eval_results``.
    """
    logger.info('Run evaluation')
    evaluated = Model.load_model(path=self.model_path)
    evaluated.load_weights(self.weights_path)
    loss, acc = evaluated.eval(chunk_dirs=self.chunk_dirs)
    self.eval_results = dict(loss=loss, acc=acc)
def train_remote(train_ipfs, test_ipfs, args):
    """Register a remote training job with the producer network.

    Generates the model's initial weights, uploads them to IPFS, creates
    the dataset and train-model assets, and finally declares the training
    task.

    Args:
        train_ipfs: IPFS directory hash of the training data.
        test_ipfs: IPFS directory hash of the test data.
        args: parsed CLI namespace providing ``path``, ``name``,
            ``workers``, ``verifiers``, ``batch``, ``epochs`` and
            ``epochs_in_iteration``.
    """
    import tempfile

    logger.info('Start remote train')
    producer = load_producer()

    logger.info('Generate initial model weights_ipfs')
    model = Model.load_model(path=args.path)

    # Use a unique per-call temp file instead of the former hard-coded
    # '/tmp/tatau_initial_weights': a fixed, predictable path is racy and
    # collides when several jobs are created concurrently.
    fd, initial_weights_path = tempfile.mkstemp(prefix='tatau_initial_weights')
    os.close(fd)
    try:
        model.save_weights(initial_weights_path)
        ipfs = IPFS()
        logger.info('Upload weights_ipfs to IPFS')
        initial_weights_file = ipfs.add_file(initial_weights_path)
    finally:
        # Remove the local copy even if saving or the upload fails.
        if os.path.exists(initial_weights_path):
            os.unlink(initial_weights_path)

    dataset_name = os.path.basename(args.name)
    dataset = Dataset.create(
        db=producer.db,
        encryption=producer.encryption,
        name=dataset_name,
        train_dir_ipfs=train_ipfs,
        test_dir_ipfs=test_ipfs
    )

    logger.info('Create model')
    train_model = TrainModel.upload_and_create(
        name=args.name,
        code_path=args.path,
        db=producer.db,
        encryption=producer.encryption
    )
    logger.debug('Model created: {}'.format(train_model))

    logger.info('Create train job')
    task = TaskDeclaration.create(
        producer_id=producer.asset_id,
        dataset_id=dataset.asset_id,
        train_model_id=train_model.asset_id,
        workers_needed=args.workers,
        verifiers_needed=args.verifiers,
        batch_size=args.batch,
        epochs=args.epochs,
        weights_ipfs=initial_weights_file.multihash,
        db=producer.db,
        encryption=producer.encryption,
        epochs_in_iteration=args.epochs_in_iteration
    )
    logger.debug('Train job created: {}'.format(task))
def train(train_dir, test_dir, model_path, batch_size, epochs):
    """Train the model on every chunk directory found under *train_dir*.

    NOTE(review): ``test_dir`` is accepted but never used here — no
    evaluation is performed (compare ``train_local``); confirm this is
    intentional.
    """
    model = Model.load_model(path=model_path)

    class LocalProgress(TrainProgress):
        # Log the fractional progress reported by the training loop.
        def progress_callback(self, progress):
            logger.info("Progress: {:.2f}".format(progress))

    # os.walk yields (dirpath, dirnames, filenames) tuples; drop the first
    # entry (train_dir itself) so only the chunk sub-directories remain.
    walked_dirs = [dirpath for dirpath, _, _ in os.walk(train_dir)]
    chunk_dirs = walked_dirs[1:]

    model.train(
        chunk_dirs=chunk_dirs,
        batch_size=batch_size,
        nb_epochs=epochs,
        current_iteration=1,
        train_progress=LocalProgress(),
    )
def main(self):
    """Combine per-worker weight files into one summarized weights file.

    Feeds every weights file listed in ``self.results_list`` through the
    model's weights summarizer, writes the committed result to
    ``<base_dir>/summarized_weights``, and stores that path on
    ``self.summarized_weights_path``.
    """
    logger.info('Run Summarizer')
    model = Model.load_model(self.model_path)
    summarizer = model.get_weights_summarizer()
    serializer = model.get_weights_serializer()

    for path in self.results_list:
        summarizer.update(weights=serializer.load(path))

    merged_weights = summarizer.commit()
    out_path = os.path.join(self.base_dir, 'summarized_weights')
    serializer.save(path=out_path, weights=merged_weights)
    self.summarized_weights_path = out_path
def main(self):
    """Run a single-chunk training pass used for estimation.

    Batch size, epoch count and current iteration are read from
    ``sys.argv[2:5]``; initial weights are loaded only when
    ``self.init_weights_path`` is set.
    """
    logger.info('Start estimation')
    batch_size = int(sys.argv[2])
    nb_epochs = int(sys.argv[3])
    current_iteration = int(sys.argv[4])

    model = Model.load_model(path=self.model_path)
    if self.init_weights_path is None:
        logger.info('Initial weights are not set')
    else:
        model.load_weights(self.init_weights_path)

    model.train(
        chunk_dirs=[self.train_chunk_dir],
        batch_size=batch_size,
        nb_epochs=nb_epochs,
        current_iteration=current_iteration,
        train_progress=TrainProgress(),
    )
def main(self):
    """Train on the configured chunks and persist the resulting weights.

    Batch size, epoch count and current iteration are read from
    ``sys.argv[2:5]``. The trained weights are written to
    ``<base_dir>/result_weights``; the path and the training history are
    stored on ``self.train_weights_path`` / ``self.train_history``.
    """
    logger.info('Start training')
    batch_size = int(sys.argv[2])
    nb_epochs = int(sys.argv[3])
    current_iteration = int(sys.argv[4])

    model = Model.load_model(path=self.model_path)
    if self.init_weights_path is None:
        logger.info('Initial weights are not set')
    else:
        model.load_weights(self.init_weights_path)

    history = model.train(
        chunk_dirs=self.chunk_dirs,
        batch_size=batch_size,
        nb_epochs=nb_epochs,
        train_progress=TrainProgress(),
        current_iteration=current_iteration,
    )

    weights_out = os.path.join(self.base_dir, 'result_weights')
    model.save_weights(weights_out)
    self.train_weights_path = weights_out
    self.train_history = history
def train_local(train_dir, test_dir, model_path, batch_size, epochs):
    """Train on the chunk directories under *train_dir*, then evaluate on
    those under *test_dir* and print the resulting loss/accuracy.
    """
    model = Model.load_model(path=model_path)

    class LocalProgress(TrainProgress):
        # Log the fractional progress reported by the training loop.
        def progress_callback(self, progress):
            logger.info("Progress: {:.2f}".format(progress))

    def immediate_subdirs(root):
        # Only direct children that are directories count as chunks.
        return [
            os.path.join(root, entry)
            for entry in os.listdir(root)
            if os.path.isdir(os.path.join(root, entry))
        ]

    train_chunks = immediate_subdirs(train_dir)
    test_chunks = immediate_subdirs(test_dir)

    model.train(
        chunk_dirs=train_chunks,
        batch_size=batch_size,
        current_iteration=1,
        nb_epochs=epochs,
        train_progress=LocalProgress())

    loss, acc = model.eval(chunk_dirs=test_chunks)
    print('loss({}):{}, acc({}):{}'.format(
        loss.__class__.__name__, loss, acc.__class__.__name__, acc))