# Training script variant that passes the normalization statistics to the model via params.
def main(argv):
    parser = argparse.ArgumentParser(description='TODO')
    parser.add_argument('config',
                        type=str,
                        help='Path to configuration file to use.')
    parser.add_argument('mean',
                        type=str,
                        help='Path to npy file holding mean for normalization.')
    parser.add_argument('variance',
                        type=str,
                        help='Path to npy file holding variance for normalization.')
    parser.add_argument('model_dir',
                        type=str,
                        help='Directory in which to store the trained model.')
    args = parser.parse_args()

    mean = np.load(args.mean)
    variance = np.load(args.variance)
    stddev = np.sqrt(variance)

    def _normalize_op(features):
        # Normalize each channel of the 'patch' tensor to zero mean and unit variance.
        channels = [
            tf.expand_dims(
                (features['patch'][:, :, channel] - mean[channel]) / stddev[channel], -1)
            for channel in range(3)
        ]
        features['patch'] = tf.concat(channels, 2)
        return features

    cutil.make_directory(args.model_dir)
    cutil.publish(args.model_dir)

    config_path = args.config
    config = ctfm.parse_json(config_path)

    config_datasets = config.get('datasets')
    config_model = config.get('model')

    train_fn = ctfd.construct_train_fn(config_datasets, operations=[_normalize_op])

    steps = int(config_datasets.get('training').get('size') / config_datasets.get('batch'))

    # Normalization statistics are passed to the model_fn via params.
    params_dict = {
        'config': config_model,
        'model_dir': args.model_dir,
        'mean': mean,
        'stddev': stddev
    }

    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        model_dir=args.model_dir,
        params=params_dict,
        config=tf.estimator.RunConfig(model_dir=args.model_dir,
                                      save_summary_steps=1000,
                                      log_step_count_steps=1000))

    # Keep a copy of the configuration file next to the trained model.
    if not os.path.exists(os.path.join(args.model_dir, os.path.basename(config_path))):
        shutil.copy2(config_path, args.model_dir)

    for epoch in range(config_datasets.get('training').get('epochs')):
        classifier = classifier.train(input_fn=train_fn, steps=steps)

    export_dir = os.path.join(args.model_dir, 'saved_model')
    cutil.make_directory(export_dir)
    cutil.publish(export_dir)

    cutil.publish(args.model_dir)
    cutil.publish(export_dir)
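# The Estimator above expects `my_model` to follow the standard TF 1.x model_fn
# signature (features, labels, mode, params) and to return an EstimatorSpec.
# The sketch below only illustrates that contract; the layer sizes and the
# 'num_classes' key are assumptions, not the project's actual model definition.
# Note that params['mean'] and params['stddev'] are also available here, e.g.
# for in-graph normalization at serving time.
def _my_model_sketch(features, labels, mode, params):
    net = tf.layers.flatten(features['patch'])
    net = tf.layers.dense(net, 128, activation=tf.nn.relu)
    logits = tf.layers.dense(net, params['config'].get('num_classes', 2))

    predictions = {
        'classes': tf.argmax(logits, axis=1),
        'probabilities': tf.nn.softmax(logits)
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=predictions)

    loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)
    if mode == tf.estimator.ModeKeys.TRAIN:
        train_op = tf.train.AdamOptimizer().minimize(
            loss, global_step=tf.train.get_global_step())
        return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)

    return tf.estimator.EstimatorSpec(mode, loss=loss)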
# Training script variant that additionally defines a subsampling op and exports a SavedModel for serving.
def main(argv):
    parser = argparse.ArgumentParser(description='TODO')
    parser.add_argument('config',
                        type=str,
                        help='Path to configuration file to use.')
    parser.add_argument('mean',
                        type=str,
                        help='Path to npy file holding mean for normalization.')
    parser.add_argument('variance',
                        type=str,
                        help='Path to npy file holding variance for normalization.')
    parser.add_argument('model_dir',
                        type=str,
                        help='Directory in which to store the trained model.')
    args = parser.parse_args()

    mean = np.load(args.mean)
    variance = np.load(args.variance)
    stddev = np.sqrt(variance)

    def _normalize_op(features):
        # Normalize each channel of the 'patch' tensor to zero mean and unit variance.
        channels = [
            tf.expand_dims(
                (features['patch'][:, :, channel] - mean[channel]) / stddev[channel], -1)
            for channel in range(3)
        ]
        features['patch'] = tf.concat(channels, 2)
        return features

    def _subsampling_op(features):
        # Subsample the patch by a factor of 2 (defined but not used in the pipeline below).
        features['patch'] = ctfi.subsample(features['patch'], 2)
        return features

    cutil.make_directory(args.model_dir)
    cutil.publish(args.model_dir)

    config_path = args.config
    config = ctfm.parse_json(config_path)

    config_datasets = config.get('datasets')
    config_model = config.get('model')

    train_fn = ctfd.construct_train_fn(config_datasets, operations=[_normalize_op])

    # Synthetic input pipeline, kept for debugging:
    # def train_fn():
    #     dataset = tf.data.Dataset.from_tensor_slices(np.random.rand(256, 32, 32, 3))
    #     dataset = dataset.map(lambda x: ({"patch": x}, 0)).batch(256).repeat()
    #     return dataset

    steps = int(config_datasets.get('training').get('size') / config_datasets.get('batch'))

    params_dict = {'config': config_model, 'model_dir': args.model_dir}

    classifier = tf.estimator.Estimator(
        model_fn=my_model,
        model_dir=args.model_dir,
        params=params_dict,
        config=tf.estimator.RunConfig(model_dir=args.model_dir,
                                      save_summary_steps=1000,
                                      log_step_count_steps=1000))

    # Keep a copy of the configuration file next to the trained model.
    if not os.path.exists(os.path.join(args.model_dir, os.path.basename(config_path))):
        shutil.copy2(config_path, args.model_dir)

    for epoch in range(config_datasets.get('training').get('epochs')):
        classifier = classifier.train(input_fn=train_fn, steps=steps)

    export_dir = os.path.join(args.model_dir, 'saved_model')
    cutil.make_directory(export_dir)
    cutil.publish(export_dir)

    # TODO: Write command to create serving input receiver fn from config.
    serving_input_receiver_fn = ctfd.construct_serving_fn(config_model['inputs'])

    classifier.export_saved_model(export_dir, serving_input_receiver_fn)

    cutil.publish(args.model_dir)
    cutil.publish(export_dir)
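# If ctfd.construct_serving_fn were not available, an equivalent serving input
# receiver could be assembled directly with the estimator export API. This is a
# minimal sketch: the feature key 'patch' mirrors the key used by the input
# pipeline above, while the shape (None, 32, 32, 3) and float32 dtype are
# assumptions taken from the commented-out debug pipeline, not from the
# project's configuration.
def _manual_serving_input_receiver_fn():
    patch = tf.placeholder(dtype=tf.float32, shape=[None, 32, 32, 3], name='patch')
    return tf.estimator.export.ServingInputReceiver(features={'patch': patch},
                                                    receiver_tensors={'patch': patch})

# Usage would mirror the export call above:
# classifier.export_saved_model(export_dir, _manual_serving_input_receiver_fn)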