def __init__(self,
             log_dir: Union[str, Path],
             model_session: tf.Session,
             verbose: int = 1):
    """Set up TensorBoard scalar loggers for FLOPs and parameter counts.

    Args:
        log_dir: Base folder for the TensorBoard event files; "/flops" and
            "/params" sub-folders are created under it.
        model_session: TensorFlow session the profiled model lives in.
        verbose: Verbosity level (kept for later use by the callback).
    """
    super().__init__()

    base = str(log_dir)
    self._flops_logger = tfboard_loggers.TFBoardScalarLogger(base + "/flops")
    self._params_logger = tfboard_loggers.TFBoardScalarLogger(base + "/params")
    self._model_session = model_session
    self._verbose = verbose
    def __init__(self,
                 log_dir: Union[str, Path],
                 sub_folder: str = "parameters",
                 verbose: int = 1):
        super().__init__()

        log_dir = str(log_dir) + "/" + sub_folder
        self._params_logger = tfboard_loggers.TFBoardScalarLogger(log_dir)
        self._verbose = verbose
# Method 2: use the continuous text logger as a context manager so the
# underlying writer is flushed/closed automatically on exit.

continuous_text_log_folder_path = BASE_LOG_FOLDER_PATH / "continuous_m2_text_test"
with tfboard_loggers.TFBoardContinuousTextLogger(
        continuous_text_log_folder_path,
        "continuous_m2_text_tag") as continuous_text_logger_m2:
    # Exercise each message level the logger exposes.
    continuous_text_logger_m2.markdown("# Method 2")
    continuous_text_logger_m2.info("This is an info too")
    continuous_text_logger_m2.error("This is an error :(")
    continuous_text_logger_m2.warn("This is a warning, so pay attention")

############ Logging Scalars ##############

scalar_log_folder_path = BASE_LOG_FOLDER_PATH / "scalar_test"
scalar_logger = tfboard_loggers.TFBoardScalarLogger(scalar_log_folder_path)

# Log one period of a sine wave, one scalar point per step.
for i, t in enumerate(np.arange(0.0, 1.0, 0.01)):
    val = np.sin(2 * np.pi * t)
    scalar_logger.log_scalar("scalar_tag", val, i)

############ Logging Histograms ##############

hist_log_folder_path = BASE_LOG_FOLDER_PATH / "histogram_test"
hist_logger = tfboard_loggers.TFBoardHistogramLogger(hist_log_folder_path)

# Log a histogram per step; scaling by (i + 1) widens the distribution over
# time so the drift is visible in TensorBoard's histogram view.
for i in range(1000):
    val = np.random.rand(50) * (i + 1)
    hist_logger.log_histogram("hist_tag", val, i, bins=100)

############ Logging Images ##############
# Example 4
print("Combined:")
# Stacked generator->discriminator model used to train G through D's output.
GD = cdcgan_models.generator_containing_discriminator(G, D)
GD.summary()

# Adam(0.0002, 0.5): learning rate and beta_1 — the usual DCGAN settings.
optimizer = optimizers.Adam(0.0002, 0.5)

G.compile(loss='binary_crossentropy', optimizer=optimizer)
GD.compile(loss='binary_crossentropy', optimizer=optimizer)
# Re-enable D's weights before compiling it standalone; presumably the
# combined-model builder froze D so only G trains through GD — TODO confirm.
D.trainable = True
D.compile(loss='binary_crossentropy', optimizer=optimizer)

# Setup Tensorboard loggers

tfboard_loggers.TFBoardModelGraphLogger.log_graph("../models/logs", K.get_session())
loss_logger = tfboard_loggers.TFBoardScalarLogger("../models/logs/loss")
image_logger = tfboard_loggers.TFBoardImageLogger("../models/logs/generated_images")

# Model Training

iteration = 0

nb_of_iterations_per_epoch = int(X_train.shape[0] / BATCH_SIZE)
print("Number of iterations per epoch: {0}".format(nb_of_iterations_per_epoch))

# NOTE(review): the loop body is truncated here — the rest of the epoch loop
# lies outside this excerpt.
for epoch in range(EPOCHS):
    pbar = tqdm(desc="Epoch: {0}".format(epoch), total=X_train.shape[0])

    g_losses_for_epoch = []
    d_losses_for_epoch = []
# Example 5
# Pin the experiment to the GPU requested on the command line.
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)

EXPERIMENT_FOLDER_PATH = Path(__file__).resolve().parent / "experiments" / args.name
# Wipe a previous run of the same name only when explicitly requested;
# mkdir(exist_ok=False) then guarantees we never silently reuse old results.
if args.overwrite_experiment and EXPERIMENT_FOLDER_PATH.is_dir():
    shutil.rmtree(str(EXPERIMENT_FOLDER_PATH))
EXPERIMENT_FOLDER_PATH.mkdir(parents=True, exist_ok=False)

# Persist the CLI arguments next to the results for reproducibility.
args_json_path = EXPERIMENT_FOLDER_PATH / "args.json"
# FIX: args_json_path is already an absolute path rooted at
# EXPERIMENT_FOLDER_PATH; re-joining it (EXPERIMENT_FOLDER_PATH /
# args_json_path) was redundant — pathlib discards the left operand when the
# right-hand path is absolute. Pass the path directly.
fed_learn.save_args_as_json(args, args_json_path)
# Also mirror the args into TensorBoard as a markdown text summary (step -1
# so it sorts before any training step).
tfboard_loggers.TFBoardTextLogger(EXPERIMENT_FOLDER_PATH).log_markdown("args", "```\n{0}\n```".format(
    json.dumps(args.__dict__, indent=4, sort_keys=True)), -1)

train_hist_path = EXPERIMENT_FOLDER_PATH / "fed_learn_global_test_results.json"
global_weight_path = EXPERIMENT_FOLDER_PATH / "global_weights.h5"

tf_scalar_logger = tfboard_loggers.TFBoardScalarLogger(EXPERIMENT_FOLDER_PATH)

# Per-client local training hyper-parameters.
client_train_params = {"epochs": args.client_epochs, "batch_size": args.batch_size}


def model_fn():
    """Build a fresh model for 32x32x3 inputs and 10 classes.

    Used as a factory by the federated server so every client can
    instantiate an identically-configured model.
    """
    model = fed_learn.create_model((32, 32, 3),
                                   10,
                                   init_with_imagenet=False,
                                   learning_rate=args.learning_rate)
    return model


# Federated averaging is the aggregation strategy for client weights.
weight_summarizer = fed_learn.FedAvg()
server = fed_learn.Server(model_fn, weight_summarizer, args.clients, args.fraction, args.debug)

# Optionally warm-start the global model from a weights file given on the CLI.
weight_path = args.weights_file
if weight_path is not None:
    server.load_model_weights(weight_path)
# Example 6
 def create_scalar_logger(self) -> tfboard_loggers.TFBoardScalarLogger:
     """Return a TensorBoard scalar logger rooted at the experiment folder."""
     return tfboard_loggers.TFBoardScalarLogger(self.experiment_folder_path)