def _make_run(self, run):
    """Serialize a Run object into a RunRecord protobuf.

    Copies the run's core fields via ``run._make_proto_run``, then adds the
    host and (if present) the run config.

    Args:
        run: the in-process Run object to serialize.

    Returns:
        A populated ``wandb_internal_pb2.RunRecord``.
    """
    proto_run = wandb_internal_pb2.RunRecord()
    run._make_proto_run(proto_run)
    # Proto string fields cannot be assigned None; only set host when the
    # settings actually provide one (matches the later revision of this
    # method elsewhere in this file).
    if run._settings.host:
        proto_run.host = run._settings.host
    if run._config is not None:
        config_dict = run._config._as_dict()
        self._make_config(config_dict, obj=proto_run.config)
    return proto_run
def _make_run(self, run):
    """Serialize a Run object into a RunRecord protobuf.

    Copies the run's core fields via ``run._make_proto_run``, then adds the
    host, the run config (if any), and any collected telemetry.

    Args:
        run: the in-process Run object to serialize.

    Returns:
        A populated ``wandb_internal_pb2.RunRecord``.
    """
    proto_run = wandb_internal_pb2.RunRecord()
    run._make_proto_run(proto_run)
    # Proto string fields cannot be assigned None; only set host when the
    # settings actually provide one (matches the later revision of this
    # method elsewhere in this file).
    if run._settings.host:
        proto_run.host = run._settings.host
    if run._config is not None:
        config_dict = run._config._as_dict()
        self._make_config(data=config_dict, obj=proto_run.config)
    if run._telemetry_obj:
        proto_run.telemetry.MergeFrom(run._telemetry_obj)
    return proto_run
def _make_run(self, run: "Run") -> pb.RunRecord:
    """Build a RunRecord proto capturing *run*'s identity, config and telemetry.

    Args:
        run: the in-process Run object to serialize.

    Returns:
        A populated ``pb.RunRecord``.
    """
    record = pb.RunRecord()
    run._make_proto_run(record)
    # Guard: a proto string field rejects None, so assign only when set.
    host = run._settings.host
    if host:
        record.host = host
    cfg = run._config
    if cfg is not None:
        config_dict = cfg._as_dict()  # type: ignore
        self._make_config(data=config_dict, obj=record.config)
    telemetry = run._telemetry_obj
    if telemetry:
        record.telemetry.MergeFrom(telemetry)
    return record
def make_run_data(data):
    """Build a RunRecord proto from a plain dict of run attributes.

    Args:
        data: mapping that may contain "run_id", "entity", "project",
            "group", "job_type" and "config" keys.

    Returns:
        A ``wandb_internal_pb2.RunRecord`` with the provided fields set.
    """
    rdata = wandb_internal_pb2.RunRecord()
    # Map dict keys to proto field names; falsy/missing values are skipped
    # because proto string fields cannot be assigned None.
    for key, attr in (
        ("run_id", "run_id"),
        ("entity", "entity"),
        ("project", "project"),
        ("group", "run_group"),
        ("job_type", "job_type"),
    ):
        value = data.get(key)
        if value:
            setattr(rdata, attr, value)
    # The original fetched "config" twice in a row; one lookup suffices.
    config_dict = data.get("config")
    if config_dict:
        make_config(config_dict, obj=rdata.config)
    return rdata
def fn(write_function, logdir="./", save=True, root_dir="./"):
    """Start a mocked run, attach a TB watcher, run *write_function*, and
    return the parsed mock-server context for assertions.

    Args:
        write_function: zero-arg callable that emits tensorboard events.
        logdir: directory the tb watcher observes.
        save: whether the watcher should save event files.
        root_dir: root used to relativize event file paths.
    """
    with backend_interface() as interface:
        # Assemble a Record -> Request -> RunStartRequest -> RunRecord chain.
        run_proto = pb.RunRecord()
        mocked_run._make_proto_run(run_proto)
        start_request = pb.RunStartRequest()
        start_request.run.CopyFrom(run_proto)
        generic_request = pb.Request()
        generic_request.run_start.CopyFrom(start_request)
        rec = pb.Record()
        rec.request.CopyFrom(generic_request)
        internal_hm.handle_request_run_start(rec)
        internal_hm._tb_watcher.add(logdir, save, root_dir)
        # need to sleep to give time for the tb_watcher delay
        time.sleep(15)
        write_function()
    return parse_ctx(mock_server.ctx)
def _send_tensorboard(self, tb_root, tb_logdirs, send_manager):
    """Sync local tensorboard event directories up as a wandb run.

    Args:
        tb_root: root directory used to relativize the tb log dirs.
        tb_logdirs: iterable of tensorboard log directories to watch.
        send_manager: sender used to push records to the backend.
    """
    # Resolve the destination entity lazily from the viewer endpoint.
    if self._entity is None:
        viewer, server_info = send_manager._api.viewer_server_info()
        self._entity = viewer.get("entity")

    run_proto = wandb_internal_pb2.RunRecord()
    run_proto.run_id = self._run_id or wandb.util.generate_id()
    run_proto.project = self._project or wandb.util.auto_project_name(None)
    run_proto.entity = self._entity

    run_url = "{}/{}/{}/runs/{}".format(
        self._app_url,
        url_quote(run_proto.entity),
        url_quote(run_proto.project),
        url_quote(run_proto.run_id),
    )
    print("Syncing: %s ..." % run_url)
    sys.stdout.flush()

    run_record = send_manager._interface._make_record(run=run_proto)
    send_manager.send(run_record)

    settings = wandb.Settings(
        root_dir=TMPDIR.name,
        run_id=run_proto.run_id,
        _start_datetime=datetime.datetime.now(),
        _start_time=time.time(),
    )
    watcher = tb_watcher.TBWatcher(
        settings, run_proto, send_manager._interface, True
    )
    for tb_dir in tb_logdirs:
        watcher.add(tb_dir, True, tb_root)
        sys.stdout.flush()
    watcher.finish()

    # send all of our records like a boss
    pending = send_manager._interface.record_q
    while not pending.empty():
        send_manager.send(pending.get(block=True))
    sys.stdout.flush()
    send_manager.finish()
def _send_tensorboard(self, tb_root, tb_logdirs, send_manager):
    """Sync local tensorboard event directories up as a wandb run,
    routing records through a HandleManager so steps/summaries and
    media file types are processed like a live run.

    Args:
        tb_root: root directory used to relativize the tb log dirs.
        tb_logdirs: iterable of tensorboard log directories to watch.
        send_manager: sender whose settings/api are reused; note it is
            rebound below to a freshly constructed SendManager.
    """
    # Resolve the destination entity lazily from the viewer endpoint.
    if self._entity is None:
        viewer, server_info = send_manager._api.viewer_server_info()
        self._entity = viewer.get("entity")
    proto_run = wandb_internal_pb2.RunRecord()
    proto_run.run_id = self._run_id or wandb.util.generate_id()
    proto_run.project = self._project or wandb.util.auto_project_name(None)
    proto_run.entity = self._entity
    url = "{}/{}/{}/runs/{}".format(
        self._app_url,
        url_quote(proto_run.entity),
        url_quote(proto_run.project),
        url_quote(proto_run.run_id),
    )
    print("Syncing: %s ..." % url)
    sys.stdout.flush()
    # using a handler here automatically handles the step
    # logic, adds summaries to the run, and handles different
    # file types (like images)... but we need to remake the send_manager
    record_q = queue.Queue()
    sender_record_q = queue.Queue()
    new_interface = interface.BackendSender(record_q)
    send_manager = sender.SendManager(
        send_manager._settings, sender_record_q, queue.Queue(), new_interface
    )
    record = send_manager._interface._make_record(run=proto_run)
    settings = wandb.Settings(
        root_dir=TMPDIR.name,
        run_id=proto_run.run_id,
        _start_datetime=datetime.datetime.now(),
        _start_time=time.time(),
    )
    handle_manager = handler.HandleManager(
        settings, record_q, None, False, sender_record_q, None, new_interface
    )
    mkdir_exists_ok(settings.files_dir)
    send_manager.send_run(record, file_dir=settings.files_dir)
    watcher = tb_watcher.TBWatcher(settings, proto_run, new_interface, True)
    for tb in tb_logdirs:
        watcher.add(tb, True, tb_root)
        sys.stdout.flush()
    watcher.finish()
    # send all of our records like a boss
    progress_step = 0
    spinner_states = ["-", "\\", "|", "/"]
    line = " Uploading data to wandb\r"
    # Drain the handler queue; each handled record may enqueue work for the
    # sender, so the inner loop flushes the sender after every handle().
    # NOTE(review): the len()/next() protocol assumes HandleManager and
    # SendManager expose queue length/iteration — confirm in their classes.
    while len(handle_manager) > 0:
        data = next(handle_manager)
        handle_manager.handle(data)
        while len(send_manager) > 0:
            data = next(send_manager)
            send_manager.send(data)
        # Show a spinner so long uploads report progress on one line.
        print_line = spinner_states[progress_step % 4] + line
        wandb.termlog(print_line, newline=False, prefix=True)
        progress_step += 1
    # finish sending any data
    while len(send_manager) > 0:
        data = next(send_manager)
        send_manager.send(data)
    sys.stdout.flush()
    handle_manager.finish()
    send_manager.finish()
def make_run_data(data): rdata = wandb_internal_pb2.RunRecord() if run_id := data.get("run_id"): rdata.run_id = run_id