def run(name: str = None, config: dict = None, device: str = None, check: bool = False) -> dict:
    """Run the experiment end-to-end and return its parts.

    Args:
        name: run name; when falsy, one is derived from ``experiment_name``
            plus a timestamp. NOTE(review): ``name`` is computed but never
            read afterwards — confirm it was meant to be passed to the
            monitoring/W&B setup.
        config: experiment config; falls back to the module-level
            ``experiment_config``.
        device: compute device; falls back to ``utils.get_device()``.
        check: forwarded to ``runner.run_experiment`` (dry-run mode).

    Returns:
        dict with the ``runner``, ``experiment`` and ``config`` used.
    """
    if not config:
        config = experiment_config
    if not device:
        device = utils.get_device()
    print(f"device: {device}")
    utils.set_global_seed(SEED)
    # initialize Weights & Biases run name
    # NOTE(review): "%Y-%m-%d-%S" renders seconds without hour/minute —
    # looks like a typo for "%Y-%m-%d-%H-%M-%S"; confirm before relying on it.
    if not name:
        stamp = f"{datetime.datetime.now():%Y-%m-%d-%S}"
        name = '_'.join(filter(None, [experiment_name, stamp]))
    # convert parquet to zip archives of images
    parquet_to_images(TRAIN, ZIP_TRAIN_FILE, SIZE)
    parquet_to_images(TEST, ZIP_TEST_FILE, SIZE)
    # run experiment
    runner = SupervisedRunner(
        device=device,
        input_key="images",
        output_key=["logit_" + c for c in output_classes],
        input_target_key=list(output_classes),
    )
    experiment = Experiment(config)
    runner.run_experiment(experiment, check=check)
    return {
        'runner': runner,
        'experiment': experiment,
        'config': config,
    }
def run(name: str = None, config: dict = None, device: str = None, check: bool = False) -> dict:
    """Run the experiment end-to-end and return its parts.

    Args:
        name: NOTE(review): accepted but never read anywhere in the body —
            confirm whether it was meant to override the monitoring name.
        config: experiment config; falls back to a deep copy of the
            module-level ``experiment_config``.
        device: compute device; falls back to ``utils.get_device()``.
        check: forwarded to ``runner.run_experiment`` (dry-run mode).

    Returns:
        dict with the ``runner``, ``experiment`` and ``config`` used.
    """
    if not config:
        # BUGFIX: the body mutates the config in place below; the original
        # `config or experiment_config` silently edited the shared
        # module-level dict, leaking state between successive calls.
        import copy
        config = copy.deepcopy(experiment_config)
    device = device or utils.get_device()
    print(f"device: {device}")
    utils.set_global_seed(SEED)
    config['monitoring_params']['name'] = EXPERIMENT_NAME
    # convert parquet to zip archives of images
    parquet_to_images(TRAIN, ZIP_TRAIN_FILE, SIZE)
    parquet_to_images(TEST, ZIP_TEST_FILE, SIZE)
    # run experiment
    runner = SupervisedRunner(
        device=device,
        input_key="images",
        output_key=["logit_" + c for c in output_classes.keys()],
        input_target_key=list(output_classes.keys()),
    )
    experiment = Experiment(config)
    runner.run_experiment(experiment, check=check)
    return {
        'runner': runner,
        'experiment': experiment,
        'config': config,
    }
def run(max_lr: float = 1e-1, steps_per_epoch: int = 1413, device: str = None, check: bool = False) -> tuple:
    """Run the experiment with a OneCycleLR schedule injected into the config.

    Args:
        max_lr: peak learning rate for the OneCycleLR scheduler.
        steps_per_epoch: scheduler steps per epoch
            (NOTE(review): 1413 is presumably the train loader length at the
            current batch size — confirm it matches the data pipeline).
        device: compute device; falls back to ``utils.get_device()``.
        check: forwarded to ``runner.run_experiment`` (dry-run mode).

    Returns:
        ``(experiment, runner)`` tuple — note this variant returns a tuple,
        unlike the sibling ``run`` variants that return a dict.
    """
    # deep copy so scheduler/monitoring edits below never touch the shared default
    config = copy.deepcopy(experiment_config)
    device = device or utils.get_device()
    print(f"device: {device}")
    utils.set_global_seed(SEED)
    # convert parquet to zip archives of images
    parquet_to_images(TRAIN, ZIP_TRAIN_FILE, SIZE)
    parquet_to_images(TEST, ZIP_TEST_FILE, SIZE)
    config['monitoring_params']['name'] = EXPERIMENT_NAME
    config['stages']['state_params']['checkpoint_data']['image_size'] = SIZE
    # add scheduler to config
    config["stages"]["scheduler_params"] = {
        "scheduler": "OneCycleLR",
        "max_lr": max_lr,
        "epochs": config["stages"]["state_params"]["num_epochs"],
        "steps_per_epoch": steps_per_epoch,
        # initial lr = max_lr / div_factor; final lr = initial / final_div_factor
        "div_factor": 200,
        "final_div_factor": 1e5,
    }
    experiment = Experiment(config)
    # run experiment
    runner = SupervisedWandbRunner(
        device=device,
        input_key="images",
        output_key=["logit_" + c for c in output_classes.keys()],
        input_target_key=list(output_classes.keys()),)
    runner.run_experiment(experiment, check=check)
    return experiment, runner
def run(logdir_suffix: str = '', device: str = None, check: bool = False) -> dict:
    """Run the experiment, logging to ``./logs`` plus an optional suffix.

    Args:
        logdir_suffix: appended verbatim to the ``./logs`` log directory.
        device: compute device; falls back to ``utils.get_device()``.
        check: when True, uses the plain ``SupervisedRunner`` (no W&B)
            and forwards the flag to ``run_experiment`` as a dry run.

    Returns:
        dict with the ``runner`` and ``experiment`` used.
    """
    if not device:
        device = utils.get_device()
    print(f"device: {device}")
    utils.set_global_seed(SEED)
    # convert parquet to zip archives of images
    parquet_to_images(TRAIN, ZIP_TRAIN_FILE, SIZE)
    parquet_to_images(TEST, ZIP_TEST_FILE, SIZE)
    # run experiment: skip the W&B-enabled runner on check (dry) runs
    if check:
        runner_cls = SupervisedRunner
    else:
        runner_cls = SupervisedWandbRunner
    logit_keys = ["logit_" + c for c in output_classes]
    runner = runner_cls(
        device=device,
        input_key="images",
        output_key=["features"] + logit_keys,
        input_target_key=list(output_classes),
    )
    experiment = Experiment(logdir='./logs' + logdir_suffix)
    runner.run_experiment(experiment, check=check)
    return {
        'runner': runner,
        'experiment': experiment,
    }
def run(config: dict = None, logdir_suffix: str = '', device: str = None, check: bool = False) -> dict:
    """Run the experiment, configuring monitoring, image size and logdir.

    Args:
        config: experiment config; falls back to a deep copy of the
            module-level ``experiment_config``.
        logdir_suffix: appended to ``config['args']['logdir']``.
        device: compute device; falls back to ``utils.get_device()``.
        check: when True, uses the plain ``SupervisedRunner`` (no W&B)
            and forwards the flag to ``run_experiment`` as a dry run.

    Returns:
        dict with the ``runner``, ``experiment`` and ``config`` used.
    """
    if not config:
        # BUGFIX: the body mutates the config in place below (including
        # `logdir += suffix`); the original `config or experiment_config`
        # edited the shared module-level dict, so repeated calls would
        # append the suffix again and again to the same logdir.
        import copy
        config = copy.deepcopy(experiment_config)
    device = device or utils.get_device()
    print(f"device: {device}")
    utils.set_global_seed(SEED)
    config['monitoring_params']['name'] = EXPERIMENT_NAME
    config['stages']['state_params']['checkpoint_data']['image_size'] = SIZE
    config['args']['logdir'] += logdir_suffix
    # convert parquet to zip archives of images
    parquet_to_images(TRAIN, ZIP_TRAIN_FILE, SIZE)
    parquet_to_images(TEST, ZIP_TEST_FILE, SIZE)
    # run experiment: skip the W&B-enabled runner on check (dry) runs
    RunnerClass = SupervisedRunner if check else SupervisedWandbRunner
    runner = RunnerClass(
        device=device,
        input_key="images",
        output_key=["logit_" + c for c in output_classes.keys()],
        input_target_key=list(output_classes.keys()),
    )
    experiment = Experiment(config)
    runner.run_experiment(experiment, check=check)
    return {
        'runner': runner,
        'experiment': experiment,
        'config': config,
    }
def run(config: dict = None, model_filepath: str = None, logdir_suffix: str = '_' + EXPERIMENT_NAME, max_lr: float = 1e-1, steps_per_epoch: int = 1413, device: str = None, check: bool = False) -> dict:
    """Run the experiment with a OneCycleLR schedule and optional warm-start.

    Args:
        config: experiment config; falls back to a deep copy of the
            module-level ``experiment_config``.
        model_filepath: passed to ``Experiment`` — presumably a checkpoint
            to warm-start from; confirm against ``Experiment``'s signature.
        logdir_suffix: appended to ``config['args']['logdir']``
            (default is computed once at import time from EXPERIMENT_NAME).
        max_lr: peak learning rate for the OneCycleLR scheduler.
        steps_per_epoch: scheduler steps per epoch (NOTE(review): 1413 is
            presumably the train loader length — confirm it matches the data).
        device: compute device; falls back to ``utils.get_device()``.
        check: when True, uses the plain ``SupervisedRunner`` (no W&B)
            and forwards the flag to ``run_experiment`` as a dry run.

    Returns:
        dict with the ``runner``, ``experiment`` and ``config`` used.
    """
    if not config:
        # BUGFIX: the body mutates the config in place below (including
        # `logdir += suffix` and scheduler injection); the original
        # `config or experiment_config` edited the shared module-level dict,
        # so repeated calls would compound the logdir suffix and leak state.
        import copy
        config = copy.deepcopy(experiment_config)
    device = device or utils.get_device()
    print(f"device: {device}")
    utils.set_global_seed(SEED)
    config['monitoring_params']['name'] = EXPERIMENT_NAME
    config['stages']['state_params']['checkpoint_data']['image_size'] = SIZE
    config['args']['logdir'] += logdir_suffix
    # convert parquet to zip archives of images
    parquet_to_images(TRAIN, ZIP_TRAIN_FILE, SIZE)
    parquet_to_images(TEST, ZIP_TEST_FILE, SIZE)
    # add scheduler to config
    config["stages"]["scheduler_params"] = {
        "scheduler": "OneCycleLR",
        "max_lr": max_lr,
        "epochs": config["stages"]["state_params"]["num_epochs"],
        "steps_per_epoch": steps_per_epoch,
        # initial lr = max_lr / div_factor; final lr = initial / final_div_factor
        "div_factor": 500,
        "final_div_factor": 1e5,
        "max_momentum": 0.999
    }
    # run experiment: skip the W&B-enabled runner on check (dry) runs
    RunnerClass = SupervisedRunner if check else SupervisedWandbRunner
    runner = RunnerClass(
        device=device,
        input_key="images",
        output_key=["logit_" + c for c in output_classes.keys()],
        input_target_key=list(output_classes.keys()),
    )
    experiment = Experiment(config, model_filepath)
    runner.run_experiment(experiment, check=check)
    return {
        'runner': runner,
        'experiment': experiment,
        'config': config,
    }