def main():
    config = argparse.parse_cmd(arguments)
    datasets = {
        'train': dataloader.get_dataset(
            config['dataloader'],
            transformation_list=config['dataloader']['train_list'],
            num_classes=10,
            split=config['dataloader']['train_split_id']),
        'val': dataloader.get_dataset(
            config['dataloader'],
            transformation_list=config['dataloader']['val_list'],
            num_classes=10,
            split=config['dataloader']['val_split_id'])
    }
    for dataset_type in ['train', 'val']:
        for i, (images, y) in enumerate(datasets[dataset_type]):
            image = images[0]
            # OpenCV handles images as BGR, TF as RGB, so reverse the channel axis
            image = image.numpy()[:, :, ::-1]
            cv2.imwrite(os.path.join('/tmp', f'{dataset_type}_{i}.png'), image)
            if i == 20:
                break
def main():
    global plot
    args = argparse.parse_cmd(arguments)
    if args['no_plot']:
        plot = False
    server = args['server']
    remote_dir = args['remote_dir']
    out = run_bash(f'ssh {server} "cd {remote_dir} && cat */trial.json"')
    jsons = split_json(out)
    csv_content = 'experiment_id,factor,framework,depth,score\n'
    for trial in jsons:
        trial = json.loads(trial)
        values = trial['hyperparameters']['values']
        csv_values = [
            trial['trial_id'], values['factor'], values['framework'],
            values['depth'], trial['score']
        ]
        csv_content += ','.join(map(parse_str, csv_values)) + '\n'
    with open(args['csv_path'], 'w') as f:
        f.write(csv_content)
    if not plot:
        return
    # plot the results as a factor x depth heatmap
    data = pd.read_csv(args['csv_path'])
    print(data)
    f, ax = plt.subplots(figsize=(9, 6))
    data = data.drop_duplicates(subset=["factor", "depth"])
    data = data.pivot(index="factor", columns="depth", values="score")
    print(data)
    sns.heatmap(data, annot=True, vmin=0, vmax=1, cmap='CMRmap')
    plt.show()
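# `split_json` is implemented elsewhere in the repo; below is a minimal sketch of
# what it plausibly does, assuming the ssh `cat */trial.json` output is a stream of
# concatenated JSON objects, one per trial. The helper name and exact behavior here
# are assumptions, not the repo's actual implementation:
import json

def split_json_sketch(raw):
    """Split a string of concatenated JSON objects into one string per object."""
    decoder = json.JSONDecoder()
    chunks, pos = [], 0
    raw = raw.strip()
    while pos < len(raw):
        obj, end = decoder.raw_decode(raw, pos)  # parse one object starting at pos
        chunks.append(json.dumps(obj))           # re-serialize for json.loads later
        while end < len(raw) and raw[end].isspace():
            end += 1                             # skip whitespace between objects
        pos = end
    return chunks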
def main():
    args = argparse.parse_cmd(args_spec)
    dataset = get_dataset(args)
    socket = create_zmq_socket(args['zmq_port'])
    images_count, correct_count = send_and_evaluate_dataset(dataset, socket)
    accuracy = correct_count / images_count
    print("Accuracy of the remote model:", accuracy)
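# `create_zmq_socket` is defined elsewhere in the repo; a minimal sketch using
# pyzmq, assuming a REQ/REP exchange with the remote model. The socket type and
# address are guesses for illustration only:
import zmq

def create_zmq_socket_sketch(port):
    context = zmq.Context.instance()
    sock = context.socket(zmq.REQ)           # request/reply: send a batch, get predictions
    sock.connect(f"tcp://localhost:{port}")  # assumed address; the repo may bind instead
    return sock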
def main():
    conf = argparse.parse_cmd(inference_arguments)
    print(conf)
    # currently the first GPU is picked if there are multiple GPUs
    # conf['hardware']['gpu'] = get_gpu_info().get('name')[0]
    prepare_docker(conf["docker_images"])
    # benchmark all docker images against all models against all engines
    env_configs = create_all_environment_configs(conf)
    results = []
    for env_config in env_configs:
        print(
            f"Benchmark {env_config['model']} using {env_config['engine']} on {env_config['docker']}"
        )
        cmd = docker_run_cmd(env_config['docker'], env_config['engine'],
                             env_config['model'], conf)
        print(cmd)
        stream = os.popen(cmd)
        out = stream.read()
        print(out)
        # scan backwards for the last line that looks like a JSON object
        i = 2
        while out.split('\n')[-i][0] != '{':
            i += 1
        r = json.loads(out.split('\n')[-i])
        call_api(r, env_config, conf)
        results.append(r)
    format_results(env_configs, results, conf["output"])
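# The backward scan above assumes the container prints exactly one JSON object near
# the end of its output and crashes on empty lines or missing results. A slightly
# more defensive variant (a hypothetical helper, not part of the repo):
import json

def last_json_line(out):
    """Return the last line of `out` that parses as a JSON object."""
    for line in reversed(out.strip().split('\n')):
        line = line.strip()
        if line.startswith('{'):
            try:
                return json.loads(line)
            except json.JSONDecodeError:
                continue  # looked like JSON but was not; keep scanning
    raise ValueError("no JSON result found in benchmark output")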
def test_parse_yaml_config(self, mock_args):
    arguments = get_arguments()
    arguments.append([str, "yaml_config", "", "config file"])
    create_yaml_file_fixed_location()
    parameters = parse_cmd(arguments)
    self.assertEqual(parameters['parameter_int'], 1)
    self.assertEqual(parameters['parameter_str'], "plop")
    self.assertEqual(parameters['parameter_list'], [1, 2, 3])
    self.assertEqual(parameters['parameter_bool'], True)
    self.assertEqual(parameters['parameter_dict']['parameter_int'], 3)
def main(): """ function called when starting the code via command-line """ args = argparse.parse_cmd(arguments) args['server'] = alchemy_api.start_training(args['server']) # Use weight and biases only use_wandb is true and framework is tensorflow if args['wandb_params']['use_wandb'] and "tensorflow" in args['framework']: import wandb wandb.init(name= args['wandb_params']['run_name'], project=args['wandb_params']['project'], config=args) args = wandb.config train(args)
def main():
    args = argparse.parse_cmd(arguments)
    server = args['server']
    remote_dir = args['remote_dir']
    out = run_bash(f'ssh {server} "cd {remote_dir} && cat */trial.json"')
    jsons = split_json(out)
    for trial in jsons:
        trial = json.loads(trial)
        print()
        print(trial['trial_id'])
        print(trial['hyperparameters']['values'])
        print(trial['score'])
def test_parse_empty_config(self, mock_args):
    arguments = get_arguments()
    parameters = parse_cmd(arguments)
    out_parameters = {
        "parameter_int": 0,
        "parameter_str": '',
        "parameter_list": [1, 5, 6],
        "parameter_bool": False,
        "parameter_dict": {
            "parameter_int": 5
        }
    }
    self.assertEqual(parameters, out_parameters)
def main():
    matplotlib.use("GTK3Agg")
    args = argparse.parse_cmd(arguments)
    alphas, min_epoch, max_epoch, params = prepare_data(args)
    number_bars = (max_epoch - min_epoch) // args["step"] + 1
    colors = cm.OrRd_r(np.linspace(.2, .6, number_bars))
    # the grid has a fixed number of columns: 5
    n_params = len(params)
    if n_params == 1:
        fig, axs = plt.subplots(1, 1, figsize=(9, 3))
        axs = [[axs]]
    elif n_params <= 5:
        fig, axs = plt.subplots(math.ceil(n_params), 1, figsize=(9, 3))
        axs = [axs]
    else:
        fig, axs = plt.subplots(math.ceil(n_params / 5), 5, figsize=(9, 3))
    fig.suptitle(
        f'Alpha parameter between {min_epoch} and {max_epoch} epochs (step: {args["step"]})'
    )
    total_width = 0.7
    width = total_width / number_bars
    for i in range(number_bars):
        epoch = min_epoch + i * args["step"]
        for k, param in enumerate(params):
            p = alphas[str(epoch)][param + '_savable']
            # TODO: should be removed as soon as the data is better formatted
            data = {}
            for j in range(len(p)):
                data[str(j)] = p[j]
            names = list(data.keys())
            values = list(data.values())
            x = np.arange(len(p))
            axs[k // 5][k % 5].bar(x - total_width / 2 + width * i,
                                   values,
                                   width,
                                   label=str(i))  # color=colors
            axs[k // 5][k % 5].set_title(param)
    # fig.tight_layout()
    plt.show()
def main():
    args = argparse.parse_cmd(arguments)
    datasets = {
        'train': dataloader.get_dataset(
            args['dataloader'],
            transformation_list=args['dataloader']['train_list'],
            num_classes=10,
            split='train'),
        'val': dataloader.get_dataset(
            args['dataloader'],
            transformation_list=args['dataloader']['val_list'],
            num_classes=10,
            split='test')
    }
    for dataset_type in ['train', 'val']:
        for i, (images, y) in enumerate(datasets[dataset_type]):
            image = images[0]
            # unlike the variant above, the image is written as-is (no RGB->BGR swap)
            cv2.imwrite(os.path.join('/tmp', f'{dataset_type}_{i}.png'),
                        image.numpy())
            if i == 20:
                break
def main(): """ CLI entry point """ # parse arguments args = argparse.parse_cmd(args_spec) # perform global configuration (XLA and memory growth) global_conf.config_tf2(args) # load model model = load_model(args) model.summary() # if dataloader.name is set, evaluating on a specific dataset if args['dataloader']['name'] is not None: evaluate_dataset(args, model) # otherwise for images listen to a zmq socket else: socket = create_zmq_socket(args['zmq_port']) assert len( model.inputs) == 1, "Cannot find model input to send images on" process_incoming_image_batches(model, list(model.inputs[0].shape), socket)
def main(): """ function called when starting the code via command-line """ args = argparse.parse_cmd(arguments) args['server'] = alchemy_api.start_training(args['server']) train(args)
            [str, 'images_dir_path', '', 'directory path for the images'],
            [
                str, 'annotation_file_path', '',
                'annotation file path in the format `1st column`: image names, '
                '`2nd column`: label. If it is `None`, images are assumed to be '
                'stored in subdirectories of `images_dir_path` named after their classes'
            ],
            [str, 'delimiter', ',', 'Delimiter to split the annotation file columns'],
            [bool, 'header_exists', False, 'whether there is a header in the annotation file'],
            ['list[str]', 'split_names', ['train', 'validation', 'test'], 'Names of the splits'],
            ['list[float]', 'split_percentages', [0.8, 0.1, 0.1], 'Percentages of the splits'],
        ]
    ]
]

if __name__ == "__main__":
    args = parse_cmd(dataset_arguments)
    build_tfrecord_dataset(args)
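# `parse_cmd` lives in the repo's own `argparse` module. Judging from the
# [type, name, default, help] rows above and the tests earlier, a minimal stand-in
# could look like the sketch below; nested `section.key` names, argument groups and
# yaml-config overrides are elided, and the name `parse_cmd_sketch` is invented:
import argparse as std_argparse

def parse_cmd_sketch(arg_specs):
    parser = std_argparse.ArgumentParser()
    for arg_type, name, default, help_text in arg_specs:
        if isinstance(arg_type, str) and arg_type.startswith('list'):
            # 'list[str]' / 'list[float]' markers: accept several values
            item_type = float if 'float' in arg_type else str
            parser.add_argument(f'--{name}', nargs='*', type=item_type,
                                default=default, help=help_text)
        elif arg_type is bool:
            # argparse's bool() is truthy for any non-empty string, so parse manually
            parser.add_argument(f'--{name}', type=lambda v: v.lower() == 'true',
                                default=default, help=help_text)
        else:
            parser.add_argument(f'--{name}', type=arg_type, default=default,
                                help=help_text)
    return vars(parser.parse_args())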
    print(config)
    # currently the first GPU is picked if there are multiple GPUs
    # config['hardware']['gpu'] = get_gpu_info().get('name')[0]
    prepare_docker(config["docker_images"])
    # benchmark all docker images against all models against all engines
    env_configs = create_all_environment_configs(config)
    results = []
    for env_config in env_configs:
        print(
            f"Benchmark {env_config['model']} using {env_config['engine']} on {env_config['docker']}"
        )
        cmd = docker_run_cmd(env_config['docker'], env_config['engine'],
                             env_config['model'], config)
        print(cmd)
        stream = os.popen(cmd)
        out = stream.read()
        print(out)
        # scan backwards for the last line that looks like a JSON object
        i = 2
        while out.split('\n')[-i][0] != '{':
            i += 1
        r = json.loads(out.split('\n')[-i])
        results.append(r)
    format_results(env_configs, results, config["output"])


if __name__ == "__main__":
    config = argparse.parse_cmd(inference_arguments)
    benchmark(config)
    # 6 training
    if config['progressive_resizing']:
        progressive_training(model=model,
                             config=config,
                             train_dataset=train_dataset,
                             val_dataset=val_dataset,
                             callbacks=callbacks,
                             latest_epoch=latest_epoch,
                             max_queue_size=16,
                             optimizer=optimizer)
    else:
        model.fit(x=train_dataset,
                  validation_data=val_dataset,
                  epochs=config['num_epochs'],
                  callbacks=callbacks,
                  max_queue_size=16,
                  initial_epoch=latest_epoch)

    # 7 export
    print("export model")
    export.export(model, export_dir, config)
    print("Training Completed!!")


if __name__ == '__main__':
    config = argparse.parse_cmd(arguments)
    train(config)
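# `progressive_training` is implemented elsewhere in the repo; below is a minimal
# sketch of the general progressive-resizing idea it presumably follows (training
# in phases of increasing input resolution). The `resizing_schedule` config key and
# the helper name are invented for illustration only:
import tensorflow as tf

def progressive_training_sketch(model, config, train_dataset, val_dataset,
                                callbacks, latest_epoch, max_queue_size, optimizer):
    epoch = latest_epoch
    # hypothetical schedule: a list of (image_size, num_epochs) phases
    for image_size, num_epochs in config['resizing_schedule']:
        # resize each phase's batches to the current resolution
        resize = lambda x, y, s=image_size: (tf.image.resize(x, (s, s)), y)
        model.fit(x=train_dataset.map(resize),
                  validation_data=val_dataset.map(resize),
                  epochs=epoch + num_epochs,
                  callbacks=callbacks,
                  max_queue_size=max_queue_size,
                  initial_epoch=epoch)
        epoch += num_epochs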