def run_exp_lib(dataset_feat_net_triples, get_model=get_model_with_default_configs):
    """Run a batch of (dataset_name, feat_str, net) experiments.

    The full experiment plan is printed up front, then each experiment is
    executed in turn: image datasets (MNIST/CIFAR) use their fixed
    train/test split, everything else uses 10-fold cross-validation.

    Args:
        dataset_feat_net_triples: sequence of (dataset_name, feat_str, net)
            triples, one per experiment.
        get_model: factory mapping a net name to a model constructor.
    """
    results = []
    exp_nums = len(dataset_feat_net_triples)
    print("-----\nTotal %d experiments in this run:" % exp_nums)
    # Announce the whole plan first so the log shows what is queued.
    for exp_id, (dataset_name, feat_str, net) in enumerate(dataset_feat_net_triples):
        print('{}/{} - {} - {} - {}'.format(
            exp_id + 1, exp_nums, dataset_name, feat_str, net))
    print("Here we go..")
    sys.stdout.flush()

    for exp_id, (dataset_name, feat_str, net) in enumerate(dataset_feat_net_triples):
        print('-----\n{}/{} - {} - {} - {}'.format(
            exp_id + 1, exp_nums, dataset_name, feat_str, net))
        sys.stdout.flush()
        dataset = get_dataset(
            dataset_name, sparse=True, feat_str=feat_str, root=args.data_root)
        model_func = get_model(net)
        if 'MNIST' in dataset_name or 'CIFAR' in dataset_name:
            # Image benchmarks ship with a fixed split: single train/test run.
            train_dataset, test_dataset = dataset
            train_acc, acc, duration = single_train_test(
                train_dataset, test_dataset, model_func,
                epochs=args.epochs,
                batch_size=args.batch_size,
                lr=args.lr,
                lr_decay_factor=args.lr_decay_factor,
                lr_decay_step_size=args.lr_decay_step_size,
                weight_decay=0,
                epoch_select=args.epoch_select,
                with_eval_mode=args.with_eval_mode)
            std = 0  # single split: no variance estimate available
        else:
            # BUGFIX: the original discarded the cross-validation results
            # (no assignment) and left a stray, unterminated '"""' at the
            # end of the function; capture the metrics like the image branch.
            train_acc, acc, std, duration = cross_validation_with_val_set(
                dataset, model_func, folds=10,
                epochs=args.epochs,
                batch_size=args.batch_size,
                lr=args.lr,
                lr_decay_factor=args.lr_decay_factor,
                lr_decay_step_size=args.lr_decay_step_size,
                weight_decay=0,
                epoch_select=args.epoch_select,
                with_eval_mode=args.with_eval_mode,
                logger=logger,
                dataset_name=args.dataset,
                aug1=args.aug1, aug_ratio1=args.aug_ratio1,
                aug2=args.aug2, aug_ratio2=args.aug_ratio2,
                suffix=args.suffix)
def run_exp_lib(dataset_feat_net_triples):
    """Run 10-fold cross-validation for each (dataset_name, feat_str) pair.

    For every pair the dataset is loaded, the largest graph size is computed
    (passed to the trainer as `max_node_num`), cross-validation is run, and a
    per-experiment summary is printed and collected.

    Args:
        dataset_feat_net_triples: iterable of (dataset_name, feat_str) pairs.
    """
    results = []
    sys.stdout.flush()
    for (dataset_name, feat_str) in dataset_feat_net_triples:
        sys.stdout.flush()
        dataset = get_dataset(
            dataset_name, sparse=True, feat_str=feat_str, root=args.data_root)
        # Largest graph in the dataset; presumably used downstream for
        # padding/batching node-level tensors — TODO confirm in trainer.
        max_node_num = max(dataset.data.num_nodes)
        print('Data: {}, Max Node Num: {}'.format(dataset_name, max_node_num))
        train_acc, acc, std, duration = cross_validation_with_val_set(
            args, dataset,
            max_node_num=max_node_num,
            folds=10,
            epochs=args.epochs,
            batch_size=args.batch_size,
            lr=args.lr,
            lr_decay_factor=args.lr_decay_factor,
            lr_decay_step_size=args.lr_decay_step_size,
            weight_decay=args.weight_decay,
            epoch_select=args.epoch_select,
            with_eval_mode=args.with_eval_mode,
            logger=logger)
        # BUGFIX: the original computed train_acc/acc/std/duration and
        # initialized `results` but never reported anything; collect and
        # print a summary per experiment plus a final aggregate listing.
        summary = 'data={}, feat={}: train_acc={:.2f}, test_acc={:.2f} ± {:.2f}, sec={}'.format(
            dataset_name, feat_str,
            train_acc * 100, acc * 100, std * 100, round(duration, 2))
        results.append(summary)
        print(summary)
        sys.stdout.flush()
    print('-----\n{}'.format('\n'.join(results)))
    sys.stdout.flush()
# NOTE(review): mangled chunk. The leading `val_loss, test_acc = info[...]`
# print appears to be the tail of a logger callback whose `def` line is not
# visible in this view, fused with a module-level grid search over
# (dataset_name, Net) and (num_layers, hidden) that keeps the configuration
# with the lowest validation loss per (dataset, net) pair.
# The names `datasets`, `nets`, `layers`, `hiddens`, `DiffPool`, `args` are
# defined elsewhere — TODO confirm before restructuring this chunk.
# NOTE(review): the final summary formats `model` (the last model instance),
# not `Net.__name__` — presumably intentional (prints the model repr), verify.
val_loss, test_acc = info['val_loss'], info['test_acc'] print('{:02d}/{:03d}: Val Loss: {:.4f}, Test Accuracy: {:.3f}'.format( fold, epoch, val_loss, test_acc)) results = [] for dataset_name, Net in product(datasets, nets): best_result = (float('inf'), 0, 0) # (loss, acc, std) print('-----\n{} - {}'.format(dataset_name, Net.__name__)) for num_layers, hidden in product(layers, hiddens): dataset = get_dataset(dataset_name, sparse=Net != DiffPool) model = Net(dataset, num_layers, hidden) loss, acc, std = cross_validation_with_val_set( dataset, model, folds=10, epochs=args.epochs, batch_size=args.batch_size, lr=args.lr, lr_decay_factor=args.lr_decay_factor, lr_decay_step_size=args.lr_decay_step_size, weight_decay=0, logger=None) if loss < best_result[0]: best_result = (loss, acc, std) desc = '{:.3f} + {:.3f}'.format(best_result[1], best_result[2]) print('Best result - {}'.format(desc)) results += ['{} - {}: {}'.format(dataset_name, model, desc)] print('-----\n{}'.format('\n'.join(results)))
def run_exp_lib(dataset_feat_net_triples, get_model=get_model_with_default_configs):
    """Execute every queued (dataset_name, feat_str, net) experiment.

    Prints the full plan first, then runs each experiment: MNIST/CIFAR use
    their predefined split via `single_train_test`, all other datasets use
    10-fold cross-validation. A 'mid-result' line is printed after each
    experiment and all 'fin-result' lines are printed together at the end.
    """
    results = []
    total = len(dataset_feat_net_triples)
    print("-----\nTotal %d experiments in this run:" % total)
    # Log the planned experiments before starting any of them.
    for idx, (dataset_name, feat_str, net) in enumerate(dataset_feat_net_triples, 1):
        print('{}/{} - {} - {} - {}'.format(
            idx, total, dataset_name, feat_str, net))
    print("Here we go..")
    sys.stdout.flush()

    for idx, (dataset_name, feat_str, net) in enumerate(dataset_feat_net_triples, 1):
        print('-----\n{}/{} - {} - {} - {}'.format(
            idx, total, dataset_name, feat_str, net))
        sys.stdout.flush()
        dataset = get_dataset(
            dataset_name, sparse=True, feat_str=feat_str, root=args.data_root)
        model_func = get_model(net)

        is_image_data = 'MNIST' in dataset_name or 'CIFAR' in dataset_name
        if is_image_data:
            # Predefined split: one training run, no fold variance.
            train_split, test_split = dataset
            train_acc, acc, duration = single_train_test(
                train_split, test_split, model_func,
                epochs=args.epochs,
                batch_size=args.batch_size,
                lr=args.lr,
                lr_decay_factor=args.lr_decay_factor,
                lr_decay_step_size=args.lr_decay_step_size,
                weight_decay=0,
                epoch_select=args.epoch_select,
                with_eval_mode=args.with_eval_mode)
            std = 0
        else:
            # Everything else is scored by 10-fold cross-validation.
            train_acc, acc, std, duration = cross_validation_with_val_set(
                dataset, model_func, folds=10,
                epochs=args.epochs,
                batch_size=args.batch_size,
                lr=args.lr,
                lr_decay_factor=args.lr_decay_factor,
                lr_decay_step_size=args.lr_decay_step_size,
                weight_decay=0,
                epoch_select=args.epoch_select,
                with_eval_mode=args.with_eval_mode,
                logger=logger)

        summary1 = 'data={}, model={}, feat={}, eval={}'.format(
            dataset_name, net, feat_str, args.epoch_select)
        summary2 = 'train_acc={:.2f}, test_acc={:.2f} ± {:.2f}, sec={}'.format(
            train_acc * 100, acc * 100, std * 100, round(duration, 2))
        results += ['{}: {}, {}'.format('fin-result', summary1, summary2)]
        print('{}: {}, {}'.format('mid-result', summary1, summary2))
        sys.stdout.flush()

    print('-----\n{}'.format('\n'.join(results)))
    sys.stdout.flush()