def __init__(self, configuration):
    """Build a stack of difference-of-Gaussian (DoG) kernels.

    Smaller kernels are zero-padded up to the largest configured kernel
    size so that all kernels can be stacked into one tensor.

    Parameters
    ----------
    configuration : dict
        User overrides, merged over the defaults below via
        ``u.populate_configuration``.
    """
    base_configuration = {
        'kernel_type': DoGKernel,
        # Each entry is (kernel_size, sigma1, sigma2); pairs with swapped
        # sigmas give ON-center / OFF-center variants of the same filter.
        'kernels_conf': [[3, 3 / 9, 6 / 9], [3, 6 / 9, 3 / 9],
                         [7, 7 / 9, 14 / 9], [7, 14 / 9, 7 / 9],
                         [13, 13 / 9, 26 / 9], [13, 26 / 9, 13 / 9]],
        'padding': 6,
        'threshold': 50,
        'use_threshold': True
    }
    self.configuration = u.populate_configuration(configuration,
                                                  base_configuration)
    # Create kernels and pad the smaller ones so every kernel matches the
    # biggest one's spatial size — a requirement for torch.stack below.
    self.max_kernel_size = max(
        conf[0] for conf in self.configuration['kernels_conf'])
    kernels = []
    for conf in self.configuration['kernels_conf']:
        kernel = self.configuration['kernel_type'](*conf)().unsqueeze(0)
        # Split the size difference between the two sides; an odd
        # difference puts the extra cell on the right/bottom. The original
        # symmetric `diff // 2` padding produced a too-small kernel for odd
        # differences (breaking torch.stack); for the even differences in
        # the default config this is byte-identical behavior.
        diff = self.max_kernel_size - conf[0]
        lo, hi = diff // 2, diff - diff // 2
        kernels.append(torch.nn.functional.pad(kernel, (lo, hi, lo, hi)))
    self.kernels = torch.stack(kernels)
def __init__(self, configuration=None):
    """Set up the experiment: merge config, pick a device, build model/data.

    Parameters
    ----------
    configuration : dict, optional
        User overrides merged over ``self.base_config``. A ``None`` default
        replaces the original mutable default ``{}``, which was shared
        across calls and exposed to mutation by ``u.populate_configuration``.
    """
    if configuration is None:
        configuration = {}
    self.base_config = {
        'layers_config': [{
            'in_channels': 2,
            'out_channels': 32,
            'kernel_size': 5,
            'weight_mean': 0.8,
            'weight_std': 0.05
        }, {
            'in_channels': 32,
            'out_channels': 150,
            'kernel_size': 2,
            'weight_mean': 0.8,
            'weight_std': 0.05
        }],
        'learning_rates': (0.004, -0.003),
        'max_ap': torch.tensor([0.15]),
        'an_update': -0.75,
        'thresholds': [10, 1],
        'timestep_update_lr': 500,
        'pooling_vars': [{
            'kernel_size': 2,
            'stride': 2,
            'padding': 1
        }] * 2,
        'n_winners': [5, 8],
        'inhibition_radius': [2, 1],
        'batch_size': 1,
        'dataset_path': 'data/',
        'n_epochs': [2, 20],
        'save_path': 'model/sdcnne_exp.pt'
    }
    self.config = u.populate_configuration(configuration, self.base_config)
    self.device = torch.device(
        'cuda' if torch.cuda.is_available() else 'cpu')
    self.config['max_ap'] = self.config['max_ap'].to(self.device)

    def pad_fn(kernel_size):
        # ceil((k - 1) / 2) — "same"-style padding for odd or even kernels.
        # (def instead of the original name-bound lambda, per PEP 8.)
        return (kernel_size - 1) // 2 + ((kernel_size - 1) % 2 > 0)

    # Four-sided (left, right, top, bottom) padding for training; a single
    # per-layer value for the forward pass.
    self.paddings_train = [[pad_fn(conf['kernel_size'])] * 4
                           for conf in self.config['layers_config']]
    self.paddings_forward = [
        pad_fn(conf['kernel_size'])
        for conf in self.config['layers_config']
    ]
    self.set_metadata()
    self.set_dataloader()
    self.instanciate_model()
def __init__(self, configuration=None):
    """Build the DoG-filter transform from a merged configuration.

    Parameters
    ----------
    configuration : dict, optional
        User overrides merged over the defaults below. A ``None`` default
        replaces the original mutable default ``{}`` (shared across calls).
    """
    if configuration is None:
        configuration = {}
    base_config = {
        'kernel_type': toci.DoGKernel,
        'kernels_conf': [[7, 1, 2], [7, 2, 1]],
        'padding': 3,
        'threshold': 50,
        'use_threshold': True,
        'normalization_radius': 8,
        'n_time_steps': 15
    }
    # NOTE(review): the sibling constructors call
    # u.populate_configuration(user_config, defaults); here the arguments
    # were reversed (defaults first), which — assuming the first argument
    # takes precedence — made user overrides ineffective. Aligned with the
    # other call sites; confirm against u.populate_configuration.
    self.config = u.populate_configuration(configuration, base_config)
    self.to_tensor = torchvision.transforms.ToTensor()
    self.filters = toci.Filter(self.config)
# Command-line options (booleans parsed via ast.literal_eval so that
# "True"/"False" strings become real bools).
argparser.add_argument('--reduce_dim', default=False, type=ast.literal_eval)
argparser.add_argument('--lr', default=1e-4, type=float)
argparser.add_argument('--smoothing_epsilon', default=0.1, type=float)
argparser.add_argument('--save_path', default='convnet/', type=str)
argparser.add_argument('--plot_attention', default=False,
                       type=ast.literal_eval)
argparser.add_argument('--eval_step', default=10, type=int)
argparser.add_argument('--max_epochs', default=500, type=int)
argparser.add_argument('--train_acc_step', default=5, type=int)
argparser.add_argument('--load_model', default=False, type=ast.literal_eval)
argparser.add_argument('--clip_grad', default=False, type=ast.literal_eval)
argparser.add_argument('--weight_decay', default=0.,
                       type=float)  # L2 regularization -> 0.01
argparser.add_argument('--l1_reg', default=0.,
                       type=float)  # L1 regularization -> 0.001
argparser.add_argument('--warmup', default=100, type=int)
argparser.add_argument('--clip_grad_val', default=0.1, type=float)
argparser.add_argument('--logfile',
                       default='_convnet_experiments_feedback_logs.txt',
                       type=str)
args = argparser.parse_args()

# Append logs to the run-specific logfile (swap filename= for
# stream=sys.stdout to log to the console instead).
logging.basicConfig(filename=args.logfile,
                    filemode='a',
                    level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')

# CLI arguments override any preset values in `settings`.
settings = u.populate_configuration(settings, vars(args))

global plotter
plotter = u.VisdomPlotter(env_name='ConvNet Experiments')

# exist_ok=True replaces the original racy isdir-then-makedirs check
# (LBYL -> EAFP); behavior is identical when the directory already exists.
os.makedirs(settings['save_path'], exist_ok=True)

launch_experiment(settings)