# NOTE(review): collapsed/mangled fragment of a CIFAR-100 experiment setup
# (validation/test DataLoaders, a ConvolutionalNetwork, and the start of an
# ExperimentBuilder call). The ExperimentBuilder call is TRUNCATED mid-argument
# list — it ends at `weight_decay_coefficient = args.weight_decay,` with no
# closing parenthesis and no train/val/test data arguments. The original
# multi-line source is needed to restore it; this line cannot run as-is.
# `valset` and `transform_test` are also assumed to be defined earlier in the
# original file — TODO confirm (not visible here).
val_data = torch.utils.data.DataLoader(valset, batch_size=100, shuffle=False, num_workers=4) testset = data_providers.CIFAR100(root='data', set_name='test', download=False, transform=transform_test) test_data = torch.utils.data.DataLoader(testset, batch_size=100, shuffle=False, num_workers=4) num_output_classes = 100 custom_conv_net = ConvolutionalNetwork(num_output_classes=num_output_classes) # initialize our network object, in this case a ConvNet conv_experiment = ExperimentBuilder(network_model=custom_conv_net, use_gpu=args.use_gpu, experiment_name=args.experiment_name, num_epochs=args.num_epochs, learning_rate=args.learning_rate, weight_decay_coefficient = args.weight_decay,
# EMNIST experiment setup: seed the RNGs, build the train/valid/test data
# providers, construct the ConvNet, and wrap everything in an ExperimentBuilder.
# (Restored from a source line that had been collapsed onto one physical line,
# which left everything after the first `#` as dead comment text.)

# Reproducibility: seed PyTorch's global RNG from the command-line argument.
torch.manual_seed(seed=args.seed)

# Data providers for the three EMNIST splits. `rng` is assumed to be a NumPy
# RandomState created earlier from args.seed — TODO confirm (not visible here).
train_data = data_providers.EMNISTDataProvider('train', batch_size=args.batch_size, rng=rng)
val_data = data_providers.EMNISTDataProvider('valid', batch_size=args.batch_size, rng=rng)
test_data = data_providers.EMNISTDataProvider('test', batch_size=args.batch_size, rng=rng)

# Build the network; the number of output classes comes from the dataset
# itself rather than being hard-coded.
custom_conv_net = ConvolutionalNetwork(
    input_shape=(args.batch_size, args.image_num_channels,
                 args.image_height, args.image_width),
    dim_reduction_type=args.dim_reduction_type,
    num_output_classes=train_data.num_classes,
    num_filters=args.num_filters,
    num_layers=args.num_layers,
    use_bias=False)

# Bundle model + data + training hyperparameters into an experiment object.
conv_experiment = ExperimentBuilder(
    network_model=custom_conv_net,
    experiment_name=args.experiment_name,
    num_epochs=args.num_epochs,
    weight_decay_coefficient=args.weight_decay_coefficient,
    gpu_id=args.gpu_id,
    use_gpu=args.use_gpu,
    continue_from_epoch=args.continue_from_epoch,
    train_data=train_data,
    val_data=val_data,
    test_data=test_data)