    def test_that_the_output_is_of_the_correct_scale(self):
        # create random weights
        self.make_model()
        self.weights_path = os.path.join(self.weights_folder,
                                         'testweights_E001_.hdf5')
        self.rdn.save_weights(self.weights_path)
        # test the network with random weights
        self.create_random_dataset('correct', dataset_size=1)
        cl_args = ['--no_verbose', '--test']
        test_args = {
            'test_folder': self.dataset_folder['correct']['LR'],
            'results_folder': self.results_folder,
            'weights_path': self.weights_path,
        }

        parser = get_parser()
        cl_args = parser.parse_args(cl_args)
        cl_args = vars(cl_args)
        load_configuration(cl_args, '../config.json')
        cl_args.update(test_args)
        cl_args.update(self.model_params)
        run.main(cl_args)
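        # each file written by run.main should be an HR-sized RGB image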
        for file in os.listdir(self.results_folder):
            img = imageio.imread(os.path.join(self.results_folder, file))
            self.assertTrue(img.shape == (self.img_size['HR'],
                                          self.img_size['HR'], 3))

    def test_that_configuration_is_loaded_correctly(self):
        parser = get_parser()
        cl_args = parser.parse_args(['--train', '--custom-data'])
        cl_args = vars(cl_args)
        load_configuration(cl_args, '../config.json')
        self.assertTrue(isinstance(cl_args['G'], int))
        self.assertTrue(cl_args['log_dir'] == './logs')
        self.assertTrue(cl_args['weights_dir'] == './weights')
        self.assertTrue(cl_args['data_name'] == 'CUSTOM')

    def test_if_train_is_executed(self):
        self.create_random_dataset('correct', dataset_size=14)
        train_arguments = {
            'validation_labels': self.dataset_folder['correct']['HR'],
            'validation_input': self.dataset_folder['correct']['LR'],
            'training_labels': self.dataset_folder['correct']['HR'],
            'training_input': self.dataset_folder['correct']['LR'],
        }
        cl_args = ['--pytest', '--no_verbose']
        parser = get_parser()
        cl_args = parser.parse_args(cl_args)
        cl_args = vars(cl_args)
        load_configuration(cl_args, '../config.json')
        cl_args.update(train_arguments)
        run.main(cl_args)
        # smoke test: reaching this assertion means run.main completed without raising
        self.assertTrue(1 != 0)

    def test_if_trainable_weights_update_with_one_step(self):
        self.scale = self.model_params['scale']
        self.img_size = {'HR': 10 * self.scale, 'LR': 10}
        self.dataset_size = 8
        self.create_random_dataset(type='correct')

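        # snapshot the first weight tensor of each trainable layer before training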
        before_step = []
        for layer in self.model.rdn.layers:
            if len(layer.trainable_weights) > 0:
                before_step.append(layer.get_weights()[0])

        train_arguments = {
            'validation_labels': self.dataset_folder['correct']['HR'],
            'validation_input': self.dataset_folder['correct']['LR'],
            'training_labels': self.dataset_folder['correct']['HR'],
            'training_input': self.dataset_folder['correct']['LR'],
        }
        cl_args = ['--pytest', '--no_verbose']
        parser = get_parser()
        cl_args = parser.parse_args(cl_args)
        cl_args = vars(cl_args)
        load_configuration(cl_args, '../config.json')
        cl_args.update(train_arguments)
        trainer = Trainer(train_arguments=cl_args)

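        # before any training step, the snapshots must still match the current weights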
        i = 0
        for layer in self.model.rdn.layers:
            if len(layer.trainable_weights) > 0:
                self.assertTrue(
                    np.all(before_step[i] == layer.get_weights()[0]))
                i += 1

        trainer.train_model(self.model)

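        # after one training run, every trainable layer's weights should differ from its snapshot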
        i = 0
        for layer in self.model.rdn.layers:
            if len(layer.trainable_weights) > 0:
                self.assertFalse(
                    np.all(before_step[i] == layer.get_weights()[0]))
                i += 1

Example #5
import os
import sys

import torch

from models import VanillaCNN, SpectralDropoutCNN, SpectralDropoutEasyCNN
from utils.data_utils import get_data_loaders
from utils.utils import get_parser
# from utils.visualization import plot_hidden

# System
path_to_home = os.environ['HOME']
path_to_proj = os.path.join(path_to_home, 'spectral-dropout')
path_save = os.path.join(path_to_proj, 'saved_models')
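# checkpoints written by the signal handler below are saved under path_save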

# Model parameters
GREYSCALE = True
WEIGHT_DECAY = 1e-3

# Get a parser for the training settings
parser = get_parser()


def signal_handler(sig, frame):
    print('Aborting, trying to save the model')
    try:
        final_model_save_name = ''.join([model_save_name, 'final', '.pt'])
        torch.save(
            model,
            os.path.join(path_save, model_save_name, final_model_save_name))
        print('Model saved!')
    except NameError:
        print('Model not saved, probably not existing yet')
    finally:
        sys.exit(0)
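
# The handler above only takes effect once it is registered; a minimal sketch
# of that wiring (the SIGINT choice is an assumption, not shown in this excerpt):
import signal

signal.signal(signal.SIGINT, signal_handler)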

    def test_that_parser_returns_the_correct_namespace(self):
        parser = get_parser()
        cl_args = parser.parse_args(['--train'])
        namespace = cl_args._get_kwargs()
        self.assertTrue(('train', True) in namespace)
        self.assertTrue(('test', False) in namespace)

Example #7
from utils import utils, keystoneutils, novautils


# Create a parser and parse the args and/or the environment variables.
p = utils.get_parser()
args = p.parse_args()



# Create a keystone client from the args and run supported operations.
kc = keystoneutils.get_client(args)

# List all projects.
print(kc.projects.list())
# Show details of a project.
print(kc.projects.get(kc.projects.list()[0].id))
# Create a project.
print(kc.projects.create('my_project', 'default',
                         description='This is my_project.'))
# List all users with admin roles.
print(kc.role_assignments.list(role=kc.roles.list(name='admin')[0].id))



# Create a nova client and run supported operations.
nc = novautils.get_client(kc.session)
#            OR
# session = keystoneutils.get_session(args)
# nc = novautils.get_client()

# List all instances. Like `nova list --all-tenants`.
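# A plausible call, assuming novautils hands back a standard novaclient
# Client object (an assumption, not confirmed by the snippet itself):
print(nc.servers.list(search_opts={'all_tenants': 1}))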