Example No. 1
    def __init__(self, name):
        config_data = read_file_in_dir('./', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)

        self.name = config_data['experiment_name']
        self.experiment_dir = os.path.join(ROOT_STATS_DIR, self.name)

        # Load Datasets
        self.train_loader, self.val_loader, self.test_loader = get_datasets(config_data)

        # Setup Experiment
        self.epochs = config_data['experiment']['num_epochs']
        lr = config_data['experiment']['learning_rate']
        wd = config_data['experiment']['weight_decay']
        momentum = config_data["experiment"]["momentum"]
        self.current_epoch = 0
        self.training_losses = []
        self.val_losses = []
        self.training_mean_aucs = []
        self.val_mean_aucs = []
        self.best_model = None  # Save your best model in this field and use this in test method.

        # Init Model
        self.model = get_model(config_data)

        # TODO: Set these Criterion and Optimizers Correctly
        self.criterion = torch.nn.BCEWithLogitsLoss()
        self.optimizer = torch.optim.SGD(self.model.parameters(), lr=lr, weight_decay=wd, momentum=momentum)

        self.init_model()

        # Load Experiment Data if available
        self.load_experiment()
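
Every example in this listing calls a read_file_in_dir helper that is not shown. A minimal sketch of what it presumably does, assuming it simply loads a JSON file from the given directory and returns None when the file is missing (the real helper may differ):

    import json
    import os

    def read_file_in_dir(root_dir, file_name):
        # Build the full path and return None if the file is absent,
        # which is exactly the condition the constructors above check for.
        path = os.path.join(root_dir, file_name)
        if not os.path.isfile(path):
            return None
        with open(path) as f:
            return json.load(f)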
Example No. 2
    def __init__(self, name, instance_name=None):
        config_data = read_file_in_dir('./config/', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)

        # Load Datasets
        if instance_name is not None:
            self.name = instance_name
        else:
            self.name = config_data['experiment_name']
        self.experiment_dir = os.path.join(ROOT_STATS_DIR, self.name)

        ds_train, ds_val = get_datasets(config_data)
        self.train_loader = DataLoader(
            ds_train,
            batch_size=config_data['experiment']['batch_size_train'],
            shuffle=True,
            num_workers=config_data['experiment']['num_workers'],
            pin_memory=True)
        self.val_loader = DataLoader(
            ds_val,
            batch_size=config_data['experiment']['batch_size_val'],
            shuffle=True,
            num_workers=config_data['experiment']['num_workers'],
            pin_memory=True)

        ds_test = get_test_dataset(config_data)
        self.test_loader = DataLoader(
            ds_test,
            batch_size=1,
            num_workers=config_data['experiment']['num_workers'],
            pin_memory=True)

        # Setup Experiment Stats
        self.epochs = config_data['experiment']['num_epochs']
        self.current_epoch = 0
        self.training_losses = []
        self.val_losses = []
        self.val_dices = []
        self.ed_dices = []
        self.es_dices = []

        # Init Model and Criterion
        self.criterion = get_criterion(config_data)
        self.model = get_model(config_data)
        self.optimizer = torch.optim.Adam(
            self.model.parameters(),
            lr=config_data['experiment']['learning_rate'])
        self.init_model()
        self.ensemble = config_data['model']['ensemble']

        # Load Experiment Data if available
        self.load_experiment()
        self.log(str(config_data))
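
The keys read by Example No. 2 imply a configuration dictionary of roughly the following shape; the values below are placeholders for illustration and are not taken from the original project:

    # What read_file_in_dir would return for this constructor (placeholder values).
    config_data = {
        'experiment_name': 'segmentation_baseline',
        'experiment': {
            'num_epochs': 50,
            'learning_rate': 1e-3,
            'batch_size_train': 16,
            'batch_size_val': 16,
            'num_workers': 4,
        },
        'model': {
            'ensemble': False,
        },
    }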
Example No. 3
    def __init__(self, net, stats_manager,
                 output_dir=None, perform_validation_during_training=False):

        self.output_dir = output_dir
        batch_size = ROOT_CONFIG['batch_size']
        learning_rate = ROOT_CONFIG['learning_rate']
        num_workers = ROOT_CONFIG['num_workers']

        net = net.to(DEVICE)
        optimizer = torch.optim.Adam(net.parameters(), lr=learning_rate)

        self.best_loss = 1e6

        train_dataset, val_dataset, target_dataset = get_datasets()

        # Define data loaders
        train_loader = td.DataLoader(train_dataset, batch_size=batch_size, shuffle=True,
                                     pin_memory=True)
        val_loader = td.DataLoader(val_dataset, batch_size=batch_size, shuffle=False,
                                   pin_memory=True)
        target_loader = td.DataLoader(target_dataset, batch_size=batch_size, shuffle=False,
                                      pin_memory=True)
        # Initialize history
        history = []

        # Define checkpoint paths
        if output_dir is None:
            output_dir = 'experiment_{}'.format(time.time())
        os.makedirs(output_dir, exist_ok=True)
        checkpoint_path = os.path.join(output_dir, "checkpoint.pth.tar")
        config_path = os.path.join(output_dir, "config.txt")

        # Transfer all local arguments/variables into attributes
        locs = {k: v for k, v in locals().items() if k != 'self'}
        self.__dict__.update(locs)

        # Load checkpoint and check compatibility
        if os.path.isfile(config_path):
            # with open(config_path, 'r') as f:
            #     if f.read()[:-1] != repr(self):
            #         print(f.read()[:-1], repr(self))
            #         raise ValueError(
            #             "Cannot create this experiment: "
            #             "I found a checkpoint conflicting with the current setting.")
            self.load()
        else:
            self.save()
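
Example No. 3 calls self.save() and self.load(), but those methods are not part of the snippet. A minimal sketch of what they likely look like, assuming the checkpoint stores the network, optimizer, and history under the paths defined above (the exact payload and key names are assumptions):

    def save(self):
        # Persist model/optimizer state and the training history.
        torch.save({'net': self.net.state_dict(),
                    'optimizer': self.optimizer.state_dict(),
                    'history': self.history},
                   self.checkpoint_path)
        # Record the experiment configuration for the compatibility check.
        with open(self.config_path, 'w') as f:
            print(self, file=f)

    def load(self):
        # Restore model/optimizer state and the training history.
        checkpoint = torch.load(self.checkpoint_path, map_location=DEVICE)
        self.net.load_state_dict(checkpoint['net'])
        self.optimizer.load_state_dict(checkpoint['optimizer'])
        self.history = checkpoint['history']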
Example No. 4
    def __init__(self, name):
        config_data = read_file_in_dir('./', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)
        self.__name = config_data['experiment_name']
        self.__data_path = config_data['dataset']['data_location']
        self.__experiment_dir = os.path.join('./experiment_data', self.__name)

        # Load Datasets
        self.__train_loader, self.__test_loader = get_datasets(config_data)

        # Setup Experiment
        self.__epochs = config_data['experiment']['num_epochs']
        self.__current_epoch = 0
        self.__training_losses = []
        self.__val_losses = []
        self.__best_model = None  # Save your best model in this field and use this in test method.

        # Init Model
        self.__model = get_model(config_data)
        self.__load_experiment()
Example No. 5
    def __init__(self, name):
        config_data = read_file_in_dir('./',
                                       ROOT_CONFIGS_DIR + '/' + name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ",
                            ROOT_CONFIGS_DIR + '/' + name)

        self.__name = config_data['experiment_name']
        self.__experiment_dir = os.path.join(ROOT_STATS_DIR, self.__name)

        # Load Datasets
        _, self.__vocab, _, _, _ = get_datasets(config_data)

        # Setup Experiment
        self.__img_root_dir = config_data['dataset']['images_root_dir']
        self.__generation_config = config_data['generation']
        self.__epochs = config_data['experiment']['num_epochs']
        self.__lr = config_data['experiment']['learning_rate']
        self.__batch_size = config_data['dataset']['batch_size']
        self.__num_gpu = config_data['num_gpu']
        self.__vocab_size = len(self.__vocab)
        self.__current_epoch = 0
        self.__best_loss = 1000000000000.
        self.__img_size = config_data['dataset']['img_size']
        self.__best_model = None  # Save your best model in this field and use this in test method.

        # Init Model
        self.__encoder = get_model(config_data,
                                   self.__vocab,
                                   network_block='encoder')
        if config_data['model']['model_type'] == 'LSTM':
            self.__decoder = get_model(config_data,
                                       self.__vocab,
                                       network_block='decoder')
        else:
            raise ValueError('must select a valid model_type')

        self.__init_model()
Example No. 6
    def __init__(self, name):
        config_data = read_file_in_dir('./', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)

        self.__name = config_data['experiment_name']
        self.__experiment_dir = os.path.join(ROOT_STATS_DIR, self.__name)

        # Load Datasets
        self.__coco_test, self.__vocab, self.__train_loader, self.__val_loader, self.__test_loader = get_datasets(
            config_data)

        # Setup Experiment
        self.__generation_config = config_data['generation']
        self.__epochs = config_data['experiment']['num_epochs']
        self.__current_epoch = 0
        self.__training_losses = []
        self.__val_losses = []
        self.__best_model = None  # Save your best model in this field and use this in test method.

        # Init Model
        self.__model = get_model(config_data, self.__vocab)

        # TODO: Set these Criterion and Optimizers Correctly
        self.__criterion = torch.nn.CrossEntropyLoss()
        self.__optimizer = torch.optim.Adam(self.__model.parameters(), lr=0.01)
        self.__init_model()

        # Load Experiment Data if available
        self.__load_experiment()
Example No. 7
    def __init__(self, name):
        config_data = read_file_in_dir('./', ROOT_CONFIGS_DIR + '/' + name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", ROOT_CONFIGS_DIR + '/' + name)

        self.__name = config_data['experiment_name']
        self.__experiment_dir = os.path.join(ROOT_STATS_DIR, self.__name)

        # Load Datasets
        self.__coco_test, self.__vocab, self.__train_loader, self.__val_loader, self.__test_loader = get_datasets(
            config_data)

        # Setup Experiment
        self.__img_root_dir = config_data['dataset']['images_root_dir']
        self.__generation_config = config_data['generation']
        self.__epochs = config_data['experiment']['num_epochs']
        self.__lr = config_data['experiment']['learning_rate']
        self.__batch_size = config_data['dataset']['batch_size']
        self.__num_gpu = config_data['num_gpu']
        self.__vocab_size = len(self.__vocab)
        self.__current_epoch = 0
        self.__training_losses = []
        self.__val_losses = []
        self.__best_loss = 1000000000000.
        self.__best_model = None  # Save your best model in this field and use this in test method.

        # Init Model
        self.__encoder = get_model(config_data, self.__vocab, network_block='encoder')
        if config_data['model']['model_type'] == 'LSTM':
            self.__decoder = get_model(config_data, self.__vocab, network_block='decoder')
        elif config_data['model']['model_type'] == 'stackedLSTM':
            self.__decoder = get_model(config_data, self.__vocab, network_block='stacked_decoder')
        elif config_data['model']['model_type'] == 'RNN':
            self.__decoder = get_model(config_data, self.__vocab, network_block='RNNdecoder')
        elif config_data['model']['model_type'] == 'stackedRNN':
            self.__decoder = get_model(config_data, self.__vocab, network_block='stacked_RNNdecoder')
        else:
            raise ValueError('must select a valid model_type')

        # TODO: Set these Criterion and Optimizers Correctly
        self.__criterion = nn.CrossEntropyLoss()
        self.__optimizer = torch.optim.Adam(filter(
            lambda p: p.requires_grad, nn.ModuleList([self.__encoder, self.__decoder]
                                                     ).parameters()), lr=self.__lr)
        # If you use SparseAdam, change nn.Embedding in model_factory to sparse=True

        self.__init_model()

        # Load Experiment Data if available
        self.__load_experiment()
Example No. 8
    def __init__(self, name):
        config_data = read_file_in_dir('./', name + '.json')
        if config_data is None:
            raise Exception("Configuration file doesn't exist: ", name)

        self.__name = config_data['experiment_name']
        self.__experiment_dir = os.path.join(ROOT_STATS_DIR, self.__name)

        # Load Datasets
        self.__coco_test, self.__vocab, self.__train_loader, self.__val_loader, self.__test_loader = get_datasets(
            config_data)

        # Setup Experiment
        self.__generation_config = config_data['generation']
        self.__epochs = config_data['experiment']['num_epochs']
        self.__learning_rate = config_data['experiment']['learning_rate']
        self.__early_stop_threshold = config_data['experiment'][
            'early_stop_threshold']
        self.__test_caption_path = config_data['dataset'][
            'test_annotation_file_path']
        self.__max_caption_count = config_data['generation']['max_length']
        self.__current_epoch = 0
        self.__training_losses = []
        self.__val_losses = []
        self.__best_model = None  # Save your best model in this field and use this in test method.
        #
        self.__best_encoder_model = None  # Save the best encoder model here
        self.__best_decoder_model = None  # Save the best decoder model here

        # Init Model
        self.__encoder_model, self.__decoder_model = get_model(
            config_data, self.__vocab)
        # TODO: Set these Criterion and Optimizers Correctly
        self.__criterion = nn.CrossEntropyLoss()
        parameters = list(self.__decoder_model.parameters()) + list(
            self.__encoder_model.parameters()) + list(
                self.__encoder_model.batchNorm.parameters())
        self.__optimizer = optim.Adam(parameters, lr=self.__learning_rate)
        self.__MODEL_NAME = self.__name + '_' + str(self.__learning_rate) + '_' + str(self.__epochs) + '_' + \
            str(config_data['model']['embedding_size']) + '_' + str(config_data['model']['hidden_size'])

        self.__use_gpu = False
        self.__init_model()
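
All of these constructors build the same kind of Experiment object; a typical driver looks roughly like the following (the run/test method names are an assumption based on the "use this in test method" comments above, not confirmed by the snippets):

    if __name__ == '__main__':
        exp = Experiment('default')   # loads ./default.json
        exp.run()                     # train for the configured number of epochs
        exp.test()                    # evaluate the saved best model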