def __init__(self, image_size=128):
    seed_all(0)
    super().__init__()
    self.density_label_size = image_size // 4
    self.conv1 = Conv2d(3, 16, kernel_size=7)
    self.max_pool1 = MaxPool2d(kernel_size=3, stride=2)
    self.conv2 = Conv2d(self.conv1.out_channels, 32, kernel_size=7)
    self.max_pool2 = MaxPool2d(kernel_size=3, stride=2)
    self.conv3 = Conv2d(self.conv2.out_channels, 64, kernel_size=5)
    self.conv4 = Conv2d(self.conv3.out_channels, 32, kernel_size=3)
    self.conv5 = Conv2d(self.conv4.out_channels, 16, kernel_size=3)
    self.conv6 = Conv2d(self.conv5.out_channels, 16, kernel_size=3, dilation=2)
    self.conv7 = Conv2d(self.conv5.out_channels, 16, kernel_size=3, dilation=2)
    # Feature 5 regression.
    self.f5_fc1 = Linear(912, 1000)
    self.f5_density = Linear(1000, self.density_label_size ** 2)
    self.f5_count = Linear(1000, 1)
    # Feature 7 regression.
    self.f7_fc1 = Linear(912, 1000)
    self.f7_density = Linear(1000, self.density_label_size ** 2)
    self.f7_count = Linear(1000, 1)
    self.features = None
def __init__(self, z_dim=256, image_size=128, conv_dim=64):
    seed_all(0)
    super().__init__()
    self.fc = transpose_convolution(c_in=z_dim, c_out=conv_dim * 8, k_size=int(image_size / 16),
                                    stride=2, pad=0, bn=False)
    self.layer1 = transpose_convolution(c_in=conv_dim * 8, c_out=conv_dim * 8, k_size=4, stride=2)
    self.layer2 = transpose_convolution(c_in=conv_dim * 8, c_out=conv_dim * 8, k_size=3, stride=1)
    self.layer3 = transpose_convolution(c_in=conv_dim * 8, c_out=conv_dim * 4, k_size=4, stride=2)
    self.layer4 = transpose_convolution(conv_dim * 4, conv_dim * 2, 4, stride=2)
    self.layer5 = transpose_convolution(conv_dim * 2, conv_dim, 4, stride=2)
    self.layer6 = transpose_convolution(conv_dim, 1, 3, stride=1, bn=False)
    self.input_size = z_dim
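# The `transpose_convolution` helper used above is not defined in this section. Below is a
# minimal sketch of what such a helper typically looks like; the defaults (`stride=2`, `pad=1`)
# and the choice to apply activations elsewhere are assumptions, not the repository's
# confirmed implementation.
import torch.nn as nn

def transpose_convolution(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Builds a transposed-convolution block with optional batch normalization."""
    layers = [nn.ConvTranspose2d(c_in, c_out, k_size, stride=stride, padding=pad)]
    if bn:
        layers.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*layers)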
def __init__(self, dataset='train', seed=None, number_of_examples=None, map_directory_name='maps'):
    seed_all(seed)
    self.dataset_directory = os.path.join(database_directory, dataset.capitalize())
    self.file_names = [name for name in os.listdir(os.path.join(self.dataset_directory, 'labels'))
                       if name.endswith('.npy')][:number_of_examples]
    self.length = len(self.file_names)
    self.map_directory_name = map_directory_name
def __init__(self, dataset_path, start=None, end=None, gender_filter=None, seed=None, batch_size=None):
    if gender_filter is not None:
        raise NotImplementedError()
    self.dataset_path = dataset_path
    with open(os.path.abspath(os.path.join(self.dataset_path, 'meta.json'))) as json_file:
        json_contents = json.load(json_file)
    image_names, ages = [], []
    for entry in json_contents:
        if isinstance(entry, dict):
            image_names.append(entry['image_name'])
            ages.append(entry['age'])
        else:
            image_name, age, gender = entry
            image_names.append(image_name)
            ages.append(age)
    seed_all(seed)
    image_names, ages = unison_shuffled_copies(np.array(image_names), np.array(ages))
    self.image_names = np.array(image_names[start:end])
    self.ages = np.array(ages[start:end], dtype=np.float32)
    # Repeat the data to fill a full batch when the dataset is smaller than the batch size.
    # (The `batch_size is not None` guard prevents a TypeError with the default argument.)
    if batch_size is not None and self.image_names.shape[0] < batch_size:
        repeats = math.ceil(batch_size / self.image_names.shape[0])
        self.image_names = np.repeat(self.image_names, repeats)
        self.ages = np.repeat(self.ages, repeats)
    self.length = self.ages.shape[0]
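# `unison_shuffled_copies` is used above but not defined in this section. A plausible
# implementation, assuming two NumPy arrays of equal length shuffled by one shared permutation:
import numpy as np

def unison_shuffled_copies(a, b):
    """Shuffles two arrays with the same random permutation."""
    assert len(a) == len(b)
    permutation = np.random.permutation(len(a))
    return a[permutation], b[permutation]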
def train(self):
    """Run the SRGAN training for the experiment."""
    self.trial_directory = os.path.join(self.settings.logs_directory, self.settings.trial_name)
    if (self.settings.skip_completed_experiment and os.path.exists(self.trial_directory)
            and '/check' not in self.trial_directory):
        print('`{}` experiment already exists. Skipping...'.format(self.trial_directory))
        return
    self.trial_directory = make_directory_name_unique(self.trial_directory)
    print(self.trial_directory)
    os.makedirs(os.path.join(self.trial_directory, self.settings.temporary_directory))
    self.prepare_summary_writers()
    seed_all(0)
    self.dataset_setup()
    self.model_setup()
    self.load_models()
    self.gpu_mode()
    self.train_mode()
    self.prepare_optimizers()
    self.training_loop()
    print('Completed {}'.format(self.trial_directory))
    if self.settings.should_save_models:
        self.save_models()
def __init__(self, dataset='train', image_patch_size=224, label_patch_size=224, seed=None,
             number_of_examples=None, middle_transform=None, map_directory_name='i1nn_maps'):
    seed_all(seed)
    self.dataset_directory = os.path.join(database_directory, dataset.capitalize())
    self.file_names = [name for name in os.listdir(os.path.join(self.dataset_directory, 'labels'))
                       if name.endswith('.npy')][:number_of_examples]
    self.image_patch_size = image_patch_size
    self.label_patch_size = label_patch_size
    half_patch_size = int(self.image_patch_size // 2)
    self.length = 0
    self.start_indexes = []
    for file_name in self.file_names:
        self.start_indexes.append(self.length)
        image = np.load(os.path.join(self.dataset_directory, 'images', file_name), mmap_mode='r')
        y_positions = range(half_patch_size, image.shape[0] - half_patch_size + 1)
        x_positions = range(half_patch_size, image.shape[1] - half_patch_size + 1)
        image_indexes_length = len(y_positions) * len(x_positions)
        self.length += image_indexes_length
    self.middle_transform = middle_transform
    self.map_directory_name = map_directory_name
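# A quick check of the patch arithmetic above: for an image of height H and width W with an
# even patch size P, the valid patch centers run from P // 2 to H - P // 2 inclusive, giving
# H - P + 1 positions per axis. For example, a hypothetical 256x256 image with P = 224:
half_patch_size = 224 // 2
image_height = image_width = 256
y_positions = range(half_patch_size, image_height - half_patch_size + 1)
x_positions = range(half_patch_size, image_width - half_patch_size + 1)
assert len(y_positions) * len(x_positions) == 33 * 33  # (H - P + 1) ** 2 patch positions.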
def dataset_setup(self):
    """Sets up the datasets for the application."""
    if model_architecture == 'vgg':
        dataset_path = '../imdb_wiki_data/imdb_preprocessed_224'
    else:
        dataset_path = '../imdb_wiki_data/imdb_preprocessed_128'
    settings = self.settings
    seed_all(settings.labeled_dataset_seed)
    self.train_dataset = AgeDataset(dataset_path, start=0, end=settings.labeled_dataset_size)
    self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size, shuffle=True,
                                           pin_memory=self.settings.pin_memory,
                                           num_workers=settings.number_of_data_workers)
    self.unlabeled_dataset = AgeDataset(dataset_path, start=self.train_dataset.length,
                                        end=self.train_dataset.length + settings.unlabeled_dataset_size)
    self.unlabeled_dataset_loader = DataLoader(self.unlabeled_dataset, batch_size=settings.batch_size,
                                               shuffle=True, pin_memory=self.settings.pin_memory,
                                               num_workers=settings.number_of_data_workers)
    train_and_unlabeled_dataset_size = self.train_dataset.length + self.unlabeled_dataset.length
    self.validation_dataset = AgeDataset(dataset_path, start=train_and_unlabeled_dataset_size,
                                         end=train_and_unlabeled_dataset_size + settings.validation_dataset_size)
def __init__(self, dataset='train', seed=None, number_of_cameras=None, number_of_images_per_camera=None,
             map_directory_name=None):
    seed_all(seed)
    self.dataset_directory = database_directory
    with open(os.path.join(self.dataset_directory, 'viable_with_validation_and_random_test.json')) as json_file:
        cameras_dict = json.load(json_file)
    camera_names = cameras_dict[dataset]
    random.shuffle(camera_names)
    self.camera_data_list: List[CameraData] = []
    camera_names = camera_names[:number_of_cameras]
    self.length = 0
    self.start_indexes = []
    for camera_name in camera_names:
        camera_directory = os.path.join(self.dataset_directory, camera_name)
        if dataset == 'unlabeled':
            camera_images = np.load(os.path.join(camera_directory, 'unlabeled_images.npy'), mmap_mode='r')
            camera_labels = None
        else:
            camera_images = np.load(os.path.join(camera_directory, 'images.npy'), mmap_mode='r')
            camera_labels = np.load(os.path.join(camera_directory, 'labels.npy'), mmap_mode='r')
        camera_roi = np.load(os.path.join(camera_directory, 'roi.npy'), mmap_mode='r')
        camera_perspective = np.load(os.path.join(camera_directory, 'perspective.npy'), mmap_mode='r')
        permutation_indexes = np.random.permutation(camera_images.shape[0])
        camera_images = camera_images[permutation_indexes][:number_of_images_per_camera]
        if dataset != 'unlabeled':
            camera_labels = camera_labels[permutation_indexes][:number_of_images_per_camera]
        self.camera_data_list.append(CameraData(images=camera_images, labels=camera_labels, roi=camera_roi,
                                                perspective=camera_perspective))
        self.start_indexes.append(self.length)
        self.length += camera_images.shape[0]
    self.map_directory_name = map_directory_name
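# `start_indexes` records the first flat dataset index belonging to each camera, which
# supports mapping a flat index back to a camera. A minimal sketch of that lookup, assuming
# it happens in a `__getitem__` that is not shown in this section:
import bisect

def camera_index_for(flat_index, start_indexes):
    """Finds the camera whose index range contains the given flat index."""
    return bisect.bisect_right(start_indexes, flat_index) - 1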
def __init__(self, dataset_size, observation_count, settings, seed=None):
    seed_all(seed)
    self.examples, self.labels = generate_polynomial_examples(dataset_size, observation_count)
    # Repeat the data to fill a full batch when the dataset is smaller than the batch size.
    # (`math.ceil` is required here: `np.repeat` rejects float repeat counts, and rounding up
    # matches the sibling dataset classes.)
    if self.labels.shape[0] < settings.batch_size:
        repeats = math.ceil(settings.batch_size / self.labels.shape[0])
        self.examples = np.repeat(self.examples, repeats, axis=0)
        self.labels = np.repeat(self.labels, repeats, axis=0)
    self.length = self.labels.shape[0]
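# A quick check of the repeat-to-fill-a-batch logic: with 3 labels and a batch size of 8,
# math.ceil(8 / 3) == 3 repeats yields 9 examples, enough to draw one full batch.
import math
import numpy as np

labels = np.array([0.1, 0.2, 0.3])
repeats = math.ceil(8 / labels.shape[0])
assert np.repeat(labels, repeats, axis=0).shape[0] == 9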
def __init__(self, hidden_size=10):
    super().__init__()
    seed_all(0)
    self.linear1 = Linear(observation_count * irrelevant_data_multiplier, hidden_size)
    self.linear2 = Linear(hidden_size, hidden_size)
    self.linear3 = Linear(hidden_size, hidden_size)
    self.linear4 = Linear(hidden_size, 2)
    self.features = None
    self.gradient_sum = torch.tensor(0, device=gpu)
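# `seed_all` is called throughout this module but not defined in this section. A plausible
# implementation, seeding Python, NumPy, and PyTorch together (an assumption about the
# repository's actual helper):
import random
import numpy as np
import torch

def seed_all(seed=None):
    """Seeds the common random number generators; None draws a fresh random seed."""
    random.seed(seed)
    np.random.seed(seed)
    if seed is not None:
        torch.manual_seed(seed)
    else:
        torch.seed()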
def __init__(self, number_of_bins=10):
    super().__init__()
    seed_all(0)
    self.linear1 = Linear(observation_count * irrelevant_data_multiplier, 100)
    self.linear2 = Linear(100, 100)
    self.linear3 = Linear(100, 100)
    self.linear4 = Linear(100, number_of_bins)
    self.features = None
    self.gradient_sum = torch.tensor(0, device=gpu)
def __init__(self, dataset_directory, camera_names, number_of_cameras=None, number_of_images_per_camera=None,
             transform=None, seed=None, unlabeled=False):
    seed_all(seed)
    random.shuffle(camera_names)
    cameras_images = []
    cameras_labels = []
    cameras_rois = []
    cameras_perspectives = []
    for camera_name in camera_names[:number_of_cameras]:
        camera_directory = os.path.join(dataset_directory, camera_name)
        if unlabeled:
            camera_images = np.load(os.path.join(camera_directory, 'unlabeled_images.npy'), mmap_mode='r')
            camera_labels = np.zeros(camera_images.shape[:3], dtype=np.float32)
        else:
            camera_images = np.load(os.path.join(camera_directory, 'images.npy'), mmap_mode='r')
            camera_labels = np.load(os.path.join(camera_directory, 'labels.npy'), mmap_mode='r')
        camera_roi = np.load(os.path.join(camera_directory, 'roi.npy'), mmap_mode='r')
        camera_rois = np.repeat(camera_roi[np.newaxis, :, :], camera_images.shape[0], axis=0)
        camera_perspective = np.load(os.path.join(camera_directory, 'perspective.npy'), mmap_mode='r')
        camera_perspectives = np.repeat(camera_perspective[np.newaxis, :, :], camera_images.shape[0], axis=0)
        permutation_indexes = np.random.permutation(len(camera_labels))
        cameras_images.append(camera_images[permutation_indexes][:number_of_images_per_camera])
        cameras_labels.append(camera_labels[permutation_indexes][:number_of_images_per_camera])
        cameras_rois.append(camera_rois[permutation_indexes][:number_of_images_per_camera])
        cameras_perspectives.append(camera_perspectives[permutation_indexes][:number_of_images_per_camera])
    self.images = np.concatenate(cameras_images, axis=0)
    self.labels = np.concatenate(cameras_labels, axis=0)
    self.rois = np.concatenate(cameras_rois, axis=0)
    self.perspectives = np.concatenate(cameras_perspectives, axis=0)
    self.length = self.labels.shape[0]
    self.transform = transform
def __init__(self, image_size=128, conv_dim=64, number_of_outputs=1):
    seed_all(0)
    super().__init__()
    self.number_of_outputs = number_of_outputs
    self.layer1 = convolution(1, conv_dim, 4, bn=False)
    self.layer2 = convolution(conv_dim, conv_dim * 2, 4)
    self.layer3 = convolution(conv_dim * 2, conv_dim * 4, 4)
    self.layer4 = convolution(conv_dim * 4, conv_dim * 8, 3)
    self.layer5 = convolution(conv_dim * 8, self.number_of_outputs, int(image_size / 16), 1, 0, False)
    self.features = None
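# Like `transpose_convolution`, the `convolution` helper is not defined in this section. A
# minimal sketch in the DCGAN style the call sites imply; the positional parameter order
# (k_size, stride, pad, bn) is inferred from `layer5` above and is an assumption, as is
# leaving activations to be applied in `forward`:
import torch.nn as nn

def convolution(c_in, c_out, k_size, stride=2, pad=1, bn=True):
    """Builds a strided convolution block with optional batch normalization."""
    layers = [nn.Conv2d(c_in, c_out, k_size, stride=stride, padding=pad)]
    if bn:
        layers.append(nn.BatchNorm2d(c_out))
    return nn.Sequential(*layers)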
def __init__(self, z_dim=256, image_size=128, conv_dim=64):
    seed_all(0)
    super().__init__()
    self.fc = transpose_convolution(z_dim, conv_dim * 8, int(image_size / 16), 1, 0, bn=False)
    self.layer1 = transpose_convolution(conv_dim * 8, conv_dim * 4, 4)
    self.layer2 = transpose_convolution(conv_dim * 4, conv_dim * 2, 4)
    self.layer3 = transpose_convolution(conv_dim * 2, conv_dim, 4)
    self.layer4 = transpose_convolution(conv_dim, 3, 4, bn=False)
    self.input_size = z_dim
def __init__(self, dataset='train', seed=None, part='part_A', number_of_examples=None,
             map_directory_name='knn_maps'):
    seed_all(seed)
    self.dataset_directory = os.path.join(database_directory, part, '{}_data'.format(dataset))
    self.file_names = [name for name in os.listdir(os.path.join(self.dataset_directory, 'labels'))
                       if name.endswith('.npy')][:number_of_examples]
    self.length = len(self.file_names)
    self.map_directory_name = map_directory_name
def __init__(self, start=None, end=None, seed=None, batch_size=None):
    seed_all(seed)
    self.dataset_path = database_directory
    meta = pd.read_pickle(os.path.join(self.dataset_path, 'meta.pkl'))
    meta = sklearn.utils.shuffle(meta, random_state=seed)  # Shuffles only the first axis.
    image_names = meta.iloc[:, 0].values
    angles = meta.iloc[:, 1].values
    self.image_names = np.array(image_names[start:end])
    self.angles = np.array(angles[start:end], dtype=np.float32)
    # Repeat the data to force full batch sizes when the dataset is small.
    # (The `batch_size is not None` guard prevents a TypeError with the default argument.)
    if batch_size is not None and self.image_names.shape[0] < batch_size:
        repeats = math.ceil(batch_size / self.image_names.shape[0])
        self.image_names = np.repeat(self.image_names, repeats)
        self.angles = np.repeat(self.angles, repeats)
    self.length = self.angles.shape[0]
    self.image_size = 128
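# `sklearn.utils.shuffle` with a fixed `random_state` produces the same permutation on every
# run, which is what makes the start/end slicing above a stable train/validation split. A
# quick demonstration with hypothetical data:
import pandas as pd
import sklearn.utils

frame = pd.DataFrame({'image_name': ['a.png', 'b.png', 'c.png'], 'angle': [0.1, -0.2, 0.3]})
first = sklearn.utils.shuffle(frame, random_state=0)
second = sklearn.utils.shuffle(frame, random_state=0)
assert first.equals(second)  # Identical row order both times.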
def train(self):
    """Run the SRGAN training for the experiment."""
    self.trial_directory = os.path.join(self.settings.logs_directory, self.settings.trial_name)
    if (self.settings.skip_completed_experiment and os.path.exists(self.trial_directory)
            and '/check' not in self.trial_directory and not self.settings.continue_existing_experiments):
        print('`{}` experiment already exists. Skipping...'.format(self.trial_directory))
        return
    if not self.settings.continue_existing_experiments:
        self.trial_directory = make_directory_name_unique(self.trial_directory)
    else:
        if os.path.exists(self.trial_directory) and self.settings.load_model_path is not None:
            raise ValueError('Cannot load from path and continue existing at the same time.')
        elif self.settings.load_model_path is None:
            self.settings.load_model_path = self.trial_directory
        elif not os.path.exists(self.trial_directory):
            self.settings.continue_existing_experiments = False
    print(self.trial_directory)
    os.makedirs(os.path.join(self.trial_directory, self.settings.temporary_directory), exist_ok=True)
    self.prepare_summary_writers()
    seed_all(0)
    self.dataset_setup()
    self.model_setup()
    self.prepare_optimizers()
    self.load_models()
    self.gpu_mode()
    self.train_mode()
    self.training_loop()
    print('Completed {}'.format(self.trial_directory))
    if self.settings.should_save_models:
        self.save_models(step=self.settings.steps_to_run)
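# `make_directory_name_unique` is used by both `train` variants but not defined in this
# section. A plausible implementation, appending an incrementing suffix until the name is
# free (an assumption about the actual helper's behavior):
import os

def make_directory_name_unique(directory):
    """Returns the directory name, suffixed with a counter if it already exists."""
    unique_directory = directory
    count = 1
    while os.path.exists(unique_directory):
        count += 1
        unique_directory = '{} {}'.format(directory, count)
    return unique_directory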
def __init__(self):
    seed_all(0)
    super().__init__()
    self.input_size = 100
    self.conv_transpose1 = ConvTranspose2d(self.input_size, 64, kernel_size=18)
    self.conv_transpose2 = ConvTranspose2d(self.conv_transpose1.out_channels, 32, kernel_size=4, stride=2,
                                           padding=1)
    self.conv_transpose3 = ConvTranspose2d(self.conv_transpose2.out_channels, 3, kernel_size=4, stride=2,
                                           padding=1)
def __init__(self):
    seed_all(0)
    super().__init__()
    self.conv1 = Conv2d(3, 32, kernel_size=7, padding=3)
    self.max_pool1 = MaxPool2d(kernel_size=2, stride=2)
    self.conv2 = Conv2d(self.conv1.out_channels, 32, kernel_size=7, padding=3)
    self.max_pool2 = MaxPool2d(kernel_size=2, stride=2)
    self.conv3 = Conv2d(self.conv2.out_channels, 64, kernel_size=5, padding=2)
    self.conv4 = Conv2d(self.conv3.out_channels, 1000, kernel_size=18)
    self.conv5 = Conv2d(self.conv4.out_channels, 400, kernel_size=1)
    self.count_conv = Conv2d(self.conv5.out_channels, 1, kernel_size=1)
    self.density_conv = Conv2d(self.conv5.out_channels, 324, kernel_size=1)
    self.features = None
def __init__(self, seed=None, test_start=0, dataset='train', map_directory_name='i1nn_maps'):
    seed_all(seed)
    self.dataset_directory = UcfCc50Preprocessor().database_directory
    self.file_names = [name for name in os.listdir(os.path.join(self.dataset_directory, 'labels'))
                       if name.endswith('.npy')]
    test_file_names = self.file_names[test_start:test_start + 10]
    if dataset == 'test':
        self.file_names = test_file_names
    else:
        for file_name in test_file_names:
            self.file_names.remove(file_name)
    self.length = len(self.file_names)
    self.map_directory_name = map_directory_name
def __init__(self, image_patch_size=224, label_patch_size=224, seed=None, test_start=0, dataset='train',
             middle_transform=None, inverse_map=False, map_directory_name='i1nn_maps'):
    seed_all(seed)
    self.dataset_directory = UcfCc50Preprocessor().database_directory
    self.file_names = [name for name in os.listdir(os.path.join(self.dataset_directory, 'labels'))
                       if name.endswith('.npy')]
    test_file_names = self.file_names[test_start:test_start + 10]
    if dataset == 'test':
        self.file_names = test_file_names
    else:
        for file_name in test_file_names:
            self.file_names.remove(file_name)
    print('{} images.'.format(len(self.file_names)))
    self.image_patch_size = image_patch_size
    self.label_patch_size = label_patch_size
    half_patch_size = int(self.image_patch_size // 2)
    self.length = 0
    self.start_indexes = []
    for file_name in self.file_names:
        self.start_indexes.append(self.length)
        image = np.load(os.path.join(self.dataset_directory, 'images', file_name))
        y_positions = range(half_patch_size, image.shape[0] - half_patch_size + 1)
        x_positions = range(half_patch_size, image.shape[1] - half_patch_size + 1)
        image_indexes_length = len(y_positions) * len(x_positions)
        self.length += image_indexes_length
    self.middle_transform = middle_transform
    self.inverse_map = inverse_map
    self.map_directory_name = map_directory_name
def __init__(self, dataset='train', image_patch_size=224, label_patch_size=224, seed=None,
             number_of_cameras=None, number_of_images_per_camera=None, middle_transform=None):
    seed_all(seed)
    self.dataset_directory = database_directory
    with open(os.path.join(self.dataset_directory, 'viable_with_validation_and_random_test.json')) as json_file:
        cameras_dict = json.load(json_file)
    camera_names = cameras_dict[dataset]
    random.shuffle(camera_names)
    self.camera_data_list: List[CameraData] = []
    camera_names = camera_names[:number_of_cameras]
    self.image_patch_size = image_patch_size
    self.label_patch_size = label_patch_size
    half_patch_size = int(self.image_patch_size // 2)
    self.length = 0
    self.start_indexes = []
    for camera_name in camera_names:
        camera_directory = os.path.join(self.dataset_directory, camera_name)
        if dataset == 'unlabeled':
            camera_images = np.load(os.path.join(camera_directory, 'unlabeled_images.npy'), mmap_mode='r')
            camera_labels = None
        else:
            camera_images = np.load(os.path.join(camera_directory, 'images.npy'), mmap_mode='r')
            camera_labels = np.load(os.path.join(camera_directory, 'labels.npy'), mmap_mode='r')
        camera_roi = np.load(os.path.join(camera_directory, 'roi.npy'), mmap_mode='r')
        camera_perspective = np.load(os.path.join(camera_directory, 'perspective.npy'), mmap_mode='r')
        permutation_indexes = np.random.permutation(camera_images.shape[0])
        camera_images = camera_images[permutation_indexes][:number_of_images_per_camera]
        if dataset != 'unlabeled':
            camera_labels = camera_labels[permutation_indexes][:number_of_images_per_camera]
        self.camera_data_list.append(CameraData(images=camera_images, labels=camera_labels, roi=camera_roi,
                                                perspective=camera_perspective))
        y_positions = range(half_patch_size, camera_images.shape[1] - half_patch_size + 1)
        x_positions = range(half_patch_size, camera_images.shape[2] - half_patch_size + 1)
        image_indexes_length = len(y_positions) * len(x_positions)
        self.start_indexes.append(self.length)
        self.length += camera_images.shape[0] * image_indexes_length
    self.middle_transform = middle_transform
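# With `start_indexes` recording the first flat index of each camera and each image
# contributing `image_indexes_length` patch positions, a flat dataset index can be decomposed
# back into (camera, image, y, x). A minimal sketch of that arithmetic, assuming it lives in
# a `__getitem__` not shown in this section:
import bisect

def decompose_index(flat_index, start_indexes, patches_per_image, positions_per_row):
    """Splits a flat patch index into camera, image, and patch coordinates."""
    camera_index = bisect.bisect_right(start_indexes, flat_index) - 1
    index_within_camera = flat_index - start_indexes[camera_index]
    image_index, patch_index = divmod(index_within_camera, patches_per_image)
    patch_y, patch_x = divmod(patch_index, positions_per_row)
    return camera_index, image_index, patch_y, patch_x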
def dataset_setup(self):
    """Sets up the datasets for the application."""
    settings = self.settings
    seed_all(settings.labeled_dataset_seed)
    self.train_dataset = SteeringAngleDataset(start=0, end=settings.labeled_dataset_size,
                                              seed=self.settings.labeled_dataset_seed,
                                              batch_size=settings.batch_size)
    self.train_dataset_loader = DataLoader(self.train_dataset, batch_size=settings.batch_size, shuffle=True,
                                           pin_memory=self.settings.pin_memory,
                                           num_workers=settings.number_of_data_workers, drop_last=True)
    self.validation_dataset = SteeringAngleDataset(start=-settings.validation_dataset_size, end=None,
                                                   seed=self.settings.labeled_dataset_seed,
                                                   batch_size=settings.batch_size)
    unlabeled_dataset_start = settings.labeled_dataset_size + settings.validation_dataset_size
    if settings.unlabeled_dataset_size is not None:
        unlabeled_dataset_end = unlabeled_dataset_start + settings.unlabeled_dataset_size
    else:
        unlabeled_dataset_end = -settings.validation_dataset_size
    self.unlabeled_dataset = SteeringAngleDataset(start=unlabeled_dataset_start, end=unlabeled_dataset_end,
                                                  seed=self.settings.labeled_dataset_seed,
                                                  batch_size=settings.batch_size)
    self.unlabeled_dataset_loader = DataLoader(self.unlabeled_dataset, batch_size=settings.batch_size,
                                               shuffle=True, pin_memory=self.settings.pin_memory,
                                               num_workers=settings.number_of_data_workers, drop_last=True)
def __init__(self, dataset='train', transform=None, seed=None, unlabeled=False):
    seed_all(seed)
    dataset_directory = os.path.join(database_directory, 'part_B', '{}_data'.format(dataset))
    if unlabeled:
        self.images = np.load(os.path.join(dataset_directory, 'unlabeled_images.npy'), mmap_mode='r')
        self.labels = np.zeros(self.images.shape[:3], dtype=np.float32)
    else:
        self.images = np.load(os.path.join(dataset_directory, 'images.npy'), mmap_mode='r')
        self.labels = np.load(os.path.join(dataset_directory, 'labels.npy'), mmap_mode='r')
    self.length = self.labels.shape[0]
    self.transform = transform
    self.unlabeled = unlabeled
    settings_.map_directory_name = ['density3e-1']
    settings_.map_multiplier = 1e-3
else:
    raise ValueError(f'{application_name} is not an available application.')
settings_.summary_step_period = 5000
settings_.labeled_dataset_seed = 0
settings_.steps_to_run = 100000
settings_.learning_rate = [1e-4]
settings_.contrasting_distance_function = abs_plus_one_sqrt_mean_neg
settings_.matching_distance_function = abs_mean
settings_.continue_existing_experiments = False
settings_.save_step_period = 20000
settings_.local_setup()
settings_list = convert_to_settings_list(settings_, shuffle=True)
seed_all(0)
previous_trial_directory = None
for settings_ in settings_list:
    trial_name = 'base'
    trial_name += f' {settings_.matching_distance_function.__name__} {settings_.contrasting_distance_function.__name__}'
    trial_name += f' {method_name.value}' if method_name != MethodName.srgan else ''
    trial_name += f' {application_name.value}'
    trial_name += f' {settings_.map_directory_name}' if application_name == ApplicationName.crowd else ''
    trial_name += f' {settings_.crowd_dataset.value}' if application_name == ApplicationName.crowd else ''
    if method_name != MethodName.dnn:
        if application_name == ApplicationName.crowd and settings_.crowd_dataset == CrowdDataset.world_expo:
            trial_name += f' c{settings_.number_of_cameras}i{settings_.number_of_images_per_camera}'
        else:
            trial_name += f' le{settings_.labeled_dataset_size}'
        trial_name += f' ue{settings_.unlabeled_dataset_size}'
        trial_name += f' ul{settings_.matching_loss_multiplier:e}'
def train(self):
    """Run the SRGAN training for the experiment."""
    self.trial_directory = os.path.join(self.settings.logs_directory, self.settings.trial_name)
    if (self.settings.skip_completed_experiment and os.path.exists(self.trial_directory)
            and '/check' not in self.trial_directory):
        print('`{}` experiment already exists. Skipping...'.format(self.trial_directory))
        return
    self.trial_directory = make_directory_name_unique(self.trial_directory)
    print(self.trial_directory)
    os.makedirs(os.path.join(self.trial_directory, self.settings.temporary_directory))
    self.dnn_summary_writer = SummaryWriter(os.path.join(self.trial_directory, 'DNN'))
    self.gan_summary_writer = SummaryWriter(os.path.join(self.trial_directory, 'GAN'))
    self.dnn_summary_writer.summary_period = self.settings.summary_step_period
    self.gan_summary_writer.summary_period = self.settings.summary_step_period
    seed_all(0)
    self.dataset_setup()
    self.model_setup()
    self.load_models()
    d_lr = self.settings.learning_rate
    g_lr = d_lr
    weight_decay = 1e-2
    self.d_optimizer = Adam(self.D.parameters(), lr=d_lr, weight_decay=weight_decay)
    self.g_optimizer = Adam(self.G.parameters(), lr=g_lr)
    self.dnn_optimizer = Adam(self.DNN.parameters(), lr=d_lr, weight_decay=weight_decay)
    step_time_start = datetime.datetime.now()
    train_dataset_generator = infinite_iter(self.train_dataset_loader)
    unlabeled_dataset_generator = infinite_iter(self.unlabeled_dataset_loader)
    for step in range(self.settings.steps_to_run):
        # DNN.
        labeled_examples, labels = next(train_dataset_generator)
        labeled_examples, labels = labeled_examples.to(gpu), labels.to(gpu)
        self.dnn_training_step(labeled_examples, labels, step)
        # GAN.
        unlabeled_examples, _ = next(unlabeled_dataset_generator)
        unlabeled_examples = unlabeled_examples.to(gpu)
        self.gan_training_step(labeled_examples, labels, unlabeled_examples, step)
        if self.gan_summary_writer.is_summary_step():
            print('\rStep {}, {}...'.format(step, datetime.datetime.now() - step_time_start), end='')
            step_time_start = datetime.datetime.now()
            self.eval_mode()
            self.validation_summaries(step)
            self.train_mode()
        # Handle interactive `save` and `quit` commands typed on stdin during training.
        while sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
            line = sys.stdin.readline()
            if 'save' in line:
                torch.save(self.DNN.state_dict(),
                           os.path.join(self.trial_directory, 'DNN_model_{}.pth'.format(step)))
                torch.save(self.D.state_dict(),
                           os.path.join(self.trial_directory, 'D_model_{}.pth'.format(step)))
                torch.save(self.G.state_dict(),
                           os.path.join(self.trial_directory, 'G_model_{}.pth'.format(step)))
                print('\rSaved model for step {}...'.format(step))
            if 'quit' in line:
                self.signal_quit = True
                print('\rQuit requested after current experiment...')
    print('Completed {}'.format(self.trial_directory))
    if self.settings.should_save_models:
        torch.save(self.DNN.state_dict(), os.path.join(self.trial_directory, 'DNN_model.pth'))
        torch.save(self.D.state_dict(), os.path.join(self.trial_directory, 'D_model.pth'))
        torch.save(self.G.state_dict(), os.path.join(self.trial_directory, 'G_model.pth'))
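# `infinite_iter` is used above but not defined in this section. A plausible implementation
# that endlessly cycles a DataLoader, restarting it whenever it is exhausted:
def infinite_iter(data_loader):
    """Yields batches from the data loader forever."""
    while True:
        for batch in data_loader:
            yield batch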