def test_str_to_int_list(self):
    """str_to_int_list parses string-encoded int lists; lists pass through."""
    # Nested list given as a string.
    self.assertEqual(str_to_int_list('[[1,2],[2,3]]'), [[1, 2], [2, 3]])
    # An already-parsed list is returned unchanged.
    self.assertEqual(str_to_int_list([[1, 2], [2, 3]]), [[1, 2], [2, 3]])
    # Flat list given as a string.
    self.assertEqual(str_to_int_list('[1,2,3]'), [1, 2, 3])
    # Singly-nested list given as a string.
    self.assertEqual(str_to_int_list('[[1,2,3]]'), [[1, 2, 3]])
def __init__(self, hparams):
    """Convolutional autoencoder with linear bottleneck layers.

    hparams must provide `radii` (parseable by str_to_int_list) and
    `latent_space_size`.
    """
    super(AdvancedAmphibAutoencoder, self).__init__()
    self.radii = str_to_int_list(hparams.radii)
    self.radius = min(self.radii)
    # Square patch of side 2*radius + 1, one channel per radius.
    self.h_w = 2 * self.radius + 1
    self.im_size = self.h_w * self.h_w * len(self.radii)
    channels = len(self.radii)
    self.encoder = nn.Sequential(
        nn.Conv2d(channels, 8, 3, stride=1, padding=2),
        nn.ReLU(True),
        nn.Conv2d(8, 16, 3, stride=1),
        nn.ReLU(True),
        nn.Conv2d(16, 32, 3, stride=2),
        nn.ReLU(True),
    )
    # 2048 is the flattened conv-feature count -- assumes the patch size the
    # configured radii produce; TODO confirm against the forward pass.
    self.encoder_linear = nn.Sequential(
        nn.Linear(2048, hparams.latent_space_size),
        nn.ReLU(True),
    )
    self.decoder_linear = nn.Sequential(
        nn.Linear(hparams.latent_space_size, 2048),
        nn.Sigmoid(),
    )
    # Decoder mirrors the encoder with transposed convolutions.
    self.decoder = nn.Sequential(
        nn.ConvTranspose2d(32, 16, 3, stride=2),
        nn.ReLU(True),
        nn.ConvTranspose2d(16, 8, 3, stride=1),
        nn.ReLU(True),
        nn.ConvTranspose2d(8, channels, 3, stride=1, padding=2),
        nn.Sigmoid(),
    )
def __init__(self, hparams):
    """Small conv classifier with an explicit latent (bottleneck) layer.

    hparams must provide `radii` (parseable by str_to_int_list),
    `num_classes` and `latent_space_size`.
    """
    super(BasicConvNetLatentSmallForScale, self).__init__()
    self.radii = str_to_int_list(hparams.radii)
    self.radius = min(self.radii)
    # Input patch: one channel per radius, square of side 2*radius + 1.
    self.w = 2 * self.radius + 1
    self.h = 2 * self.radius + 1
    self.patch_size = self.w * self.h
    self.num_classes = hparams.num_classes
    self.latent_space_size = hparams.latent_space_size
    self.features = nn.Sequential(
        nn.Conv2d(len(self.radii), 10, kernel_size=3),
        nn.SELU(inplace=True),
        nn.MaxPool2d(kernel_size=2),
        nn.Conv2d(10, 20, kernel_size=2),
        nn.SELU(inplace=True),
        nn.MaxPool2d(kernel_size=2),
    )
    # Track the spatial size through conv(k=3) -> pool(2) -> conv(k=2)
    # -> pool(2) to size the flattened input of the bottleneck layer.
    # (Renamed from size_after_relu_*: ReLU/SELU does not change the size,
    # the pooling layers do. Removed the unused `latent_size = 5` local.)
    size_after_cnn_1 = self.w - 3 + 1
    size_after_pool_1 = int((size_after_cnn_1 - 2) / 2 + 1)
    size_after_cnn_2 = size_after_pool_1 - 2 + 1
    size_after_pool_2 = int((size_after_cnn_2 - 2) / 2 + 1)
    self.middle_seq = nn.Sequential(
        nn.Linear(20 * size_after_pool_2**2, self.latent_space_size))
    self.classifier = nn.Sequential(
        nn.SELU(inplace=True),
        nn.Linear(self.latent_space_size, self.num_classes),
    )
def __init__(self, hparams):
    """Conv classifier with a latent bottleneck and softmax output.

    hparams must provide `radii` (parseable by str_to_int_list),
    `num_classes` and `latent_space_size`.
    """
    super(AdvancedConvNetLatent, self).__init__()
    self.radii = str_to_int_list(hparams.radii)
    self.radius = min(self.radii)
    # Input patch: one channel per radius, square of side 2*radius + 1.
    self.w = 2 * self.radius + 1
    self.h = 2 * self.radius + 1
    self.patch_size = self.w * self.h
    self.num_classes = hparams.num_classes
    self.latent_space_size = hparams.latent_space_size
    self.features = nn.Sequential(
        nn.Conv2d(len(self.radii), 8, 3, stride=1, padding=2),
        nn.ReLU(True),
        nn.Conv2d(8, 16, 3, stride=1),
        nn.ReLU(True),
        nn.Conv2d(16, 32, 3, stride=2),
        nn.ReLU(True),
    )
    # 2048 is the flattened conv-feature count -- assumes the patch size the
    # configured radii produce; TODO confirm against the forward pass.
    self.middle_seq = nn.Sequential(
        nn.Linear(2048, self.latent_space_size))
    # dim=1 matches the legacy implicit-dim behaviour for 2-D (N, C) input
    # and silences the deprecation warning Softmax() emits without `dim`.
    # NOTE(review): if this head is trained with CrossEntropyLoss (which
    # applies log-softmax itself), the Softmax here is redundant -- confirm.
    self.classifier = nn.Sequential(
        nn.SELU(inplace=True),
        nn.Linear(self.latent_space_size, self.num_classes),
        nn.Softmax(dim=1))
def __init__(self, hparams):
    """Deep convolutional autoencoder over multi-radius patches."""
    super(BasicAutoencoder, self).__init__()
    self.radii = str_to_int_list(hparams.radii)
    n_channels = len(self.radii)
    # Encoder: progressively widen channels, final layer downsamples by 2.
    encoder_layers = [
        nn.Conv2d(n_channels, 4, 3, stride=1, padding=2), nn.ReLU(True),
        nn.Conv2d(4, 8, 5, stride=1), nn.ReLU(True),
        nn.Conv2d(8, 16, 5, stride=1), nn.ReLU(True),
        nn.Conv2d(16, 16, 5, stride=1), nn.ReLU(True),
        nn.Conv2d(16, 32, 5, stride=1), nn.ReLU(True),
        nn.Conv2d(32, 32, 3, stride=2), nn.ReLU(True),
    ]
    # Decoder mirrors the encoder with transposed convolutions and a
    # sigmoid output to keep reconstructions in [0, 1].
    decoder_layers = [
        nn.ConvTranspose2d(32, 32, 3, stride=2), nn.ReLU(True),
        nn.ConvTranspose2d(32, 16, 5, stride=1), nn.ReLU(True),
        nn.ConvTranspose2d(16, 16, 5, stride=1), nn.ReLU(True),
        nn.ConvTranspose2d(16, 8, 5, stride=1), nn.ReLU(True),
        nn.ConvTranspose2d(8, 4, 5, stride=1), nn.ReLU(True),
        nn.ConvTranspose2d(4, n_channels, 3, stride=1, padding=2),
        nn.Sigmoid(),
    ]
    self.encoder = nn.Sequential(*encoder_layers)
    self.decoder = nn.Sequential(*decoder_layers)
def __init__(self, hparams, in_channels=1, out_channels=1, bilinear=True):
    """U-Net mapping patches at one radius to patches at another.

    hparams must provide `radii`, `index_in`, `index_out` and `upsample`.
    `start_index`/`end_index` delimit the central crop of the base patch
    corresponding to the output radius.
    """
    # Initialise nn.Module first (moved from mid-method): attribute
    # assignment on a Module before super().__init__() is fragile.
    super(UNet, self).__init__()
    self.radii = str_to_int_list(hparams.radii)
    self.index_in = hparams.index_in
    self.index_out = hparams.index_out
    # Half-width of the crop: the base patch side (2*radii[0] + 1) scaled
    # by the ratio of output to input patch side lengths.
    resize = int((int((2 * self.radii[hparams.index_out] + 1)
                      / (2 * self.radii[hparams.index_in] + 1)
                      * (2 * self.radii[0] + 1)) - 1) / 2)
    self.start_index = self.radii[0] - resize
    self.end_index = self.radii[0] + resize + 1
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.bilinear = bilinear
    self.upsample = hparams.upsample
    # Encoder: stem plus four downsampling stages.
    self.inc = DoubleConv(in_channels, 8)
    self.down1 = Down(8, 16)
    self.down2 = Down(16, 32)
    self.down3 = Down(32, 64)
    self.down4 = Down(64, 128)
    # Decoder mirrors the encoder.
    self.up1 = Up(128, 64, bilinear)
    self.up2 = Up(64, 32, bilinear)
    self.up3 = Up(32, 16, bilinear)
    self.up4 = Up(16, 8, bilinear)
    if self.upsample:
        # Two extra up stages yield an output larger than the input.
        self.up5 = Up(8, 4, bilinear)
        self.up6 = Up(4, 2, bilinear)
        self.outc = OutConv(2, out_channels)
    else:
        self.outc = OutConv(8, out_channels)
def __init__(self, hparams):
    """Conv autoencoder usable for multi-radius input or outpainting.

    In 'Outpainting' mode the model works on a single channel; otherwise it
    uses one channel per radius. fc_miu/fc_log_std exist for a variational
    variant but are inactive here (self.variational is False).
    """
    super(BasicAutoencoder, self).__init__()
    self.radii = str_to_int_list(hparams.radii)
    self.is_outpaint = hparams.pytorch_module == 'Outpainting'
    channels = 1 if self.is_outpaint else len(self.radii)
    in_dim = channels
    out_dim = channels
    self.index_in = hparams.index_in
    self.variational = False
    # Encoder; note: no activation after the last conv (its output feeds
    # the linear heads below in the variational variant).
    self.encoder = nn.Sequential(
        nn.Conv2d(in_dim, 4, 3, stride=1, padding=2), nn.ReLU(True),
        nn.Conv2d(4, 8, 5, stride=1), nn.ReLU(True),
        nn.Conv2d(8, 16, 5, stride=1), nn.ReLU(True),
        nn.Conv2d(16, 16, 5, stride=1), nn.ReLU(True),
        nn.Conv2d(16, 32, 5, stride=1), nn.ReLU(True),
        nn.Conv2d(32, 32, 3, stride=2),
    )
    # Decoder mirrors the encoder; sigmoid keeps outputs in [0, 1].
    self.decoder = nn.Sequential(
        nn.ConvTranspose2d(32, 32, 3, stride=2), nn.ReLU(True),
        nn.ConvTranspose2d(32, 16, 5, stride=1), nn.ReLU(True),
        nn.ConvTranspose2d(16, 16, 5, stride=1), nn.ReLU(True),
        nn.ConvTranspose2d(16, 8, 5, stride=1), nn.ReLU(True),
        nn.ConvTranspose2d(8, 4, 5, stride=1), nn.ReLU(True),
        nn.ConvTranspose2d(4, out_dim, 3, stride=1, padding=2),
        nn.Sigmoid(),
    )
    # Heads for the (currently disabled) variational mode.
    self.fc_miu = nn.Linear(32, 32)
    self.fc_log_std = nn.Linear(32, 32)
def __init__(self, hparams: Namespace):
    """Simple classifier trained on the MultiRadiusDataset dataset.

    hparams carries `arch` (the classifier architecture, a key into
    models.__dict__) plus all other parameters defined in the
    "multi_class_experiment" script.
    """
    super(Classifier, self).__init__()
    self.hparams = hparams
    self.model = models.__dict__[hparams.arch](hparams)
    self.num_classes = hparams.num_classes
    self.original_radiis = str_to_int_list(hparams.original_radiis)
    self.radii = str_to_int_list(hparams.radii)
    self.loss_fn = torch.nn.CrossEntropyLoss()
    self.total_dataset_size = hparams.total_dataset_size
    self.train_portion = hparams.train_portion
    self.embedding_visualization_size = hparams.embedding_visualization_size
    self.size_test = hparams.size_test
    # Accuracy bookkeeping, filled in during training/evaluation.
    self.final_validation_accuracy = 0
    self.max_validation_accuracy = 0
    self.final_test_accuracy = 0
    self.class_names = CLASS_NAMES
    logging.info(self.class_names)
    self.class_paths = CLASS_PATHS
    # Per-project scales, used only when training on different scales.
    self.scales_dict = (
        PROJECT_SCALES_DICT if hparams.different_scales else None)
    # Scale-experiment configuration.
    self.scale_exp = int(hparams.scale_exp)
    self.scale_exp_class_name = hparams.scale_exp_class_name
    self.scale_exp_class_path = hparams.scale_exp_class_path
    self.scale_exp_random_seed = hparams.scale_exp_random_seed
    # -999 is the sentinel meaning "no lower bound".
    threshold = int(hparams.scale_exp_only_higher_than)
    self.scale_exp_only_higher_than = (
        None if threshold == -999 else threshold)
def __init__(self, hparams):
    """Single-hidden-layer linear autoencoder over flattened patches."""
    super(BasicLinearAutoencoder, self).__init__()
    self.radii = str_to_int_list(hparams.radii)
    self.radius = min(self.radii)
    # Square patch of side 2*radius + 1, one channel per radius, flattened.
    self.h_w = 2 * self.radius + 1
    self.im_size = self.h_w * self.h_w * len(self.radii)
    latent = hparams.latent_space_size
    self.encoder = nn.Sequential(nn.Linear(self.im_size, latent),
                                 nn.ReLU(True))
    self.decoder = nn.Sequential(nn.Linear(latent, self.im_size),
                                 nn.Sigmoid())
def __init__(self, hparams):
    """ResNet-18 fine-tuner: pretrained trunk, fresh classification head.

    When hparams.train_all_resnet is False only the new head is trained.
    """
    super(TopoResNet, self).__init__(BasicBlock, [2, 2, 2, 2])
    # Start from ImageNet-pretrained resnet18 weights.
    pretrained = load_state_dict_from_url(model_urls['resnet18'],
                                          progress=True)
    self.load_state_dict(pretrained)
    self.num_classes = hparams.num_classes
    self.latent_space_size = 512  # resnet18 penultimate feature width
    if not hparams.train_all_resnet:
        # Freeze the trunk; only the replacement head below stays trainable
        # (its parameters are created after this loop).
        for param in self.parameters():
            param.requires_grad = False
    self.fc = nn.Sequential(
        nn.SELU(inplace=True),
        nn.Linear(512, self.num_classes),
    )
    self.radii = str_to_int_list(hparams.radii)
    self.radius = min(self.radii)
def __init__(self, hparams):
    """Autoencoder with a conv encoder and a purely linear decoder."""
    super(BasicAmphibAutoencoder, self).__init__()
    self.radii = str_to_int_list(hparams.radii)
    self.radius = min(self.radii)
    # Square patch of side 2*radius + 1, one channel per radius.
    self.h_w = 2 * self.radius + 1
    self.im_size = self.h_w * self.h_w * len(self.radii)
    self.encoder = nn.Sequential(
        nn.Conv2d(len(self.radii), 4, 3, stride=1),
        nn.ReLU(True),
        nn.MaxPool2d(2, stride=2),
    )
    # 196 = 4 channels * 7 * 7 -- appears to assume h_w == 17 (radius 8);
    # TODO confirm this matches the configured radii.
    self.encoder_linear = nn.Sequential(
        nn.Linear(196, hparams.latent_space_size),
        nn.ReLU(True),
    )
    # Decoder reconstructs the flattened patch directly from the latent.
    self.decoder = nn.Sequential(
        nn.Linear(hparams.latent_space_size, self.im_size),
        nn.Sigmoid(),
    )
def load_final_model(final_model_name: str):
    """Load trained weights from FINAL_MODEL_DIR into the module-level
    `final_model_classifier` and switch it to eval mode (CPU only)."""
    load_path = os.path.join(FINAL_MODEL_DIR, final_model_name)
    global final_model_classifier
    final_model_classifier = Classifier(SUPERRESOLUTION_HPARAMS)
    # map_location='cpu' so the checkpoint loads without a GPU.
    final_model_classifier.load_state_dict(
        torch.load(load_path, map_location=torch.device('cpu')))
    final_model_classifier.eval()


# load_final_model('final_model81624.pt')
# Load the model once at import time.
load_final_model(
    'Superresolution_UNet_[34, 136]_lr_0.0015_size_100000_num_classes_4_latent_size_600_train_all_resnet_False.pt'
)
# Radii configuration matching the loaded checkpoint.
FINAL_RADII = str_to_int_list(SUPERRESOLUTION_HPARAMS.radii)
FINAL_ORIGINAL_RADIIS = str_to_int_list(
    SUPERRESOLUTION_HPARAMS.original_radiis)

################################################################################
#                               module functions                               #
################################################################################


def _build_new_dataset_for_query(
        points: List[Point], original_radiis: List[int], test_radius: int,
        class_name: str = 'no_name') -> Tuple[ClassDataset, np.ndarray]:
    # Datasets built for ad-hoc queries are cached under queried_classes/.
    queried_classes_path = os.path.join(FINAL_MODEL_DIR, 'queried_classes')
    Path(queried_classes_path).mkdir(parents=True, exist_ok=True)