def __init__(self, ndf, n_classes, n_rkhs, tclip=20., n_depth=3, encoder_size=32, use_bn=False):
    """Assemble the self-supervised model: encoder, infomax loss, and evaluator.

    Args:
        ndf: base channel width of the encoder.
        n_classes: number of target classes for the evaluator head.
        n_rkhs: dimensionality of the RKHS embedding features.
        tclip: soft-clipping bound passed to the NCE loss.
        n_depth: encoder depth multiplier.
        encoder_size: spatial size of the (square) input images.
        use_bn: whether the encoder uses batch norm.
    """
    super(Model, self).__init__()
    # Keep a record of every constructor argument so the model can be
    # re-instantiated from a checkpoint.
    self.hyperparams = {
        'ndf': ndf,
        'n_classes': n_classes,
        'n_rkhs': n_rkhs,
        'tclip': tclip,
        'n_depth': n_depth,
        'encoder_size': encoder_size,
        'use_bn': use_bn,
    }
    self.tasks = ('1t5', '1t7', '5t5', '5t7', '7t7')
    # A tiny all-zeros batch is enough to let the encoder configure its
    # layer shapes before any real data is seen.
    probe_batch = torch.zeros((2, 3, encoder_size, encoder_size))
    # Encoder that provides multiscale features.
    self.encoder = Encoder(probe_batch, num_channels=3, ndf=ndf,
                           n_rkhs=n_rkhs, n_depth=n_depth,
                           encoder_size=encoder_size, use_bn=use_bn)
    # One dry run to obtain feature tensors that size the evaluator.
    rkhs_1, rkhs_5, _ = self.encoder(probe_batch)
    # Wrap for multi-GPU execution.
    self.encoder = nn.DataParallel(self.encoder)
    # Hacky multi-GPU module computing the infomax (NCE) costs.
    self.g2l_loss = LossMultiNCE(tclip=tclip)
    # Classification head operating on the self-supervised features.
    self.evaluator = Evaluator(n_classes, ftr_1=rkhs_1)
    # Parameter groups: self-supervised modules vs. classifier modules.
    self.info_modules = [self.encoder.module, self.g2l_loss]
    self.class_modules = [self.evaluator]
def __init__(
    self,
    ndf,
    n_classes,
    n_rkhs,
    tclip=20.0,
    n_depth=3,
    encoder_size=32,
    use_bn=False,
):
    """Build the model: a multiscale encoder, an NCE-based infomax loss,
    and a linear evaluator for downstream classification.

    Args:
        ndf: base channel width of the encoder.
        n_classes: number of classes for the evaluator head.
        n_rkhs: dimensionality of the RKHS embedding.
        tclip: soft-clipping bound for the NCE loss.
        n_depth: encoder depth multiplier.
        encoder_size: spatial size of the (square) inputs.
        use_bn: whether the encoder uses batch norm.
    """
    super(Model, self).__init__()
    # Mirror all constructor arguments for later re-instantiation.
    self.hyperparams = dict(
        ndf=ndf,
        n_classes=n_classes,
        n_rkhs=n_rkhs,
        tclip=tclip,
        n_depth=n_depth,
        encoder_size=encoder_size,
        use_bn=use_bn,
    )
    self.tasks = ("1t5", "1t7", "5t5", "5t7", "7t7")

    # Shape-probe batch: lets the encoder lay out its layers.
    shape_probe = torch.zeros((2, 3, encoder_size, encoder_size))

    # Encoder providing multiscale features.
    encoder = Encoder(
        shape_probe,
        num_channels=3,
        ndf=ndf,
        n_rkhs=n_rkhs,
        n_depth=n_depth,
        encoder_size=encoder_size,
        use_bn=use_bn,
    )
    # Dry run to get feature tensors that size the evaluator head.
    rkhs_1, rkhs_5, _ = encoder(shape_probe)
    # Wrap for multi-GPU use.
    self.encoder = nn.DataParallel(encoder)

    # Hacky multi-GPU module for the infomax costs.
    self.g2l_loss = LossMultiNCE(tclip=tclip)
    # Classifier operating on the self-supervised features.
    self.evaluator = Evaluator(n_classes, ftr_1=rkhs_1)

    # Parameter groups: self-supervised vs. classifier modules.
    self.info_modules = [self.encoder.module, self.g2l_loss]
    self.class_modules = [self.evaluator]
def __init__(self, ndf, n_classes, n_rkhs, tclip=20., n_depth=3, encoder_size=32, use_bn=False, decoder_training=False):
    """Build the model with an optional auxiliary decoder.

    Args:
        ndf: base channel width of the encoder.
        n_classes: number of classes for the evaluator head.
        n_rkhs: dimensionality of the RKHS embedding.
        tclip: soft-clipping bound for the NCE loss.
        n_depth: encoder depth multiplier.
        encoder_size: spatial size of the (square) inputs.
        use_bn: whether the encoder uses batch norm.
        decoder_training: whether to train the auxiliary decoder
            (controls the auxiliary reconstruction loss).
    """
    super(Model, self).__init__()
    # Record every constructor argument so the model can be rebuilt from
    # self.hyperparams. Fix: 'decoder_training' was previously the only
    # argument omitted from this dict.
    self.hyperparams = {
        'ndf': ndf,
        'n_classes': n_classes,
        'n_rkhs': n_rkhs,
        'tclip': tclip,
        'n_depth': n_depth,
        'encoder_size': encoder_size,
        'use_bn': use_bn,
        'decoder_training': decoder_training,
    }
    # Controls whether the decoder is trained (auxiliary loss).
    self.decoder_training = decoder_training
    self.tasks = ('1t5', '1t7', '5t5', '5t7', '7t7')
    # Dummy batch used only to shape-configure the encoder's layers.
    dummy_batch = torch.zeros((2, 3, encoder_size, encoder_size))
    # Encoder that provides multiscale features.
    self.encoder = Encoder(dummy_batch, num_channels=3, ndf=ndf,
                           n_rkhs=n_rkhs, n_depth=n_depth,
                           encoder_size=encoder_size, use_bn=use_bn)
    # Dry run: rkhs_1 provides the feature dimensions for the evaluator
    # and the decoder below.
    rkhs_1, rkhs_5, _ = self.encoder(dummy_batch)
    # Convert for multi-GPU use.
    self.encoder = nn.DataParallel(self.encoder)
    # Hacky multi-GPU module for the infomax costs.
    self.g2l_loss = LossMultiNCE(tclip=tclip)
    # Classification head on the self-supervised features.
    self.evaluator = Evaluator(n_classes, ftr_1=rkhs_1)
    # Auxiliary decoder reconstructing from the self-supervised features.
    self.decoder = Decoder(ftr_1=rkhs_1)
    # Parameter groups: self-supervised, classifier, and decoder modules
    # (the decoder group is optimized only when decoder_training is set).
    self.info_modules = [self.encoder.module, self.g2l_loss]
    self.class_modules = [self.evaluator]
    self.decoder_modules = [self.decoder]
def __init__(self, ndf, n_classes, n_rkhs, tclip=20., n_depth=3, use_bn=False, dataset=Dataset.STL10):
    """Construct the model, inferring the input size from the dataset.

    Args:
        ndf: base channel width of the encoder.
        n_classes: number of classes for the evaluator head.
        n_rkhs: dimensionality of the RKHS embedding.
        tclip: soft-clipping bound for the NCE loss.
        n_depth: encoder depth multiplier.
        use_bn: whether the encoder uses batch norm.
        dataset: dataset identifier used to look up the encoder input size.
    """
    super(Model, self).__init__()
    self.n_rkhs = n_rkhs
    self.tasks = ('1t5', '1t7', '5t5', '5t7', '7t7')

    # The spatial input size is derived from the chosen dataset.
    encoder_size = self._get_encoder_size(dataset)
    # Zero-filled probe batch used to shape-configure the encoder.
    probe = torch.zeros((2, 3, encoder_size, encoder_size))

    # Encoder that provides multiscale features.
    encoder = Encoder(probe, nc=3, ndf=ndf, n_rkhs=n_rkhs,
                      n_depth=n_depth, encoder_size=encoder_size,
                      use_bn=use_bn)
    # Dry run to obtain feature tensors that size the evaluator head.
    rkhs_1, rkhs_5, _ = encoder(probe)
    # Wrap for multi-GPU use.
    self.encoder = nn.DataParallel(encoder)

    # Hacky multi-GPU module for the infomax costs.
    self.g2l_loss = LossMultiNCE(tclip=tclip)
    # Classifier operating on the self-supervised features.
    self.evaluator = Evaluator(n_classes, ftr_1=rkhs_1)

    # Parameter groups: self-supervised vs. classifier modules.
    self.info_modules = [self.encoder.module, self.g2l_loss]
    self.class_modules = [self.evaluator]