def __init__(self, num_classes=61, mem_size=512, REGRESSOR=False):
    super(SelfSupAttentionModel, self).__init__()
    self.num_classes = num_classes
    self.resNet = resnetMod.resnet34(True, True)
    self.mem_size = mem_size
    # fc weights of the ResNet, kept to compute the CAM / attention map
    self.weight_softmax = self.resNet.fc.weight
    self.lstm_cell = MyConvLSTMCell(512, mem_size)
    self.avgpool = nn.AvgPool2d(7)
    self.dropout = nn.Dropout(0.7)
    self.fc = nn.Linear(mem_size, self.num_classes)
    self.classifier = nn.Sequential(self.dropout, self.fc)

    # Flag for the regression option
    self.REGR = REGRESSOR

    # Secondary, self-supervised task branch:
    # ReLU + 1x1 conv + flatten + fully connected, giving a 2*7*7 = 98-length
    # output (or 7*7 = 49 in the regression variant)
    self.mmapPredictor = nn.Sequential()
    self.mmapPredictor.add_module('mmap_relu', nn.ReLU(True))
    self.mmapPredictor.add_module('convolution', nn.Conv2d(512, 100, kernel_size=1))
    self.mmapPredictor.add_module('flatten', Flatten())

    # Different output dimensions for the standard selfSup and regSelfSup tasks
    if self.REGR:
        self.mmapPredictor.add_module('fc_2', nn.Linear(100 * 7 * 7, 7 * 7))
    else:
        self.mmapPredictor.add_module('fc_2', nn.Linear(100 * 7 * 7, 2 * 7 * 7))
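
The Flatten module used above isn't defined in this snippet; given the Linear layer that follows it, it presumably collapses everything after the batch dimension. A minimal sketch of that assumption, plus a quick shape check of the self-supervised head on a dummy 512x7x7 feature map (note 2*7*7 = 98 outputs):

import torch
import torch.nn as nn

class Flatten(nn.Module):
    # Assumed behavior: reshape (N, C, H, W) to (N, C*H*W)
    def forward(self, x):
        return x.view(x.size(0), -1)

head = nn.Sequential(
    nn.ReLU(True),
    nn.Conv2d(512, 100, kernel_size=1),  # (N, 512, 7, 7) -> (N, 100, 7, 7)
    Flatten(),                           # -> (N, 4900)
    nn.Linear(100 * 7 * 7, 2 * 7 * 7),   # -> (N, 98): two logits per spatial cell
)
print(head(torch.randn(4, 512, 7, 7)).shape)  # torch.Size([4, 98])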
Example 2
def __init__(self, num_classes=51, mem_size=512, c_cam_classes=1000):
    super(attentionModelLSTA, self).__init__()
    self.num_classes = num_classes
    self.resNet = resnetMod.resnet34(True, True)
    self.mem_size = mem_size
    self.lsta_cell = MyConvLSTACell(512, mem_size, c_cam_classes)
    self.avgpool = nn.AvgPool2d(7)
    self.dropout = nn.Dropout(0.7)
    self.fc = nn.Linear(mem_size, self.num_classes)
    self.classifier = nn.Sequential(self.dropout, self.fc)
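
The forward pass isn't shown in any of these snippets, but the classification path they build is the same: average-pool the recurrent cell's 7x7 memory, flatten, then dropout + linear. A minimal sketch of that assumed path, with a stand-in hidden state:

import torch
import torch.nn as nn

mem_size, num_classes = 512, 51
avgpool = nn.AvgPool2d(7)
classifier = nn.Sequential(nn.Dropout(0.7), nn.Linear(mem_size, num_classes))

# Stand-in for the cell's final memory state: (batch, mem_size, 7, 7)
state = torch.randn(4, mem_size, 7, 7)
feats = avgpool(state).view(state.size(0), -1)  # (4, mem_size)
logits = classifier(feats)                      # (4, num_classes)
print(logits.shape)                             # torch.Size([4, 51])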
Example 3
def __init__(self, num_classes=61, mem_size=512):
    super(clstm_Model, self).__init__()
    self.num_classes = num_classes
    self.resNet = resnetMod.resnet34(True, True)
    self.mem_size = mem_size
    self.lstm_cell = MyConvLSTMCell(512, mem_size)
    self.avgpool = nn.AvgPool2d(7)
    self.dropout = nn.Dropout(0.7)
    self.fc = nn.Linear(mem_size, self.num_classes)
    self.classifier = nn.Sequential(self.dropout, self.fc)
Example 4

def __init__(self, num_classes=61, mem_size=512):
    super(attentionModel, self).__init__()
    self.num_classes = num_classes
    # Initialize the ResNet
    self.resNet = resnetMod.resnet34(True, True)
    self.mem_size = mem_size
    # Keep the weights of the ResNet's last fc layer;
    # we need these to compute the CAM and the attention map
    self.weight_softmax = self.resNet.fc.weight
    # Initialize the convLSTM
    self.lstm_cell = MyConvLSTMCell(512, mem_size)
    # Another avgpool, needed after the convLSTM
    self.avgpool = nn.AvgPool2d(7)
    self.dropout = nn.Dropout(0.7)
    # The final classifier
    self.fc = nn.Linear(mem_size, self.num_classes)
    self.classifier = nn.Sequential(self.dropout, self.fc)
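
The comment above explains that weight_softmax holds the pretrained fc weights for computing the CAM and attention map. A minimal sketch of the standard CAM computation (a weighted sum of feature channels by one class's fc weights, followed by a spatial softmax); the tensors are stand-ins, since the model's forward pass isn't shown:

import torch

weight_softmax = torch.randn(1000, 512)   # stand-in for self.resNet.fc.weight
feature_conv = torch.randn(4, 512, 7, 7)  # stand-in for the last conv feature map

bz, nc, h, w = feature_conv.shape
flat = feature_conv.view(bz, nc, h * w)              # (4, 512, 49)
class_idx = 0                                        # hypothetical class of interest
cam = torch.matmul(weight_softmax[class_idx], flat)  # (4, 49): one score per cell
attention = torch.softmax(cam, dim=1).view(bz, 1, h, w)  # spatial attention map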
Example 5

def __init__(self, num_classes=61, mem_size=512):
    super(RegSelfSupAttentionModel, self).__init__()
    self.num_classes = num_classes
    self.resNet = resnetMod.resnet34(True, True)
    self.mem_size = mem_size
    self.weight_softmax = self.resNet.fc.weight
    self.lstm_cell = MyConvLSTMCell(512, mem_size)
    self.avgpool = nn.AvgPool2d(7)
    self.dropout = nn.Dropout(0.7)
    self.fc = nn.Linear(mem_size, self.num_classes)
    self.classifier = nn.Sequential(self.dropout, self.fc)

    # Secondary, self-supervised task branch (regression variant:
    # 7*7 = 49 outputs, one per spatial cell)
    self.mmapPredictor = nn.Sequential()
    self.mmapPredictor.add_module('mmap_relu', nn.ReLU(True))
    self.mmapPredictor.add_module('convolution',
                                  nn.Conv2d(512, 100, kernel_size=1))
    self.mmapPredictor.add_module('flatten', Flatten())
    self.mmapPredictor.add_module('fc_2', nn.Linear(100 * 7 * 7, 7 * 7))
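
This is the regression counterpart of the first snippet's REGR branch: 7*7 = 49 outputs, one value per spatial cell, versus 2*7*7 = 98 (two logits per cell) in the classification variant. A sketch of how the two heads would plausibly be trained against a 7x7 motion map; the target tensor here is hypothetical:

import torch
import torch.nn.functional as F

pred_reg = torch.randn(4, 7 * 7)      # regression head output
pred_cls = torch.randn(4, 2 * 7 * 7)  # classification head output
target = torch.rand(4, 7, 7)          # hypothetical per-cell motion map in [0, 1]

# Regression: MSE directly against the per-cell motion values
loss_reg = F.mse_loss(pred_reg, target.view(4, -1))

# Classification: binarize the map, then per-cell cross-entropy
logits = pred_cls.view(4, 2, 7 * 7)         # (batch, 2 classes, 49 cells)
labels = (target > 0.5).long().view(4, -1)  # (batch, 49) labels in {0, 1}
loss_cls = F.cross_entropy(logits, labels)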