def __init__(self, n_classes, bc_learning=True, nobias=False, dr_ratio=0.5):
    super(ConvNet, self).__init__()
    self.dr_ratio = dr_ratio
    self.bc_learning = bc_learning
    # BC learning trains against mixed (ratio) labels, so the loss is a KL
    # divergence; plain training uses the usual softmax cross entropy.
    if self.bc_learning:
        self.loss = kl_divergence
    else:
        self.loss = F.softmax_cross_entropy

    # architecture: all convolutions are 3x3, stride 1, pad 1
    kwargs = {'ksize': 3, 'stride': 1, 'pad': 1, 'nobias': nobias}
    with self.init_scope():
        self.conv1_1 = Conv2DBNActiv(3, 64, **kwargs)
        self.conv1_2 = Conv2DBNActiv(64, 64, **kwargs)
        self.conv2_1 = Conv2DBNActiv(64, 128, **kwargs)
        self.conv2_2 = Conv2DBNActiv(128, 128, **kwargs)
        self.conv3_1 = Conv2DBNActiv(128, 256, **kwargs)
        self.conv3_2 = Conv2DBNActiv(256, 256, **kwargs)
        self.conv3_3 = Conv2DBNActiv(256, 256, **kwargs)
        self.conv3_4 = Conv2DBNActiv(256, 256, **kwargs)
        # fully connected layers; in_size is omitted so it is inferred at the
        # first call, while initialW is scaled to the expected fan-in
        self.fc4 = L.Linear(1024, initialW=Uniform(1. / math.sqrt(256 * 4 * 4)))
        self.fc5 = L.Linear(1024, initialW=Uniform(1. / math.sqrt(1024)))
        self.fc6 = L.Linear(n_classes, initialW=Uniform(1. / math.sqrt(1024)))
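# Sketch: a plausible forward pass for the ConvNet above, assuming it is a
# chainer.Chain and that `import chainer.functions as F` is in scope. The 2x2
# max pooling after each conv group, the 32x32 input (so the flatten comes out
# to 256 * 4 * 4, matching the fc4 initializer scale), and the ReLU/dropout
# placement on the fc layers are assumptions for illustration, not taken from
# the source.
def forward(self, x):
    h = self.conv1_2(self.conv1_1(x))      # 3 -> 64 -> 64 channels
    h = F.max_pooling_2d(h, 2)             # 32x32 -> 16x16
    h = self.conv2_2(self.conv2_1(h))      # 64 -> 128 -> 128
    h = F.max_pooling_2d(h, 2)             # 16x16 -> 8x8
    h = self.conv3_4(self.conv3_3(self.conv3_2(self.conv3_1(h))))  # 128 -> 256
    h = F.max_pooling_2d(h, 2)             # 8x8 -> 4x4, i.e. 256 * 4 * 4 features
    h = F.dropout(F.relu(self.fc4(h)), self.dr_ratio)
    h = F.dropout(F.relu(self.fc5(h)), self.dr_ratio)
    return self.fc6(h)                     # raw class scores, consumed by self.loss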
def __init__(self, n_in, n_hidden):
    super(DependentSelectionLayer, self).__init__()
    # input dim is the incoming feature size (n_in) + the previous selection z (1)
    with self.init_scope():
        self.rnn = RCNN(n_in + 1, n_hidden, 2)
        # maps [feature; hidden] to a single selection score per position
        self.l = L.Linear(
            n_in + n_hidden, 1,
            initialW=Uniform(0.05), initial_bias=Uniform(0.05))
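# Sketch: how the selection score could be computed at one position, assuming
# x_t is that position's input feature of shape (batch, n_in) and h_t the RCNN
# hidden state of shape (batch, n_hidden). The RCNN call itself is
# project-specific and omitted here; this only illustrates why self.l takes
# n_in + n_hidden inputs.
def score(self, x_t, h_t):
    # concatenate the raw feature with the recurrent summary, map to one logit
    return self.l(F.concat((x_t, h_t), axis=1))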
def __init__(self, n_classes):
    super(ConvNet, self).__init__(
        conv11=ConvBNReLU(3, 64, 3, pad=1),
        conv12=ConvBNReLU(64, 64, 3, pad=1),
        conv21=ConvBNReLU(64, 128, 3, pad=1),
        conv22=ConvBNReLU(128, 128, 3, pad=1),
        conv31=ConvBNReLU(128, 256, 3, pad=1),
        conv32=ConvBNReLU(256, 256, 3, pad=1),
        conv33=ConvBNReLU(256, 256, 3, pad=1),
        conv34=ConvBNReLU(256, 256, 3, pad=1),
        fc4=L.Linear(256 * 4 * 4, 1024,
                     initialW=Uniform(1. / math.sqrt(256 * 4 * 4))),
        fc5=L.Linear(1024, 1024, initialW=Uniform(1. / math.sqrt(1024))),
        fc6=L.Linear(1024, n_classes, initialW=Uniform(1. / math.sqrt(1024))))
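# Note: passing links as keyword arguments to Chain.__init__ is the legacy
# (Chainer v1) registration style. A minimal sketch of the same registration in
# the modern init_scope() idiom, which the first ConvNet above already uses;
# the elided links follow the identical pattern.
def __init__(self, n_classes):
    super(ConvNet, self).__init__()
    with self.init_scope():
        self.conv11 = ConvBNReLU(3, 64, 3, pad=1)
        # ... conv12 through conv34 and fc4/fc5 registered the same way ...
        self.fc6 = L.Linear(1024, n_classes,
                            initialW=Uniform(1. / math.sqrt(1024)))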