def __init__(self, **kwargs):
    """Strided-convolution classifier for 28x28 inputs.

    Keyword args consumed before delegating to the parent:
        numhid (int): base channel count, default 64.
        outnum (int): number of output units, default 10.
        outact (str|None): 'relu' appends a ReLU head, anything else Identity.
        patest (str): estimator key for the hidden layers, default 'linear'.
        outest (str): estimator key for the output head, defaults to `patest`.
    """
    self._numhid = kwargs.pop('numhid', 64)
    self._outnum = kwargs.pop('outnum', 10)
    self._outact = kwargs.pop('outact', None)
    self._patest = kwargs.pop('patest', 'linear')
    self._outest = kwargs.pop('outest', self._patest)
    super().__init__(**kwargs)
    pat_regime = estimators[self._patest]
    out_regime = estimators[self._outest]
    with self.name_scope():
        # Input stage (28x28): only this layer is marked `isinput`.
        self.add(Conv2D(self._numhid, 4, strides=2, padding=1,
                        use_bias=False, regimes=pat_regime(),
                        isinput=True))
        self.add(ReLU(regimes=pat_regime()))
        # Remaining stages; per the original layout comments the spatial
        # sizes go 14x14 -> 7x7 -> 4x4 -> 1x1.
        for channels, stride, pad in ((self._numhid * 2, 2, 1),
                                      (self._numhid * 4, 1, 0),
                                      (self._numhid * 8, 1, 0)):
            self.add(Conv2D(channels, 4, strides=stride, padding=pad,
                            use_bias=False, regimes=pat_regime()))
            self.add(ReLU(regimes=pat_regime()))
        # Classification head.
        self.add(Dense(self._outnum, regimes=out_regime()))
        head = ReLU if self._outact == 'relu' else Identity
        self.add(head(regimes=out_regime()))
def __init__(self, **kwargs):
    """Three-hidden-layer MLP.

    Keyword args consumed before delegating to the parent:
        outnum (int): number of output units, default 1.
        numhid (int): hidden width, default 64.
        outact (str|None): 'relu' appends a ReLU head, anything else Identity.
        patest (str): estimator key for the hidden layers, default 'linear'.
        outest (str): estimator key for the output head, defaults to `patest`.
    """
    outnum = kwargs.pop('outnum', 1)
    numhid = kwargs.pop('numhid', 64)
    outact = kwargs.pop('outact', None)
    patest = kwargs.pop('patest', 'linear')
    outest = kwargs.pop('outest', patest)
    super().__init__(**kwargs)
    with self.name_scope():
        for _ in range(3):
            self.add(Dense(numhid, regimes=estimators[patest]()))
            self.add(ReLU(regimes=estimators[patest]()))
        self.add(Dense(outnum, regimes=estimators[outest]()))
        # Fix: `outact` was popped but silently ignored (the head was
        # always Identity). Honor it, consistent with the conv variants
        # elsewhere in this file.
        if outact == 'relu':
            self.add(ReLU(regimes=estimators[outest]()))
        else:
            self.add(Identity(regimes=estimators[outest]()))
def __init__(self, **kwargs):
    """Three-hidden-layer MLP (attribute-storing variant).

    Keyword args consumed before delegating to the parent:
        outnum (int): number of output units, default 1.
        numhid (int): hidden width, default 64.
        outact (str|None): 'relu' appends a ReLU head, anything else Identity.
        patest (str): estimator key for the hidden layers, default 'linear'.
        outest (str): estimator key for the output head, defaults to `patest`.
    """
    self._outnum = kwargs.pop('outnum', 1)
    self._numhid = kwargs.pop('numhid', 64)
    self._outact = kwargs.pop('outact', None)
    self._patest = kwargs.pop('patest', 'linear')
    self._outest = kwargs.pop('outest', self._patest)
    pat_est = estimators[self._patest]
    out_est = estimators[self._outest]
    super().__init__(**kwargs)
    with self.name_scope():
        for _ in range(3):
            self.add(Dense(self._numhid, regimes=pat_est()))
            self.add(ReLU(regimes=pat_est()))
        self.add(Dense(self._outnum, regimes=out_est()))
        # Fix: `self._outact` was popped but silently ignored (the head
        # was always Identity). Honor it, consistent with the conv
        # variants elsewhere in this file.
        if self._outact == 'relu':
            self.add(ReLU(regimes=out_est()))
        else:
            self.add(Identity(regimes=out_est()))
def __init__(self, **kwargs):
    """Two-hidden-layer MLP classifier with dropout.

    Keyword args consumed before delegating to the parent:
        outnum (int): number of output units, default 2.
        outact (str|None): 'relu' appends a ReLU head, anything else Identity.
        numhid (int): hidden width, default 512.
        droprate (float): dropout probability, default 0.25.
        use_bias (bool): consumed so the parent never sees it.
        patest (dict): per-role estimator keys, merged over 'linear' defaults.
        explain (dict): per-role explanation methods, merged over defaults.
    """
    outnum = kwargs.pop('outnum', 2)
    outact = kwargs.pop('outact', None)
    numhid = kwargs.pop('numhid', 512)
    droprate = kwargs.pop('droprate', 0.25)
    # NOTE(review): popped but never used below — presumably only consumed
    # so super().__init__ does not receive it; confirm intent.
    use_bias = kwargs.pop('use_bias', False)
    patest = dict(relu='linear', out='linear', pixel='linear', gauss='linear')
    patest.update(kwargs.pop('patest', {}))
    explain = dict(relu='zplus', out='zplus', pixel='zb', gauss='wsquare')
    explain.update(kwargs.pop('explain', {}))
    super().__init__(**kwargs)
    with self.name_scope():
        self += Flatten()
        for _ in range(2):
            self += Dense(numhid, explain=explain['relu'],
                          regimes=estimators[patest['relu']]())
            self += ReLU()
            self += Dropout(droprate)
        # NOTE(review): the output layer uses the 'relu' estimator/explain
        # keys; the 'out' entries above go unused — confirm this is intended.
        self += Dense(outnum, explain=explain['relu'],
                      regimes=estimators[patest['relu']]())
        self += ReLU() if outact == 'relu' else Identity()
def __init__(self, **kwargs):
    """LeNet-style classifier: two conv/pool stages then two dense layers.

    Keyword args consumed before delegating to the parent:
        numhid (int): base channel count, default 64.
        outnum (int): number of output units, default 10.
        patest (str): estimator key for the hidden layers, default 'linear'.
        outest (str): estimator key for the output head, defaults to `patest`.
    """
    self._numhid = kwargs.pop('numhid', 64)
    self._outnum = kwargs.pop('outnum', 10)
    self._patest = kwargs.pop('patest', 'linear')
    self._outest = kwargs.pop('outest', self._patest)
    super().__init__(**kwargs)
    pat_regime = estimators[self._patest]
    out_regime = estimators[self._outest]
    with self.name_scope():
        # Two conv -> ReLU -> 2x2 max-pool stages; kernel shrinks 5 -> 4.
        for channels, kernel in ((self._numhid, 5), (self._numhid * 2, 4)):
            self.add(Conv2D(channels, kernel, strides=1, padding=0,
                            use_bias=True, regimes=pat_regime()))
            self.add(ReLU(regimes=pat_regime()))
            self.add(MaxPool2D(pool_size=2, strides=2))
        # Dense head; note there is no activation between the two Dense
        # layers in the original, so none is added here either.
        self.add(Dense(self._numhid * 2, regimes=pat_regime()))
        self.add(Dense(self._outnum, regimes=out_regime()))
        self.add(Identity(regimes=out_regime()))
def __init__(self, **kwargs):
    """Four-stage 128-channel conv classifier with a dropout dense head.

    Keyword args consumed before delegating to the parent:
        outnum (int): number of output units, default 1.
        outact (str|None): 'relu' appends a ReLU head, anything else Identity.
        numhid (int): dense hidden width, default 512.
        droprate (float): dropout probability, default 0.25.
        use_bias (bool): bias flag for every conv layer, default False.
        patest (dict): per-role estimator keys, merged over 'linear' defaults.
        explain (dict): per-role explanation methods, merged over defaults.
    """
    outnum = kwargs.pop('outnum', 1)
    outact = kwargs.pop('outact', None)
    numhid = kwargs.pop('numhid', 512)
    droprate = kwargs.pop('droprate', 0.25)
    use_bias = kwargs.pop('use_bias', False)
    patest = dict(relu='linear', out='linear', pixel='linear', gauss='linear')
    patest.update(kwargs.pop('patest', {}))
    explain = dict(relu='zplus', out='zplus', pixel='zb', gauss='wsquare')
    explain.update(kwargs.pop('explain', {}))
    super().__init__(**kwargs)
    with self.name_scope():
        # Four identical conv -> ReLU -> pool stages; per the original
        # layout comments the spatial sizes go 28 -> 14 -> 7 -> 3 -> 2.
        for _ in range(4):
            self.add(Conv2D(128, 3, strides=1, padding=1, use_bias=use_bias,
                            explain=explain['pixel'],
                            regimes=estimators[patest['pixel']]()))
            self.add(ReLU())
            self.add(MaxPool2D(pool_size=2, strides=2))
        self.add(Flatten())
        self.add(Dense(numhid, explain=explain['relu'],
                       regimes=estimators[patest['relu']]()))
        self.add(ReLU())
        self.add(Dropout(droprate))
        # NOTE(review): the output layer uses the 'relu' estimator/explain
        # keys; the 'out' entries above go unused — confirm this is intended.
        self.add(Dense(outnum, explain=explain['relu'],
                       regimes=estimators[patest['relu']]()))
        self.add(ReLU() if outact == 'relu' else Identity())
def __init__(self, **kwargs):
    """Transposed-convolution generator: 1x1 latent -> 28x28 image.

    Keyword args consumed before delegating to the parent:
        outnum (int): output channels, default 1.
        outact (str|None): head activation — 'relu', 'clip', 'tanh',
            'batchnorm', or anything else for plain Identity.
        numhid (int): base channel count, default 64.
        clip (list[float]): [low, high] bounds for the 'clip' head.
        use_bias (bool): bias flag for every transposed conv, default False.
        patest (dict): per-role estimator keys, merged over 'linear' defaults.
        explain (dict): per-role explanation methods, merged over defaults.
    """
    outnum = kwargs.pop('outnum', 1)
    outact = kwargs.pop('outact', None)
    numhid = kwargs.pop('numhid', 64)
    clip = kwargs.pop('clip', [-1., 1.])
    use_bias = kwargs.pop('use_bias', False)
    patest = dict(relu='linear', out='linear', pixel='linear', gauss='linear')
    patest.update(kwargs.pop('patest', {}))
    explain = dict(relu='zplus', out='zclip', pixel='zb', gauss='wsquare')
    explain.update(kwargs.pop('explain', {}))
    super().__init__(**kwargs)
    with self.name_scope():
        self.add(Concat())
        # 1x1 -> 4x4; equivalent to Dense + reshape since input is 1x1.
        self.add(Conv2DTranspose(numhid * 8, 4, strides=1, padding=0,
                                 use_bias=use_bias,
                                 explain=explain['gauss'],
                                 regimes=estimators[patest['gauss']]()))
        self.add(BatchNorm())
        self.add(ReLU())
        # Six identical 5x5 stride-1 upsampling stages; per the original
        # layout comments: 4 -> 8 -> 12 -> 16 -> 20 -> 24 -> 28.
        stage_widths = (numhid * 4, numhid * 2, numhid * 2,
                        numhid, numhid, numhid)
        for width in stage_widths:
            self.add(Conv2DTranspose(width, 5, strides=1, padding=0,
                                     output_padding=0, use_bias=use_bias,
                                     explain=explain['relu'],
                                     regimes=estimators[patest['relu']]()))
            self.add(BatchNorm())
            self.add(ReLU())
        # Output head: `outnum` channels at 28x28.
        self.add(Conv2DTranspose(outnum, 5, strides=1, padding=0,
                                 output_padding=0, use_bias=use_bias,
                                 explain=explain['out'],
                                 regimes=estimators[patest['out']](),
                                 noregconst=-1.))
        if outact == 'relu':
            self.add(ReLU())
        elif outact == 'clip':
            self.add(Clip(low=clip[0], high=clip[1]))
        elif outact == 'tanh':
            self.add(Tanh())
        elif outact == 'batchnorm':
            # Parameter-free normalization followed by an explicit Identity,
            # as in the original.
            self.add(BatchNorm(scale=False, center=False))
            self.add(Identity())
        else:
            self.add(Identity())
def __init__(self, **kwargs):
    """DCGAN-style discriminator: strided convs with LeakyReLU.

    Keyword args consumed before delegating to the parent:
        outnum (int): number of output units, default 1.
        outact (str|None): 'relu' appends a ReLU head, anything else Identity.
        numhid (int): base channel count, default 64.
        leakage (float): LeakyReLU negative slope, default 0.1.
        use_bias (bool): bias flag for every conv layer, default False.
        patest (dict): per-role estimator keys, merged over 'linear' defaults.
        explain (dict): per-role explanation methods, merged over defaults.
    """
    outnum = kwargs.pop('outnum', 1)
    outact = kwargs.pop('outact', None)
    numhid = kwargs.pop('numhid', 64)
    leakage = kwargs.pop('leakage', 0.1)
    use_bias = kwargs.pop('use_bias', False)
    patest = dict(relu='linear', out='linear', pixel='linear', gauss='linear')
    patest.update(kwargs.pop('patest', {}))
    explain = dict(relu='zplus', out='zplus', pixel='zb', gauss='wsquare')
    explain.update(kwargs.pop('explain', {}))
    super().__init__(**kwargs)
    with self.name_scope():
        # Input stage (32x32 -> 16x16 per the original comments): no
        # batch norm on the first layer, as usual for DCGAN.
        self.add(Conv2D(numhid, 4, strides=2, padding=1, use_bias=use_bias,
                        explain=explain['pixel'],
                        regimes=estimators[patest['pixel']]()))
        self.add(LeakyReLU(leakage))
        # Downsampling stages 16x16 -> 8x8 -> 4x4, each with batch norm.
        for width in (numhid * 2, numhid * 4):
            self.add(Conv2D(width, 4, strides=2, padding=1,
                            use_bias=use_bias, explain=explain['relu'],
                            regimes=estimators[patest['relu']]()))
            self.add(BatchNorm())
            self.add(LeakyReLU(leakage))
        # 4x4 -> 1x1 output head, flattened to a vector.
        self.add(Conv2D(outnum, 4, strides=1, padding=0, use_bias=use_bias,
                        explain=explain['out'],
                        regimes=estimators[patest['out']]()))
        self.add(Flatten())
        self.add(ReLU() if outact == 'relu' else Identity())
def __init__(self, **kwargs):
    """Compact transposed-conv generator: 1x1 latent -> 28x28 image.

    Keyword args consumed before delegating to the parent:
        outnum (int): output channels, default 1.
        outact (str|None): head activation — 'relu', 'clip', 'tanh',
            'batchnorm', or anything else for plain Identity.
        numhid (int): base channel count, default 64.
        clip (list[float]): [low, high] bounds for the 'clip' head.
        use_bias (bool): bias flag for every transposed conv, default False.
        use_bnorm (bool): insert BatchNorm after hidden stages, default True.
        patest (dict): per-role estimator keys, merged over 'linear' defaults.
        explain (dict): per-role explanation methods, merged over defaults.
    """
    outnum = kwargs.pop('outnum', 1)
    outact = kwargs.pop('outact', None)
    numhid = kwargs.pop('numhid', 64)
    clip = kwargs.pop('clip', [-1., 1.])
    use_bias = kwargs.pop('use_bias', False)
    use_bnrm = kwargs.pop('use_bnorm', True)
    patest = dict(relu='linear', out='linear', pixel='linear', gauss='linear')
    patest.update(kwargs.pop('patest', {}))
    explain = dict(relu='zplus', out='zclip', pixel='zb', gauss='wsquare')
    explain.update(kwargs.pop('explain', {}))
    super().__init__(**kwargs)
    with self.name_scope():
        self.add(Concat())
        # Three hidden stages: (channels, kernel, stride, padding,
        # explain-role). Per the original comments the spatial sizes go
        # 1 -> 3 -> 7 -> 14.
        hidden = (
            (numhid * 8, 4, 1, 0, 'gauss'),
            (numhid * 4, 4, 1, 0, 'relu'),
            (numhid * 2, 4, 2, 1, 'relu'),
        )
        for channels, kernel, stride, pad, role in hidden:
            self.add(Conv2DTranspose(channels, kernel, strides=stride,
                                     padding=pad, use_bias=use_bias,
                                     explain=explain[role],
                                     regimes=estimators[patest[role]]()))
            if use_bnrm:
                self.add(BatchNorm())
            self.add(ReLU())
        # Output head: 14x14 -> 28x28 with `outnum` channels.
        self.add(Conv2DTranspose(outnum, 4, strides=2, padding=1,
                                 use_bias=use_bias, explain=explain['out'],
                                 regimes=estimators[patest['out']](),
                                 noregconst=-1.))
        if outact == 'relu':
            self.add(ReLU())
        elif outact == 'clip':
            self.add(Clip(low=clip[0], high=clip[1]))
        elif outact == 'tanh':
            self.add(Tanh())
        elif outact == 'batchnorm':
            # Parameter-free normalization followed by an explicit Identity,
            # as in the original.
            self.add(BatchNorm(scale=False, center=False))
            self.add(Identity())
        else:
            self.add(Identity())
def __init__(self, **kwargs):
    """Single linear unit: one Dense(1) followed by Identity.

    Keyword args consumed before delegating to the parent:
        patest (str): estimator key for both layers, default 'linear'.
    """
    self._patest = kwargs.pop('patest', 'linear')
    super().__init__(**kwargs)
    regime = estimators[self._patest]
    with self.name_scope():
        self.add(Dense(1, regimes=regime()))
        self.add(Identity(regimes=regime()))