def __init__(self, nb_states, nb_actions, is_target=True, hidden1=400, hidden2=300, init_w=3e-3):
    """Uncertainty-aware actor: three ADF linear layers with ReLU and MC dropout."""
    super(UAActor, self).__init__()
    # Variance floor applied after every ADF layer.
    self.min_variance = 1e-4
    self.keep_variance_fn = lambda v: keep_variance(v, min_variance=self.min_variance)
    self._noise_variance = 1e-4
    # MC-dropout configuration: drop probability and number of stochastic passes.
    self.dropout_p = 0.1
    self.dropout_n = 3
    self.is_target = is_target
    self.action_count = 0
    kv_fn = self.keep_variance_fn
    self.linear1 = adf.Linear(nb_states, hidden1, keep_variance_fn=kv_fn)
    self.linear2 = adf.Linear(hidden1, hidden2, keep_variance_fn=kv_fn)
    self.linear3 = adf.Linear(hidden2, nb_actions, keep_variance_fn=kv_fn)
    self.ReLU = adf.ReLU(keep_variance_fn=kv_fn)
    self.dropout = adf.Dropout(p=self.dropout_p, keep_variance_fn=kv_fn)
    self.init_weights(init_w)
def __init__(self, block, num_blocks, num_classes=10, p=0.2, noise_variance=1e-3, min_variance=1e-3, initialize_msra=False):
    """ResNet backbone assembled from ADF layers with MC dropout.

    NOTE(review): ``initialize_msra`` is accepted but not used here — kept for
    interface compatibility; confirm whether initialization happens elsewhere.
    """
    super(ResNetADFDropout, self).__init__()
    self.keep_variance_fn = lambda v: keep_variance(v, min_variance=min_variance)
    self._noise_variance = noise_variance
    self.in_planes = 64
    kv_fn = self.keep_variance_fn
    # Stem: 3x3 conv -> BN -> ReLU.
    self.conv1 = adf.Conv2d(3, 64, kernel_size=3, stride=1, padding=1,
                            bias=False, keep_variance_fn=kv_fn)
    self.bn1 = adf.BatchNorm2d(64, keep_variance_fn=kv_fn)
    self.ReLU = adf.ReLU(keep_variance_fn=kv_fn)
    # Four residual stages; spatial resolution halves from stage 2 onward.
    self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1, p=p, keep_variance_fn=kv_fn)
    self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2, p=p, keep_variance_fn=kv_fn)
    self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2, p=p, keep_variance_fn=kv_fn)
    self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2, p=p, keep_variance_fn=kv_fn)
    # Classifier head.
    self.linear = adf.Linear(512 * block.expansion, num_classes, keep_variance_fn=kv_fn)
    self.AvgPool2d = adf.AvgPool2d(keep_variance_fn=kv_fn)
    self.dropout = adf.Dropout(p=p, keep_variance_fn=kv_fn)
def __init__(self, in_planes, planes, stride=1, p=0.2, keep_variance_fn=None):
    """ADF bottleneck residual block: 1x1 -> 3x3 -> 1x1 conv with channel expansion."""
    super(Bottleneck, self).__init__()
    self.keep_variance_fn = keep_variance_fn
    kv_fn = self.keep_variance_fn
    # Output channel count after expansion (self.expansion is a class attribute).
    out_planes = self.expansion * planes
    self.conv1 = adf.Conv2d(in_planes, planes, kernel_size=1,
                            bias=False, keep_variance_fn=kv_fn)
    self.bn1 = adf.BatchNorm2d(planes, keep_variance_fn=kv_fn)
    self.conv2 = adf.Conv2d(planes, planes, kernel_size=3, stride=stride,
                            padding=1, bias=False, keep_variance_fn=kv_fn)
    self.bn2 = adf.BatchNorm2d(planes, keep_variance_fn=kv_fn)
    self.conv3 = adf.Conv2d(planes, out_planes, kernel_size=1,
                            bias=False, keep_variance_fn=kv_fn)
    self.bn3 = adf.BatchNorm2d(out_planes, keep_variance_fn=kv_fn)
    self.ReLU = adf.ReLU(keep_variance_fn=kv_fn)
    # Identity shortcut by default; projection shortcut when shape changes.
    self.shortcut = adf.Sequential()
    if stride != 1 or in_planes != out_planes:
        self.shortcut = adf.Sequential(
            adf.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,
                       bias=False, keep_variance_fn=kv_fn),
            adf.BatchNorm2d(out_planes, keep_variance_fn=kv_fn))
    self.dropout = adf.Dropout(p=p, keep_variance_fn=kv_fn)
def __init__(self, img_channels, output_dim, noise_variance=1e-3, min_variance=1e-3, initialize_msra=False):
    """ResNet-8 with MC dropout, propagating variances through ADF layers.

    Fix: the ``adf.BatchNorm2d``, ``adf.ReLU`` and final ``adf.Linear`` layers
    now receive ``keep_variance_fn`` like every other ADF layer in this file —
    they previously omitted it, so the variance floor was skipped there.
    """
    super(Resnet8_MCDO_adf, self).__init__()
    p = FLAGS.dropout
    self._keep_variance_fn = lambda x: keep_variance(x, min_variance=min_variance)
    self._noise_variance = noise_variance
    kv = self._keep_variance_fn

    def conv_drop(in_ch, out_ch, k, stride=1):
        # Conv with "same"-style padding (k // 2) followed by MC dropout.
        return [
            adf.Conv2d(in_channels=in_ch, out_channels=out_ch, kernel_size=k,
                       padding=k // 2, stride=stride, bias=True,
                       keep_variance_fn=kv),
            adf.Dropout(p, keep_variance_fn=kv),
        ]

    self.layer1 = adf.Sequential(
        *conv_drop(img_channels, 32, 5, stride=2),
        adf.MaxPool2d(keep_variance_fn=kv))
    self.residual_block_1a = adf.Sequential(
        adf.BatchNorm2d(32, keep_variance_fn=kv),
        adf.ReLU(keep_variance_fn=kv),
        *conv_drop(32, 32, 3, stride=2),
        adf.BatchNorm2d(32, keep_variance_fn=kv),
        adf.ReLU(keep_variance_fn=kv),
        *conv_drop(32, 32, 3))
    # 1x1 projection shortcut matching the stride-2 downsampling.
    self.parallel_conv_1 = adf.Sequential(*conv_drop(32, 32, 1, stride=2))
    self.residual_block_2a = adf.Sequential(
        adf.BatchNorm2d(32, keep_variance_fn=kv),
        adf.ReLU(keep_variance_fn=kv),
        *conv_drop(32, 64, 3, stride=2),
        adf.BatchNorm2d(64, keep_variance_fn=kv),
        adf.ReLU(keep_variance_fn=kv),
        *conv_drop(64, 64, 3))
    self.parallel_conv_2 = adf.Sequential(*conv_drop(32, 64, 1, stride=2))
    self.residual_block_3a = adf.Sequential(
        adf.BatchNorm2d(64, keep_variance_fn=kv),
        adf.ReLU(keep_variance_fn=kv),
        *conv_drop(64, 128, 3, stride=2),
        adf.BatchNorm2d(128, keep_variance_fn=kv),
        adf.ReLU(keep_variance_fn=kv),
        *conv_drop(128, 128, 3))
    self.parallel_conv_3 = adf.Sequential(*conv_drop(64, 128, 1, stride=2))
    self.output_dim = output_dim
    self.last_block = adf.Sequential(
        adf.ReLU(keep_variance_fn=kv),
        adf.Linear(6272, self.output_dim, keep_variance_fn=kv))
    # Initialize layers exactly as in Keras: xavier-uniform weights, zero biases;
    # BatchNorm starts at identity (weight=1, bias=0).
    for layer in self.modules():
        if isinstance(layer, (adf.Conv2d, adf.Linear)):
            nn.init.xavier_uniform_(layer.weight, gain=nn.init.calculate_gain('relu'))
            if layer.bias is not None:
                nn.init.constant_(layer.bias, 0)
        elif isinstance(layer, adf.BatchNorm2d):
            nn.init.constant_(layer.weight, 1)
            nn.init.constant_(layer.bias, 0)
def __init__(self, args, initialize_msra=False, noise_variance=1e-3, min_variance=1e-3, log_variance=False):
    """All-convolutional network built from ADF conv blocks.

    NOTE(review): ``args`` and ``log_variance`` are accepted but not used in
    this constructor — kept for interface compatibility.
    """
    super(AllConvNetADF, self).__init__()
    self._keep_variance_fn = lambda v: keep_variance(v, min_variance=min_variance)
    self._noise_variance = noise_variance
    kv = self._keep_variance_fn
    # Stage 1: 3 -> 96 channels; the stride-2 tail conv downsamples.
    self.conv1 = make_conv(3, 96, kernel_size=3, stride=1, keep_variance_fn=kv)
    self.conv1_1 = make_conv(96, 96, kernel_size=3, stride=1, keep_variance_fn=kv)
    self.conv1_2 = make_conv(96, 96, kernel_size=3, stride=2, nonlinear=False, keep_variance_fn=kv)
    self.dropout1 = adf.Dropout(0.5, keep_variance_fn=kv)
    # Stage 2: 96 -> 192 channels.
    self.conv2 = make_conv(96, 192, kernel_size=3, stride=1, keep_variance_fn=kv)
    self.conv2_1 = make_conv(192, 192, kernel_size=3, stride=1, keep_variance_fn=kv)
    self.conv2_2 = make_conv(192, 192, kernel_size=3, stride=2, nonlinear=False, keep_variance_fn=kv)
    self.dropout2 = adf.Dropout(0.5, keep_variance_fn=kv)
    # Stage 3: 1x1 convs reduce to the 10 output channels.
    self.conv3 = make_conv(192, 192, kernel_size=3, stride=1, keep_variance_fn=kv)
    self.conv3_1 = make_conv(192, 192, kernel_size=1, stride=1, keep_variance_fn=kv)
    self.conv3_2 = make_conv(192, 10, kernel_size=1, stride=1, nonlinear=False, keep_variance_fn=kv)
    if initialize_msra:
        finitialize_msra(self.modules(), small=False)
    else:
        finitialize_xavier(self.modules(), small=False)