Example #1
import torch.nn as nn

def get_act_func(config, logger):
    """Return the activation function specified in the config."""

    if config["activation_function"] == "ReLU":
        if "ReLU" in config:
            logger.debug("activation function: changed ReLu to leakyReLU with secified slope!")
            return nn.LeakyReLU(negative_slope=config["ReLu"])
        else:
            logger.debug("activation function: ReLu")
            return nn.ReLU(True)  
    if config["activation_function"] == "LeakyReLU":
        if "LeakyReLU_negative_slope" in config:
            logger.debug("activation_function: LeakyReLU")
            return nn.LeakyReLU(negative_slope=config["LeakyReLU_negative_slope"])
        elif "LeakyReLU" in config:
            logger.debug("activation_function: LeakyReLU")
            return nn.LeakyReLU(negative_slope=config["LeakyReLU"])
        else:
            logger.debug("activation function: LeakyReLu changed to ReLU because no slope value could be found")
            return nn.LeakyReLU()
    if config["activation_function"] == "Sigmoid":
        logger.debug("activation_function: Sigmoid")
        return nn.Sigmoid()
    if config["activation_function"] == "LogSigmoid":
        logger.debug("activation_function: LogSigmoid")
        return nn.LogSigmoid()
    if config["activation_function"] == "Tanh":
        logger.debug("activation_function: Tanh")
        return nn.Tanh()
    if config["activation_function"] == "SoftMax":
        logger.debug("activation_function: SoftMax")
        return nn.Softmax(dim=1)  # nn.Softmax requires a dim argument; dim=1 assumed here (nn.SoftMax is not a valid class)
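
A minimal usage sketch for Example #1 (the config dict and logger setup here are hypothetical, shaped to match the keys the function reads):

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

# "LeakyReLU_negative_slope" overrides the default slope of 0.01.
config = {"activation_function": "LeakyReLU", "LeakyReLU_negative_slope": 0.2}
act = get_act_func(config, logger)  # -> nn.LeakyReLU(negative_slope=0.2)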
Example #2
# Note: the imports are not shown in the original snippet. nn is torch.nn; nn2
# presumably aliases Torch's legacy Lua-style API (its Spatial* names match torch.legacy.nn).
def FCNN():
	num_classes = 2
	n_layers_enc = 32
	n_layers_ctx = 128
	n_input = 5
	prob_drop = 0.25
	layers = []
	# Encoder
	pool = nn2.SpatialMaxPooling(2,2,2,2)
	layers.append(nn2.SpatialConvolution(n_input, n_layers_enc, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialConvolution(n_layers_enc, n_layers_enc, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(pool)
	# Context Module
	layers.append(nn2.SpatialDilatedConvolution(n_layers_enc, n_layers_ctx, 3, 3, 1, 1, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 2, 2, 2, 2))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 4, 4, 4, 4))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 8, 8, 8, 8))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 16, 16, 16, 16))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 32, 32, 32, 32))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 64, 64, 64, 64))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialDropout(prob_drop))
	layers.append(nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_enc, 1, 1))
	layers.append(nn.ELU())	# Not in the paper
	# Decoder
	layers.append(nn2.SpatialMaxUnpooling(pool))
	layers.append(nn2.SpatialConvolution(n_layers_enc, n_layers_enc, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn2.SpatialConvolution(n_layers_enc, num_classes, 3, 3, 1, 1, 1, 1))
	layers.append(nn.ELU())
	layers.append(nn.Softmax(dim=1)) # Not in the paper; dim=1 (channel dim) assumed, as nn.SoftMax is not a valid class name
	return nn.Sequential(*layers)
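
For reference, a sketch of how a few of the Lua-style layers above map onto modern torch.nn, assuming nn2 follows the Torch7 signatures SpatialConvolution(nIn, nOut, kW, kH, dW, dH, padW, padH) and SpatialDilatedConvolution(nIn, nOut, kW, kH, dW, dH, padW, padH, dilW, dilH):

import torch.nn as nn

# nn2.SpatialConvolution(n_input, n_layers_enc, 3, 3, 1, 1, 1, 1)
enc_conv = nn.Conv2d(5, 32, kernel_size=3, stride=1, padding=1)

# nn2.SpatialDilatedConvolution(n_layers_ctx, n_layers_ctx, 3, 3, 1, 1, 2, 2, 2, 2)
ctx_conv = nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=2, dilation=2)

# nn2.SpatialMaxPooling(2, 2, 2, 2) / nn2.SpatialMaxUnpooling(pool); in torch.nn the
# unpooling layer consumes the indices that the pool returns with return_indices=True.
pool = nn.MaxPool2d(kernel_size=2, stride=2, return_indices=True)
unpool = nn.MaxUnpool2d(kernel_size=2, stride=2)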
Example #3
  def __init__(self):
    super(NeuralNetwork, self).__init__()
    self.features = nn.Sequential(
        nn.Conv2d(1, 64, kernel_size=3, stride=2),
        nn.ReLU(inplace=True),
        nn.MaxPool2d(kernel_size=3, stride=2)
    )
    self.residual_block = nn.Sequential(
        nn.Conv2d(64, 64, kernel_size=3, stride=1),
        nn.ReLU(inplace=True),
        nn.Conv2d(64, 64, kernel_size=3, stride=1),
        nn.ReLU(inplace=True),
        nn.ZeroPad2d(2)  # restores the 4 pixels lost by the two unpadded 3x3 convs
    )
    self.avgpool = nn.AdaptiveAvgPool2d(output_size=(7, 7))

    self.classifier = nn.Sequential(
        nn.Linear(64 * 7 * 7, 2048),
        nn.ReLU(inplace=True),
        nn.Linear(2048, 10),
        nn.Softmax(dim=1)  # dim=1 assumed; nn.SoftMax is not a valid class name
    )
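
The snippet stops at __init__; a forward pass is sketched below as an assumption, reading the residual connection from the block's name (ZeroPad2d(2) restores the spatial size lost by the two unpadded 3x3 convs, so the element-wise add is shape-safe):

  def forward(self, x):
    # Hypothetical wiring: stem -> residual add -> pooling -> classifier head.
    x = self.features(x)
    x = x + self.residual_block(x)  # shapes match thanks to ZeroPad2d(2)
    x = self.avgpool(x)
    x = torch.flatten(x, 1)  # requires `import torch`
    return self.classifier(x)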
Example #4
def Activation(cell_info):
    if cell_info['mode'] == 'none':
        return nn.Sequential()
    elif cell_info['mode'] == 'tanh':
        return nn.Tanh()
    elif cell_info['mode'] == 'hardtanh':
        return nn.Hardtanh()
    elif cell_info['mode'] == 'relu':
        return nn.ReLU(inplace=True)
    elif cell_info['mode'] == 'prelu':
        return nn.PReLU()
    elif cell_info['mode'] == 'elu':
        return nn.ELU(inplace=True)
    elif cell_info['mode'] == 'selu':
        return nn.SELU(inplace=True)
    elif cell_info['mode'] == 'celu':
        return nn.CELU(inplace=True)
    elif cell_info['mode'] == 'sigmoid':
        return nn.Sigmoid()
    elif cell_info['mode'] == 'softmax':
        return nn.Softmax(dim=-1)  # dim=-1 assumed; nn.SoftMax is not a valid class name
    else:
        raise ValueError('Not a valid activation: {}'.format(cell_info['mode']))
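
A quick usage sketch for the Activation factory (the shape of cell_info is taken directly from the function itself):

act = Activation({'mode': 'prelu'})      # -> nn.PReLU()
identity = Activation({'mode': 'none'})  # -> empty nn.Sequential(), i.e. an identity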
Example #5
class Classification_block(nn.Module):  # assumes `import torch.nn as nn`
	def __init__(self, inputChannel, outputChannel):
		super(Classification_block, self).__init__()
		# Layers must be assigned to self so nn.Module registers their parameters.
		self.a1 = nn.AvgPool2d(7)
		self.l1 = nn.Linear(inputChannel, outputChannel)
		self.s1 = nn.Softmax(dim=1)  # dim=1 assumed; nn.SoftMax() is not a valid PyTorch class
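
Example #5's constructor builds pool, linear, and softmax layers but no forward pass; one is sketched below, assuming the layer order given and an input whose spatial size the 7x7 average pool reduces to 1x1:

	# Hypothetical forward method (belongs inside Classification_block):
	def forward(self, x):
		x = self.a1(x)           # (N, C, 7, 7) -> (N, C, 1, 1)
		x = torch.flatten(x, 1)  # requires `import torch`
		return self.s1(self.l1(x))

Usage sketch:

block = Classification_block(inputChannel=64, outputChannel=10)
probs = block(torch.randn(2, 64, 7, 7))  # -> shape (2, 10)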