def __init__(self, y, a):
    """
    The mean squared error cost function. Should be used as the last
    layer for a network.
    """
    # Call the base class' constructor.
    Layer.__init__(self, [y, a])
def __init__(self, axis=None, momentum=None, epsilon=None, center=None,
             scale=None, beta_initializer=None, gamma_initializer=None,
             moving_mean_initializer=None, moving_variance_initializer=None,
             beta_regularizer=None, gamma_regularizer=None,
             beta_constraint=None, gamma_constraint=None):
    Layer.__init__(self)
    self.axis = axis
    self.momentum = momentum
    self.epsilon = epsilon
    self.center = center
    self.scale = scale
    self.beta_initializer = beta_initializer
    self.gamma_initializer = gamma_initializer
    self.moving_mean_initializer = moving_mean_initializer
    self.moving_variance_initializer = moving_variance_initializer
    self.beta_regularizer = beta_regularizer
    self.gamma_regularizer = gamma_regularizer
    self.beta_constraint = beta_constraint
    self.gamma_constraint = gamma_constraint
    self.name = 'BatchNormalization'
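# Usage sketch for the configuration-style constructor above; it only records
# hyperparameters. The class name BatchNormalization is taken from self.name;
# the argument values are illustrative assumptions, not project defaults.
bn = BatchNormalization(axis=-1,
                        momentum=0.99,
                        epsilon=1e-3,
                        center=True,
                        scale=True)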
def __init__(self, units, activation, recurrent_activation, use_bias,
             kernel_initializer, recurrent_initializer, bias_initializer,
             kernel_regularizer, recurrent_regularizer, bias_regularizer,
             activity_regularizer, kernel_constraint, recurrent_constraint,
             bias_constraint, dropout, recurrent_dropout, implementation,
             return_sequences, return_state, go_backwards, stateful, unroll):
    Layer.__init__(self)
    KernelBiasSupport.__init__(self, use_bias, kernel_initializer,
                               bias_initializer, kernel_regularizer,
                               bias_regularizer, activity_regularizer,
                               kernel_constraint, bias_constraint)
    self.units = units
    self.activation = activation
    self.recurrent_activation = recurrent_activation
    self.recurrent_initializer = recurrent_initializer
    self.recurrent_regularizer = recurrent_regularizer
    self.dropout = dropout
    self.recurrent_dropout = recurrent_dropout
    self.implementation = implementation
    self.return_sequences = return_sequences
    self.return_state = return_state
    self.go_backwards = go_backwards
    self.stateful = stateful
    self.unroll = unroll
    self.recurrent_constraint = recurrent_constraint
def __init__(self, d={}, name='image', verbose=False):
    self.name = name
    Layer.__init__(self, d, verbose)
    self.i = Layer.arg(d)
    self.threshold = self.attr('threshold', 200)
    self.imgs = {}
    self.mod = None
def __init__(self, shape=None, batch_shape=None, dtype=None, sparse=None,
             tensor=None):
    Layer.__init__(self)
    self.shape = shape
    self.batch_shape = batch_shape
    self.dtype = dtype
    self.sparse = sparse
    self.tensor = tensor
    self.name = 'Input'
def __init__(self, units=None, activation=None, use_bias=True,
             kernel_initializer=None, bias_initializer=None,
             kernel_regularizer=None, bias_regularizer=None,
             activity_regularizer=None, kernel_constraint=None,
             bias_constraint=None):
    Layer.__init__(self)
    KernelBiasSupport.__init__(self, use_bias, kernel_initializer,
                               bias_initializer, kernel_regularizer,
                               bias_regularizer, activity_regularizer,
                               kernel_constraint, bias_constraint)
    self.units = units
    self.activation = activation
    self.name = 'Dense'
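# Usage sketch for the densely connected configuration above. The class name
# Dense is taken from self.name; the initializer strings are illustrative
# assumptions and are simply passed through to KernelBiasSupport.
dense = Dense(units=64,
              activation='relu',
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros')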
def __init__(self, d, verbose=False):
    Layer.__init__(self, d, verbose)
    assert self.attr('box')
    self.attr('units')
    self.outline = Layer.arg(d)
    self.radius = self.attr('corner-radius', 0)
    self.width = self.attr('line-width', 1)
    self.fillColor = self.attr('fill-color', None)
    assert self.radius == 0 or self.width < self.radius
def __init__(self, name, pWidth, pHeight, inputWidth, inputHeight, channel,
             prev):
    self._poolInfo = [pWidth, pHeight]
    self._inputInfo = [inputWidth, inputHeight, channel]
    self._calcWindows()
    windowSize = self._windowNum[0] * self._windowNum[1]
    Layer.__init__(self, name, windowSize * self._inputInfo[CHANNEL], prev)
    assert prev is not None
    assert prev._size == inputWidth * inputHeight * channel
def __init__(self, shape, input_shape, activation=tf.nn.tanh):
    self.shape = shape
    self.input_shape = input_shape
    self.activation = (lambda x: x) if activation is None else activation
    Layer.__init__(self, True, [
        {'name': 'b1', 'type': 'b', 'shape': self.shape[-1]},
        {'name': 'b2', 'type': 'b', 'shape': self.shape[-2]},
        {'name': 'w', 'type': 'w', 'shape': self.shape}
    ])
def __init__(self, d, verbose=False):
    Layer.__init__(self, d, verbose)
    self.color = Layer.arg(d)
    self.r1 = self.attr('radius', 2)
    self.r2 = self.attr('distance', 5) + self.r1
    self.grad = self.attr('grad', True)
    self.opacity = int(255 * min(self.attr('opacity', 100.0), 100.0) / 100.0)
    assert self.attr('box')
    self.attr('units')
def __init__(self, data=None, projection=None, geotransform=None,
             name=None, keywords=None, style_info=None):
    """Initialise object with either data or filename

    NOTE: Doc strings in constructor are not harvested and exposed in
    online documentation. Hence the details are specified in the class
    docstring.
    """

    # Invoke common layer constructor
    Layer.__init__(self,
                   name=name,
                   projection=projection,
                   keywords=keywords,
                   style_info=style_info)

    # Input checks
    if data is None:
        # Instantiate empty object
        self.geotransform = None
        self.rows = self.columns = 0
        return

    # Initialisation
    if isinstance(data, basestring):
        self.read_from_file(data)
    elif isinstance(data, QgsRasterLayer):
        self.read_from_qgis_native(data)
    else:
        # Assume that data is provided as a numpy array
        # with extra keyword arguments supplying metadata
        self.data = numpy.array(data, dtype='d', copy=False)

        proj4 = self.get_projection(proj4=True)
        if 'longlat' in proj4 and 'WGS84' in proj4:
            # This is only implemented for geographic coordinates
            # Omit check for projected coordinate systems
            check_geotransform(geotransform)

        self.geotransform = geotransform
        self.rows = data.shape[0]
        self.columns = data.shape[1]
        self.number_of_bands = 1

        # We assume internal numpy layers are using nan correctly
        # FIXME (Ole): If read from file is refactored to load the data
        # this should be taken care of there
        self.nodata_value = numpy.nan
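# Usage sketch: constructing the raster layer above from an in-memory numpy
# array (the class name Raster, the projection value and the geotransform
# numbers are illustrative assumptions; a filename or QgsRasterLayer instance
# could be passed as data instead).
import numpy

grid = numpy.zeros((10, 20), dtype='d')
# GDAL-style geotransform:
# (top-left x, x-resolution, 0, top-left y, 0, negative y-resolution)
geotransform = (105.0, 0.01, 0.0, -6.0, 0.0, -0.01)
layer = Raster(data=grid,
               projection=None,          # a WKT projection string would normally go here
               geotransform=geotransform,
               name='example_raster')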
def __init__(self, shape):
    self.shape = shape
    Layer.__init__(self, False, [{
        'name': '_marginal_init',
        'type': 'b',
        'shape': shape[1:]
    }])

    # Define state placeholder
    self.marginal = tf.placeholder(tf.float32, [None, self.shape[-1]])
    self._marginal = self.marginal
def __init__(self, name, prev):
    assert prev is not None
    Layer.__init__(self, name, prev._size, prev)

    # Running statistics and iteration counter
    self._average = np.zeros((prev._size, 1))
    self._variance = np.zeros((prev._size, 1))
    self._iter = 0
    self._isTraining = False

    # Learnable shift/scale parameters, both filled with ones here
    self._beta = np.zeros((prev._size, 1))
    self._beta.fill(1)
    self._gamma = np.zeros((prev._size, 1))
    self._gamma.fill(1)
def __init__(self, input_dim=None, output_dim=None,
             embeddings_initializer=None, embeddings_regularizer=None,
             activity_regularizer=None, embeddings_constraint=None,
             mask_zero=None, input_length=None):
    Layer.__init__(self)
    self.input_dim = input_dim
    self.output_dim = output_dim
    self.embedding_initializer = embeddings_initializer
    self.embedding_regularizer = embeddings_regularizer
    self.activity_regularizer = activity_regularizer
    self.embedding_constraint = embeddings_constraint
    self.mask_zero = mask_zero
    self.input_length = input_length
    self.name = 'Embedding'
def __init__(self, name, size, prev):
    Layer.__init__(self, name, size, prev)
    assert prev is not None

    # Randomly initialise weights and biases
    self._w = np.random.uniform(0, 2.0 / prev._size, size=(size, prev._size))
    self._b = np.zeros((size, 1))

    # Gradient buffers
    self._dLdw = np.zeros((size, prev._size))
    self._dLdb = np.zeros((size, 1))

    # Optimiser accumulators (V: first moment, S: second moment)
    self._Vdw = np.zeros((size, prev._size))
    self._Vdb = np.zeros((size, 1))
    self._Sdw = np.zeros((size, prev._size))
    self._Sdb = np.zeros((size, 1))
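# Standalone sketch of the weight initialisation used above (runnable on its
# own): weights are drawn uniformly from [0, 2/fan_in), so they are all
# non-negative with mean 1/fan_in, unlike the usual zero-centred schemes.
import numpy as np

fan_in, fan_out = 784, 128
w = np.random.uniform(0, 2.0 / fan_in, size=(fan_out, fan_in))
print(w.min() >= 0.0)                        # True: no negative weights
print(abs(w.mean() - 1.0 / fan_in) < 1e-3)   # sample mean is close to 1/fan_in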
def __init__(self, filters=None, kernel_size=None, strides=1, padding='valid',
             data_format='channels_last', dilation_rate=1, activation=None,
             use_bias=True, kernel_initializer='glorot_uniform',
             bias_initializer='zeros', kernel_regularizer=None,
             bias_regularizer=None, activity_regularizer=None,
             kernel_constraint=None, bias_constraint=None):
    Layer.__init__(self)
    KernelBiasSupport.__init__(self, use_bias, kernel_initializer,
                               bias_initializer, kernel_regularizer,
                               bias_regularizer, activity_regularizer,
                               kernel_constraint, bias_constraint)
    self.strides = strides
    self.activation = activation
    self.filters = filters
    self.kernel_size = kernel_size
    self.padding = padding
    self.data_format = data_format
    self.dilation_rate = dilation_rate
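# Usage sketch for the convolution configuration above. The class name Conv2D
# is an assumption for illustration; unlike Dense and Input, this constructor
# does not set self.name, so the real class name is not visible here.
conv = Conv2D(filters=32,
              kernel_size=(3, 3),
              strides=1,
              padding='same',
              activation='relu')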
def __init__(self, K, N):
    Layer.__init__(self)
    # Number of input neurons
    self.K_ = K
    # Number of output neurons
    self.N_ = N
    self.bias_term_ = None
    self.bias_multiplier_ = None
    self.transpose_ = False

    # Parameter blobs: weights and bias
    self.W = Blob()
    self.b = Blob()
    self.blobs_.append(self.W)
    self.blobs_.append(self.b)
def __init__(self, index, data_mgr, config_topography, size_total,
             ocean_altitude_relative, border_start_relative):
    Layer.__init__(self, "topography", index, data_mgr)
    self.requires_layers = []
    # parameters:
    self.height = config_topography.getfloat("height")
    self.scale = config_topography.getfloat("scale")
    self.ocean_altitude = ocean_altitude_relative * self.height
    self.border_start_relative = border_start_relative
    #
    self.perlin_seed = 9601
    self.perlin_levels = []  # [(scale, amplitude), ...]
    # components:
    self.perlin = None
    # internal variables:
    self._size_half = int(self.data_mgr.size_total / 2.0) * self.scale
    self._border_start = self.border_start_relative * self._size_half
def __init__(self, name, depth, kWidth, kHeight, stride, prev,
             inputWidth, inputHeight, inputChannel):
    self._kernalInfo = [kWidth, kHeight, depth, stride]
    self._inputInfo = [inputWidth, inputHeight, inputChannel]
    self._calcWindows()

    assert prev is not None
    assert prev._size == inputWidth * inputHeight * inputChannel

    Layer.__init__(
        self, name,
        self._windowNum[0] * self._windowNum[1] * self._kernalInfo[DEPTH],
        prev)

    # Kernel weights stored as a (kWidth * kHeight * inputChannel) x depth matrix
    self._kernal = np.random.normal(
        size=(self._kernalInfo[WIDTH] * self._kernalInfo[HEIGHT] *
              self._inputInfo[CHANNEL], self._kernalInfo[DEPTH]))
    self._bias = np.random.normal(size=(1, self._kernalInfo[DEPTH]))

    # Gradient buffers
    self._dLdKernal = np.zeros(self._kernal.shape)
    self._dLdb = np.zeros(self._bias.shape)
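# Standalone arithmetic sketch of the number of convolution windows the
# constructor above multiplies by depth. The layer's own _calcWindows() is not
# shown, so this "valid" convolution formula is an assumption used only to
# illustrate where windowNum[0] * windowNum[1] comes from.
def window_count(input_size, kernel_size, stride):
    return (input_size - kernel_size) // stride + 1

# e.g. a 28x28 input with a 5x5 kernel and stride 1 gives 24 * 24 windows
print(window_count(28, 5, 1) * window_count(28, 5, 1))  # 576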
def __init__(self, hh, ww, fout, pad, stride):
    Layer.__init__(self)

    # Internal configuration/shape fields; initialised to None here and
    # filled in elsewhere during layer setup
    self.kernel_shape_ = None
    self.stride_ = None
    self.pad_ = None
    self.dilation_ = None
    self.conv_input_shape_ = None
    self.col_buffer_shape_ = None
    self.output_shape_ = None
    self.bottom_shape_ = None
    self.num_spatial_axes_ = None
    self.bottom_dim_ = None
    self.top_dim_ = None
    self.channel_axis_ = None
    self.num_ = None
    self.channels_ = None
    self.group_ = None
    self.out_spatial_dim_ = None
    self.weight_offset_ = None
    self.num_output_ = None
    self.bias_term_ = None
    self.is_1x1_ = None
    self.force_nd_im2col_ = None

    # self.N = N
    # self.C = C

    # Constructor arguments: filter height (hh) and width (ww), number of
    # output feature maps (fout), padding and stride
    self.hh = hh
    self.ww = ww
    self.fout = fout
    self.pad = pad
    self.stride = stride

    # Parameter blobs: weights and bias
    self.W = Blob()
    self.b = Blob()
    self.blobs_.append(self.W)
    self.blobs_.append(self.b)
def __init__(self, shape):
    self.shape = shape
    self.input_shape = shape[0]
    Layer.__init__(self, True, [{
        'name': 'b1', 'type': 'b', 'shape': shape[-1]
    }, {
        'name': 'b2', 'type': 'b', 'shape': shape[-1]
    }, {
        'name': 'b3', 'type': 'b', 'shape': shape[-1]
    }, {
        'name': 'w1', 'type': 'w', 'shape': (shape[0] + shape[-1], shape[-1])
    }, {
        'name': 'w2', 'type': 'w', 'shape': (shape[0] + shape[-1], shape[-1])
    }, {
        'name': 'w3', 'type': 'w', 'shape': (shape[0] + shape[-1], shape[-1])
    }, {
        'name': '_state_init', 'type': 'b', 'shape': (shape[-1],)
    }])

    # Define state placeholder
    self.state = tf.placeholder(tf.float32, [None, self.shape[-1]])
    self._state = self.state
def __init__(self, d, verbose=False):
    Layer.__init__(self, d, verbose)
    self.color = Layer.arg(d)
    self.radius = self.attr('gauss-blur-radius', 2)
def __init__(self, batch_size):
    Layer.__init__(self)
    self.batch_size_ = batch_size
    self.datasets_ = None
    self.cur_ = 0
def __init__(self, tree, act_citation):
    Layer.__init__(self, tree)
    self.act_citation = act_citation
def __init__(self, d, verbose=False):
    Layer.__init__(self, d, verbose)
    self.color = Layer.arg(d)
def __init__(self, d, verbose=False):
    Layer.__init__(self, d, verbose)
    self.opacity = min(Layer.arg(d), 100.0) / 100.0
def __init__(self, d, verbose=False):
    Layer.__init__(self, d, verbose)
    self.level = int(Layer.arg(d))
def __init__(self, d, verbose=False):
    Layer.__init__(self, d, verbose)
    self.color = Layer.arg(d)
    self.range = self.attr('range', [.2, .5])
def __init__(self, size):
    Layer.__init__(self, "input", size, None)
def __init__(self, d, verbose=False):
    Layer.__init__(self, d, verbose)
    self.mode = Layer.arg(d)
def __init__(self, data=None, projection=None, geometry=None,
             geometry_type=None, name='', keywords=None, style_info=None):
    """Initialise object with either geometry or filename

    Input
        data: Can be either
            * a filename of a vector file format known to GDAL
            * List of dictionaries of fields associated with point
              coordinates
            * None
        projection: Geospatial reference in WKT format.
            Only used if geometry is provided as a numeric array,
            if None, WGS84 geographic is assumed
        geometry: A list of either point coordinates or polygons/lines
            (see note below)
        geometry_type: Desired interpretation of geometry.
            Valid options are 'point', 'line', 'polygon' or
            the ogr types: 1, 2, 3.
            If None, a geometry_type will be inferred
        name: Optional name for layer.
            Only used if geometry is provided as a numeric array
        keywords: Optional dictionary with keywords that describe the
            layer. When the layer is stored, these keywords will
            be written into an associated file with extension
            .keywords.

            Keywords can for example be used to display text
            about the layer in a web application.
        style_info: Dictionary with information about how this layer
            should be styled. See impact_functions/styles.py
            for examples.

    Notes
        If data is a filename, all other arguments are ignored
        as they will be inferred from the file.

        The geometry type will be inferred from the dimensions of geometry.
        If each entry is one set of coordinates the type will be
        ogr.wkbPoint, if it is an array of coordinates the type will be
        ogr.wkbPolygon.

        Each polygon or line feature takes the form of an Nx2 array
        representing vertices where line segments are joined.
    """

    # Invoke common layer constructor
    Layer.__init__(self,
                   name=name,
                   projection=projection,
                   keywords=keywords,
                   style_info=style_info)

    # Input checks
    if data is None and geometry is None:
        # Instantiate empty object
        self.geometry_type = None
        self.extent = [0, 0, 0, 0]
        return

    if isinstance(data, basestring):
        self.read_from_file(data)
    else:
        # Assume that data is provided as sequences provided as
        # arguments to the Vector constructor
        # with extra keyword arguments supplying metadata

        msg = 'Geometry must be specified'
        verify(geometry is not None, msg)

        msg = 'Geometry must be a sequence'
        verify(is_sequence(geometry), msg)
        self.geometry = geometry
        self.geometry_type = get_geometry_type(geometry, geometry_type)

        if data is None:
            # Generate default attribute as OGR will do that anyway
            # when writing
            data = []
            for i in range(len(geometry)):
                data.append({'ID': i})

        # Check data
        self.data = data
        if data is not None:
            msg = 'Data must be a sequence'
            verify(is_sequence(data), msg)

            msg = ('The number of entries in geometry and data '
                   'must be the same')
            verify(len(geometry) == len(data), msg)
def __init__(self, tree, notices):
    Layer.__init__(self, tree)
    self.notices = notices
def __init__(self, data=None, projection=None, geometry=None,
             geometry_type=None, name=None, keywords=None,
             style_info=None, sublayer=None):
    """Initialise object with either geometry or filename

    Args:
        * data: Can be either
            * A filename of a vector file format known to GDAL.
            * List of dictionaries of field names and attribute values
              associated with each point coordinate.
            * None
        * projection: Geospatial reference in WKT format.
            Only used if geometry is provided as a numeric array,
            if None, WGS84 geographic is assumed.
        * geometry: A list of either point coordinates or polygons/lines
            (see note below).
        * geometry_type: Desired interpretation of geometry. Valid options
            are 'point', 'line', 'polygon' or the ogr types: 1, 2, 3.
            If None, a geometry_type will be inferred from the data.
        * name: Optional name for layer. If None, basename is used.
        * keywords: Optional dictionary with keywords that describe the
            layer. When the layer is stored, these keywords will be
            written into an associated file with extension '.keywords'.

            Keywords can for example be used to display text about the
            layer in an application.
        * style_info: Dictionary with information about how this layer
            should be styled. See impact_functions/styles.py for examples.
        * sublayer: str Optional sublayer (band name in the case of raster,
            table name in case of sqlite etc.) to load. Only applicable to
            those dataformats supporting more than one layer in the data
            file.

    Returns:
        * An instance of class Vector.

    Raises:
        * Propagates any exceptions encountered.

    Notes:
        If data is a filename, all other arguments are ignored
        as they will be inferred from the file.

        The geometry type will be inferred from the dimensions of geometry.
        If each entry is one set of coordinates the type will be
        ogr.wkbPoint, if it is an array of coordinates the type will be
        ogr.wkbPolygon.

        To cast array entries as lines set geometry_type explicitly to
        'line' in the call to Vector. Otherwise, they will default to
        polygons.

        Each polygon or line feature takes the form of an Nx2 array
        representing vertices where line segments are joined.

        If polygons have holes, their geometry must be passed in as a
        list of polygon geometry objects (as defined in module geometry.py).
    """

    # Invoke common layer constructor
    Layer.__init__(self,
                   name=name,
                   projection=projection,
                   keywords=keywords,
                   style_info=style_info,
                   sublayer=sublayer)

    # Input checks
    if data is None and geometry is None:
        # Instantiate empty object
        self.geometry_type = None
        self.extent = [0, 0, 0, 0]
        return

    if isinstance(data, basestring):
        self.read_from_file(data)
    else:
        # Assume that data is provided as sequences provided as
        # arguments to the Vector constructor
        # with extra keyword arguments supplying metadata

        msg = 'Geometry must be specified'
        verify(geometry is not None, msg)

        msg = 'Geometry must be a sequence'
        verify(is_sequence(geometry), msg)

        if len(geometry) > 0 and isinstance(geometry[0], Polygon):
            self.geometry_type = ogr.wkbPolygon
            self.geometry = geometry
        else:
            self.geometry_type = get_geometry_type(geometry, geometry_type)

            # Convert to objects if input is a list of simple arrays
            if self.is_polygon_data:
                self.geometry = [Polygon(outer_ring=x) for x in geometry]
            else:
                self.geometry = geometry

        if data is None:
            # Generate default attribute as OGR will do that anyway
            # when writing
            data = []
            for i in range(len(geometry)):
                data.append({'ID': i})

        # Check data
        self.data = data
        if data is not None:
            msg = 'Data must be a sequence'
            verify(is_sequence(data), msg)

            msg = ('The number of entries in geometry and data '
                   'must be the same')
            verify(len(geometry) == len(data), msg)

        # Establish extent
        if len(geometry) == 0:
            # Degenerate layer
            self.extent = [0, 0, 0, 0]
            return

        # Compute bounding box for each geometry type
        minx = miny = sys.maxint
        maxx = maxy = -minx

        if self.is_point_data:
            A = numpy.array(self.get_geometry())
            minx = min(A[:, 0])
            maxx = max(A[:, 0])
            miny = min(A[:, 1])
            maxy = max(A[:, 1])
        elif self.is_line_data:
            for g in self.get_geometry():
                A = numpy.array(g)
                minx = min(minx, min(A[:, 0]))
                maxx = max(maxx, max(A[:, 0]))
                miny = min(miny, min(A[:, 1]))
                maxy = max(maxy, max(A[:, 1]))
        elif self.is_polygon_data:
            # Do outer ring only
            for g in self.get_geometry(as_geometry_objects=False):
                A = numpy.array(g)
                minx = min(minx, min(A[:, 0]))
                maxx = max(maxx, max(A[:, 0]))
                miny = min(miny, min(A[:, 1]))
                maxy = max(maxy, max(A[:, 1]))

        self.extent = [minx, maxx, miny, maxy]
def __init__(self, data=None, projection=None, geotransform=None,
             name='', keywords=None, style_info=None):
    """Initialise object with either data or filename

    Input
        data: Can be either
            * a filename of a raster file format known to GDAL
            * an MxN array of raster data
            * None (FIXME (Ole): Remove this option)
        projection: Geospatial reference in WKT format.
            Only used if data is provided as a numeric array,
            if None, WGS84 geographic is assumed
        geotransform: GDAL geotransform (6-tuple).
            (top left x, w-e pixel resolution, rotation,
            top left y, rotation, n-s pixel resolution).
            See e.g. http://www.gdal.org/gdal_tutorial.html
            Only used if data is provided as a numeric array,
        name: Optional name for layer.
            Only used if data is provided as a numeric array,
        keywords: Optional dictionary with keywords that describe the
            layer. When the layer is stored, these keywords will
            be written into an associated file with extension
            .keywords.

            Keywords can for example be used to display text
            about the layer in a web application.
        style_info: Dictionary with information about how this layer
            should be styled. See impact_functions/styles.py
            for examples.

    Note that if data is a filename, all other arguments are ignored
    as they will be inferred from the file.
    """

    # Invoke common layer constructor
    Layer.__init__(self,
                   name=name,
                   projection=projection,
                   keywords=keywords,
                   style_info=style_info)

    # Input checks
    if data is None:
        # Instantiate empty object
        self.geotransform = None
        self.rows = self.columns = 0
        return

    # Initialisation
    if isinstance(data, basestring):
        self.read_from_file(data)
    else:
        # Assume that data is provided as an array
        # with extra keyword arguments supplying metadata
        self.data = numpy.array(data, dtype='d', copy=False)

        self.geotransform = geotransform
        self.rows = data.shape[0]
        self.columns = data.shape[1]
        self.number_of_bands = 1
def __init__(self, data=None, projection=None, geometry=None,
             geometry_type=None, name=None, keywords=None,
             style_info=None, sublayer=None):
    """Initialise object with either geometry or filename

    NOTE: Doc strings in constructor are not harvested and exposed in
    online documentation. Hence the details are specified in the class
    docstring.
    """

    # Invoke common layer constructor
    Layer.__init__(self,
                   name=name,
                   projection=projection,
                   keywords=keywords,
                   style_info=style_info,
                   sublayer=sublayer)

    # Input checks
    if data is None and geometry is None:
        # Instantiate empty object
        self.geometry_type = None
        self.extent = [0, 0, 0, 0]
        return

    if isinstance(data, basestring):
        self.read_from_file(data)
    else:
        # Assume that data is provided as sequences provided as
        # arguments to the Vector constructor
        # with extra keyword arguments supplying metadata

        msg = 'Geometry must be specified'
        verify(geometry is not None, msg)

        msg = 'Geometry must be a sequence'
        verify(is_sequence(geometry), msg)

        if len(geometry) > 0 and isinstance(geometry[0], Polygon):
            self.geometry_type = ogr.wkbPolygon
            self.geometry = geometry
        else:
            self.geometry_type = get_geometry_type(geometry, geometry_type)

            if self.is_polygon_data:
                # Convert to objects if input is a list of simple arrays
                self.geometry = [Polygon(outer_ring=x) for x in geometry]
            else:
                # Convert to list if input is an array
                if isinstance(geometry, numpy.ndarray):
                    self.geometry = geometry.tolist()
                else:
                    self.geometry = geometry

        if data is None:
            # Generate default attribute as OGR will do that anyway
            # when writing
            data = []
            for i in range(len(geometry)):
                data.append({'ID': i})

        # Check data
        self.data = data
        if data is not None:
            msg = 'Data must be a sequence'
            verify(is_sequence(data), msg)

            msg = ('The number of entries in geometry and data '
                   'must be the same')
            verify(len(geometry) == len(data), msg)

        # Establish extent
        if len(geometry) == 0:
            # Degenerate layer
            self.extent = [0, 0, 0, 0]
            return

        # Compute bounding box for each geometry type
        minx = miny = sys.maxint
        maxx = maxy = -minx

        if self.is_point_data:
            A = numpy.array(self.get_geometry())
            minx = min(A[:, 0])
            maxx = max(A[:, 0])
            miny = min(A[:, 1])
            maxy = max(A[:, 1])
        elif self.is_line_data:
            for g in self.get_geometry():
                A = numpy.array(g)
                minx = min(minx, min(A[:, 0]))
                maxx = max(maxx, max(A[:, 0]))
                miny = min(miny, min(A[:, 1]))
                maxy = max(maxy, max(A[:, 1]))
        elif self.is_polygon_data:
            # Do outer ring only
            for g in self.get_geometry(as_geometry_objects=False):
                A = numpy.array(g)
                minx = min(minx, min(A[:, 0]))
                maxx = max(maxx, max(A[:, 0]))
                miny = min(miny, min(A[:, 1]))
                maxy = max(maxy, max(A[:, 1]))

        self.extent = [minx, maxx, miny, maxy]
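# Usage sketch: building a point vector layer from in-memory sequences (the
# class name Vector is taken from the related constructors' docstrings above;
# coordinates and attribute values are illustrative). geometry and data must
# have the same length, and geometry_type may be given explicitly as 'point',
# 'line' or 'polygon'.
geometry = [(106.80, -6.17), (106.82, -6.19)]        # one (x, y) per feature
data = [{'population': 150}, {'population': 230}]    # one attribute dict per feature
layer = Vector(data=data,
               geometry=geometry,
               geometry_type='point',
               name='example_points')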
def __init__(self, d, verbose=False):
    Layer.__init__(self, d, verbose)
    self.angle = float(Layer.arg(d))