import numpy as np
import ell


def process_batch_normalization_layer(layer, apply_padding, mean_vals, variance_vals, scale_vals):
    """Returns ELL layers corresponding to a Darknet batch normalization layer"""

    # Batch normalization in Darknet corresponds to a BatchNormalizationLayer
    # followed by a ScalingLayer in ELL
    layers = []

    # Create the BatchNormalizationLayer
    layerParameters = create_layer_parameters(
        layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros,
        layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros)
    meanVector = ell.FloatVector(mean_vals.ravel())
    varianceVector = ell.FloatVector(variance_vals.ravel())

    layers.append(ell.FloatBatchNormalizationLayer(
        layerParameters, meanVector, varianceVector, 1e-6,
        ell.EpsilonSummand_sqrtVariance))

    # Create the ScalingLayer, applying the node's output padding if requested
    if apply_padding:
        layerParameters = create_layer_parameters(
            layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros,
            layer['outputShape'], layer['outputPadding'],
            layer['outputPaddingScheme'])
    else:
        layerParameters = create_layer_parameters(
            layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros,
            layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros)
    layers.append(ell.FloatScalingLayer(layerParameters, scale_vals.ravel()))

    return layers
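
# For reference, a numpy sketch of what the two ELL layers above compute
# together, assuming the standard batch-normalization formula. The epsilon
# matches the 1e-6 passed in; the EpsilonSummand_sqrtVariance argument
# indicates it is added to the variance under the square root.
def _reference_batchnorm(x, mean_vals, variance_vals, scale_vals, epsilon=1e-6):
    # BatchNormalizationLayer: per-channel normalization
    normalized = (x - mean_vals) / np.sqrt(variance_vals + epsilon)
    # ScalingLayer: per-channel scale (the scale_vals weights)
    return normalized * scale_vals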
def get_float_vector_from_cntk_trainable_parameter(tensorParameter):
    """Returns an ell.FloatVector from a trainable parameter

       Note that ELL's ordering is row, column, channel.
       CNTK has them in filter, channel, row, column order.
    """
    tensorValue = tensorParameter.value

    orderedWeights = np.zeros(tensorValue.size, dtype=np.float64)
    for i, columnValue in enumerate(tensorValue):
        orderedWeights[i] = columnValue

    return ell.FloatVector(orderedWeights)
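
# Minimal usage sketch, assuming CNTK is installed; the parameter shape and
# init value here are arbitrary stand-ins. For a 1-D parameter such as a
# bias, the element walk above preserves the order unchanged.
def _example_cntk_bias_to_ell():
    import cntk as C
    bias = C.parameter(shape=(16,), init=0.1)  # hypothetical trainable bias
    return get_float_vector_from_cntk_trainable_parameter(bias)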
def get_bias_layer(layer, apply_padding, bias_vals):
    """Return an ELL bias layer from a darknet layer"""
    if apply_padding:
        layerParameters = create_layer_parameters(
            layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros,
            layer['outputShape'], layer['outputPadding'],
            layer['outputPaddingScheme'])
    else:
        layerParameters = create_layer_parameters(
            layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros,
            layer['outputShapeMinusPadding'], 0, ell.PaddingScheme.zeros)

    biasVector = ell.FloatVector(bias_vals.ravel())
    return ell.FloatBiasLayer(layerParameters, biasVector)
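
# Usage sketch; the layer dict and bias array are whatever the Darknet parser
# produces, and the is_last_ell_layer flag is a hypothetical name. Presumably
# only the last ELL layer emitted for a Darknet node applies the node's
# output padding, so interior layers keep the unpadded output shape.
def _example_bias_for_node(darknet_layer, biases, is_last_ell_layer):
    return get_bias_layer(darknet_layer, apply_padding=is_last_ell_layer,
                          bias_vals=biases)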
def test_float():
    # "Testing" is ELL's test-harness helper; assert_compare_floats is
    # defined elsewhere in the test module
    testing = Testing()

    # empty vector
    e = ell.FloatVector()
    np.testing.assert_equal(e.size(), 0)

    # vector from list of floats
    l = [1.1, 2.2, 3.3, 4.4]
    e = ell.FloatVector(l)
    assert_compare_floats(e, l)

    # vector from numpy array
    a = np.array(range(10), dtype=np.float32)
    e = ell.FloatVector(a)
    np.testing.assert_equal(np.asarray(e), a)

    # convert to numpy using np.array
    b = np.array(e).ravel()
    np.testing.assert_equal(a, b)

    # copy_from numpy array
    e = ell.FloatVector()
    e.copy_from(a)
    np.testing.assert_equal(np.asarray(e), a)

    # convert data types
    a = a.astype(np.float64)
    e = ell.FloatVector(a)
    np.testing.assert_equal(np.asarray(e), a)

    # enumerating the vector element by element
    for i in range(a.shape[0]):
        x = a[i]
        y = e[i]
        np.testing.assert_equal(x, y)

    # numpy arrays are auto-raveled to one dimension
    a = np.ones((10, 10), dtype=np.float32)
    a *= range(10)
    e = ell.FloatVector(a)
    np.testing.assert_equal(np.asarray(e), a.ravel())

    testing.ProcessTest("FloatVector test", True)
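
# assert_compare_floats is referenced above but not shown; a minimal sketch
# of what it is assumed to do (element-wise comparison with a tolerance loose
# enough to absorb float32 rounding of values like 1.1):
def assert_compare_floats(vector, expected):
    np.testing.assert_allclose(np.asarray(vector), np.asarray(expected),
                               rtol=1e-6)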
def get_float_vector_from_constant(constant, size):
    # Workaround: for some reason, np.full is not returning a type that SWIG
    # can parse, so just manually walk the array setting the scalar
    array = np.zeros(size, dtype=np.float64)
    for i in range(array.size):
        array[i] = constant
    return ell.FloatVector(array)
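
# Usage sketch: a constant vector of length 5; the values are arbitrary.
def _example_constant_vector():
    v = get_float_vector_from_constant(0.25, 5)
    assert list(v) == [0.25] * 5  # 0.25 is exactly representable in float32
    return v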