def output(m, d):
    # Composite layer specification: a single Matmul layer (m nodes, d input
    # features) feeding a cross-entropy log loss.
    return {
        "matmul": Matmul.specification(
            name="matmul",
            num_nodes=m,
            num_features=d,
            weights_initialization_scheme="he",
            weights_optimizer_specification=optimizer.SGD.specification(
                lr=0.05,
                l2=1e-3
            )
        ),
        "loss": CrossEntropyLogLoss.specification(
            name="loss",
            num_nodes=m
        )
    }
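# Usage sketch (illustrative; not in the original file). The dict returned by
# output() is a composite-layer specification, so it would be consumed the same
# way test() below consumes one: wrapped in a network specification and passed
# to SequentialNetwork.build(). The network name here is hypothetical.
def build_from_output_spec(m=1, d=2):
    specification = {
        _NAME: "single_matmul_classifier",  # hypothetical name
        _NUM_NODES: m,
        _LOG_LEVEL: logging.ERROR,
        _COMPOSITE_LAYER_SPEC: output(m=m, d=d)
    }
    return SequentialNetwork.build(specification=specification)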
def test():
    M = 1    # Number of output nodes
    D = 2    # Number of input features
    N = 100  # Number of samples

    X, T, V = linear_separable(d=D, n=N)
    # Data bounds (e.g. for plotting the decision boundary).
    x_min, x_max = X[:, 0].min(), X[:, 0].max()
    y_min, y_max = X[:, 1].min(), X[:, 1].max()

    sigmoid_classifier_specification = {
        _NAME: "sigmoid_classifier",
        _NUM_NODES: M,
        _LOG_LEVEL: logging.ERROR,
        _COMPOSITE_LAYER_SPEC: {
            "matmul01": Matmul.specification(
                name="matmul01",
                num_nodes=M,
                num_features=D,
                weights_initialization_scheme="he",
                weights_optimizer_specification=SGD.specification(
                    lr=TYPE_FLOAT(0.2),
                    l2=TYPE_FLOAT(1e-3)
                )
            ),
            "loss": CrossEntropyLogLoss.specification(
                name="loss",
                num_nodes=M,
                loss_function=sigmoid_cross_entropy_log_loss.__qualname__
            )
        }
    }
    logistic_classifier = SequentialNetwork.build(
        specification=sigmoid_classifier_specification,
    )

    for _ in range(50):
        logistic_classifier.train(X=X, T=T)

    prediction = logistic_classifier.predict(
        np.array([-1., -1.], dtype=TYPE_FLOAT)
    )
    # Binary classification: every predicted label must be 0 or 1.
    assert np.all(np.isin(prediction, [0, 1]))
    print(prediction)
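# Sketch (assumption; not part of the original test). The x/y bounds computed in
# test() are commonly used to visualize the learned decision boundary over a
# mesh grid. This helper and its batch predict() call are assumed, not taken
# from the framework.
def plot_decision_boundary(classifier, x_min, x_max, y_min, y_max, resolution=100):
    import matplotlib.pyplot as plt
    xx, yy = np.meshgrid(
        np.linspace(x_min, x_max, resolution),
        np.linspace(y_min, y_max, resolution)
    )
    grid = np.c_[xx.ravel(), yy.ravel()].astype(TYPE_FLOAT)
    # Assumes predict() accepts a batch of shape (N, 2) and returns N labels.
    Z = np.asarray(classifier.predict(grid)).reshape(xx.shape)
    plt.contourf(xx, yy, Z, alpha=0.3)
    plt.show()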
def multilayer_network_specification_bn_to_fail(D, M01, M02, M):
    # Three Matmul layers, each followed by BatchNormalization; the last
    # BatchNormalization feeds the loss directly. The "_to_fail" suffix marks
    # this specification as one that is expected to fail.
    sequential_layer_specification_bn_to_fail = {
        "matmul01": layer.Matmul.specification(
            name="matmul01",
            num_nodes=M01,
            num_features=D,
            weights_initialization_scheme="he",
            weights_optimizer_specification=optimiser.SGD.specification(
                lr=TYPE_FLOAT(0.05),
                l2=TYPE_FLOAT(1e-3)
            )
        ),
        "bn01": layer.BatchNormalization.specification(
            name="bn01",
            num_nodes=M01,
            gamma_optimizer_specification=optimiser.SGD.specification(
                lr=TYPE_FLOAT(0.05),
                l2=TYPE_FLOAT(1e-3)
            ),
            beta_optimizer_specification=optimiser.SGD.specification(
                lr=TYPE_FLOAT(0.05),
                l2=TYPE_FLOAT(1e-3)
            ),
            momentum=TYPE_FLOAT(0.9)
        ),
        "relu01": layer.ReLU.specification(
            name="relu01",
            num_nodes=M01
        ),
        "matmul02": layer.Matmul.specification(
            name="matmul02",
            num_nodes=M02,
            num_features=M01,
            weights_initialization_scheme="he",
            weights_optimizer_specification=optimiser.SGD.specification(
                lr=TYPE_FLOAT(0.05),
                l2=TYPE_FLOAT(1e-3)
            )
        ),
        "bn02": layer.BatchNormalization.specification(
            name="bn02",
            num_nodes=M02,
            gamma_optimizer_specification=optimiser.SGD.specification(
                lr=TYPE_FLOAT(0.05),
                l2=TYPE_FLOAT(1e-3)
            ),
            beta_optimizer_specification=optimiser.SGD.specification(
                lr=TYPE_FLOAT(0.05),
                l2=TYPE_FLOAT(1e-3)
            ),
            momentum=TYPE_FLOAT(0.9)
        ),
        "relu02": layer.ReLU.specification(
            name="relu02",
            num_nodes=M02
        ),
        "matmul03": layer.Matmul.specification(
            name="matmul03",
            num_nodes=M,
            num_features=M02,
            weights_initialization_scheme="he",
            weights_optimizer_specification=optimiser.SGD.specification(
                lr=TYPE_FLOAT(0.05),
                l2=TYPE_FLOAT(1e-3)
            )
        ),
        "bn03": layer.BatchNormalization.specification(
            name="bn03",
            num_nodes=M,
            gamma_optimizer_specification=optimiser.SGD.specification(
                lr=TYPE_FLOAT(0.05),
                l2=TYPE_FLOAT(1e-3)
            ),
            beta_optimizer_specification=optimiser.SGD.specification(
                lr=TYPE_FLOAT(0.05),
                l2=TYPE_FLOAT(1e-3)
            ),
            momentum=TYPE_FLOAT(0.9)
        ),
        "loss": CrossEntropyLogLoss.specification(
            name="loss001",
            num_nodes=M
        )
    }
    return {
        _NAME: "three_layer_classifier_with_batch_normalization",
        _NUM_NODES: M,
        _LOG_LEVEL: logging.ERROR,
        _COMPOSITE_LAYER_SPEC: sequential_layer_specification_bn_to_fail
    }
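# Sketch (hypothetical test; the "_to_fail" suffix suggests this specification
# is expected to be rejected at build or training time, but the concrete
# exception type is framework-specific, so a broad Exception is caught here).
def test_multilayer_network_bn_to_fail():
    spec = multilayer_network_specification_bn_to_fail(D=2, M01=8, M02=4, M=3)
    try:
        network = SequentialNetwork.build(specification=spec)
        network.train(
            X=np.random.randn(10, 2).astype(TYPE_FLOAT),
            T=np.random.randint(0, 3, size=10)
        )
    except Exception:
        return  # expected failure
    raise AssertionError("bn_to_fail specification did not fail")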