def create_graph():
    """Create a graph which prints hello for each even number x in the
    input stream, using a conditional RuleBasedModel node and a
    HelloPrinter h1.Action.
    """
    graph = h1.Graph()
    (graph.start()
          # Decision node routes each item on the model's prediction.
          .add(h1.Decision(RuleBasedModel(), result_field="predictions"))
          # 'yes' branch prints hello; 'no' branch is a no-op.
          .add(yes=HelloPrinter(), no=h1.NoOp()))
    graph.end()
    return graph
def create_autocyber_graph():
    """Build the automotive-cybersecurity graph: window generation, then
    attack-window detection, then per-message classification on windows
    flagged as attacks.

    NOTE(review): relies on a module-level ``NoOp`` h1.Action being in
    scope (defined elsewhere in this file) — confirm it is importable here.
    """
    graph = h1.Graph()
    detector = MsgFreqEventDetectorModel().load()
    classifier = GradientBoostingMsgClassifierModel().load()
    (graph.start()
          .add(WindowGenerator())
          # Route each window on the detector's "WindowInAttack" decision.
          .add(h1.Decision(detector, decision_field="WindowInAttack"))
          # Only suspected-attack windows reach the message classifier.
          .add(yes=classifier, no=NoOp()))
    graph.end()
    return graph
# Evaluate the trained message classifier and inspect its confusion matrix
# (bare expression displays the matrix in a notebook context).
m2.evaluate(prepared_data)
m2.metrics['confusion_matrix']

from AutomotiveCybersecurity.graph import WindowGenerator


class NoOp(h1.Action):
    """Graph action for the 'no' branch that deliberately does nothing."""

    def call(self, command, inputs):
        pass


# Wire the full graph: windowing -> attack-window decision -> classification.
graph = h1.Graph()
(graph.start()
      .add(WindowGenerator())
      .add(h1.Decision(m, decision_field="WindowInAttack"))
      .add(yes=m2, no=NoOp()))
graph.end()

# Load one attack test file and assign its column names.
df = pd.read_csv(data['test_attack_files'][0])
df.columns = [
    'Timestamp',
    'Label',
    'CarSpeed',
    'SteeringAngle',
    'YawRate',
    'Gx',
    'Gy',
]
# Prepare data, then train, evaluate, and persist the message classifier.
prepared_data = m2.prep_data(data)
m2.train(prepared_data)
m2.evaluate(prepared_data)
m2.persist()


class NoOp(h1.Action):
    """Graph action for the 'no' branch that deliberately does nothing."""

    def call(self, command, inputs):
        pass


# Rebuild the full graph from persisted models:
# windowing -> attack-window decision -> classification.
graph = h1.Graph()
(graph.start()
      .add(WindowGenerator())
      .add(h1.Decision(MsgFreqEventDetectorModel().load(),
                       decision_field="WindowInAttack"))
      .add(yes=GradientBoostingMsgClassifierModel().load(),
           no=NoOp()))
graph.end()

# Run the graph end-to-end on one test frame; bare .keys() displays the
# result fields in a notebook context.
results = graph.predict({"df": df})
results.keys()

# Now let's evaluate the whole graph, especially focusing on the event-level
# TPR & FPR since they are crucial in the safe-mode deployment use case.
from AutomotiveCybersecurity.util import evaluate_event_graph

evaluate_event_graph(graph, data['test_attack_files'])

# Woa! We ran through all 400ms windows in the test samples and got
# event-level FPR=0.0% with zero false positives! (Note that this is still a
# subsample of the data, but once you've tried it on the full dataset the
# results should be the same: zero false positive at event-level.)