def dump_equivalent_state(self):
    """Serialize only the state needed for equivalence comparison.

    Extends the Component-level snapshot with this app's counters.
    """
    import utils
    state = Component.dump_equivalent_state(self)
    for attr in ("dp_port_stats", "energyLtoR", "energyRtoL", "lastSwitchList"):
        state[attr] = utils.copy_state(getattr(self, attr))
    return state
def dump_equivalent_state(self):
    """Return copies of the comparison-relevant flow-entry attributes."""
    return {
        "actions": utils.copy_state(self.actions),
        "attrs": utils.copy_state(self.attrs),
        "priority": utils.copy_state(self.priority),
        "send_flow_rem": utils.copy_state(self.send_flow_rem),
    }
def serializedState(self):
    """Serialize the checker-relevant state of this object.

    Starts from the per-node state and adds bookkeeping counters used for
    state comparison.  With DPOR enabled, the action list is sorted first
    so serialization is independent of exploration order.
    """
    state = self.nodesState()
    # NOTE(review): reconstructed from collapsed source — it is assumed that
    # both the sort and the "actionList" entry are guarded by useDpor;
    # confirm against the original file's indentation.
    if self.useDpor:
        self.actionList.sort()
        state["actionList"] = utils.copy_state(self.actionList)
    state["fault_injection_count"] = utils.copy_state(self.fault_injection_count)
    state["switch_failures_count"] = utils.copy_state(self.switch_failures_count)
    state["packet_counters"] = utils.copy_state(self.packet_counters)
    state["useDpor"] = utils.copy_state(self.useDpor)
    return state
def serializedState(self):
    """Serialize the checker-relevant state of this object.

    Same contract as the sibling serializedState: node state plus the
    fault/failure/packet counters; the action list is canonicalized by
    sorting when DPOR is active.
    """
    state = self.nodesState()
    # NOTE(review): reconstructed from collapsed source — it is assumed that
    # both the sort and the "actionList" entry are guarded by useDpor;
    # confirm against the original file's indentation.
    if self.useDpor:
        self.actionList.sort()
        state["actionList"] = utils.copy_state(self.actionList)
    state["fault_injection_count"] = utils.copy_state(
        self.fault_injection_count)
    state["switch_failures_count"] = utils.copy_state(
        self.switch_failures_count)
    state["packet_counters"] = utils.copy_state(self.packet_counters)
    state["useDpor"] = utils.copy_state(self.useDpor)
    return state
def dump_equivalent_state(self):
    """Serialize switch state: node state, command queue, flow table,
    packet store, and (optionally) the state counter.
    """
    filtered_dict = Node.dump_equivalent_state(self)
    # BUG FIX: the loop previously called utils.copy_state(c) and discarded
    # the result, so "command_queue" was always serialized as an empty list.
    filtered_dict["command_queue"] = [
        utils.copy_state(c) for c in self.command_queue
    ]
    filtered_dict["flow_table"] = utils.copy_state(self.flow_table)
    filtered_dict["packet_store"] = utils.copy_state(self.packet_store)
    if self.ALWAYS_NEW_STATE:
        filtered_dict["state_cnt"] = utils.copy_state(self.state_cnt)
    return filtered_dict
def dump_equivalent_state(self):
    """Serialize node state plus component and per-connection buffers,
    with connection keys visited in deterministic (sorted) order.
    """
    filtered_dict = Node.dump_equivalent_state(self)
    filtered_dict["component"] = utils.copy_state(self.component)
    filtered_dict["in_connections"] = {}
    # BUG FIX: dict.keys() returns a view on Python 3, which has no .sort();
    # sorted() works on both Python 2 and 3 and preserves the behavior.
    for j in sorted(self.in_connections.keys()):
        filtered_dict["in_connections"][j] = [
            utils.copy_state(m)
            for m in self.in_connections[j][1]  # get only the buffer
        ]
    return filtered_dict
def dump_equivalent_state(self):
    """Serialize the IP-header fields used for state comparison."""
    filtered_dict = packet_base.dump_equivalent_state(self)
    filtered_dict["srcip"] = utils.copy_state(self.srcip)
    filtered_dict["dstip"] = utils.copy_state(self.dstip)
    filtered_dict["protocol"] = utils.copy_state(self.protocol)
    filtered_dict["tos"] = utils.copy_state(self.tos)
    filtered_dict["id"] = utils.copy_state(self.id)
    filtered_dict["flags"] = utils.copy_state(self.flags)
    filtered_dict["frag"] = utils.copy_state(self.frag)
    filtered_dict["ttl"] = utils.copy_state(self.ttl)
    filtered_dict["csum"] = utils.copy_state(self.csum)
    # BUG FIX: the original tested `next != None`, i.e. the BUILTIN `next`,
    # which is always truthy — the guard was meant to test self.next.
    if self.next is not None:
        filtered_dict["next"] = utils.copy_state(self.next)
    return filtered_dict
def dump_equivalent_state(self):
    """Serialize node state plus component and per-connection buffers,
    with connection keys visited in deterministic (sorted) order.
    """
    filtered_dict = Node.dump_equivalent_state(self)
    filtered_dict["component"] = utils.copy_state(self.component)
    filtered_dict["in_connections"] = {}
    # BUG FIX: dict.keys() returns a view on Python 3, which has no .sort();
    # sorted() works on both Python 2 and 3 and preserves the behavior.
    for j in sorted(self.in_connections.keys()):
        filtered_dict["in_connections"][j] = [
            utils.copy_state(m)
            for m in self.in_connections[j][1]  # get only the buffer
        ]
    return filtered_dict
def dump_equivalent_state(self):
    """Serialize the ARP-header fields used for state comparison."""
    filtered_dict = packet_base.dump_equivalent_state(self)
    filtered_dict["hwtype"] = utils.copy_state(self.hwtype)
    filtered_dict["opcode"] = utils.copy_state(self.opcode)
    filtered_dict["prototype"] = utils.copy_state(self.prototype)
    filtered_dict["hwsrc"] = utils.copy_state(self.hwsrc)
    filtered_dict["hwdst"] = utils.copy_state(self.hwdst)
    filtered_dict["protosrc"] = utils.copy_state(self.protosrc)
    filtered_dict["protodst"] = utils.copy_state(self.protodst)
    # BUG FIX: the dict was built but never returned — every sibling
    # dump_equivalent_state returns its snapshot; this one fell off the end
    # and implicitly returned None.
    return filtered_dict
def dump_equivalent_state(self):
    """Serialize the generic packet fields used for state comparison;
    chains into the encapsulated packet via "next" when present.
    """
    filtered_dict = {}
    filtered_dict["name"] = utils.copy_state(self.name)
    filtered_dict["annotation"] = utils.copy_state(self.annotation)
    filtered_dict["src"] = utils.copy_state(self.src)
    filtered_dict["dst"] = utils.copy_state(self.dst)
    filtered_dict["type"] = utils.copy_state(self.type)
    # Idiom fix: compare to None with `is not` (PEP 8), not `!=`.
    if self.next is not None:
        filtered_dict["next"] = utils.copy_state(self.next)
    return filtered_dict
def train_model(epoch):
    """Run one training epoch of the truncated-BPTT language model.

    Relies on module globals: model, embedding, optimizers, train_x,
    batch_size, seq_length, num_batches, loss_fn, writer, options,
    model_settings, utils.  Hidden state is carried across batches
    (detached via utils.copy_state) — stateful truncated BPTT.
    """
    i = 0  # global step counter for per-step TensorBoard logging
    hidden_init = model.state0(batch_size)
    if options.cuda:
        embedding.cuda()
        model.cuda()
        hidden_init = utils.make_cuda(hidden_init)
    loss_avg = 0
    for s in range(num_batches - 1):
        embed_optimizer.zero_grad()
        model_optimizer.zero_grad()
        # seq_length + 1 tokens: inputs batch[0..T-1], targets batch[1..T]
        batch = Variable(
            train_x.narrow(0, s * seq_length, seq_length + 1).long())
        start = time.time()
        hidden = hidden_init
        if options.cuda:
            batch = batch.cuda()
        loss = 0
        for t in range(seq_length):
            emb = embedding(batch[t])
            hidden, output = model(emb, hidden)
            loss_step = loss_fn(output, batch[t + 1])
            loss += loss_step
            writer.add_scalar('loss per step', loss_step, i)
            i += 1
        writer.add_scalar('loss per batch ', loss, s)
        loss.backward()
        # carry hidden state into the next batch, detached from this graph
        hidden_init = utils.copy_state(hidden)
        gn = utils.calc_grad_norm(model)
        utils.clip_gradient(model, model_settings['clip_gradient'])
        utils.clip_gradient(embedding, model_settings['clip_gradient'])
        embed_optimizer.step()
        model_optimizer.step()
        # exponential moving average of the per-token loss
        loss_avg = .99 * loss_avg + .01 * loss.data[0] / seq_length
        if s % 10 == 0:
            print(
                f'epoch: {epoch} | batch: {s}/{num_batches} | step loss: {loss.data[0] / seq_length} | batch loss: {loss.data[0]} | avg loss: {loss_avg} | time: {time.time() - start}s'
            )
def dump_equivalent_state(self):
    """Only the context is relevant for state comparison."""
    snapshot = {}
    snapshot["ctxt"] = utils.copy_state(self.ctxt)
    return snapshot
def dump_equivalent_state(self):
    """Node state extended with a copy of the ARP table."""
    snapshot = Node.dump_equivalent_state(self)
    snapshot["arp_table"] = utils.copy_state(self.arp_table)
    return snapshot
def dump_equivalent_state(self):
    """Snapshot the enabled actions and the (dict-serialized) ports."""
    return {
        "enabled_actions": utils.copy_state(self.enabled_actions),
        "ports": utils.serialize_dict(self.ports),
    }
def dump_equivalent_state(self):
    """Snapshot the in-move flag and the packet store (key "store")."""
    return {
        "in_move": utils.copy_state(self.in_move),
        "store": utils.copy_state(self.packet_store),
    }
def dump_equivalent_state(self):
    """Only the input buffer matters for equivalence."""
    return {"in_buffer": utils.copy_state(self.in_buffer)}
def dump_equivalent_state(self):
    """Serialize the port attributes used for state comparison."""
    filtered_dict = {}
    filtered_dict["port_no"] = utils.copy_state(self.port_no)
    filtered_dict["state"] = utils.copy_state(self.state)
    # BUG FIX: the dict was built but never returned — the method implicitly
    # returned None, unlike every sibling dump_equivalent_state.
    return filtered_dict
def dump_equivalent_state(self):
    """Snapshot the buffered packet, its pending actions, and the inport."""
    return {
        "packet": utils.copy_state(self.packet),
        "actions": utils.copy_state(self.actions),
        "inport": utils.copy_state(self.inport),
    }
def dump_equivalent_state(self):
    """Only the packet balance matters for equivalence (key "balance")."""
    return {"balance": utils.copy_state(self.packet_balance)}
def dump_equivalent_state(self):
    """Host state extended with the pending host-move flag."""
    snapshot = Host.dump_equivalent_state(self)
    snapshot["move_host"] = utils.copy_state(self.move_host)
    return snapshot
def dump_equivalent_state(self):
    """Snapshot the packet store plus the packet type rendered as a string."""
    return {
        "store": utils.copy_state(self.packet_store),
        "type": str(self.packet_type),
    }
def dump_equivalent_state(self):
    """Host state plus sent-packet list and the input counter.

    Note the key rename: attribute sent_pkts is stored as "sent_packets".
    """
    snapshot = Host.dump_equivalent_state(self)
    snapshot["sent_packets"] = utils.copy_state(self.sent_pkts)
    snapshot["input_counter"] = utils.copy_state(self.input_counter)
    return snapshot
def dump_equivalent_state(self):
    """Snapshot the scheduled call: owning node, target, and arguments."""
    return {
        "node_name": utils.copy_state(self.node_name),
        "target": utils.copy_state(self.target),
        "args": utils.copy_state(self.args),
    }
def dump_equivalent_state(self):
    """Only the exchanges record matters for equivalence."""
    return {"exchanges": utils.copy_state(self.exchanges)}
def dump_equivalent_state(self):
    """Only the packet counters matter for equivalence (key "counts")."""
    return {"counts": utils.copy_state(self.packet_counts)}
def dump_equivalent_state(self):
    """Snapshot the pending command and its arguments."""
    return {
        "command": utils.copy_state(self.command),
        "arguments": utils.copy_state(self.arguments),
    }
def dump_equivalent_state(self):
    """Serialize the TCP-header fields used for state comparison."""
    snapshot = packet_base.dump_equivalent_state(self)
    for field in ("srcport", "dstport", "seq", "ack", "off", "res",
                  "flags", "win", "csum", "urg", "tcplen", "options"):
        snapshot[field] = utils.copy_state(getattr(self, field))
    return snapshot
def dump_equivalent_state(self):
    """Only the payload data matters for equivalence."""
    return {"data": utils.copy_state(self.data)}
def serializedState(self):
    """Serialize invariants keyed by name, visiting them in sorted order
    so the result is deterministic.
    """
    by_name = {inv.name: utils.copy_state(inv)
               for inv in sorted(self.invariants)}
    return utils.copy_state(by_name)
def main():
    """Training driver: epoch loop with optional model averaging, periodic
    evaluation, NaN/threshold early-stopping, LR scheduling, and
    checkpointing.

    Relies on module globals: args, model, avg_model, optimizer,
    lr_scheduler, robust_loss, robust_losses_eval, device, loaders,
    output_dir, average_every, lr_factor_func, eval_train,
    MAX_ALLOWED_LOSS, num_classes, steps_since_averaging.

    NOTE(review): indentation reconstructed from a collapsed one-line dump;
    the nesting of the stats-saving and early-stop checks inside the
    eval-frequency branch is an assumption — confirm against the original.
    """
    global steps_since_averaging
    # clocktime
    time_spent_training = 0.
    time_spent_evaluating = 0.
    train_df = pd.DataFrame()
    eval_df = pd.DataFrame()
    for epoch in range(1, args.epochs + 1):
        logging.info(120 * '=')
        logging.info('Starting epoch %d / %d' % (epoch, args.epochs))
        logging.info('Param group LR''s = %s' % (
            [g['lr'] for g in optimizer.param_groups],))
        logging.info(f'{steps_since_averaging} steps since averaging')
        if args.averaging.startswith('epoch'):
            # Reset the averaged model at epoch boundaries: either whenever
            # the LR factor changes ('auto') or every `average_every` epochs.
            if average_every == 'auto':
                cond = lr_factor_func(epoch - 1) != lr_factor_func(epoch - 2)
            else:
                cond = (epoch - 1) % average_every == 0
            if cond and epoch > 1:
                steps_since_averaging = 1
                copy_state(avg_model, model)
                logging.info('Resetting averaging now')
        time0 = time.time()
        train_data = train(args, model, robust_loss, device, loader_train,
                           optimizer, epoch,
                           avg_model=None if args.averaging == 'none'
                           else avg_model)
        train_df = train_df.append(pd.DataFrame(train_data),
                                   ignore_index=True)
        time1 = time.time()
        time_spent_training += time1 - time0
        logging.info(120 * '=')
        if epoch % args.eval_freq == 0 or epoch == args.epochs:
            time2 = time.time()
            eval_data = {'epoch': int(epoch),
                         'samples': train_df.batch_size.sum(),
                         'train_clocktime': 0.,
                         'val_clocktime': 0.,
                         }
            # Evaluate the averaged model when averaging is enabled,
            # otherwise the raw model.
            if eval_train:
                eval_data.update(
                    eval(epoch,
                         model if args.averaging == 'none' else avg_model,
                         robust_losses_eval, device, 'train',
                         loader_eval_train)
                )
            eval_data.update(
                eval(epoch,
                     model if args.averaging == 'none' else avg_model,
                     robust_losses_eval, device, 'test', loader_eval_test)
            )
            time3 = time.time()
            time_spent_evaluating += time3 - time2
            eval_data.update({
                'train_clocktime': time_spent_training,
                'val_clocktime': time_spent_evaluating,
            })
            eval_df = eval_df.append(
                pd.DataFrame([eval_data], index=[0]), ignore_index=True)
            if not args.save_losses:
                train_df.to_csv(os.path.join(output_dir, 'stats_train.csv'))
                eval_df.to_csv(os.path.join(output_dir, 'stats_eval.csv'))
            else:
                train_df.to_pickle(
                    os.path.join(output_dir, 'stats_train.pickle'))
                eval_df.to_pickle(os.path.join(output_dir,
                                               'stats_eval.pickle'))
            # Early stop on diverged weights or an exploded objective.
            if np.isnan(eval_data['weight_norm']):
                logging.error(
                    'Detected NaN value in weights; breaking training')
                break
            loss_value = (eval_data['test/robust_loss'] +
                          0.5 * args.weight_decay *
                          np.square(eval_data['weight_norm']))
            if np.isnan(loss_value) or loss_value > MAX_ALLOWED_LOSS:
                logging.error(
                    'Loss exceeded threshold value; breaking training')
                break
        lr_scheduler.step()
        # save checkpoint
        if epoch % args.save_freq == 0 or epoch == args.epochs:
            torch.save(dict(num_classes=num_classes,
                            state_dict=model.state_dict()),
                       os.path.join(output_dir,
                                    'checkpoint-epoch{}.pt'.format(epoch)))
            torch.save(optimizer.state_dict(),
                       os.path.join(output_dir,
                                    'opt-checkpoint_epoch{}.tar'.format(epoch)))
def dump_equivalent_state(self):
    """Host state extended with the received-packet counter."""
    snapshot = Host.dump_equivalent_state(self)
    snapshot["received_pkt_count"] = utils.copy_state(self.received_pkt_count)
    return snapshot
def dump_equivalent_state(self):
    """Host state extended with the received-packet counter."""
    snapshot = Host.dump_equivalent_state(self)
    snapshot["received_pkt_count"] = utils.copy_state(
        self.received_pkt_count)
    return snapshot
def dump_equivalent_state(self):
    """Only the context is relevant for state comparison."""
    snapshot = {}
    snapshot["ctxt"] = utils.copy_state(self.ctxt)
    return snapshot
def dump_equivalent_state(self):
    """Snapshot the enabled actions and the (dict-serialized) ports."""
    return {
        "enabled_actions": utils.copy_state(self.enabled_actions),
        "ports": utils.serialize_dict(self.ports),
    }
def dump_equivalent_state(self):
    """Only the flow set matters for equivalence."""
    return {"flows": utils.copy_state(self.flows)}