def get_action_param_pb(self, action_name, param_name, value):
    """Build a P4Runtime ``Action.Param`` for a single action argument.

    The parameter's id and bitwidth are looked up in the P4Info, and the
    user-supplied value is encoded into the byte string the switch expects.
    """
    param_info = self.get_action_param(action_name, param_name)
    param_pb = p4runtime_pb2.Action.Param()
    param_pb.param_id = param_info.id
    param_pb.value = encode(value, param_info.bitwidth)
    return param_pb
def build_packet_in(self, payload, metadata=None):
    """Assemble a P4Runtime ``PacketIn`` message.

    ``metadata``, when given, maps packet_in metadata field names to values;
    each value is encoded to the bitwidth declared in the P4Info.
    """
    pkt = p4runtime_pb2.PacketIn()
    pkt.payload = payload
    if metadata:
        for meta_name, meta_value in metadata.items():
            meta_info = self.get_packet_metadata("packet_in", meta_name)
            entry = pkt.metadata.add()
            entry.metadata_id = meta_info.id
            entry.value = encode(meta_value, meta_info.bitwidth)
    return pkt
def save_training_data(images, labels, filename):
    """Serialize paired image/label arrays into a TFRecord file.

    Args:
        images: array-like of examples; the first axis indexes examples.
        labels: array-like of labels, same length as ``images``.
        filename: path of the TFRecord file to (over)write.

    Raises:
        ValueError: if ``images`` and ``labels`` disagree on the number
            of examples.
    """
    # Explicit raise instead of `assert`: assertions are silently stripped
    # when Python runs with -O, which would let mismatched inputs through.
    if images.shape[0] != labels.shape[0]:
        raise ValueError(
            "images and labels must have the same number of examples: "
            "%d != %d" % (images.shape[0], labels.shape[0]))
    # NOTE(review): tf.python_io is the TF1.x API — confirm the project
    # still targets TF1 before migrating to tf.io.TFRecordWriter.
    with tf.python_io.TFRecordWriter(filename) as writer:
        # zip pairs each image with its label; clearer than indexing.
        for image, label in zip(images, labels):
            example = encode(image, label)
            writer.write(example.SerializeToString())
def get_match_field_pb(self, table_name, match_field_name, value):
    """ Converts a match field value to something readable by the switch.
        For each match type, the user provides:
            VALID:   <truthy/falsy value>
            EXACT:   <value>
            LPM:     [<string (ip address)>, <prefix length>]
            TERNARY: [<value>, <bit mask>]
            RANGE:   [<low value>, <high value>]
    """
    p4info_match = self.get_match_field(table_name, match_field_name)
    bitwidth = p4info_match.bitwidth
    p4runtime_match = p4runtime_pb2.FieldMatch()
    p4runtime_match.field_id = p4info_match.id
    match_type = p4info_match.match_type
    if match_type == p4info_pb2.MatchField.VALID:
        valid = p4runtime_match.valid
        valid.value = bool(value)
    elif match_type == p4info_pb2.MatchField.EXACT:
        exact = p4runtime_match.exact
        exact.value = encode(value, bitwidth)
    elif match_type == p4info_pb2.MatchField.LPM:
        lpm = p4runtime_match.lpm
        lpm.value = encode(value[0], bitwidth)
        lpm.prefix_len = value[1]
    elif match_type == p4info_pb2.MatchField.TERNARY:
        # Local renamed from the copy-pasted `lpm` so each arm names the
        # submessage it actually populates.
        ternary = p4runtime_match.ternary
        ternary.value = encode(value[0], bitwidth)
        ternary.mask = encode(value[1], bitwidth)
    elif match_type == p4info_pb2.MatchField.RANGE:
        range_match = p4runtime_match.range
        range_match.low = encode(value[0], bitwidth)
        range_match.high = encode(value[1], bitwidth)
    else:
        raise Exception("Unsupported match type with type %r" % match_type)
    return p4runtime_match
def get_match_field_pb(self, table_name, match_field_name, value):
    """ Converts a match field value to something readable by the switch.
        For each match type, the user provides:
            EXACT: <value>
            LPM: [<string (ip address)>, <prefix length>]
            TERNARY: [<value>, <bit mask>]
            RANGE: [<low value>, <high value>]
    """
    field_info = self.get_match_field(table_name, match_field_name)
    width = field_info.bitwidth
    field_pb = p4runtime_pb2.FieldMatch()
    field_pb.field_id = field_info.id
    kind = field_info.match_type
    # Populate the submessage that corresponds to the declared match type.
    if kind == p4info_pb2.MatchField.EXACT:
        field_pb.exact.value = encode(value, width)
    elif kind == p4info_pb2.MatchField.LPM:
        field_pb.lpm.value = encode(value[0], width)
        field_pb.lpm.prefix_len = value[1]
    elif kind == p4info_pb2.MatchField.TERNARY:
        field_pb.ternary.value = encode(value[0], width)
        field_pb.ternary.mask = encode(value[1], width)
    elif kind == p4info_pb2.MatchField.RANGE:
        field_pb.range.low = encode(value[0], width)
        field_pb.range.high = encode(value[1], width)
    else:
        raise Exception("Unsupported match type with type %r" % kind)
    return field_pb
def get_action_param_pb(self, action_name, param_name, value):
    """Encode one action-parameter value as a P4Runtime ``Action.Param``."""
    info = self.get_action_param(action_name, param_name)
    pb = p4runtime_pb2.Action.Param()
    pb.param_id = info.id
    pb.value = encode(value, info.bitwidth)
    return pb
def get_packetout_metadata_pb(self, metadata_name, value):
    """Build a P4Runtime ``PacketMetadata`` for a packet_out metadata field."""
    meta_info = self.get_packetout_meta("packet_out", metadata_name)
    meta_pb = p4runtime_pb2.PacketMetadata()
    meta_pb.metadata_id = meta_info.id
    meta_pb.value = encode(value, meta_info.bitwidth)
    return meta_pb
def main():
    """Entry point: load a trained Tacotron model plus encoder mappings and
    synthesize speech, either for every entry of a synthesis list or for a
    single hard-coded test utterance.

    Side effects: reads checkpoint/JSON/wav files, writes wav files under the
    result directory, and terminates the process via sys.exit(0).
    """
    #---initialize---#
    args = get_test_args()
    HPS = Hps(args.hps_path)
    hps = HPS.get_tuple()
    trainer = get_trainer(args.hps_path, args.encoder_path, hps.g_mode, hps.enc_mode)
    # Refuse to run without an explicit evaluation target speaker id.
    if args.eval_t == 'None':
        print('[Tacotron] - None is not a valid evaluation target! Please specify target manually, must be either V001, or V002.')
        return
    # Tacotron implementation: https://github.com/andi611/TTS-Tacotron-Pytorch
    model = Tacotron(n_vocab=len(symbols),
                     embedding_dim=config.embedding_dim,
                     mel_dim=config.num_mels,
                     linear_dim=config.num_freq,
                     r=config.outputs_per_step,
                     padding_idx=config.padding_idx,
                     attention=config.attention,
                     use_mask=config.use_mask)
    #---handle path---#
    result_dir = os.path.join(args.result_dir, args.sub_result_dir)
    os.makedirs(result_dir, exist_ok=True)
    checkpoint_path = os.path.join(args.ckpt_dir, args.model_name)
    # Warn (but do not fail) when the checkpoint dir does not follow the
    # naming convention for the chosen dataset.
    if args.dataset == 'english' and not os.path.isdir('./ckpt_tacotron_english'):
        print('[Tacotron] - Recommand using the following name for ckpt_dir: ./ckpt_tacotron_english/')
    elif args.dataset == 'surprise' and not os.path.isdir('./ckpt_tacotron_surprise'):
        print('[Tacotron] - Recommand using the following name for ckpt_dir: ./ckpt_tacotron_surprise/')
    #---load and set model---#
    print('[Tacotron] - Testing on the {} set.'.format(args.dataset))
    print('[Tacotron] - Loading model: ', checkpoint_path)
    checkpoint = torch.load(checkpoint_path)
    model.load_state_dict(checkpoint["state_dict"])
    #---load and set mappings---#
    print('[Tacotron] - Loading mapping files: ', args.speaker2id_path)
    valid_arguments(valid_target=args.dataset, arg=args.speaker2id_path)
    with open(args.speaker2id_path, 'r') as f_json:
        speaker2id = json.load(f_json)
    print('[Tacotron] - Loading mapping files: ', args.multi2idx_path)
    with open(args.multi2idx_path, 'r') as f_json:
        multi2idx = json.load(f_json)
    if not args.test_single:
        #---parse testing list---#
        print('[Tacotron] - Testing from list: ', args.synthesis_list)
        valid_arguments(valid_target=args.dataset, arg=args.synthesis_list)
        feeds = []
        with open(args.synthesis_list, 'r') as f:
            file = f.readlines()
        # Each list line appears to be "<dir>/<s_id>_<utt_id>.ext <t_id>";
        # TODO confirm the exact list format against its producer.
        for line in file:
            line = line.split('\n')[0].split(' ')
            feeds.append({
                's_id': line[0].split('/')[1].split('_')[0],
                'utt_id': line[0].split('/')[1].split('_')[1],
                't_id': line[1],
            })
        print('[Tester] - Number of files to be resynthesize: ', len(feeds))
        for feed in tqdm(feeds):
            # Only synthesize entries whose target matches the requested one.
            if feed['t_id'] == args.eval_t:
                wav_path = os.path.join(args.testing_dir, feed['s_id'] + '_' + feed['utt_id'] + '.wav')
                _, spec = get_spectrograms(wav_path)
                # Encode the spectrogram into discrete units, then map each
                # unit to a symbol via multi2idx to form the Tacotron "text".
                encodings = encode(spec, trainer, hps.seg_len, save=False)
                encodings = parse_encodings(encodings)
                line = ''.join([multi2idx[encoding] for encoding in encodings])
                print(line)
                out_path = os.path.join(result_dir, feed['t_id'] + '_' + feed['utt_id'] + '.wav')
                synthesis_speech(model, text=line, path=out_path)
    else:
        # Single-file smoke test on a fixed utterance.
        wav_path = './data/english/train/voice/V002_0674932509.wav'
        # wav_path = './data/english/train/voice/V002_2252538703.wav'
        # wav_path = './data/english/train/voice/V002_1665800749.wav'
        _, spec = get_spectrograms(wav_path)
        encodings = encode(spec, trainer, hps.seg_len, save=False)
        write_encodings(path='./result/result.wav', encodings=encodings)
        parsed_encodings = parse_encodings(encodings)
        line = ''.join([multi2idx[encoding] for encoding in parsed_encodings])
        print(line)
        synthesis_speech(model, text=line, path='./result/result.wav')
    # model.decoder.max_decoder_steps = config.max_decoder_steps # Set large max_decoder steps to handle long sentence outputs
    sys.exit(0)