def fwd_graph(popart_model, torch_model, mapping=None, transform=None): # ------------------- PopART -------------------- config = popart_model.config builder = popart_model.builder sequence_info = popart.TensorInfo( "INT32", [config.batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) positions = builder.addInputTensor(sequence_info) segments = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (config.batch_size * config.sequence_length)).astype( np.int32), positions: np.random.randint(0, config.sequence_length, (config.batch_size * config.sequence_length)).astype( np.int32), segments: np.random.randint(0, 2, (config.batch_size * config.sequence_length)).astype( np.int32) } output = popart_model.build_graph(indices, positions, segments) proto = builder.getModelProto() outputs, post_proto = run_py( proto, data, output, ipus=math.ceil(config.num_layers / config.layers_per_ipu) + popart_model.layer_offset) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = { "input_ids": data[indices].reshape(config.batch_size, config.sequence_length), "position_ids": data[positions].reshape(config.batch_size, config.sequence_length), "token_type_ids": data[segments].reshape(config.batch_size, config.sequence_length) } torch_to_onnx = get_mapping(config, init=mapping) transform_weights = get_transform(config, init=transform) # ------------------- PyTorch ------------------------- # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transform_weights) torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs)
def test_split_embedding(custom_ops, num_splits, weight_transposed, phase):
    """Test serialised embedding.

    Args:
        custom_ops: Custom op module.
        num_splits (int): Number of serialisation steps for the embedding vocab.
        weight_transposed (bool): If True, weights are constructed transposed
            for the embedding layer.
        phase (str): 'fwd' for the forward pass or 'bwd' for the backward pass.
    """
    np.random.seed(1984)

    config = BertConfig(vocab_length=4864,
                        micro_batch_size=1,
                        hidden_size=4096,
                        sequence_length=128,
                        popart_dtype="FLOAT",
                        no_dropout=True,
                        embedding_serialization_vocab_steps=num_splits)

    data, outputs, proto, post_proto = popart_result_and_model(
        config, weight_transposed, is_bwd=(phase == 'bwd'))

    inputs = [
        t.reshape(config.micro_batch_size,
                  config.sequence_length).astype(np.int32) for t in data
    ]

    torch_output, torch_model = pytorch_result_and_model(
        config, inputs, proto, weight_transposed, is_bwd=(phase == 'bwd'))

    check_tensors(torch_output, outputs)

    if phase == 'bwd':
        initializers = get_initializers(post_proto, weight_transposed)
        for name, weight in torch_model.named_parameters():
            check_tensors(weight.data.numpy(), initializers[name])
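# The arguments above (custom_ops, num_splits, weight_transposed, phase) would
# normally be supplied by the test harness. A minimal sketch, assuming a
# pytest-style setup, of the decorator stack that could sit directly above the
# definition of test_split_embedding; the values are illustrative assumptions,
# not taken from the original suite:
#
#     @pytest.mark.parametrize("num_splits", [2, 4])
#     @pytest.mark.parametrize("weight_transposed", [False, True])
#     @pytest.mark.parametrize("phase", ["fwd", "bwd"])
#     def test_split_embedding(custom_ops, num_splits, weight_transposed, phase):
#         ...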
def test_tied_gather_pattern_outlining_correctness(phase, custom_ops):
    train = phase == "bwd"

    outputs_1, proto_1 = session(train,
                                 skip_execution=False,
                                 splits=4,
                                 outline=True)
    outputs_2, proto_2 = session(train,
                                 skip_execution=False,
                                 include_patterns=False,
                                 splits=4,
                                 outline=True)

    check_tensors(outputs_1, outputs_2)
    if train:
        check_onnx_model(proto_1, proto_2)
def test_tied_gather_pattern_correctness(splits, phase, optimizer, custom_ops):
    train = phase == "bwd"

    outputs_1, proto_1 = session(train,
                                 skip_execution=False,
                                 splits=splits,
                                 optim=optimizer)
    outputs_2, proto_2 = session(train,
                                 skip_execution=False,
                                 include_patterns=False,
                                 splits=splits,
                                 optim=optimizer)

    check_tensors(outputs_1, outputs_2)
    if train:
        check_onnx_model(proto_1, proto_2)
def run_models(config, proto, indices, positions, segments, output, popart_model, torch_model): onnx_proto = onnx.load_model_from_string(proto) check_model(torch_model, onnx_proto, get_mapping(config), get_transform(config)) # Run the models popart_inputs = { indices: np.random.randint(0, config.vocab_length, (config.batch_size * config.sequence_length)).astype( np.uint32), positions: np.random.randint( 0, config.sequence_length, (config.batch_size * config.sequence_length), ).astype(np.uint32), segments: np.random.randint( 0, 2, (config.batch_size * config.sequence_length), ).astype(np.uint32), } popart_outputs, post_proto = run_py( proto, popart_inputs, output, ipus=popart_model.total_ipus, ) torch_inputs = { "input_ids": popart_inputs[indices].reshape(config.batch_size, config.sequence_length), "position_ids": popart_inputs[positions].reshape(config.batch_size, config.sequence_length), "token_type_ids": popart_inputs[segments].reshape(config.batch_size, config.sequence_length), } torch_model.eval() torch_outputs = run_fwd_model(torch_inputs, torch_model) check_model(torch_model, post_proto, get_mapping(config), get_transform(config)) check_tensors(torch_outputs, popart_outputs) print("Test succeeded")
def test_activation_function(mode, phase, momentum, micro_batch_size,
                             batch_serialization_factor):
    set_library_seeds(0)

    popart_act_function, pytorch_activation = ACTIVATIONS["Gelu"]
    config = BertConfig(vocab_length=128,
                        micro_batch_size=micro_batch_size,
                        hidden_size=768,
                        sequence_length=128,
                        popart_dtype="FLOAT",
                        no_dropout=True,
                        activation_type=str(popart_act_function))

    data, outputs, proto, post_proto = popart_result_and_model(
        config,
        mode,
        batch_serialization_factor,
        is_bwd=(phase == 'bwd'),
        momentum=momentum)

    inputs = [
        data.reshape(config.micro_batch_size, config.sequence_length,
                     config.hidden_size)
    ]

    #  ------------------- PyTorch -------------------------
    torch_config = TorchBertConfig(config.vocab_length,
                                   config.hidden_size,
                                   config.num_layers,
                                   config.attention_heads,
                                   layer_norm_eps=config.layer_norm_eps,
                                   hidden_dropout_prob=0.,
                                   hidden_act=pytorch_activation)

    torch_output, torch_model = pytorch_result_and_model(
        torch_config,
        inputs,
        proto,
        mode,
        is_bwd=(phase == 'bwd'),
        momentum=momentum)

    check_tensors(torch_output, outputs, margin=7e-6)

    if phase == 'bwd':
        check_model(torch_model,
                    post_proto,
                    TORCH_TO_ONNX[mode],
                    transform=TRANSPOSE_WEIGHTS,
                    margin=7e-6)
def test_weight_decay(weight_decay): lr = 0.01 l1_lambda = 0.1 # ------------------- PopART ------------------------- config = BertConfig(vocab_length=128, batch_size=1, hidden_size=768, sequence_length=128, popart_dtype="FLOAT", no_dropout=True, custom_ops=[], activation_type='Gelu') data, outputs, proto, post_proto = popart_result_and_model( config, weight_decay=weight_decay, lr=lr, l1_lambda=l1_lambda) # ------------------- PyTorch ------------------------- torch_config = TorchBertConfig(config.vocab_length, config.hidden_size, config.num_layers, config.attention_heads, layer_norm_eps=config.layer_norm_eps, hidden_dropout_prob=0., hidden_act=nn.functional.gelu) inputs = [ data.reshape(config.batch_size, config.sequence_length, config.hidden_size) ] torch_output, torch_model = pytorch_result_and_model( torch_config, inputs, proto, weight_decay=weight_decay, lr=lr, l1_lambda=l1_lambda) # ------------------- Check outputs ------------------------- check_tensors(torch_output, outputs) check_model(torch_model, post_proto, TORCH_TO_ONNX, transform=TRANSPOSE_WEIGHTS)
def test_activation_function(activation_function, phase, custom_ops):
    popart_act_function, pytorch_activation = ACTIVATIONS[activation_function]

    config = BertConfig(vocab_length=128,
                        batch_size=1,
                        hidden_size=768,
                        sequence_length=128,
                        popart_dtype="FLOAT",
                        no_dropout=True,
                        custom_ops=[],
                        activation_type=str(popart_act_function))

    data, outputs, proto, post_proto = popart_result_and_model(
        config, is_bwd=(phase == 'bwd'))

    inputs = [
        data.reshape(config.batch_size, config.sequence_length,
                     config.hidden_size)
    ]

    #  ------------------- PyTorch -------------------------
    torch_config = TorchBertConfig(config.vocab_length,
                                   config.hidden_size,
                                   config.num_layers,
                                   config.attention_heads,
                                   layer_norm_eps=config.layer_norm_eps,
                                   hidden_dropout_prob=0.,
                                   hidden_act=pytorch_activation)

    torch_output, torch_model = pytorch_result_and_model(
        torch_config, inputs, proto, is_bwd=(phase == 'bwd'))

    check_tensors(torch_output, outputs)

    if phase == 'bwd':
        check_model(torch_model,
                    post_proto,
                    TORCH_TO_ONNX,
                    transform=TRANSPOSE_WEIGHTS)
def fwd_graph(popart_model, torch_model, mapping=None, transform=None, replication_factor=1, replicated_tensor_sharding=False): # ------------------- PopART -------------------- config = popart_model.config builder = popart_model.builder sequence_info = popart.TensorInfo( "UINT32", [config.micro_batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) positions = builder.addInputTensor(sequence_info) segments = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32), positions: np.random.randint(0, config.sequence_length, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32), segments: np.random.randint(0, 2, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32) } output = popart_model.build_graph(indices, positions, segments) ipus = popart_model.total_ipus proto = builder.getModelProto() outputs, _ = run_py(proto, data, output, replication_factor=replication_factor, replicated_tensor_sharding=replicated_tensor_sharding, ipus=ipus) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = { "input_ids": data[indices].reshape(replication_factor * config.micro_batch_size, config.sequence_length).astype(np.int32), "position_ids": data[positions].reshape(replication_factor * config.micro_batch_size, config.sequence_length).astype(np.int32), "token_type_ids": data[segments].reshape(replication_factor * config.micro_batch_size, config.sequence_length).astype(np.int32) } torch_to_onnx = get_mapping(config, init=mapping) transform_weights = get_transform(config, init=transform) # ------------------- PyTorch ------------------------- # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transform_weights) torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs)
def test_embedding_fwd(custom_ops): # ------------------- PopART -------------------- config = BertConfig(task="SQUAD", vocab_length=9728, micro_batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, inference=True) popart_model = Bert(config) sequence_info = popart.TensorInfo( "UINT32", [config.micro_batch_size * config.sequence_length]) indices = popart_model.builder.addInputTensor(sequence_info) positions = popart_model.builder.addInputTensor(sequence_info) segments = popart_model.builder.addInputTensor(sequence_info) data = { indices: np.random.randint( 0, config.vocab_length, (config.micro_batch_size * config.sequence_length)).astype( np.uint32), positions: np.random.randint( 0, config.max_positional_length, (config.micro_batch_size * config.sequence_length)).astype( np.uint32), segments: np.random.randint( 0, 2, (config.micro_batch_size * config.sequence_length)).astype( np.uint32) } user_options = {"enableStochasticRounding": True} with popart_model.builder.nameScope("Embedding"): output = popart_model.embedding(indices, positions, segments) proto = popart_model.builder.getModelProto() outputs, post_proto = run_py(proto, data, output, user_options=user_options) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[t].reshape(config.micro_batch_size, config.sequence_length).astype(np.int32) for t in [indices, positions, segments] ] # ------------------- PyTorch ------------------------- torch_model = BertEmbeddings( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps)) torch_model.eval() copy_weights_to_torch(torch_model, proto, TORCH_TO_ONNX, {}) torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs, margin=5e-7)
def test_lamb_serialised_pattern_correctness(splits, custom_ops):
    outputs_1, proto_1 = session(splits=1)
    outputs_2, proto_2 = session(splits=splits)

    check_tensors(outputs_1, outputs_2)
    check_onnx_model(proto_1, proto_2)
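# The pattern tests above call check_onnx_model to assert that two serialised
# protos end up with equivalent weights. A rough, self-contained sketch of that
# kind of comparison, assuming the protos are serialised bytes and that weights
# are matched by initializer name; the real helper in this repository may differ.
import numpy as np
import onnx
from onnx import numpy_helper


def compare_onnx_initializers(proto_1, proto_2, rtol=1e-6, atol=1e-6):
    """Compare initializer tensors of two ONNX protos by name (illustrative)."""
    model_1 = onnx.load_model_from_string(proto_1)
    model_2 = onnx.load_model_from_string(proto_2)
    weights_1 = {t.name: numpy_helper.to_array(t) for t in model_1.graph.initializer}
    weights_2 = {t.name: numpy_helper.to_array(t) for t in model_2.graph.initializer}
    assert weights_1.keys() == weights_2.keys(), "Initializer names differ"
    for name in weights_1:
        np.testing.assert_allclose(weights_1[name],
                                   weights_2[name],
                                   rtol=rtol,
                                   atol=atol,
                                   err_msg=f"Mismatch in initializer '{name}'")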
def test_embedding_bwd(custom_ops): l1_lambda = 0.1 # ------------------- PopART -------------------- builder = popart.Builder(opsets={ "ai.onnx": 9, "ai.onnx.ml": 1, "ai.graphcore": 1 }) config = BertConfig(vocab_length=9728, batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, custom_ops=['gather']) popart_model = Bert(config, builder=builder) # Prevent virtualGraph attributes being added to the ops. popart_model.embedding_scope = popart_model.device_scope(None, None) popart_model.embedding_split_scope = popart_model.embedding_scope sequence_info = popart.TensorInfo( "UINT32", [config.batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) positions = builder.addInputTensor(sequence_info) segments = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (config.batch_size * config.sequence_length)).astype( np.uint32), positions: np.random.randint(0, config.max_positional_length, (config.batch_size * config.sequence_length)).astype( np.uint32), segments: np.random.randint(0, 2, (config.batch_size * config.sequence_length)).astype( np.uint32) } output = popart_model.embedding(indices, positions, segments) proto = builder.getModelProto() l1 = popart.L1Loss(output, "l1LossVal", l1_lambda) optimizer = popart.ConstSGD(0.01) outputs, post_proto = run_py( proto, data, output, loss=l1, optimizer=optimizer, user_options={"enableStochasticRounding": True}) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[t].reshape(config.batch_size, config.sequence_length).astype(np.int32) for t in [indices, positions, segments] ] torch_to_onnx = { "word_embeddings.weight": "Embedding_Dict", "position_embeddings.weight": "Positional_Dict", "token_type_embeddings.weight": "Segment_Dict", "LayerNorm.weight": "Gamma", "LayerNorm.bias": "Beta" } transposed_weights = { "word_embeddings.weight": np.transpose, "position_embeddings.weight": np.transpose, } # ------------------- PyTorch ------------------------- torch_model = BertEmbeddings( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps)) # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transform=transposed_weights) optim = torch.optim.SGD(torch_model.parameters(), 0.01, weight_decay=0.0, momentum=0.0) torch_output = torch_model(*[torch.from_numpy(t).long() for t in inputs]) torch_loss = l1_lambda * torch.norm(torch_output, 1) torch_loss.backward() optim.step() torch_outputs = [torch_output.detach().numpy()] check_tensors(torch_outputs, outputs) check_model(torch_model, post_proto, torch_to_onnx, transform=transposed_weights)
def test_embedding_projection_bwd(custom_ops): l1_lambda = 0.1 # ------------------- PopART -------------------- builder = popart.Builder(opsets={ "ai.onnx": 9, "ai.onnx.ml": 1, "ai.graphcore": 1 }) config = BertConfig(vocab_length=9728, embedding_serialization_vocab_steps=4, micro_batch_size=1, hidden_size=288, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, no_cls_layer=False, # Currently updating embedding dict with projection is only # available with momentum. And PopART != Pytorch momentum # due to a bootstrapping step on iter 0. update_embedding_dict=False) popart_model = Bert(config, builder=builder) sequence_info = popart.TensorInfo( "UINT32", [config.micro_batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (config.micro_batch_size * config.sequence_length)).astype( np.uint32) } x = popart_model.gather( indices, config.vocab_length, "Embedding_Dict") x = popart_model.norm(x) x = popart_model.dropout(x) with popart_model.device_scope(nameScope="CLS"): x = popart_model.lm_prediction_head(x) output = popart_model.projection(x) l1 = builder.aiGraphcore.l1loss( [output], l1_lambda, debugPrefix="l1LossVal", reduction=popart.ReductionType.Sum) proto = builder.getModelProto() optimizer = popart.ConstSGD(0.01) outputs, post_proto = run_py(proto, data, output, loss=l1, optimizer=optimizer) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [data[indices].reshape( config.micro_batch_size, config.sequence_length).astype(np.int32)] # ------------------- PyTorch ------------------------- torch_model = EmbeddingProjectionModel( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps, no_cls_layer=config.no_cls_layer, update_embedding_dict=config.update_embedding_dict)) # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, TORCH_TO_ONNX, transform=TRANSPOSE_WEIGHTS) optim = torch.optim.SGD(torch_model.parameters(), 0.01, weight_decay=0.0, momentum=0.0) torch_output = torch_model(*[torch.from_numpy(t).long() for t in inputs]) torch_loss = l1_lambda * torch.norm(torch_output, 1) torch_loss.backward() optim.step() check_tensors([torch_output.detach().numpy()], outputs, margin=1e-5) check_model(torch_model, post_proto, TORCH_TO_ONNX, transform=TRANSPOSE_WEIGHTS)
def test_embedding_fwd(custom_ops): # ------------------- PopART -------------------- builder = popart.Builder(opsets={ "ai.onnx": 9, "ai.onnx.ml": 1, "ai.graphcore": 1 }) config = BertConfig(task="SQUAD", vocab_length=9728, batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, inference=True) popart_model = Bert(config, builder=builder) # Prevent virtualGraph attributes being added to the ops. popart_model.embedding_scope = popart_model.device_scope(None, None) popart_model.embedding_split_scope = popart_model.embedding_scope sequence_info = popart.TensorInfo( "UINT32", [config.batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) positions = builder.addInputTensor(sequence_info) segments = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (config.batch_size * config.sequence_length)).astype( np.uint32), positions: np.random.randint(0, config.max_positional_length, (config.batch_size * config.sequence_length)).astype( np.uint32), segments: np.random.randint(0, 2, (config.batch_size * config.sequence_length)).astype( np.uint32) } output = popart_model.embedding(indices, positions, segments) proto = builder.getModelProto() outputs, post_proto = run_py(proto, data, output) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[t].reshape(config.batch_size, config.sequence_length).astype(np.int32) for t in [indices, positions, segments] ] # ------------------- PyTorch ------------------------- torch_model = BertEmbeddings( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps)) torch_model.eval() copy_weights_to_torch(torch_model, proto, TORCH_TO_ONNX, {}) torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs)
def test_attention_bwd(mode): l1_lambda = 0.1 # ------------------- PopART -------------------- config = BertConfig(task="PRETRAINING", vocab_length=9728, batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, no_attn_dropout=True) popart_model = get_model(config, mode, 'attention') input_info = popart.TensorInfo( config.popart_dtype, [config.batch_size * config.sequence_length, config.hidden_size]) input_tensor = popart_model.builder.addInputTensor(input_info) mask_info = popart.TensorInfo("UINT32", [config.batch_size]) mmask_tensor = popart_model.builder.addInputTensor(mask_info) smask_tensor = popart_model.builder.addInputTensor(mask_info) data = { input_tensor: np.random.normal(0, 0.02, input_info.shape()).astype(config.dtype), mmask_tensor: np.random.randint(0, config.mask_tokens + 1, (config.batch_size, )).astype(np.uint32), smask_tensor: np.random.randint(config.mask_tokens, config.sequence_length + 1, (config.batch_size, )).astype(np.uint32) } user_options = {} if mode == ExecutionMode.PHASED: user_options = { "batchSerializationFactor": 1, "executionPhases": popart_model.total_execution_phases } output = popart_model(input_tensor, [mmask_tensor, smask_tensor]) with popart_model.scope_provider(popart_model.builder, popart_model.norm.scope): l1 = popart_model.builder.aiGraphcore.l1loss( [output], l1_lambda, debugPrefix="l1LossVal", reduction=popart.ReductionType.Sum) else: user_options = {"enableStochasticRounding": True} output = popart_model.attention(input_tensor, [mmask_tensor, smask_tensor]) l1 = popart_model.builder.aiGraphcore.l1loss( [output], l1_lambda, reduction=popart.ReductionType.Sum) proto = popart_model.builder.getModelProto() optimizer = popart.ConstSGD(0.01) outputs, post_proto = run_py(proto, data, (output, l1), loss=l1, optimizer=optimizer, user_options=user_options, execution_mode=mode) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[input_tensor].reshape(config.batch_size, config.sequence_length, config.hidden_size), get_torch_mask(config, [data[mmask_tensor], data[smask_tensor]]) ] split_qkv = { "self.query.weight": lambda arr: arr[:, 0:config.hidden_size].T, "self.key.weight": lambda arr: arr[:, config.hidden_size:config.hidden_size * 2].T, "self.value.weight": lambda arr: arr[:, config.hidden_size * 2:config.hidden_size * 3].T, "output.dense.weight": np.transpose } # ------------------- PyTorch ------------------------- torch_model = BertAttention( TorchBertConfig(config.vocab_length, config.hidden_size, config.num_layers, config.attention_heads, layer_norm_eps=config.layer_norm_eps)) # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, TORCH_TO_ONNX[mode], transform=split_qkv) optim = torch.optim.SGD(torch_model.parameters(), 0.01, weight_decay=0.0, momentum=0.0) torch_output = torch_model(*[torch.from_numpy(t).float() for t in inputs])[0] torch_loss = l1_lambda * torch.norm(torch_output, 1) torch_loss.backward() optim.step() check_tensors([torch_output.detach().numpy()], outputs) check_model(torch_model, post_proto, TORCH_TO_ONNX[mode], transform=split_qkv)
def test_attention_fwd(mode, micro_batch_size, batch_serialisation_factor, number_attention_splits, attention_bias, split_qkv): # ------------------- PopART -------------------- config = BertConfig(task="PRETRAINING", vocab_length=9728, micro_batch_size=micro_batch_size, hidden_size=768, attention_heads=4, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, no_attn_dropout=True, inference=True, split_qkv=split_qkv, attention_bias=attention_bias, num_attention_splits=number_attention_splits) popart_model = get_model(config, mode, 'attention') input_info = popart.TensorInfo( config.popart_dtype, [config.micro_batch_size * config.sequence_length, config.hidden_size]) input_tensor = popart_model.builder.addInputTensor(input_info) mask_info = popart.TensorInfo( "UINT32", [config.micro_batch_size, config.sequence_length]) mmask_tensor = popart_model.builder.addInputTensor(mask_info) smask_tensor = popart_model.builder.addInputTensor(mask_info) data = { input_tensor: np.random.normal(0, 0.02, input_info.shape()).astype(config.dtype), mmask_tensor: np.random.randint(0, config.mask_tokens + 1, ( config.micro_batch_size, config.sequence_length, )).astype(np.uint32), smask_tensor: np.random.randint(config.mask_tokens, config.sequence_length + 1, ( config.micro_batch_size, config.sequence_length, )).astype(np.uint32) } user_options = {} if mode == ExecutionMode.PHASED: user_options = { "batchSerializationFactor": batch_serialisation_factor, "executionPhases": popart_model.total_execution_phases } output = popart_model(input_tensor, [mmask_tensor, smask_tensor]) else: user_options = {"enableStochasticRounding": True} output = popart_model.attention(input_tensor, [mmask_tensor, smask_tensor]) proto = popart_model.builder.getModelProto() outputs, post_proto = run_py(proto, data, output, user_options=user_options, execution_mode=mode) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[input_tensor].reshape(config.micro_batch_size, config.sequence_length, config.hidden_size).astype(np.float32), get_torch_mask(config, [data[mmask_tensor], data[smask_tensor]]) ] # ------------------- PyTorch ------------------------- torch_model = BertAttention( TorchBertConfig(config.vocab_length, config.hidden_size, config.num_layers, config.attention_heads, attention_bias=config.attention_bias, layer_norm_eps=config.layer_norm_eps)) # Turn off dropout torch_model.eval() mapping = TORCH_TO_ONNX[mode] if split_qkv: mapping = TORCH_TO_ONNX_SPLIT_QKV[mode] copy_weights_to_torch(torch_model, proto, mapping, transform=get_transform(split_qkv, config.hidden_size)) # Model to test against torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs)
def test_attention_bwd(mode, momentum, micro_batch_size,
                       batch_serialisation_factor, number_attention_splits,
                       attention_bias):
    l1_lambda = 0.1
    num_reps = 5
    np.random.seed(1984)
    torch.manual_seed(1984)
    split_qkv = False

    #  ------------------- PopART --------------------
    config = BertConfig(task="PRETRAINING",
                        vocab_length=9728,
                        micro_batch_size=micro_batch_size,
                        hidden_size=768,
                        sequence_length=128,
                        activation_type='relu',
                        popart_dtype="FLOAT",
                        no_dropout=True,
                        no_attn_dropout=True,
                        split_qkv=split_qkv,
                        attention_bias=attention_bias,
                        num_attention_splits=number_attention_splits)
    popart_model = get_model(config, mode, 'attention')

    input_info = popart.TensorInfo(
        config.popart_dtype,
        [config.micro_batch_size * config.sequence_length, config.hidden_size])
    input_tensor = popart_model.builder.addInputTensor(input_info)

    mask_info = popart.TensorInfo(
        "UINT32", [config.micro_batch_size, config.sequence_length])
    mmask_tensor = popart_model.builder.addInputTensor(mask_info)
    smask_tensor = popart_model.builder.addInputTensor(mask_info)

    data = {
        input_tensor:
        np.random.normal(0, 0.02, input_info.shape()).astype(config.dtype),
        mmask_tensor:
        np.random.randint(0, config.mask_tokens + 1, (
            config.micro_batch_size,
            config.sequence_length,
        )).astype(np.uint32),
        smask_tensor:
        np.random.randint(config.mask_tokens, config.sequence_length + 1, (
            config.micro_batch_size,
            config.sequence_length,
        )).astype(np.uint32)
    }

    user_options = {}
    if mode == ExecutionMode.PHASED:
        user_options = {
            "batchSerializationFactor": batch_serialisation_factor,
            "executionPhases": popart_model.total_execution_phases
        }
        output = popart_model(input_tensor, [mmask_tensor, smask_tensor])
        with popart_model.scope_provider(popart_model.builder,
                                         popart_model.norm.scope):
            l1 = popart_model.builder.aiGraphcore.l1loss(
                [output],
                l1_lambda,
                debugPrefix="l1LossVal",
                reduction=popart.ReductionType.Sum)
    else:
        user_options = {}
        output = popart_model.attention(input_tensor,
                                        [mmask_tensor, smask_tensor])
        l1 = popart_model.builder.aiGraphcore.l1loss(
            [output], l1_lambda, reduction=popart.ReductionType.Sum)

    proto = popart_model.builder.getModelProto()

    if momentum:
        optimizer = popart.SGD({
            "defaultLearningRate": (0.01, True),
            "defaultMomentum": (momentum, True)
        })
    else:
        optimizer = popart.ConstSGD(0.01)

    outputs, post_proto = run_py(proto,
                                 data, (output, l1),
                                 loss=l1,
                                 optimizer=optimizer,
                                 num_reps=num_reps,
                                 user_options=user_options,
                                 execution_mode=mode)

    #  ----------------- PopART -> PyTorch ----------------
    proto = onnx.load_model_from_string(proto)

    inputs = [
        data[input_tensor].reshape(config.micro_batch_size,
                                   config.sequence_length,
                                   config.hidden_size),
        get_torch_mask(config, [data[mmask_tensor], data[smask_tensor]])
    ]

    #  ------------------- PyTorch -------------------------
    torch_model = BertAttention(
        TorchBertConfig(config.vocab_length,
                        config.hidden_size,
                        config.num_layers,
                        config.attention_heads,
                        attention_bias=config.attention_bias,
                        layer_norm_eps=config.layer_norm_eps))
    # Turn off dropout
    torch_model.eval()

    mapping = TORCH_TO_ONNX[mode]
    if split_qkv:
        mapping = TORCH_TO_ONNX_SPLIT_QKV[mode]

    copy_weights_to_torch(torch_model,
                          proto,
                          mapping,
                          transform=get_transform(split_qkv,
                                                  config.hidden_size))

    optim = torch.optim.SGD(torch_model.parameters(),
                            0.01,
                            weight_decay=0.0,
                            momentum=momentum)

    if momentum:
        for group in optim.param_groups:
            for p in group['params']:
                optim.state[p]['momentum_buffer'] = p.data * 0
                optim.state[p]['exp_avg'] = p.data * 0
                optim.state[p]['exp_avg_sq'] = p.data * 0
                optim.state[p]['step'] = 0

    for _ in range(num_reps):
        torch_output = torch_model(
            *[torch.from_numpy(t).float() for t in inputs])[0]
        torch_loss = l1_lambda * torch.norm(torch_output, 1)
        torch_loss.backward()
        optim.step()
        optim.zero_grad()

    check_tensors([torch_output.detach().numpy()], outputs, margin=6e-07)

    check_model(torch_model,
                post_proto,
                mapping,
                transform=get_transform(split_qkv, config.hidden_size),
                margin=2e-7)
def test_attention_bwd(custom_ops): l1_lambda = 0.1 # ------------------- PopART -------------------- builder = popart.Builder() config = BertConfig(task="PRETRAINING", vocab_length=9728, batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, custom_ops=['attention']) popart_model = Bert(config, builder=builder) input_info = popart.TensorInfo( config.popart_dtype, [config.batch_size * config.sequence_length, config.hidden_size]) input_tensor = builder.addInputTensor(input_info) mask_info = popart.TensorInfo("INT32", [config.batch_size]) mmask_tensor = builder.addInputTensor(mask_info) smask_tensor = builder.addInputTensor(mask_info) data = { input_tensor: np.random.normal(0, 0.02, input_info.shape()).astype(config.dtype), mmask_tensor: np.random.randint(0, config.mask_tokens + 1, (config.batch_size, )).astype(np.int32), smask_tensor: np.random.randint(config.mask_tokens, config.sequence_length + 1, (config.batch_size, )).astype(np.int32) } output = popart_model.attention(input_tensor, [mmask_tensor, smask_tensor]) proto = builder.getModelProto() l1 = popart.L1Loss(output, "l1LossVal", l1_lambda) optimizer = popart.ConstSGD(0.01) outputs, post_proto = run_py(proto, data, (output, l1.output(0)), loss=l1, optimizer=optimizer) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[input_tensor].reshape(config.batch_size, config.sequence_length, config.hidden_size), get_torch_mask(config, [data[mmask_tensor], data[smask_tensor]]) ] torch_to_onnx = { "self.query.weight": "QKV", "self.key.weight": "QKV", "self.value.weight": "QKV", "output.dense.weight": "Out", "output.LayerNorm.weight": "Gamma", "output.LayerNorm.bias": "Beta" } split_qkv = { "self.query.weight": lambda arr: arr[:, 0:config.hidden_size].T, "self.key.weight": lambda arr: arr[:, config.hidden_size:config.hidden_size * 2].T, "self.value.weight": lambda arr: arr[:, config.hidden_size * 2:config.hidden_size * 3].T, "output.dense.weight": np.transpose } # ------------------- PyTorch ------------------------- torch_model = BertAttention( TorchBertConfig(config.vocab_length, config.hidden_size, config.num_layers, config.attention_heads, layer_norm_eps=config.layer_norm_eps)) # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transform=split_qkv) optim = torch.optim.SGD(torch_model.parameters(), 0.01, weight_decay=0.0, momentum=0.0) torch_output = torch_model(*[torch.from_numpy(t).float() for t in inputs])[0] torch_loss = l1_lambda * torch.norm(torch_output, 1) torch_loss.backward() optim.step() check_tensors([torch_output.detach().numpy()], outputs) check_model(torch_model, post_proto, torch_to_onnx, transform=split_qkv)
def test_embedding(test_config, phase):
    # Define inputs
    indices = np.random.randint(
        0, test_config.vocab_size,
        (test_config.batch_size, test_config.sequence_length)).astype(np.int32)
    positions = np.reshape(
        np.arange(test_config.sequence_length),
        (test_config.batch_size, test_config.sequence_length)).astype(np.int32)
    segments = np.random.randint(
        0, 2,
        (test_config.batch_size, test_config.sequence_length)).astype(np.int32)
    inputs = [d for d in [indices, positions, segments]]

    # Build the models
    # PyTorch model
    torch_config = TorchBertConfig(
        vocab_size_or_config_json_file=test_config.vocab_size,
        hidden_size=test_config.hidden_size,
        hidden_act=test_config.hidden_act,
        num_attention_heads=test_config.num_attention_heads,
        hidden_dropout_prob=test_config.hidden_dropout_prob,
        max_position_embeddings=test_config.max_position_embeddings,
        type_vocab_size=test_config.type_vocab_size,
        update_embedding_dict=True,
        layer_norm_eps=test_config.layer_norm_eps)
    torch_model = TorchBertEmbeddings(torch_config)
    torch_model.eval()

    # TF model
    tf_config = TFBertConfig(
        vocab_size=test_config.vocab_size,
        hidden_size=test_config.hidden_size,
        hidden_act=test_config.hidden_act,
        num_attention_heads=test_config.num_attention_heads,
        max_position_embeddings=test_config.max_position_embeddings,
        max_predictions_per_seq=test_config.max_predictions_per_seq,
        hidden_dropout_prob=test_config.hidden_dropout_prob,
        type_vocab_size=test_config.type_vocab_size,
        initializer_range=test_config.initializer_range,
        dtype=test_config.dtype,
        matmul_serialize_factor=test_config.matmul_serialize_factor,
        static_mask=False)

    # Forward check
    if phase == "fwd":
        torch_outputs = run_fwd_model(inputs, torch_model)

        with tf.Graph().as_default():
            tf_model = TFBertModel(tf_config, is_training=True)

            with ops.device('cpu'):
                input_ids = tf.placeholder(
                    shape=[test_config.batch_size, test_config.sequence_length],
                    dtype=tf.int32)
                position_ids = tf.placeholder(
                    shape=[test_config.batch_size, test_config.sequence_length],
                    dtype=tf.int32)
                segment_ids = tf.placeholder(
                    shape=[test_config.batch_size, test_config.sequence_length],
                    dtype=tf.int32)

            cfg = utils.create_ipu_config()
            cfg = utils.auto_select_ipus(cfg, 1)
            utils.configure_ipu_system(cfg)
            utils.move_variable_initialization_to_cpu()

            with ops.device("/device:IPU:0"):
                opt = ipu_compiler.compile(
                    tf_model.embeddings_layer,
                    inputs=[input_ids, position_ids, segment_ids])

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                # Copy the PyTorch weights to TF
                var_and_init = copy_torch_weights_to_tf(
                    torch_model, tf_model, TF_TO_TORCH, {}, sess)
                sess.run(var_and_init)
                # Run the TF forward pass
                tf_outputs = sess.run(
                    opt, {
                        input_ids: indices,
                        position_ids: positions,
                        segment_ids: segments
                    })
                # Compare TF output with PyTorch output
                check_tensors(tf_outputs, torch_outputs, margin=1.5e-8)

    # Backward check
    elif phase == "bwd":
        l1_lambda = 0.1
        base_lr = 0.01

        # PyTorch backward pass
        optim = torch.optim.SGD(torch_model.parameters(),
                                base_lr,
                                weight_decay=0.0,
                                momentum=0.0)
        torch_output = torch_model(
            *[torch.from_numpy(t).long() for t in inputs])
        torch_loss = l1_lambda * torch.norm(torch_output, 1)
        torch_loss.backward()  # calculate gradients
        optim.step()  # update weights
        torch_outputs = [torch_output.detach().numpy()]

        # TF backward pass
        with tf.Graph().as_default():
            tf_model = TFBertModel(tf_config, is_training=True)

            with ops.device('cpu'):
                input_ids = tf.placeholder(
                    shape=[test_config.batch_size, test_config.sequence_length],
                    dtype=tf.int32)
                position_ids = tf.placeholder(
                    shape=[test_config.batch_size, test_config.sequence_length],
                    dtype=tf.int32)
                segment_ids = tf.placeholder(
                    shape=[test_config.batch_size, test_config.sequence_length],
                    dtype=tf.int32)

            cfg = utils.create_ipu_config()
            cfg = utils.auto_select_ipus(cfg, 1)
            utils.configure_ipu_system(cfg)
            utils.move_variable_initialization_to_cpu()

            def embedding_graph(input_ids, position_ids, segment_ids):
                embedding_output = tf_model.embeddings_layer(
                    input_ids, position_ids, segment_ids)
                l1_loss = l1_lambda * tf.norm(embedding_output, 1)
                optimizer = tf.train.GradientDescentOptimizer(base_lr)
                train_step = optimizer.minimize(l1_loss)
                return embedding_output, l1_loss, train_step

            with ops.device("/device:IPU:0"):
                opt = ipu_compiler.compile(
                    embedding_graph,
                    inputs=[input_ids, position_ids, segment_ids])

            with tf.Session() as sess:
                sess.run(tf.global_variables_initializer())
                var_and_init = copy_torch_weights_to_tf(
                    torch_model, tf_model, TF_TO_TORCH, {}, sess)
                sess.run(var_and_init)
                tvars = sess.run({v.name: v for v in tf.trainable_variables()})
                print(tvars)

                tf_outputs, tf_loss = sess.run(
                    opt, {
                        input_ids: indices,
                        position_ids: positions,
                        segment_ids: segments
                    })

                # Compare the updated weights and the forward output
                check_tf_torch_model(sess,
                                     torch_model,
                                     TF_TO_TORCH,
                                     margin=5e-7)
                check_tensors(torch_outputs, tf_outputs, margin=5e-7)

    else:
        raise ValueError(
            "`phase` must be 'fwd' or 'bwd' (forward or backward pass).")
def test_attention_fwd(custom_ops): # ------------------- PopART -------------------- builder = popart.Builder() config = BertConfig(task="PRETRAINING", vocab_length=9728, batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, custom_ops=['attention'], inference=True) popart_model = Bert(config, builder=builder) input_info = popart.TensorInfo( config.popart_dtype, [config.batch_size * config.sequence_length, config.hidden_size]) input_tensor = builder.addInputTensor(input_info) mask_info = popart.TensorInfo("INT32", [config.batch_size]) mmask_tensor = builder.addInputTensor(mask_info) smask_tensor = builder.addInputTensor(mask_info) data = { input_tensor: np.random.normal(0, 0.02, input_info.shape()).astype(config.dtype), mmask_tensor: np.random.randint(0, config.mask_tokens + 1, (config.batch_size, )).astype(np.int32), smask_tensor: np.random.randint(config.mask_tokens, config.sequence_length + 1, (config.batch_size, )).astype(np.int32) } output = popart_model.attention(input_tensor, [mmask_tensor, smask_tensor]) proto = builder.getModelProto() outputs, post_proto = run_py(proto, data, output) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[input_tensor].reshape(config.batch_size, config.sequence_length, config.hidden_size).astype(np.float32), get_torch_mask(config, [data[mmask_tensor], data[smask_tensor]]) ] torch_to_onnx = { "self.query.weight": "QKV", "self.key.weight": "QKV", "self.value.weight": "QKV", "output.dense.weight": "Out", "output.LayerNorm.weight": "Gamma", "output.LayerNorm.bias": "Beta" } split_qkv = { "self.query.weight": lambda arr: arr[:, 0:config.hidden_size].T, "self.key.weight": lambda arr: arr[:, config.hidden_size:config.hidden_size * 2].T, "self.value.weight": lambda arr: arr[:, config.hidden_size * 2:config.hidden_size * 3].T, "output.dense.weight": np.transpose } # ------------------- PyTorch ------------------------- torch_model = BertAttention( TorchBertConfig(config.vocab_length, config.hidden_size, config.num_layers, config.attention_heads, layer_norm_eps=config.layer_norm_eps)) # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transform=split_qkv) # Model to test against torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs)
def test_embedding_projection_fwd(custom_ops): # ------------------- PopART -------------------- builder = popart.Builder(opsets={ "ai.onnx": 9, "ai.onnx.ml": 1, "ai.graphcore": 1 }) config = BertConfig(vocab_length=9728, embedding_serialization_vocab_steps=4, micro_batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, no_cls_layer=False, inference=True) popart_model = Bert(config, builder=builder) sequence_info = popart.TensorInfo( "UINT32", [config.micro_batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (config.micro_batch_size * config.sequence_length)).astype( np.uint32) } x = popart_model.gather( indices, config.vocab_length, "Embedding_Dict") x = popart_model.norm(x) x = popart_model.dropout(x) with popart_model.builder.nameScope("CLS"): x = popart_model.lm_prediction_head(x) output = popart_model.projection(x) proto = builder.getModelProto() outputs, post_proto = run_py(proto, data, output) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [data[indices].reshape( config.micro_batch_size, config.sequence_length).astype(np.int32)] # ------------------- PyTorch ------------------------- torch_model = EmbeddingProjectionModel( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps, no_cls_layer=config.no_cls_layer)) torch_model.eval() copy_weights_to_torch(torch_model, proto, TORCH_TO_ONNX, TRANSPOSE_WEIGHTS) torch_model.tie_weights() torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs)
def test_embedding_projection_bwd(custom_ops): l1_lambda = 0.1 # ------------------- PopART -------------------- builder = popart.Builder(opsets={ "ai.onnx": 9, "ai.onnx.ml": 1, "ai.graphcore": 1 }) config = BertConfig(vocab_length=9728, batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, custom_ops=['gather']) popart_model = Bert(config, builder=builder) sequence_info = popart.TensorInfo( "INT32", [config.batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (config.batch_size * config.sequence_length)).astype( np.int32) } x = popart_model.embedding_custom( indices, config.vocab_length, "Embedding_Dict", detach=True) x = popart_model.norm(x) x = popart_model.dropout(x) with popart_model.device_scope(nameScope="CLS"): x = popart_model.lm_prediction_head(x) output = popart_model.projection(x) proto = builder.getModelProto() l1 = popart.L1Loss(output, "l1LossVal", l1_lambda) optimizer = popart.ConstSGD(0.01) outputs, post_proto = run_py(proto, data, output, loss=l1, optimizer=optimizer, user_options={"enableStochasticRounding": True}) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [data[indices].reshape(config.batch_size, config.sequence_length)] # ------------------- PyTorch ------------------------- torch_model = EmbeddingProjectionModel( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps)) # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transform=transposed_weights) optim = torch.optim.SGD(torch_model.parameters(), 0.01, weight_decay=0.0, momentum=0.0) torch_output = torch_model(*[torch.from_numpy(t).long() for t in inputs]) torch_loss = l1_lambda * torch.norm(torch_output, 1) torch_loss.backward() optim.step() check_tensors([torch_output.detach().numpy()], outputs, margin=1e-5) check_model(torch_model, post_proto, torch_to_onnx, transform=transposed_weights)
def test_embedding_fwd(custom_ops): # ------------------- PopART -------------------- builder = popart.Builder(opsets={ "ai.onnx": 9, "ai.onnx.ml": 1, "ai.graphcore": 1 }) config = BertConfig(vocab_length=9728, batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, custom_ops=['gather'], inference=True) popart_model = Bert(config, builder=builder) # Prevent virtualGraph attributes being added to the ops. popart_model.embedding_scope = popart_model.device_scope(None, None) popart_model.embedding_split_scope = popart_model.embedding_scope sequence_info = popart.TensorInfo( "UINT32", [config.batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) positions = builder.addInputTensor(sequence_info) segments = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (config.batch_size * config.sequence_length)).astype( np.uint32), positions: np.random.randint(0, config.max_positional_length, (config.batch_size * config.sequence_length)).astype( np.uint32), segments: np.random.randint(0, 2, (config.batch_size * config.sequence_length)).astype( np.uint32) } # Use the custom embedding for layout output = popart_model.embedding(indices, positions, segments) proto = builder.getModelProto() outputs, post_proto = run_py( proto, data, output, user_options={"enableStochasticRounding": True}) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[t].reshape(config.batch_size, config.sequence_length).astype(np.int32) for t in [indices, positions, segments] ] torch_to_onnx = { "word_embeddings.weight": "Embedding_Dict", "position_embeddings.weight": "Positional_Dict", "token_type_embeddings.weight": "Segment_Dict", "LayerNorm.weight": "Gamma", "LayerNorm.bias": "Beta" } transposed_weights = { "word_embeddings.weight": np.transpose, "position_embeddings.weight": np.transpose, } # ------------------- PyTorch ------------------------- torch_model = BertEmbeddings( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps)) torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transposed_weights) torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs)
def test_embedding_projection_fwd(custom_ops): # ------------------- PopART -------------------- builder = popart.Builder(opsets={ "ai.onnx": 9, "ai.onnx.ml": 1, "ai.graphcore": 1 }) config = BertConfig(vocab_length=9728, batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, custom_ops=['gather'], inference=True) popart_model = Bert(config, builder=builder) sequence_info = popart.TensorInfo( "INT32", [config.batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (config.batch_size * config.sequence_length)).astype( np.int32) } x = popart_model.embedding_custom( indices, config.vocab_length, "Embedding_Dict", detach=True) x = popart_model.norm(x) x = popart_model.dropout(x) with popart_model.device_scope(nameScope="CLS"): x = popart_model.lm_prediction_head(x) output = popart_model.projection(x) proto = builder.getModelProto() outputs, post_proto = run_py(proto, data, output, user_options={"enableStochasticRounding": True}) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [data[indices].reshape(config.batch_size, config.sequence_length)] # ------------------- PyTorch ------------------------- torch_model = EmbeddingProjectionModel( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps)) torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transposed_weights) torch_model.tie_weights() torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs)
def test_embedding_bwd(custom_ops): # ------------------- PopART -------------------- config = BertConfig(task="SQUAD", vocab_length=9728, micro_batch_size=1, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, update_embedding_dict=True) popart_model = Bert(config) # Prevent virtualGraph attributes being added to the ops sequence_info = popart.TensorInfo( "UINT32", [config.micro_batch_size * config.sequence_length]) indices = popart_model.builder.addInputTensor(sequence_info) positions = popart_model.builder.addInputTensor(sequence_info) segments = popart_model.builder.addInputTensor(sequence_info) data = { indices: np.random.randint( 0, config.vocab_length, (config.micro_batch_size * config.sequence_length)).astype( np.uint32), positions: np.random.randint( 0, config.max_positional_length, (config.micro_batch_size * config.sequence_length)).astype( np.uint32), segments: np.random.randint( 0, 2, (config.micro_batch_size * config.sequence_length)).astype( np.uint32) } optimizer = popart.ConstSGD(0.01) l1_lambda = 0.1 with popart_model.builder.nameScope("Embedding"): output = popart_model.embedding(indices, positions, segments) l1 = popart_model.builder.aiGraphcore.l1loss( [output], l1_lambda, debugContext="l1LossVal", reduction=popart.ReductionType.Sum) num_reps = 5 proto = popart_model.builder.getModelProto() outputs, post_proto = run_py(proto, data, output, ipus=1, loss=l1, num_reps=num_reps, optimizer=optimizer) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[t].reshape(config.micro_batch_size, config.sequence_length).astype(np.int32) for t in [indices, positions, segments] ] # ------------------- PyTorch ------------------------- torch_model = BertEmbeddings( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps, update_embedding_dict=config.update_embedding_dict)) # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, TORCH_TO_ONNX, {}) optim = torch.optim.SGD(torch_model.parameters(), 0.01) for _ in range(num_reps): torch_output = torch_model( *[torch.from_numpy(t).long() for t in inputs]) torch_loss = l1_lambda * torch.norm(torch_output, 1) torch_loss.backward() optim.step() optim.zero_grad() torch_outputs = [torch_output.detach().numpy()] check_tensors(torch_outputs, outputs, margin=7e-6) check_model(torch_model, post_proto, TORCH_TO_ONNX, {}, margin=7e-06)
def test_load_from_chkpt(config_path, chkpt_path, custom_ops): """ Compare the model loaded into our popart model against the modified PyTorch model: - Load tf weights into BERT using torch impl -> run fwd model - Load tf weights into BERT using popart impl -> run fwd model - Compare output tensors """ config = load_bert_config_tf(config_path) builder = popart.Builder(opsets={ "ai.onnx": 9, "ai.onnx.ml": 1, "ai.graphcore": 1 }) # Load Torch version torch_model = TorchModel( TorchBertConfig( config.vocab_length, config.hidden_size, num_hidden_layers=config.num_layers, num_attention_heads=config.attention_heads, intermediate_size=config.ff_size, hidden_act="relu", max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps, mask_tokens=config.mask_tokens, )) torch_model.eval() torch_model = load_tf_weights_in_bert(torch_model, config, chkpt_path) # Load Popart model sequence_info = popart.TensorInfo( "INT32", [config.batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) positions = builder.addInputTensor(sequence_info) popart_model, proto, output = load_from_tf(chkpt_path, True, config, indices, positions, builder=builder) # Run the models popart_inputs = { indices: np.random.randint(0, config.vocab_length, (config.batch_size * config.sequence_length)).astype( np.int32), positions: np.random.randint( 0, config.sequence_length, (config.batch_size * config.sequence_length), ).astype(np.int32), } torch_inputs = { "input_ids": popart_inputs[indices].reshape(config.batch_size, config.sequence_length), "position_ids": popart_inputs[positions].reshape(config.batch_size, config.sequence_length), } torch_outputs = run_fwd_model(torch_inputs, torch_model) popart_outputs, post_proto = run_py( proto, popart_inputs, output, ipus=math.ceil(config.num_layers / config.layers_per_ipu) + 1, ) check_tensors(torch_outputs, popart_outputs) print("Test succeeded")
def bwd_graph(popart_model, torch_model, popart_loss_fn, torch_loss_fn, mapping=None, transform=None): np.random.seed(1984) random.seed(1984) torch.manual_seed(1984) # ------------------- PopART -------------------- config = popart_model.config builder = popart_model.builder sequence_info = popart.TensorInfo( "UINT32", [config.batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) positions = builder.addInputTensor(sequence_info) segments = builder.addInputTensor(sequence_info) data = { indices: np.random.randint( 0, config.vocab_length, (config.batch_size * config.sequence_length)).astype(np.uint32), positions: np.random.randint( 0, config.sequence_length, (config.batch_size * config.sequence_length)).astype(np.uint32), segments: np.random.randint( 0, 2, (config.batch_size * config.sequence_length)).astype(np.uint32) } output = popart_model.build_graph(indices, positions, segments) proto = builder.getModelProto() losses = popart_loss_fn(output) optimizer = popart.ConstSGD(0.01) outputs, post_proto = run_py( proto, data, output, loss=losses, optimizer=optimizer, ipus=math.ceil(config.num_layers / config.layers_per_ipu) + popart_model.layer_offset) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = { "input_ids": data[indices].reshape(config.batch_size, config.sequence_length).astype(np.int32), "position_ids": data[positions].reshape(config.batch_size, config.sequence_length).astype(np.int32), "token_type_ids": data[segments].reshape(config.batch_size, config.sequence_length).astype(np.int32) } torch_to_onnx = get_mapping(config, init=mapping) transform_weights = get_transform(config, init=transform) # ------------------- PyTorch ------------------------- # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transform_weights) optim = torch.optim.SGD(torch_model.parameters(), 0.01, weight_decay=0.0, momentum=0.0) torch_outputs = torch_model( **{k: torch.from_numpy(t).long() for k, t in inputs.items()}) torch_loss = torch_loss_fn(torch_outputs) torch_loss.backward() optim.step() check_tensors([output.detach().numpy() for output in torch_outputs], outputs) check_model(torch_model, post_proto, torch_to_onnx, transform_weights, margin=6e-7)
def test_embedding_fwd(custom_ops, mode, batch_size, batch_serialization_factor, embedding_serialization_vocab_steps): # ------------------- PopART -------------------- config = BertConfig( task="SQUAD", vocab_length=9728, batch_size=batch_size, hidden_size=768, sequence_length=128, activation_type='relu', popart_dtype="FLOAT", no_dropout=True, inference=True, embedding_serialization_vocab_steps=embedding_serialization_vocab_steps ) popart_model = get_model(config, mode, 'embedding') sequence_info = popart.TensorInfo( "UINT32", [config.batch_size * config.sequence_length]) indices = popart_model.builder.addInputTensor(sequence_info) positions = popart_model.builder.addInputTensor(sequence_info) segments = popart_model.builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (config.batch_size * config.sequence_length)).astype( np.uint32), positions: np.random.randint(0, config.max_positional_length, (config.batch_size * config.sequence_length)).astype( np.uint32), segments: np.random.randint(0, 2, (config.batch_size * config.sequence_length)).astype( np.uint32) } user_options = {} if mode == ExecutionMode.PHASED: user_options = { "batchSerializationFactor": batch_serialization_factor, "executionPhases": popart_model.total_execution_phases } output = popart_model(indices, positions, segments) else: user_options = {"enableStochasticRounding": True} with popart_model.builder.nameScope("Embedding"): output = popart_model.embedding(indices, positions, segments) proto = popart_model.builder.getModelProto() outputs, post_proto = run_py(proto, data, output, user_options=user_options, execution_mode=mode) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = [ data[t].reshape(config.batch_size, config.sequence_length).astype(np.int32) for t in [indices, positions, segments] ] # ------------------- PyTorch ------------------------- torch_model = BertEmbeddings( TorchBertConfig(config.vocab_length, config.hidden_size, max_position_embeddings=config.max_positional_length, layer_norm_eps=config.layer_norm_eps)) torch_model.eval() expanded_name_map, remapped_transform_map = expand_torch_to_onnx_map( TORCH_TO_ONNX[mode], config, mode) copy_weights_to_torch(torch_model, proto, expanded_name_map, remapped_transform_map) torch_outputs = run_fwd_model(inputs, torch_model) check_tensors(torch_outputs, outputs, margin=5e-7)
def bwd_graph(popart_model, torch_model, popart_loss_fn, torch_loss_fn, mapping=None, transform=None, replication_factor=1, replicated_tensor_sharding=False, opt_type="SGD"): np.random.seed(1984) random.seed(1984) torch.manual_seed(1984) # ------------------- PopART -------------------- config = popart_model.config builder = popart_model.builder sequence_info = popart.TensorInfo( "UINT32", [config.micro_batch_size * config.sequence_length]) indices = builder.addInputTensor(sequence_info) positions = builder.addInputTensor(sequence_info) segments = builder.addInputTensor(sequence_info) data = { indices: np.random.randint(0, config.vocab_length, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32), positions: np.random.randint(0, config.sequence_length, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32), segments: np.random.randint(0, 2, (replication_factor, config.micro_batch_size * config.sequence_length)).astype(np.uint32) } num_reps = 5 output = popart_model.build_graph(indices, positions, segments) ipus = popart_model.total_ipus loss = popart_loss_fn(output) proto = builder.getModelProto() if opt_type == "SGD": optimizer = popart.ConstSGD(1e-3) elif opt_type == "LAMB": optMap = { "defaultLearningRate": (1e-3, True), "defaultBeta1": (0.9, True), "defaultBeta2": (0.999, True), "defaultWeightDecay": (0.0, True), "maxWeightNorm": (10.0, True), "defaultEps": (1e-8, True), "lossScaling": (1.0, True), } optimizer = popart.Adam(optMap, mode=popart.AdamMode.Lamb) elif opt_type == "LAMB_NO_BIAS": optMap = { "defaultLearningRate": (1, False), "defaultBeta1": (0, False), "defaultBeta2": (0, False), "defaultWeightDecay": (0.0, False), "defaultEps": (1e-8, False), "lossScaling": (1.0, False), } optimizer = popart.Adam(optMap, mode=popart.AdamMode.LambNoBias) else: raise ValueError(f"Unknown opt_type={opt_type}") outputs, post_proto = run_py( proto, data, output, loss=loss, optimizer=optimizer, replication_factor=replication_factor, replicated_tensor_sharding=replicated_tensor_sharding, ipus=ipus, num_reps=num_reps) # ----------------- PopART -> PyTorch ---------------- proto = onnx.load_model_from_string(proto) inputs = { "input_ids": data[indices].reshape(replication_factor * config.micro_batch_size, config.sequence_length).astype(np.int32), "position_ids": data[positions].reshape(replication_factor * config.micro_batch_size, config.sequence_length).astype(np.int32), "token_type_ids": data[segments].reshape(replication_factor * config.micro_batch_size, config.sequence_length).astype(np.int32) } torch_to_onnx = get_mapping(config, init=mapping) transform_weights = get_transform(config, init=transform) # ------------------- PyTorch ------------------------- # Turn off dropout torch_model.eval() copy_weights_to_torch(torch_model, proto, torch_to_onnx, transform_weights) if opt_type == "SGD": optim = torch.optim.SGD(torch_model.parameters(), 1e-3, weight_decay=0.0, momentum=0.0) elif opt_type == "LAMB": optim = torch_lamb.Lamb(torch_model.parameters(), lr=1e-3, weight_decay=0.0, biasCorrection=True) for _ in range(num_reps): torch_outputs = torch_model( **{k: torch.from_numpy(t).long() for k, t in inputs.items()}) torch_loss = torch_loss_fn(torch_outputs) torch_loss.backward() optim.step() optim.zero_grad() check_tensors([output.detach().numpy() for output in torch_outputs], outputs, margin=1.5e-06) check_model(torch_model, post_proto, torch_to_onnx, transform_weights, margin=5e-5)
def embedding_bwd(custom_ops,
                  mode,
                  momentum,
                  batch_size,
                  batch_serialization_factor,
                  embedding_serialization_vocab_steps,
                  vocab_length=9728,
                  hidden_size=768):
    # ------------------- PopART --------------------
    config = BertConfig(
        task="SQUAD",
        vocab_length=vocab_length,
        batch_size=batch_size,
        hidden_size=hidden_size,
        sequence_length=128,
        activation_type='relu',
        popart_dtype="FLOAT",
        no_dropout=True,
        update_embedding_dict=True,
        embedding_serialization_vocab_steps=embedding_serialization_vocab_steps)

    popart_model = get_model(config, mode, 'embedding')
    # Prevent virtualGraph attributes being added to the ops

    sequence_info = popart.TensorInfo(
        "UINT32", [config.batch_size * config.sequence_length])
    indices = popart_model.builder.addInputTensor(sequence_info)
    positions = popart_model.builder.addInputTensor(sequence_info)
    segments = popart_model.builder.addInputTensor(sequence_info)
    data = {
        indices:
        np.random.randint(0, config.vocab_length,
                          (config.batch_size * config.sequence_length)).astype(
                              np.uint32),
        positions:
        np.random.randint(0, config.max_positional_length,
                          (config.batch_size * config.sequence_length)).astype(
                              np.uint32),
        segments:
        np.random.randint(0, 2,
                          (config.batch_size * config.sequence_length)).astype(
                              np.uint32)
    }

    if momentum:
        optimizer = popart.SGD({
            "defaultLearningRate": (0.01, True),
            "defaultMomentum": (momentum, True),
            "defaultDampening": (0.0, True),
            "defaultVelocityScaling": (1.0, True),
            "lossScaling": (1.0, True),
            "defaultWeightDecay": (0.0, True)
        })
    else:
        optimizer = popart.ConstSGD(0.01)

    l1_lambda = 0.1

    if mode == ExecutionMode.PHASED:
        user_options = {
            "batchSerializationFactor": batch_serialization_factor,
            "executionPhases": popart_model.total_execution_phases,
        }
        output = popart_model(indices, positions, segments)
        with popart_model.scope_provider(popart_model.builder,
                                         popart_model.norm.scope):
            l1 = popart_model.builder.aiGraphcore.l1loss(
                [output],
                l1_lambda,
                debugPrefix="l1LossVal",
                reduction=popart.ReductionType.Sum)
    else:
        user_options = {"enableStochasticRounding": True}
        with popart_model.builder.nameScope("Embedding"):
            output = popart_model.embedding(indices, positions, segments)
        l1 = popart_model.builder.aiGraphcore.l1loss(
            [output],
            l1_lambda,
            debugPrefix="l1LossVal",
            reduction=popart.ReductionType.Sum)

    num_reps = 5
    proto = popart_model.builder.getModelProto()
    outputs, post_proto = run_py(proto,
                                 data,
                                 output,
                                 ipus=1,
                                 loss=l1,
                                 num_reps=num_reps,
                                 optimizer=optimizer,
                                 user_options=user_options,
                                 execution_mode=mode)

    # ----------------- PopART -> PyTorch ----------------
    proto = onnx.load_model_from_string(proto)

    inputs = [
        data[t].reshape(config.batch_size,
                        config.sequence_length).astype(np.int32)
        for t in [indices, positions, segments]
    ]

    # ------------------- PyTorch -------------------------
    torch_model = BertEmbeddings(
        TorchBertConfig(config.vocab_length,
                        config.hidden_size,
                        max_position_embeddings=config.max_positional_length,
                        layer_norm_eps=config.layer_norm_eps,
                        update_embedding_dict=config.update_embedding_dict))
    # Turn off dropout
    torch_model.eval()

    expanded_name_map, remapped_transform_map = expand_torch_to_onnx_map(
        TORCH_TO_ONNX[mode], config, mode)
    copy_weights_to_torch(torch_model, proto, expanded_name_map,
                          remapped_transform_map)

    optim = torch.optim.SGD(torch_model.parameters(),
                            0.01,
                            weight_decay=0.0,
                            dampening=0.0,
                            momentum=momentum)

    if momentum > 0.:
        for group in optim.param_groups:
            for p in group['params']:
                optim.state[p]['momentum_buffer'] = p.data * 0
                optim.state[p]['exp_avg'] = p.data * 0
                optim.state[p]['exp_avg_sq'] = p.data * 0
                optim.state[p]['step'] = 0

    for _ in range(num_reps):
        torch_output = torch_model(
            *[torch.from_numpy(t).long() for t in inputs])
        torch_loss = l1_lambda * torch.norm(torch_output, 1)
        torch_loss.backward()
        optim.step()
        optim.zero_grad()

    torch_outputs = [torch_output.detach().numpy()]

    check_tensors(torch_outputs, outputs, margin=7e-6)

    expanded_name_map, remapped_transform_map = expand_torch_to_onnx_map(
        TORCH_TO_ONNX[mode], config, mode)
    check_model(torch_model,
                post_proto,
                expanded_name_map,
                remapped_transform_map,
                margin=7e-06)
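# Hypothetical pytest entry point (an assumption, not present in the source):
# embedding_bwd has no test_ prefix, so the suite presumably drives it through
# parametrized wrappers along these lines. The ExecutionMode.DEFAULT member,
# the momentum values, and the serialization steps shown here are illustrative
# only, and "import pytest" is assumed at the top of the module.
@pytest.mark.parametrize("momentum", [0.0, 0.984375])
@pytest.mark.parametrize("embedding_serialization_vocab_steps", [1, 4])
def test_embedding_bwd_example(custom_ops, momentum,
                               embedding_serialization_vocab_steps):
    embedding_bwd(
        custom_ops,
        ExecutionMode.DEFAULT,  # assumed enum member for the non-phased path
        momentum,
        batch_size=1,
        batch_serialization_factor=1,
        embedding_serialization_vocab_steps=embedding_serialization_vocab_steps)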