Code example #1
def _eff_body(from_tensor):
    eff_encoder_result = op_encoder(inputs=from_tensor,
                                    encoder_args=eff_encoder_args,
                                    attention_mask=attention_mask,
                                    encoder_vars_dict=encoder_variables_dict,
                                    sequence_length=sequence_length)
    return eff_encoder_result
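
This loop body is driven by tf.while_loop for timing, as code example #5 below shows:

# Usage (from code example #5): run the EFF encoder `ite` times in one graph call.
eff_while_tensor = tf.while_loop(_cond,
                                 _eff_body,
                                 loop_vars=[from_tensor],
                                 back_prop=False,
                                 maximum_iterations=ite)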
Code example #2
                                               dtype=tf_datatype,
                                               remove_padding=remove_padding,
                                               int8_mode=2)

    tf_encoder_result = tf_encoder(input_tensor=from_tensor,
                                   encoder_args=encoder_Int8_v1_args,
                                   attention_mask=attention_mask)

    encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    encoder_variables_dict = {}
    for v in encoder_vars:
        encoder_variables_dict[v.name] = v

    op_encoder_notInt8_result = op_encoder(
        inputs=from_tensor,
        encoder_args=encoder_notInt8_args,
        attention_mask=attention_mask,
        encoder_vars_dict=encoder_variables_dict,
        sequence_length=sequence_length)

    op_encoder_int8_v1_result = op_encoder(
        inputs=from_tensor,
        encoder_args=encoder_Int8_v1_args,
        attention_mask=attention_mask,
        encoder_vars_dict=encoder_variables_dict,
        sequence_length=sequence_length)

    op_encoder_int8_v2_result = op_encoder(
        inputs=from_tensor,
        encoder_args=encoder_Int8_v2_args,
        attention_mask=attention_mask,
        encoder_vars_dict=encoder_variables_dict,
        sequence_length=sequence_length)
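
The three op_encoder calls above differ only in their argument objects; a sketch of how the INT8 variants could be derived from the non-INT8 one, assuming the copy.deepcopy pattern used in code example #5 (the actual construction is cut off at the top of this fragment):

import copy

# Hypothetical: derive the INT8 argument variants by copying the base
# arguments and switching int8_mode (1 for mode v1, 2 for mode v2).
encoder_Int8_v1_args = copy.deepcopy(encoder_notInt8_args)
encoder_Int8_v1_args.int8_mode = 1
encoder_Int8_v2_args = copy.deepcopy(encoder_notInt8_args)
encoder_Int8_v2_args.int8_mode = 2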
Code example #3
                                                         memory_sequence_length,
                                                         embedding_table,
                                                         decoding_args,
                                                         0,
                                                         kernel_initializer_range,
                                                         bias_initializer_range)

    all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    decoder_var_start_id = 0
    while all_vars[decoder_var_start_id].name.find(
            "transformer/decoding") == -1:
        decoder_var_start_id += 1
    encoder_variables = all_vars[:decoder_var_start_id]
    decoder_variables = all_vars[decoder_var_start_id:]
    op_encoder_result = op_encoder(inputs=from_tensor,
                                   encoder_args=encoder_args,
                                   encoder_vars=encoder_variables,
                                   attention_mask=attention_mask)
    op_encoder_result = tf.reshape(
        op_encoder_result, [batch_size, max_seq_len, encoder_hidden_dim])

    finalized_op_output_ids, finalized_op_sequence_lengths, op_output_ids, \
        op_parent_ids, op_sequence_lengths = op_decoding(op_encoder_result,
                                                         memory_sequence_length,
                                                         embedding_table,
                                                         decoder_variables,
                                                         decoding_args)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
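
The variable split above relies on collection order: every encoder variable precedes the first variable whose name contains "transformer/decoding". An equivalent name-based partition, as a sketch under that same assumption:

# Assumption: encoder and decoder variables are distinguishable by scope name.
encoder_variables = [v for v in all_vars if "transformer/decoding" not in v.name]
decoder_variables = [v for v in all_vars if "transformer/decoding" in v.name]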
Code example #4
                                       dtype=tf_datatype,
                                       remove_padding=remove_padding,
                                       int8_mode=int8_mode)

    tf_encoder_result = tf_encoder(input_tensor=from_tensor,
                                   encoder_args=encoder_args,
                                   attention_mask=attention_mask)

    encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    encoder_variables_dict = {}
    for v in encoder_vars:
        encoder_variables_dict[v.name] = v

    op_encoder_result = op_encoder(inputs=from_tensor,
                                   encoder_args=encoder_args,
                                   attention_mask=attention_mask,
                                   encoder_vars_dict=encoder_variables_dict,
                                   sequence_length=sequence_length)
    '''
    Because FasterTransformer skips some computation for the padding parts,
    the cross-check result would be wrong if we did not mask those parts.
    '''
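    # e.g. sequence_length=[2, 1] with max_seq_len=3 gives the mask
    # [[1, 1, 0], [1, 0, 0]], broadcast over hidden_dim via expand_dims(axis=-1).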
    tf_encoder_result = tf_encoder_result * tf.expand_dims(tf.sequence_mask(
        sequence_length, maxlen=max_seq_len, dtype=tf_datatype),
                                                           axis=-1)
    op_encoder_result = op_encoder_result * tf.expand_dims(tf.sequence_mask(
        sequence_length, maxlen=max_seq_len, dtype=tf_datatype),
                                                           axis=-1)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
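
Code examples #5 and #6 below also call a cross_check helper that is not shown in these fragments. A minimal sketch, assuming it reports whether two results agree within an absolute tolerance:

import numpy as np

# Hypothetical cross_check: print the max absolute difference and whether the
# two results agree element-wise within `atol`.
def cross_check(name, ref, res, atol):
    diff = np.abs(ref.astype(np.float32) - res.astype(np.float32))
    print("[{}] max diff: {:.3e}, within atol {}: {}".format(
        name, diff.max(), atol, bool((diff <= atol).all())))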
Code example #5
def encoder_sample(args_dict):
    print("\n=============== Argument ===============")
    for key in args_dict:
        print("{}: {}".format(key, args_dict[key]))
    print("========================================")

    np.random.seed(1)
    tf.set_random_seed(1)

    batch_size = args_dict['batch_size']
    num_layer = args_dict['num_layer']
    max_seq_len = args_dict['max_seq_len']
    avg_seq_len = args_dict['avg_seq_len']
    head_num = args_dict['head_number']
    size_per_head = args_dict['size_per_head']
    tf_datatype = tf.float32
    np_datatype = np.float32
    atol_threshold = 3e-5
    int8_mode = args_dict['int8_mode']
    allow_gemm_test = args_dict['allow_gemm_test'].lower() == "true"
    if args_dict['data_type'] == "fp16":
        tf_datatype = tf.float16
        np_datatype = np.float16
        atol_threshold = 3e-2

    hidden_dim = head_num * size_per_head

    # Note: this random draw is overwritten by both branches of the if below.
    sequence_length = np.random.randint(1, max_seq_len + 1, size=batch_size)
    if avg_seq_len != -1:
        # Use the requested average length for every sequence; this pairs with
        # the "remove_padding" (EFF) path exercised below.
        sequence_length = np.ones(batch_size) * avg_seq_len
    else:
        sequence_length = np.ones(batch_size) * (max_seq_len / 2)
    sequence_length = sequence_length.astype(np.int32)

    from_data = np.random.randn(batch_size, max_seq_len, hidden_dim)
    from_tensor = tf.convert_to_tensor(from_data, dtype=tf_datatype)
    
    attention_mask = build_sequence_mask(sequence_length, num_heads=head_num, maximum_length=max_seq_len, dtype=tf_datatype)
    
    encoder_args = TransformerArgument(beam_width=1,
                                       head_num=head_num,
                                       size_per_head=size_per_head,
                                       num_layer=num_layer,
                                       dtype=tf_datatype,
                                       remove_padding=False,
                                       int8_mode=int8_mode,
                                       allow_gemm_test=allow_gemm_test)

    eff_encoder_args = copy.deepcopy(encoder_args)
    eff_encoder_args.remove_padding = True

    tf_encoder_result = tf_encoder(input_tensor=from_tensor,
                                   encoder_args=encoder_args,
                                   attention_mask=attention_mask)

    encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    encoder_variables_dict = {}
    for v in encoder_vars:
        encoder_variables_dict[v.name] = v
    
    op_encoder_result = op_encoder(inputs=from_tensor,
                                   encoder_args=encoder_args,
                                   attention_mask=attention_mask,
                                   encoder_vars_dict=encoder_variables_dict,
                                   sequence_length=sequence_length)

    eff_encoder_result = op_encoder(inputs=from_tensor,
                                    encoder_args=eff_encoder_args,
                                    attention_mask=attention_mask,
                                    encoder_vars_dict=encoder_variables_dict,
                                    sequence_length=sequence_length)

    '''
    Because FasterTransformer skips some computation for the padding parts,
    the cross-check result would be wrong if we did not mask those parts.
    '''
    tf_encoder_result = tf_encoder_result * tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
    op_encoder_result = op_encoder_result * tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)
    eff_encoder_result = eff_encoder_result * tf.expand_dims(tf.sequence_mask(sequence_length, maxlen=max_seq_len, dtype=tf_datatype), axis=-1)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1
    
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        for idx, name in enumerate(encoder_variables_dict):
            print((str(idx) + " " + str(name) + " " +
                   str(encoder_variables_dict[name].shape)) + " " + str(encoder_variables_dict[name].dtype))
            
        print("#################################")
        tf_encoder_result_val = sess.run(tf_encoder_result)
        op_encoder_result_val = sess.run(op_encoder_result)
        eff_encoder_result_val = sess.run(eff_encoder_result)

        cross_check("Encoder TF v.s. FT with tensor input", tf_encoder_result_val, op_encoder_result_val, atol_threshold)
        cross_check("Encoder TF v.s. EFF-FT with tensor input", tf_encoder_result_val, eff_encoder_result_val, atol_threshold)
        
        op_diff = abs(tf_encoder_result_val.reshape([-1]) - op_encoder_result_val.reshape([-1]))
        eff_diff = abs(tf_encoder_result_val.reshape([-1]) - eff_encoder_result_val.reshape([-1]))
        max_diff = max(op_diff.max(), eff_diff.max())

        ite = 50
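        # The `_cond`/`_*_body` pairs below drive tf.while_loop so that a single
        # sess.run executes the encoder `ite` times, amortizing per-call overheads.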
        def _cond(from_tensor):
            return tf.constant(True)
            
        def _ft_body(from_tensor):
            op_encoder_result = op_encoder(inputs=from_tensor,
                                            encoder_args=encoder_args,
                                            attention_mask=attention_mask,
                                            encoder_vars_dict=encoder_variables_dict,
                                            sequence_length=sequence_length)
            return op_encoder_result

        def _eff_body(from_tensor):
            eff_encoder_result = op_encoder(inputs=from_tensor,
                                            encoder_args=eff_encoder_args,
                                            attention_mask=attention_mask,
                                            encoder_vars_dict=encoder_variables_dict,
                                            sequence_length=sequence_length)
            return eff_encoder_result

        def _tf_body(from_tensor):
            tf_encoder_result = tf_encoder(input_tensor=from_tensor,
                                            encoder_args=encoder_args,
                                            attention_mask=attention_mask)
            return tf_encoder_result

        tf_while_tensor = tf.while_loop(_cond,
                                        _tf_body,
                                        loop_vars=[from_tensor],
                                        back_prop=False,
                                        maximum_iterations=ite)

        ft_while_tensor = tf.while_loop(_cond,
                                        _ft_body,
                                        loop_vars=[from_tensor],
                                        back_prop=False,
                                        maximum_iterations=ite)

        eff_while_tensor = tf.while_loop(_cond,
                                         _eff_body,
                                         loop_vars=[from_tensor],
                                         back_prop=False,
                                         maximum_iterations=ite)

        if args_dict['test_time'] == 1:

            # tf_time = time_test(sess, tf_encoder_result, ite)
            # ft_time = time_test(sess, op_encoder_result, ite)
            # eff_time = time_test(sess, eff_encoder_result, ite)

            # Using while loop to run 'ite' times to ignore the overheads of memory copy and model preprocess.
            # We use these times as the profiling results.
            tf_while_time = time_test(sess, tf_while_tensor, 1) / ite # while_loop has run ite times
            time.sleep(60)
            ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
            time.sleep(60)
            eff_while_time = time_test(sess, eff_while_tensor, 1) / ite # while_loop has run ite times
            time.sleep(60)
            
            ft_type = args_dict['data_type'].upper()
            if int8_mode != 0:
                ft_type = "INT8-v{}".format(int8_mode)
            
            # print("[INFO] batch_size {} max_seq_len {} precision {} {} layer TF-time     {:6.2f} ms".format(batch_size, max_seq_len, args_dict['data_type'].upper(), num_layer, tf_time))
            # print("[INFO] batch_size {} max_seq_len {} precision {} {} layer FT-OP-time  {:6.2f} ms".format(batch_size, max_seq_len, ft_type, num_layer, ft_time))
            # print("[INFO] batch_size {} max_seq_len {} precision {} {} layer EFF-OP-time {:6.2f} ms".format(batch_size, max_seq_len, ft_type, num_layer, eff_time))

            print("[INFO] batch_size {} max_seq_len {} precision {} {} layer TF-while-time     {:6.2f} ms ( {} iterations)".format(batch_size, max_seq_len, args_dict['data_type'].upper(), num_layer, tf_while_time, ite))
            print("[INFO] batch_size {} max_seq_len {} precision {} {} layer FT-OP-while-time  {:6.2f} ms ( {} iterations)".format(batch_size, max_seq_len, ft_type, num_layer, ft_while_time, ite))
            print("[INFO] batch_size {} max_seq_len {} precision {} {} layer EFF-OP-while-time {:6.2f} ms ( {} iterations)".format(batch_size, max_seq_len, ft_type, num_layer, eff_while_time, ite))


        if args_dict['thread_num'] > 1:
            # Multi-threading demonstration
            thread_list = []
            thread_num = args_dict['thread_num']
            def run():
                ft_while_time = time_test(sess, ft_while_tensor, 1) / ite # while_loop has run ite times
                print("[INFO] batch_size {} max_seq_len {} {} layer FT-OP-while-time {:6.2f} ms with {} threads".format(batch_size,
                    max_seq_len, num_layer, ft_while_time, thread_num))

            for i in range(thread_num):
                thread_list.append(threading.Thread(target=run, name="RunFT"))
            for t in thread_list:
                t.start()
            for t in thread_list:
                t.join()

        return max_diff
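
time_test is likewise defined elsewhere in the sample. A minimal sketch consistent with how it is called above (it must return the average per-run time in milliseconds; the warm-up runs are an assumption):

import time

# Hypothetical time_test: average wall-clock time of `ite` sess.run calls, in ms.
def time_test(sess, tensor, ite, warmup=3):
    for _ in range(warmup):
        sess.run(tensor)
    start = time.time()
    for _ in range(ite):
        sess.run(tensor)
    return (time.time() - start) * 1000.0 / ite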
Code example #6
def encoder_sample(args_dict):
    print("\n=============== Argument ===============")
    for key in args_dict:
        print("{}: {}".format(key, args_dict[key]))
    print("========================================")

    np.random.seed(1)
    tf.set_random_seed(1)

    batch_size = args_dict['batch_size']
    num_layer = args_dict['num_layer']
    max_seq_len = args_dict['max_seq_len']
    avg_seq_len = args_dict['avg_seq_len']
    head_num = args_dict['head_number']
    size_per_head = args_dict['size_per_head']
    remove_padding = args_dict['remove_padding'].lower() == "true"
    tf_datatype = tf.float32
    np_datatype = np.float32
    atol_threshold = 3e-5
    int8_mode = args_dict['int8_mode']
    allow_gemm_test = args_dict['allow_gemm_test'].lower() == "true"
    if args_dict['data_type'] == "fp16":
        tf_datatype = tf.float16
        np_datatype = np.float16
        atol_threshold = 3e-2

    hidden_dim = head_num * size_per_head

    sequence_length = np.random.randint(1, max_seq_len + 1, size=batch_size)
    if avg_seq_len != -1 and remove_padding:
        # With "remove_padding", use the given (smaller) average sequence length.
        sequence_length = np.ones(batch_size) * avg_seq_len
    else:
        sequence_length = np.ones(batch_size) * (max_seq_len / 2)
    sequence_length = sequence_length.astype(np.int32)

    from_data = np.random.randn(batch_size, max_seq_len, hidden_dim)
    from_tensor = tf.convert_to_tensor(from_data, dtype=tf_datatype)

    attention_mask = build_sequence_mask(sequence_length,
                                         num_heads=head_num,
                                         maximum_length=max_seq_len,
                                         dtype=tf_datatype)

    encoder_args = TransformerArgument(beam_width=1,
                                       head_num=head_num,
                                       size_per_head=size_per_head,
                                       num_layer=num_layer,
                                       dtype=tf_datatype,
                                       remove_padding=remove_padding,
                                       int8_mode=int8_mode,
                                       allow_gemm_test=allow_gemm_test)

    tf_encoder_result = tf_encoder(input_tensor=from_tensor,
                                   encoder_args=encoder_args,
                                   attention_mask=attention_mask)

    encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    encoder_variables_dict = {}
    for v in encoder_vars:
        encoder_variables_dict[v.name] = v

    op_encoder_result = op_encoder(inputs=from_tensor,
                                   encoder_args=encoder_args,
                                   attention_mask=attention_mask,
                                   encoder_vars_dict=encoder_variables_dict,
                                   sequence_length=sequence_length)
    '''
    Because FasterTransformer skips some computation for the padding parts,
    the cross-check result would be wrong if we did not mask those parts.
    '''
    tf_encoder_result = tf_encoder_result * tf.expand_dims(tf.sequence_mask(
        sequence_length, maxlen=max_seq_len, dtype=tf_datatype),
                                                           axis=-1)
    op_encoder_result = op_encoder_result * tf.expand_dims(tf.sequence_mask(
        sequence_length, maxlen=max_seq_len, dtype=tf_datatype),
                                                           axis=-1)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
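        # Prepared for FULL_TRACE profiling; this snippet never passes them to
        # sess.run, so they are unused here.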

        for idx, name in enumerate(encoder_variables_dict):
            print((str(idx) + " " + str(name) + " " +
                   str(encoder_variables_dict[name].shape)) + " " +
                  str(encoder_variables_dict[name].dtype))

        print("#################################")
        tf_encoder_result_val = sess.run(tf_encoder_result)
        op_encoder_result_val = sess.run(op_encoder_result)

        cross_check("Encoder TF v.s. FT with tensor input",
                    tf_encoder_result_val, op_encoder_result_val,
                    atol_threshold)
        '''
            Use the numpy arrays as inputs of the FasterTransformer OP.

            This method requires more time for the OP initialization (especially
            for FP16), but the inference is slightly faster than feeding tensors.
        '''
        encoder_variables_dict_2 = {}
        for var, val in zip(encoder_vars, sess.run(encoder_vars)):
            encoder_variables_dict_2[var.name] = val

        # op_encoder_result_2 = op_encoder(inputs=from_tensor,
        #                                 encoder_args=encoder_args,
        #                                 attention_mask=attention_mask,
        #                                 encoder_vars_dict=encoder_variables_dict_2,
        #                                 sequence_length=sequence_length)
        # op_encoder_result_val_2 = sess.run(op_encoder_result_2)
        # cross_check("Encoder TF v.s. FT with numpy input", tf_encoder_result_val,
        #             op_encoder_result_val_2, atol_threshold)

        if args_dict['test_time'] == 1:

            ite = 50
            tf_time = time_test(sess, tf_encoder_result, ite)
            op_time = time_test(sess, op_encoder_result, ite)
            # op_time_2 = time_test(sess, op_encoder_result_2, ite)

            print(
                "[INFO] batch_size {} max_seq_len {} {} layer TF-time {:6.2f} ms"
                .format(batch_size, max_seq_len, num_layer, tf_time))
            print(
                "[INFO] batch_size {} max_seq_len {} {} layer FT-OP-tensor-time {:6.2f} ms"
                .format(batch_size, max_seq_len, num_layer, op_time))
            # print("[INFO] batch_size {} max_seq_len {} {} layer FT-OP-numpy-time {:6.2f} ms".format(batch_size, max_seq_len, num_layer, op_time_2))

        # Return the maximum absolute element-wise difference for the caller.
        return np.abs(tf_encoder_result_val.reshape([-1]) -
                      op_encoder_result_val.reshape([-1])).max()
Code example #7
    tf_cand_emb = tf.reshape(tf_cand_emb, (batch_size, res_cnt, hidden_dim))

    tf_ctx_emb, _ = dot_product_attention(tf_cand_emb, tf_embs, tf_embs, tf.estimator.ModeKeys.PREDICT)
    tf_dot_product = tf.reduce_sum(tf_ctx_emb * tf_cand_emb, axis=-1)

    encoder_variables = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
    '''
    for variables in encoder_variables:
        print(variables)
    '''
    op_ctx_out = op_encoder(inputs=input_a,
                            encoder_args=encoder_args,
                            encoder_vars=encoder_variables,
                            attention_mask=mask_a)
    # with tf.variable_scope("poly_ctx", reuse=tf.AUTO_REUSE):
    #     pm_embedding_table = tf.get_variable(
    #         name="poly_embedding",
    #         shape=[poly_m, hidden_dim],
    #         dtype=tf_datatype,
    #         initializer=tf.truncated_normal_initializer(stddev=initializer_range, dtype=tf_datatype))
    #     poly_code_ids = tf.range(poly_m, dtype=tf.int32)
    #     poly_code_ids = tf.tile(tf.expand_dims(poly_code_ids,0),(batch_size,1))
    #     poly_codes = tf.gather(pm_embedding_table, poly_code_ids)
    # poly_codes is assumed to be defined earlier in the full sample
    # (cf. the commented-out block above).
    transformer_op_module = tf.load_op_library('./lib/libtf_fastertransformer.so')
    op_embs = transformer_op_module.attention(poly_codes, op_ctx_out, op_ctx_out,
                                              batch_size=batch_size,
                                              q_seq_len=poly_m,
                                              hidden_size=hidden_dim,
                                              k_seq_len=seq_len,
                                              attention_type=1)
    # op_embs, _ = dot_product_attention(poly_codes, op_ctx_out, op_ctx_out, tf.estimator.ModeKeys.PREDICT)
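
dot_product_attention is not shown in this fragment. A minimal sketch, assuming standard scaled dot-product attention over [batch, seq, hidden] tensors (the sample's actual helper may differ):

import math
import tensorflow as tf

# Hypothetical dot_product_attention: softmax(QK^T / sqrt(d)) V, returning the
# context and the attention weights (the mode argument is ignored here).
def dot_product_attention(q, k, v, mode):
    d = q.shape[-1].value                               # hidden width
    scores = tf.matmul(q, k, transpose_b=True)          # [batch, q_len, k_len]
    weights = tf.nn.softmax(scores / math.sqrt(d), axis=-1)
    return tf.matmul(weights, v), weights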