def combine_results(files, diff=False):
    """Merge per-seed result pickles into one dict keyed by experiment config."""
    all_results = {}
    for path in files:
        with gfile.FastGFile(path, 'rb') as fh:
            data = pickle.load(fh)
        for k in data:
            if isinstance(k, tuple):
                # Drop bulky per-run arrays that are not needed downstream
                # (the default avoids a KeyError if a field is absent).
                data[k].pop('noisy_targets', None)
                data[k].pop('indices', None)
                data[k].pop('selected_inds', None)
                data[k].pop('sampler_output', None)
                # The last tuple element is the random seed; the first ten
                # identify the experiment configuration.
                seed = k[-1]
                key = tuple(k[0:10])
                if key in all_results:
                    if seed not in all_results[key]['random_seeds']:
                        all_results[key]['random_seeds'].append(seed)
                        # Stack per-seed arrays row-wise; 'n_points' is scalar.
                        for field in [fld for fld in data[k] if fld != 'n_points']:
                            all_results[key][field] = np.vstack(
                                (all_results[key][field], data[k][field]))
                else:
                    all_results[key] = data[k]
                    all_results[key]['random_seeds'] = [seed]
            else:
                all_results[k] = data[k]
    return all_results
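# A minimal usage sketch (the glob pattern is a hypothetical path, not from
# the original; gfile.Glob is the TF 1.x file-matching helper):
result_files = gfile.Glob('results/*.pkl')
merged = combine_results(result_files)
for key, entry in merged.items():
    if isinstance(key, tuple):
        print(key, 'seeds:', entry['random_seeds'])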
def extract(input_tensor, prefix):
    """Import the frozen MobileNetV2 graph and return content/style tensors.

    Relies on the module-level names `content` (a tensor name) and
    `styletensors` (a list of tensor names).
    """
    with gfile.FastGFile(
            r"D:\Users\yl_gong\Desktop\dl\mobilenet_v2_0.35_224\mobilenet_v2_0.35_224_frozen.pb",
            'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        # Feed our tensor into the graph's "input:0" placeholder and scope
        # every imported op under `prefix`.
        tf.import_graph_def(graph_def,
                            input_map={"input:0": input_tensor},
                            name=prefix)
        # Debug: tf.get_default_graph().get_operations() lists imported ops.

    graph = tf.get_default_graph()
    contenttensor = graph.get_tensor_by_name(prefix + "/" + content)
    styles = [graph.get_tensor_by_name(prefix + "/" + t) for t in styletensors]
    return contenttensor, styles
    # Sanity check (commented out in the original):
    # input = tf.get_default_graph().get_tensor_by_name("input:0")
    # output = tf.get_default_graph().get_tensor_by_name("import/MobilenetV2/Predictions/Reshape_1:0")
    # sess = tf.Session()
    # print(sess.run(output, feed_dict={input: im}))


#extract(tf.placeholder(dtype='float32', shape=(None, 224, 224, 3),name="input"))
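# `extract` reads the module-level names `content` and `styletensors`. A
# hedged sketch of defining them; the layer names below are illustrative
# assumptions, not taken from the original script.
content = 'MobilenetV2/Conv_1/Relu6:0'
styletensors = ['MobilenetV2/expanded_conv_2/output:0',
                'MobilenetV2/expanded_conv_5/output:0']
inp = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
content_t, style_ts = extract(inp, 'style')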
Example no. 3
def load_graph(pb_path):
    '''
    Load the model graph used for feature extraction.
    :param pb_path: path to the .pb file
    :return: None
    '''
    with gfile.FastGFile(pb_path, 'rb') as fd:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(fd.read())
        _ = tf.import_graph_def(graph_def, name='')
Example no. 4
def load_pretrained_inception_v3(model_file):
    '''
    Import the pretrained computation graph (the part worth close attention).
    :param model_file: path to the graph file
    :return: None
    '''
    with gfile.FastGFile(model_file, 'rb') as f:
        graph_def = tf.GraphDef()  # build an empty GraphDef
        graph_def.ParseFromString(f.read())  # parse the file contents into it

        _ = tf.import_graph_def(graph_def, name='')  # import it into the default graph
Example no. 5
    def __init__(self):
        """Creates and loads pretrained deeplab model."""
        self.graph = tf.Graph()

        graph_def = None
        with gfile.FastGFile(self.FROZEN_GRAPH_NAME, 'rb') as file_handle:
            graph_def = tf.GraphDef.FromString(file_handle.read())

        if graph_def is None:
            raise RuntimeError('Cannot find inference graph in tar archive.')

        with self.graph.as_default():
            tf.import_graph_def(graph_def, name='')

        self.sess = tf.Session(graph=self.graph)
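# A hedged inference sketch for the wrapper above. `DeepLabModel` is an
# assumed class name (the listing omits it), and the tensor names follow the
# public DeepLab demo, so treat them as assumptions too.
model = DeepLabModel()
image = Image.open('demo.jpg').convert('RGB')  # hypothetical input image
batch = np.expand_dims(np.asarray(image, dtype=np.uint8), axis=0)
seg_map = model.sess.run('SemanticPredictions:0',
                         feed_dict={'ImageTensor:0': batch})[0]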
Example no. 6
def img_to_vector(base_dir):
    with tf.Session() as sess:
        # Bottleneck of the pretrained Inception graph, shape (1, 2048).
        second_to_last_tensor = sess.graph.get_tensor_by_name("pool_3/_reshape:0")
        batch_features = []
        test_data = []
        test_labels = []
        for i in os.listdir(base_dir):
            img = cv2.imread(os.path.join(base_dir, i))
            img = cv2.resize(img, (448, 448))  # resize
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # OpenCV loads BGR; convert to RGB
            test_data.append(img)
            test_labels.append(i.split("_")[0])  # label is the filename prefix
            img_data = gfile.FastGFile(os.path.join(base_dir, i), "rb").read()  # raw JPEG bytes
            feature_vector = sess.run(second_to_last_tensor,
                                      feed_dict={"DecodeJpeg/contents:0": img_data})
            batch_features.append(feature_vector)
        batch_features = np.vstack(batch_features)
    return batch_features, test_data, test_labels
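# `img_to_vector` assumes an Inception-v3 graph is already imported into the
# default graph; a hedged pairing with load_pretrained_inception_v3 from the
# example above (the .pb path and data directory are assumptions):
load_pretrained_inception_v3('model/classify_image_graph_def.pb')
features, images, labels = img_to_vector('data/test')
print(features.shape)  # (n_images, 2048) bottleneck vectors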
Example no. 7
def convert_voice(wav, sampling_rate=22050, frame_period=5.0, n_frames=128):
    """Convert a speaker-A waveform to speaker B through the frozen graph.

    Relies on module-level pitch/spectral statistics (log_f0s_*_A/B,
    coded_sps_*_mean/std), num_mcep, pretrain_dir, and the WORLD helpers.
    """
    print('Generating Validation Data B from A...')
    wav = wav_padding(wav=wav,
                      sr=sampling_rate,
                      frame_period=frame_period,
                      multiple=4)
    f0, timeaxis, sp, ap = world_decompose(wav=wav,
                                           fs=sampling_rate,
                                           frame_period=frame_period)
    f0_converted = pitch_conversion(f0=f0,
                                    mean_log_src=log_f0s_mean_A,
                                    std_log_src=log_f0s_std_A,
                                    mean_log_target=log_f0s_mean_B,
                                    std_log_target=log_f0s_std_B)
    coded_sp = world_encode_spectral_envelop(sp=sp,
                                             fs=sampling_rate,
                                             dim=num_mcep)
    coded_sp_transposed = coded_sp.T
    coded_sp_norm = (coded_sp_transposed - coded_sps_A_mean) / coded_sps_A_std

    with tf.Session() as sess:
        with gfile.FastGFile(join(pretrain_dir, "graph.pb"), 'rb') as f:
            graph_def = tf.GraphDef()
            graph_def.ParseFromString(f.read())
            _ = tf.import_graph_def(graph_def, name='')
            coded_sp_converted_norm = sess.run(
                'generator_A2B/out_squeeze:0',
                feed_dict={'input_A_real:0': np.array([coded_sp_norm])})[0]

    if coded_sp_converted_norm.shape[1] > len(f0):
        coded_sp_converted_norm = coded_sp_converted_norm[:, :-1]
    coded_sp_converted = coded_sp_converted_norm * coded_sps_B_std + coded_sps_B_mean
    coded_sp_converted = coded_sp_converted.T
    coded_sp_converted = np.ascontiguousarray(coded_sp_converted)
    decoded_sp_converted = world_decode_spectral_envelop(
        coded_sp=coded_sp_converted, fs=sampling_rate)
    wav_transformed = world_speech_synthesis(f0=f0_converted,
                                             decoded_sp=decoded_sp_converted,
                                             ap=ap,
                                             fs=sampling_rate,
                                             frame_period=frame_period)

    return wav_transformed
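# A hedged driver for convert_voice; the wav path is an assumption, and
# librosa/soundfile are used here only for illustration.
import librosa
import soundfile as sf
wav, _ = librosa.load('data/speaker_A/utt001.wav', sr=22050, mono=True)
wav_b = convert_voice(wav, sampling_rate=22050)
sf.write('converted_A2B.wav', wav_b, 22050)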
Example no. 8
num_batches = int(len(all_img_names) / batch_size)
if len(all_img_names) % batch_size != 0:
    num_batches += 1  # one extra batch for the leftover files

all_img_names = list(all_img_names)
with tf.Session() as sess:
    second_to_last_tensor = sess.graph.get_tensor_by_name("pool_3:0")
    for i in range(num_batches):
        batch_img_names = all_img_names[i * batch_size:(i + 1) * batch_size]
        batch_features = []
        for img_name in batch_img_names:
            img_path = os.path.join(input_img_dir, img_name)
            logging.info('processing img %s' % img_name)
            if not gfile.Exists(img_path):
                continue
            img_data = gfile.FastGFile(img_path, 'rb').read()
            feature_vector = sess.run(second_to_last_tensor,
                                      feed_dict={"DecodeJpeg/contents:0": img_data})

            batch_features.append(feature_vector)

        batch_features = np.vstack(batch_features)
        output_filename = os.path.join(output_folder,
                                       "image_features_%d.pickle" % i)
        #logging.info('writing to file %s' % output_filename)

        # Pickle data is binary, so the file must be opened in 'wb' mode.
        with gfile.GFile(output_filename, 'wb') as f:
            pickle.dump((batch_img_names, batch_features), f)
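# A hedged read-back check for the pickles written above:
with gfile.GFile(os.path.join(output_folder, 'image_features_0.pickle'),
                 'rb') as f:
    names, feats = pickle.load(f)
print(len(names), feats.shape)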

Example no. 9
def create_image_lists(sess, testing_percentage, validation_percentage):
    sub_dirs = [x[0] for x in os.walk(INPUT_DATA)]
    is_root_dir = True
    # Initialize the data sets.
    training_images = []
    training_labels = []
    testing_images = []
    testing_labels = []
    validation_images = []
    validation_labels = []
    current_label = 0

    # Walk every sub-directory (one directory per class).
    for sub_dir in sub_dirs:
        if is_root_dir:
            is_root_dir = False
            continue
        # Collect all image files in this sub-directory.
        extensions = ['jpg', 'jpeg', 'JPG', 'JPEG']
        file_list = []
        dir_name = os.path.basename(sub_dir)
        for extension in extensions:
            file_glob = os.path.join(INPUT_DATA, dir_name, '*.' + extension)
            # glob.glob returns the list of all matching file paths.
            file_list.extend(glob.glob(file_glob))
        if not file_list:
            continue
        size = 0
        # Process the image data.
        for file_name in file_list:
            # Read and decode the image, then resize it to 224x224 so the
            # model can process it.
            image_raw_data = gfile.FastGFile(file_name, 'rb').read()
            image = tf.image.decode_jpeg(image_raw_data)
            if image.dtype != tf.float32:
                image = tf.image.convert_image_dtype(image, dtype=tf.float32)
            image = tf.image.resize_images(image, [224, 224])
            image_value = sess.run(image)
            # Randomly split the data set.
            chance = np.random.randint(100)
            if chance < validation_percentage:
                validation_images.append(image_value)
                validation_labels.append(current_label)
            elif chance < (testing_percentage + validation_percentage):
                testing_images.append(image_value)
                testing_labels.append(current_label)
            else:
                training_images.append(image_value)
                training_labels.append(current_label)
            size += 1
            # Keep only 100 images per class to shorten the running time.
            if size == 100:
                break
        current_label += 1
        print("current_label: %d" % current_label)

    # Shuffle the training data for better training results; reusing the RNG
    # state keeps images and labels aligned.
    state = np.random.get_state()
    np.random.shuffle(training_images)
    np.random.set_state(state)
    np.random.shuffle(training_labels)
    return np.asarray([training_images, training_labels,
                       validation_images, validation_labels,
                       testing_images, testing_labels])
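# A hedged driver; INPUT_DATA is the module-level dataset root, and the
# percentages and output path below are assumptions.
with tf.Session() as sess:
    processed_data = create_image_lists(sess, testing_percentage=10,
                                        validation_percentage=10)
np.save('flower_processed_data.npy', processed_data)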
Example no. 10
# map classes to 12 wanted classes:
# 'silence unknown', 'stop down off right up go on yes left no'
# models were trained with 32 classes (including the known unknowns):
# 'silence unknown', 'sheila nine stop bed four six down bird marvin cat off right seven eight up three happy go zero on wow dog yes five one tree house two left no'  # noqa
# Note: This is NOT simply summing up the probabilities for
# the unknown classes (even though it would sum up to 1).
mapped_classes, unknown_classes = [], []
mapped_classes.append(all_probs[0])  # silence
unknown_classes.append(all_probs[1])  # unknown unknown
# this is safe as we defined them in the same order
# (e.g. down comes before stop)
for i, c in enumerate(all_classes):
  if c in wanted_classes:
    mapped_classes.append(all_probs[i + 2])
  else:
    unknown_classes.append(all_probs[i + 2])

unknown_classes = tf.stack(unknown_classes)
mapped_classes = [mapped_classes[0], tf.reduce_max(unknown_classes)] + \
    mapped_classes[1:]
mapped_probs = tf.nn.softmax(tf.stack(mapped_classes), name=FINAL_TENSOR_NAME)

frozen_graph_def = graph_util.convert_variables_to_constants(
    sess, sess.graph.as_graph_def(),
    [FINAL_TENSOR_NAME])

with gfile.FastGFile(FROZEN_PATH, 'wb') as f:
  f.write(frozen_graph_def.SerializeToString())

print("Wrote frozen graph to: %s" % FROZEN_PATH)
Example no. 11
# The first two lines below are reconstructed; the original listing begins
# mid-snippet at `num_batches += 1`.
num_batches = int(len(all_img_names) / batch_size)
if len(all_img_names) % batch_size != 0:
    num_batches += 1  # put the leftover files in one extra batch
with tf.Session() as sess:
    second_to_last_tensor = sess.graph.get_tensor_by_name(
        "pool_3/_reshape:0"
    )  # Tensor("pool_3/_reshape:0", shape=(1, 2048), dtype=float32)
    # dev_summary_writer = tf.summary.FileWriter(r'C:\Users\Tim\Desktop\transfer_learning(精简)\summary', graph=sess.graph)
    # exit()
    for i in range(num_batches):
        batch_img_names = all_img_names[i * batch_size:(i + 1) *
                                        batch_size]  # take names batch by batch
        batch_labels = all_img_labels[i * batch_size:(i + 1) *
                                      batch_size]  # take labels batch by batch
        batch_features = []
        for img_name in batch_img_names:
            img_path = os.path.join(input_img_dir, img_name)
            if not os.path.exists(img_path):
                raise Exception("%s doesn't exist" % img_path)
            img_data = gfile.FastGFile(img_path, "rb").read()  # raw JPEG bytes
            feature_vector = sess.run(
                second_to_last_tensor,
                feed_dict={"DecodeJpeg/contents:0": img_data})
            batch_features.append(feature_vector)
        batch_features = np.vstack(batch_features)
        output_filename = os.path.join(output_folder,
                                       "image_features-%d.pickle" % i)
        print(output_filename, "....ok......")
        # Pickle data is binary, so open the file in 'wb' mode.
        with gfile.GFile(output_filename, 'wb') as f:
            pickle.dump((batch_img_names, batch_features, batch_labels), f)
    # image preprocessing finished
Example no. 12
def load_pretrained_inception_v3(model_file):
    with gfile.FastGFile(model_file, "rb") as f:
        graph_def = tf.GraphDef()  # construct an empty GraphDef
        graph_def.ParseFromString(f.read())  # read the serialized graph into it
        _ = tf.import_graph_def(graph_def, name="")  # import into the default graph
Example no. 13
output_pred = tf.pad(output_pred, ((0, 0), (CROP_HEIGHT, 0), (0, 0)))
# output_pred = (1.0 - output_pred)
output_pred *= ALPHA
output_pred = tf.stack((output_pred, output_pred, output_pred), axis=-1)
blended_pred = tf.add((1.0 - output_pred) * rgb_preview_input,
                      output_pred * ROAD_COLOR,
                      name=FINAL_TENSOR_NAME)

names = [n.name for n in sess.graph.as_graph_def().node]  # for inspecting node names

tf.summary.FileWriter('ok', graph=sess.graph)
# embed()
optimized_graph_def = graph_util.convert_variables_to_constants(
    sess, sess.graph.as_graph_def(), [FINAL_TENSOR_NAME])

with gfile.FastGFile(FREEZED_PATH, 'wb') as f:
    f.write(optimized_graph_def.SerializeToString())

print("Starting graph optimization ... ")
transforms = [
    'strip_unused_nodes(type=float, shape="1,160,576,3")',
    'remove_nodes(op=Identity, op=CheckNumerics)',
    'fold_constants(ignore_errors=false)', 'fold_batch_norms',
    'fuse_resize_pad_and_conv', 'fuse_resize_and_conv', 'fuse_pad_and_conv',
    'fold_old_batch_norms', 'remove_device', 'round_weights(num_steps=256)',
    'strip_unused_nodes'
]

for transform in transforms:
    try:
        print("Starting transform: `%s` ... " % transform)
        # The original listing is truncated here. The standard Graph Transform
        # Tool call (an assumption, not shown in the source) would be:
        #   optimized_graph_def = TransformGraph(
        #       optimized_graph_def, [INPUT_TENSOR_NAME],
        #       [FINAL_TENSOR_NAME], [transform])
    except Exception as e:
        print("Transform `%s` failed: %s" % (transform, e))
Example no. 14
                    queuesize = 1
                else:
                    queuesize = args.queuesize

                q = queue.Queue(maxsize=queuesize)
                qin = queue.Queue(maxsize=queuesize)
                event = Event()

                # Load DNN model
                precision = args.precision
                fullmodeldir = directory + modeldir
                print("Loading model from " + fullmodeldir + "/Gmodel")
                if frontend == 'tensorflow':
                    sess = Session()
                    graph_def = GraphDef()
                    with gfile.FastGFile(fullmodeldir + '/Gmodel.pb',
                                         'rb') as f:
                        graph_def.ParseFromString(f.read())
                        import_graph_def(graph_def, name='')
                    output_layer = 'g_output/Reshape:0'
                    for n in graph_def.node:
                        if n.op == 'Placeholder':
                            input_node = n.name + ':0'
                    prob_tensor = sess.graph.get_tensor_by_name(output_layer)
                    del n, graph_def, output_layer
                else:
                    g_opt = Adam(lr=0.0002)  # Define optimizers
                    json_file = open(fullmodeldir + "/Gmodel.json", "r")
                    loaded_model_json = json_file.read()
                    json_file.close()
                    G_loaded = model_from_json(loaded_model_json)
                    G_loaded.compile(loss='mean_squared_error',
                                     optimizer=g_opt)  # call truncated in the listing; completed with the optimizer defined above (an assumption)
Example no. 15
from train import deepnn

K.set_session(sess)  # `sess` is created elsewhere in the original script
K.set_learning_phase(0)

INPUT_TENSOR_NAME = 'tl_classifier_in'
FINAL_TENSOR_NAME = 'tl_classifier_out/Softmax'
FREEZED_PATH = 'tf_files/frozen_classifier.pb'

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--weight_path",
                        type=str,
                        default='ckpt/some.hdf5',
                        help="Path to hdf5 weight file.")
    args = parser.parse_args()

    if os.path.exists('tf_files'):
        shutil.rmtree('tf_files')
    os.makedirs('tf_files')

    model = deepnn()
    model.load_weights(args.weight_path)
    frozen = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), [FINAL_TENSOR_NAME])

    with gfile.FastGFile(FREEZED_PATH, 'wb') as f:
        f.write(frozen.SerializeToString())

    print("Done! Wrote graph to `tf_files`")
Example no. 16
def load_pretrained_inception_v3(model_file):
    with gfile.FastGFile(model_file, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name="")
Example no. 17
def _read_pb(pb_dir):
    with gfile.FastGFile(pb_dir, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
    return graph_def
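# A hedged usage sketch for _read_pb; the path is an assumption.
graph_def = _read_pb('model/frozen.pb')
tf.import_graph_def(graph_def, name='')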
Example no. 18
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--data_tensor',
                        type=str,
                        default='decoded_sample_data',
                        help="""\
      Input data tensor name. Leave as is for the
      competition.\
      """)
    parser.add_argument('--final_tensor',
                        type=str,
                        default='labels_softmax',
                        help="""\
      Name of the softmax output tensor. Leave as is for the
      competition.\
      """)
    parser.add_argument('--frozen_path',
                        type=str,
                        default='tf_files/frozen.pb',
                        help="""\
      The frozen graph's filename.\
      """)
    parser.add_argument('--checkpoint_path',
                        type=str,
                        default='checkpoints_106/ep-062-vl-0.1815.hdf5',
                        help="""\
      Path to the hdf5 checkpoint that you want to freeze.\
      """)
    args, unparsed = parser.parse_known_args()
    custom_objects = {
        'relu6': relu6,
        'DepthwiseConv2D': DepthwiseConv2D,
        'overlapping_time_slice_stack': overlapping_time_slice_stack,
        'softmax': softmax,
        '<lambda>': smooth_categorical_crossentropy
    }

    model = load_model(args.checkpoint_path, custom_objects=custom_objects)

    # rename placeholders for special prize:
    # https://www.kaggle.com/c/tensorflow-speech-recognition-challenge#Prizes
    # decoded_sample_data:0, taking a [16000, 1] float tensor as input,
    # representing the audio PCM-encoded data.
    # `decode_wav` will produce two outputs. tf names them: 'name:0', 'name:1'.
    wav_filename_placeholder_ = tf.placeholder(tf.string, [], name='wav_fn')
    wav_loader = io_ops.read_file(wav_filename_placeholder_)
    wav_decoder = contrib_audio.decode_wav(wav_loader,
                                           desired_channels=1,
                                           desired_samples=16000,
                                           name=args.data_tensor)

    # add batch dimension and remove last one
    # keras model wants (None, 16000)
    data_reshaped = tf.reshape(wav_decoder.audio, (1, -1))
    # call keras model
    softmax_probs = model(data_reshaped)
    # remove batch dimension
    softmax_probs = tf.reshape(softmax_probs, (-1, ), name=args.final_tensor)

    # `sess` here is the module-level TF session created elsewhere in the
    # script (the snippet does not define it).
    frozen_graph_def = graph_util.convert_variables_to_constants(
        sess, sess.graph.as_graph_def(), [args.final_tensor])

    with gfile.FastGFile(args.frozen_path, 'wb') as f:
        f.write(frozen_graph_def.SerializeToString())

    print("Wrote frozen graph to: %s" % args.frozen_path)
Example no. 19
import numpy as np
import tensorflow as tf
from tensorflow import gfile  # attribute import is more portable than `import tensorflow.gfile`
import PIL.Image as Image

# dd = tf.import_graph_def(r"D:\Users\yl_gong\Desktop\dl\mobilenet_v2_0.35_224\mobilenet_v2_0.35_224_frozen.pb")
# print(dd)

im = Image.open(
    r'D:\Users\yl_gong\Desktop\dl\voc\VOCtest_06-Nov-2007\VOCdevkit\VOC2007\JPEGImages\000542.jpg'
)
im = im.resize((224, 224), Image.ANTIALIAS)
im = np.expand_dims(np.array(im) / 255., axis=0)

with gfile.FastGFile(
        r"D:\Users\yl_gong\Desktop\dl\mobilenet_v2_0.35_224\mobilenet_v2_0.35_224_frozen.pb",
        'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())
    tf.import_graph_def(graph_def)

input = tf.get_default_graph().get_tensor_by_name("import/input:0")
output = tf.get_default_graph().get_tensor_by_name(
    "import/MobilenetV2/Predictions/Reshape_1:0")