Code Example #1
def classifier(image_path):
    g1 = tf.Graph()
    with g1.as_default():
        test_input = tf.placeholder(tf.float32, shape=[None, 4096])
        # Add a 256-unit fully connected layer
        fc = tf.contrib.layers.fully_connected(test_input, 256)
        # Add a 5-unit fully connected output layer
        logits = tf.contrib.layers.fully_connected(fc, 5, activation_fn=None)
        predicted = tf.nn.softmax(logits)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            vgg = vgg16.Vgg16()
            input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
            with tf.name_scope("content_vgg"):
                # Load the VGG16 model
                vgg.build(input_)
                # Restore the trained classifier weights
                ckpt = tf.train.get_checkpoint_state('checkpoints')
                saver.restore(sess, ckpt.model_checkpoint_path)
                # Load the image and wrap it in a one-image batch
                img = utils.load_image(image_path)
                image_one = img.reshape((1, 224, 224, 3))
                # Run VGG16 up to relu6 to get the 4096-d feature vector
                img_feature = sess.run(vgg.relu6, feed_dict={input_: image_one})
                # Feed the features through the classifier head
                res = sess.run(predicted, feed_dict={test_input: img_feature})
            return np.argmax(res)
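For reference, a minimal sketch of calling this function, assuming a `checkpoints` directory produced by training and a matching 5-class label list (the class names below are hypothetical):

import numpy as np
import tensorflow as tf
import utils
import vgg16

# Hypothetical names for the 5 output classes
flower_classes = ['daisy', 'dandelion', 'roses', 'sunflowers', 'tulips']

class_index = classifier('test_image.jpg')
print('Predicted class:', flower_classes[class_index])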
Code Example #2
def __init__(self):
    self.vgg = vgg16.Vgg16()
    self.output_shape_1 = tf.placeholder(dtype=tf.int32, shape=[4])
    self.output_shape_2 = tf.placeholder(dtype=tf.int32, shape=[4])
    self.batch = cfg.batch_size
    self.sub_graph1 = tf.Graph()
    self.sub_graph2 = tf.Graph()
    self.sub_graph3 = tf.Graph()
    self.switch_graph = tf.Graph()
Code Example #3
def override_relu(name, images, path_vgg):
    """
    Override ReLU for guided backpropagation.
    """
    g = tf.get_default_graph()
    # Map every ReLU op created in this scope to the registered custom gradient
    with g.gradient_override_map({'Relu': name}):
        model = vgg16.Vgg16(vgg16_npy_path=path_vgg)
        with tf.name_scope("content_vgg_gbp"):
            model.build(images)
    return model
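Code Example #5 below calls `register_gradient()` before building this guided model, but that helper is not shown in these examples. A common sketch of it, assuming only the op name `GuidedBackPropReLU` from the call site (the body is an assumption, not the project's actual code):

import tensorflow as tf
from tensorflow.python.ops import gen_nn_ops

def register_gradient():
    # Register the guided-backprop gradient under the name that
    # gradient_override_map looks up. Registering the same name twice
    # raises an error, so call this once per process.
    @tf.RegisterGradient("GuidedBackPropReLU")
    def _guided_backprop_relu(op, grad):
        # Pass a gradient through only where it is positive AND the
        # forward ReLU activation was positive.
        return tf.where(grad > 0.,
                        gen_nn_ops.relu_grad(grad, op.outputs[0]),
                        tf.zeros_like(grad))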
Code Example #4
File: cpcnn.py, Project: akira-l/counting-project
def __init__(self, stage='train'):
    self.vgg = vgg16.Vgg16()
    self.output_shape_1 = tf.placeholder(dtype=tf.int32, shape=[4])
    self.output_shape_2 = tf.placeholder(dtype=tf.int32, shape=[4])
    self.batch = cfg.batch_size
    if stage == 'test':
        self.batch = 1
    self.stage = stage
    self.test_x = 300  # cfg.size_x
    self.test_y = 300  # cfg.size_y
    self.train_x = cfg.train_size_x
    self.train_y = cfg.train_size_y
Code Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('input_image', type=str, default='/path/to/image', help='path to image.')
    parser.add_argument('vgg16_path', type=str, default='/path/to/vgg16.npy', help='path to vgg16.npy.')
    parser.add_argument('--top_n', type=int, default=3, help="Grad-CAM for top N predicted classes.")
    args = parser.parse_args()
    print(args)

    input_image = utils.load_image(args.input_image) # tf RGB
    image_batch = input_image[None, :, :, :3]

    graph = tf.Graph()
    sess = tf.InteractiveSession(graph=graph)
    with tf.device('/cpu:0'):
        images = tf.placeholder("float", [None, 224, 224, 3])
        model = vgg16.Vgg16(vgg16_npy_path=args.vgg16_path)
        with tf.name_scope("content_vgg"):
            model.build(images)

    path_synset = os.path.join(os.path.dirname(vgg16.__file__), "synset.txt")
    prob = sess.run(model.prob, feed_dict={images: image_batch})
    infos = get_info(prob[0], path_synset, top_n=args.top_n)
    for rank, info in enumerate(infos):
        print("{}: class id: {}, class name: {}, probability: {:.3f}, synset: {}".format(rank, *info))

    # Grad-CAM
    for i in range(args.top_n):
        class_id = infos[i][0]
        class_name = infos[i][1]
        prob = infos[i][2]
        cams = grad_cam(model, class_id, "content_vgg/conv5_3/Relu", sess, feed_dict={images: image_batch})

        save_cam(cams, i, class_id, class_name, prob, image_batch, args.input_image)

    # Guided Backpropagation
    register_gradient()

    del model
    images = tf.placeholder("float", [None, 224, 224, 3])

    guided_model = override_relu('GuidedBackPropReLU', images, args.vgg16_path)
    class_id = infos[0][0]
    class_saliencies = saliency_by_class(guided_model, images, class_id, nb_classes=1000)
    class_saliency = sess.run(class_saliencies, feed_dict={images: image_batch})[0][0]

    class_saliency = class_saliency - class_saliency.min()
    class_saliency = class_saliency / class_saliency.max() * 255.0
    base_path, ext = os.path.splitext(args.input_image)
    gbprop_path = "{}_{}{}".format(base_path, "guided_bprop", ext)
    cv2.imwrite(gbprop_path, class_saliency.astype(np.uint8))
Code Example #6
def extract_training_features():
    data_dir = 'data_gen/'
    contents = os.listdir(data_dir)
    classes = [each for each in contents if os.path.isdir(data_dir + each)]

    batch_size = 10
    labels = []
    batch = []
    codes = None

    with tf.Session() as sess:
        # Construct the VGG16 object
        vgg = vgg16.Vgg16()
        input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
        with tf.name_scope("content_vgg"):
            # Build the VGG16 model
            vgg.build(input_)

        # For every kind of flower, use VGG16 to compute its features
        for each in classes:
            print("Starting {} images".format(each))
            class_path = data_dir + each
            files = os.listdir(class_path)
            for ii, file in enumerate(files, 1):
                # Load the image and add it to the batch list
                img = utils.load_image(os.path.join(class_path, file))
                batch.append(img.reshape((1, 224, 224, 3)))
                labels.append(each)

                # Once the batch is full (or this is the last file), run the batch
                if ii % batch_size == 0 or ii == len(files):
                    images = np.concatenate(batch)

                    feed_dict = {input_: images}
                    # Compute the feature codes from the relu6 layer
                    codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)

                    # Append the results to the codes array
                    if codes is None:
                        codes = codes_batch
                    else:
                        codes = np.concatenate((codes, codes_batch))

                    # Clear the list to start building the next batch
                    batch = []
                    print('{} images processed'.format(ii))

    np.save("train_features.npy", codes)
    np.save("train_labels.npy", np.array(labels))
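The saved `train_features.npy`/`train_labels.npy` pair feeds a small fully connected head like the one restored in Code Example #1. A minimal training sketch under that assumption (epoch count, optimizer settings, and the checkpoint path are illustrative):

import numpy as np
import tensorflow as tf
from sklearn.preprocessing import LabelBinarizer

codes = np.load("train_features.npy")
labels = np.load("train_labels.npy")

# One-hot encode the class names
lb = LabelBinarizer()
labels_vecs = lb.fit_transform(labels)
n_classes = labels_vecs.shape[1]

inputs_ = tf.placeholder(tf.float32, shape=[None, codes.shape[1]])
labels_ = tf.placeholder(tf.float32, shape=[None, n_classes])

# Same head as Code Example #1: one 256-unit hidden layer, then logits
fc = tf.contrib.layers.fully_connected(inputs_, 256)
logits = tf.contrib.layers.fully_connected(fc, n_classes, activation_fn=None)
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(labels=labels_, logits=logits))
optimizer = tf.train.AdamOptimizer().minimize(cost)

saver = tf.train.Saver()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(10):  # illustrative epoch count
        _, loss = sess.run([optimizer, cost],
                           feed_dict={inputs_: codes, labels_: labels_vecs})
        print("Epoch {}: loss {:.4f}".format(epoch + 1, loss))
    saver.save(sess, "checkpoints/flowers.ckpt")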
Code Example #7
def get_vegelabel_from_b64str(b64str):
    with tf.Graph().as_default():
        with tf.Session() as sess:
            vgg = vgg16.Vgg16()
            input_test = tf.placeholder(tf.float32,
                                        shape=[None, 224, 224, 3],
                                        name='input_test')
            inputs_fea = tf.placeholder(tf.float32,
                                        shape=[None, 4096],
                                        name='inputs_fea')

            with tf.name_scope("content_vgg"):
                # Prepare the input data first
                image, image_path = get_binstr_by_resized_img(b64str)
                test_images = image.reshape((1, 224, 224, 3))

                # Run VGG16 up to relu6 to get the feature codes
                vgg.build(input_test)
                feature_codes = sess.run(vgg.relu6,
                                         feed_dict={input_test: test_images})
                assert feature_codes.shape == (1, 4096)

            # Add a 256-unit fully connected layer
            fc = tf.contrib.layers.fully_connected(inputs_fea, 256)
            logits = tf.contrib.layers.fully_connected(fc,
                                                       3,
                                                       activation_fn=None)
            predicted = tf.nn.softmax(logits, name='predicted')

            # The graph structure (fc, logits, predicted above) must be
            # defined before the Saver is created.
            saver = tf.train.Saver()
            # Note: restore() only recovers the variable values; the ops
            # and tensors themselves must already exist in this graph.
            saver.restore(sess, "checkpoints/zsy.ckpt")

            pred_result = sess.run(predicted,
                                   feed_dict={inputs_fea: feature_codes})
            result_list = tf.argmax(pred_result, 1).eval()
            return vege_dict[result_list[0]]
Code Example #8
def create_data():
    data_dir = r'flower_photos/'
    contents = os.listdir(data_dir)
    classes = [each for each in contents if os.path.isdir(data_dir + each)]

    batch_size = 10
    labels = []   # holds the flower class labels
    batch = []    # holds the image data
    codes = None  # holds the feature codes

    with tf.Session() as sess:
        vgg = vgg16.Vgg16()
        input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
        with tf.name_scope('content_vgg'):
            vgg.build(input_)

        # Compute features for each flower class with VGG16
        for each in classes:
            print("starting {} images".format(each))
            class_path = data_dir + each
            files = os.listdir(class_path)
            # Enumerate the images in each class folder
            for ii, file in enumerate(files, 1):
                img = utils.load_image(os.path.join(class_path, file))
                batch.append(np.reshape(img, (1, 224, 224, 3)))
                labels.append(each)

                if ii % batch_size == 0 or ii == len(files):
                    images = np.concatenate(batch)  # join the batch into one array
                    feed_dict = {input_: images}

                    # Feature codes: the output of the first fully connected layer
                    codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)

                    # Append the results to the codes array
                    if codes is None:
                        codes = codes_batch
                    else:
                        codes = np.concatenate((codes, codes_batch))
                    batch = []
                    print('{} images processed'.format(ii))
    return codes, labels
Code Example #9
    def setup_classifiers(self):
        """
        Setup the whole network
        """
        self.inputs_ = tf.placeholder(tf.float32,
                                      shape=[None, 224, 224, 3],
                                      name='inputs')
        self.labels_ = tf.placeholder(tf.float32,
                                      shape=[None, self._classifier_count],
                                      name='labels')
        self.learning_rate_ = tf.placeholder(tf.float32, name='learning_rate')
        self.dropout_ratio_ = tf.placeholder(tf.float32, name='dropout_ratio')
        self.smooth_ = tf.placeholder(tf.float32, name='smooth')

        # load VGG
        vgg_weight_utils.download_vgg_parameter_file()
        self.vgg = vgg16.Vgg16(vgg16_npy_path='./data/vgg16.npy',
                               trainable=self._train_vgg
                               )  # initiates weights from vgg16.npy by default
        with tf.variable_scope('vgg'):
            self.vgg.build(self.inputs_)

        # set up a classifier for each category
        self.logits = []
        self.predictions = []
        self.accuracies = []
        self.losses = []
        for c_index in range(self._classifier_count):
            c_logits, c_prediction, c_loss, c_accuracy = self.single_classifier(
                self.vgg.relu6, c_index)

            self.logits.append(c_logits)
            self.predictions.append(c_prediction)
            self.losses.append(c_loss)
            self.accuracies.append(c_accuracy)

        self.loss = tf.add_n(self.losses)
        self.accuracy = tf.add_n(self.accuracies) / float(
            self._classifier_count)

        self.optimizer = tf.train.AdamOptimizer(self.learning_rate_).minimize(
            self.loss)
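`single_classifier` is not shown in this example. A plausible sketch of a per-category binary head that matches the return signature and the `dropout_ratio_`/`smooth_` placeholders above (the layer sizes and the exact loss are assumptions):

def single_classifier(self, features, c_index):
    # One binary head per category on top of the shared VGG features
    with tf.variable_scope('classifier_{}'.format(c_index)):
        fc = tf.layers.dense(features, 256, activation=tf.nn.relu)
        fc = tf.nn.dropout(fc, keep_prob=1.0 - self.dropout_ratio_)
        logits = tf.squeeze(tf.layers.dense(fc, 1), axis=1)

        labels = self.labels_[:, c_index]
        # Label smoothing via the smooth_ placeholder
        smoothed = labels * (1.0 - self.smooth_) + 0.5 * self.smooth_
        loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=smoothed,
                                                    logits=logits))

        prediction = tf.cast(tf.nn.sigmoid(logits) > 0.5, tf.float32)
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(prediction, labels), tf.float32))
    return logits, prediction, loss, accuracy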
Code Example #10
def extract_testing_features():
    batch_size = 10
    batch = []
    codes = None

    df_test = pd.read_csv('./sample_submission.csv')

    with tf.Session() as sess:
        # Construct VGG16 object
        vgg = vgg16.Vgg16()
        input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
        with tf.name_scope("content_vgg"):
            # build VGG16 model
            vgg.build(input_)

        files = df_test['id'].values
        for ii, file in enumerate(files, 1):
            # Load the image and add it to the batch list
            img = utils.load_image('./test/{}.jpg'.format(file))
            batch.append(img.reshape((1, 224, 224, 3)))

            # Once the batch is full (or this is the last file), run the batch
            if ii % batch_size == 0 or ii == len(files):
                images = np.concatenate(batch)

                feed_dict = {input_: images}
                # Compute the feature codes
                codes_batch = sess.run(vgg.relu6, feed_dict=feed_dict)

                # Append the results to the codes array
                if codes is None:
                    codes = codes_batch
                else:
                    codes = np.concatenate((codes, codes_batch))

                # Clear the list to start building the next batch
                batch = []
                print('{} images processed'.format(ii))

    np.save("test_features.npy", codes)
Code Example #11
def similarity_net(images, targets=None, lr=None):

    vgg = vgg16.Vgg16(vgg16_npy_path='models/vgg16.npy')
    with tf.name_scope("vgg_body"):
        vgg.build(images)

    feat_layers = vgg.fc7

    # Split off the first image's features as the base; the rest are comparisons
    base_feat, comparisons_feat = tf.split(feat_layers, [1, -1], 0)

    # Squared difference between each comparison and the base
    simi_feat = tf.square(comparisons_feat - base_feat)
    out = tf.layers.dense(inputs=simi_feat,
                          units=4096,
                          activation=tf.nn.sigmoid)
    out = tf.layers.dense(inputs=out,
                          units=1024,
                          activation=tf.nn.sigmoid)
    out = tf.layers.dense(inputs=out, units=1, activation=tf.nn.sigmoid)

    if lr:
        with tf.name_scope("train_ops"):
            loss = tf.reduce_sum(tf.square(out - targets))
            # loss = -tf.reduce_sum(targets*tf.log(tf.clip_by_value(out,1e-10,1.0)))
            # loss = -tf.reduce_sum(targets*tf.log(out))

            train_op = tf.train.AdamOptimizer(lr).minimize(loss)
            # train_op = tf.train.GradientDescentOptimizer(lr).minimize(loss)

        with tf.name_scope("evaluation"):
            prediction = tf.round(out)
            predictions_correct = tf.cast(tf.equal(prediction, targets),
                                          tf.float32)
            accuracy = tf.reduce_mean(predictions_correct)

        return out, train_op, loss, accuracy
    else:
        return out
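A minimal usage sketch: the first image in the feed is the base and the remaining images are compared against it (the batch size, targets, and learning rate are illustrative, and `models/vgg16.npy` must exist):

import numpy as np
import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
# One similarity target per comparison image (batch size minus the base)
targets = tf.placeholder(tf.float32, [None, 1])

out, train_op, loss, accuracy = similarity_net(images, targets, lr=1e-4)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    # Dummy batch: 1 base image plus 4 comparison images
    batch = np.random.rand(5, 224, 224, 3).astype(np.float32)
    sim_targets = np.array([[1.], [0.], [0.], [1.]], dtype=np.float32)
    _, batch_loss = sess.run([train_op, loss],
                             feed_dict={images: batch, targets: sim_targets})
    print('loss:', batch_loss)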
Code Example #12
# -----------------------------------------------------------
# Create transfer codes for each image and store in files 'codes, labels'
batch_size = 10
labels = []
batch = []

codes = None

print('_' * 50)
print('Creating transfer codes:')
print('_' * 50)

with tf.Session() as sess:
    vgg = vgg16.Vgg16()
    input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
    with tf.name_scope("content_vgg"):
        vgg.build(input_)

    for each in classes:
        print("Starting {} images".format(each))
        class_path = data_dir + each
        files = os.listdir(class_path)
        for ii, file in enumerate(files, 1):

            # Add images to the current batch
            img = utils.load_image(os.path.join(class_path, file))
            batch.append(img.reshape((1, 224, 224, 3)))
            labels.append(each)

            # Run the batch once it is full or the files are exhausted
            if ii % batch_size == 0 or ii == len(files):
                images = np.concatenate(batch)
                codes_batch = sess.run(vgg.relu6, feed_dict={input_: images})

                # Build up the array of transfer codes
                if codes is None:
                    codes = codes_batch
                else:
                    codes = np.concatenate((codes, codes_batch))
                batch = []
                print('{} images processed'.format(ii))
Code Example #13
def init_vgg(input_):
    vgg = vgg16.Vgg16()
    vgg.build(input_)
    return vgg


vgg16_npy_path = const.VGG_DIR + "/vgg16.npy"

data_dir = const.VGG_DIR + '/flower_photos/'
contents = os.listdir(data_dir)
classes = [each for each in contents if os.path.isdir(data_dir + each)]

# Set the batch size higher if you can fit it in your GPU memory
batch_size = 10
labels = []
batch = []

codes = None

with tf.Session() as sess:
    vgg = vgg16.Vgg16(vgg16_npy_path)
    input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
    with tf.name_scope("content_vgg"):
        vgg.build(input_)

    for each in classes:
        print("Starting {} images".format(each))
        class_path = data_dir + each
        files = os.listdir(class_path)
        for ii, file in enumerate(files, 1):
            # Add images to the current batch
            # utils.load_image crops the input images for us, from the center
            img = utils.load_image(os.path.join(class_path, file))
            batch.append(img.reshape((1, 224, 224, 3)))
            labels.append(each)
Code Example #15
def get_vgg_codes(batch_size=100,
                  data_method='tfrecords(multi-process/threads)'):
    '''
    data_method: 'pickle(multi-process/threads)' or 'tfrecords(multi-process/threads)'
    '''

    with tf.Session() as sess:

        # Build the VGG16 network
        vgg = vgg16.Vgg16()
        input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])

        with tf.name_scope('content_vgg'):
            vgg.build(input_)

        if data_method == 'pickle(multi-process/threads)':
            file_names = [
                './Font_pickle/data.pickle-' + str(i + 1) for i in range(4)
            ]
            images, labels = proc.read_pickle_to_fonts_multifiles(file_names,
                                                                  pool_size=-1,
                                                                  width=224,
                                                                  high=224)
        elif data_method == 'tfrecords(multi-process/threads)':
            file_names = [
                './Font_TFRecords/data.tfrecords-' + str(i) for i in range(10)
            ]
            images, labels = proc.read_tfrecords_to_fonts_multifiles(
                file_names, pool_size=-1, width=20, high=20, is_reshape=True)
        else:
            raise ValueError('No such data import method!')
        batch, vgg_codes = [], None

        for i in range(len(images)):
            # Resize each image to 224x224
            print(images[i].shape)
            image_data = tf.convert_to_tensor(images[i])
            print(image_data.shape)
            image_data = tf.image.resize_images(
                image_data, [224, 224], method=tf.image.ResizeMethod.BILINEAR)
            batch.append(image_data.eval())

            if (i + 1) % batch_size == 0 or (i + 1) == len(images):
                # batch_size*224*224*1 -> batch_size*224*224*3
                feed_tensor = np.concatenate((batch, batch, batch), axis=3)
                # Get the values from the relu6 layer of the VGG network
                codes_batch = sess.run(vgg.relu6,
                                       feed_dict={input_: feed_tensor})
                # Building an array of the codes
                if vgg_codes is None:
                    vgg_codes = codes_batch
                else:
                    vgg_codes = np.concatenate((vgg_codes, codes_batch))
                # Reset to start building the next batch
                batch = []
                print('{} images processed'.format(i + 1))

    # One-hot encode the labels
    labels = labels[0:vgg_codes.shape[0]]
    lb = LabelBinarizer()
    lb.fit(labels)
    labels_vecs = lb.transform(labels)

    # Shuffle and split dataset
    ss_train = StratifiedShuffleSplit(n_splits=10, test_size=0.2)

    train_idx, test_idx = next(ss_train.split(vgg_codes, labels))

    train_x, train_y = vgg_codes[train_idx], labels_vecs[train_idx]
    test_x, test_y = vgg_codes[test_idx], labels_vecs[test_idx]

    ss_test = StratifiedShuffleSplit(n_splits=10, test_size=0.5)
    val_idx, test_idx = next(ss_test.split(test_x, test_y))

    val_x, val_y = test_x[val_idx], test_y[val_idx]
    test_x, test_y = test_x[test_idx], test_y[test_idx]

    # Save data
    proc.save_pickle(save_dir='./Font_pickle/',
                     save_name='train_x.p',
                     obj=train_x)
    proc.save_pickle(save_dir='./Font_pickle/',
                     save_name='train_y.p',
                     obj=train_y)
    proc.save_pickle(save_dir='./Font_pickle/', save_name='val_x.p', obj=val_x)
    proc.save_pickle(save_dir='./Font_pickle/', save_name='val_y.p', obj=val_y)
    proc.save_pickle(save_dir='./Font_pickle/',
                     save_name='test_x.p',
                     obj=test_x)
    proc.save_pickle(save_dir='./Font_pickle/',
                     save_name='test_y.p',
                     obj=test_y)
Code Example #16
File: CNNfeatures.py, Project: dsp-uga/time-paradox
def __init__(self):
    # Get the graph ready
    print("Reconstructing Network")
    self.input_ = tf.placeholder(tf.float32, [None, 224, 224, 3])
    self.vgg = vgg16.Vgg16()
    self.vgg.build(self.input_)