Example #1
def test_walking_mask_layer(options, test_set):
    assert (options.network_mode == Net_Mode.BACKDOOR_DEF)
    assert (options.build_level == Build_Level.MASK)

    ld_paths = dict()
    root_folder = '/home/tdteach/data/mask_test_solid_rd_1000_from_10/'
    # dirs = os.walk(root_folder)
    dirs = os.listdir(root_folder)
    for d in dirs:
        tgt_id = int(d.split('_')[0])
        f_p = os.path.join(root_folder, d, 'checkpoint')
        with open(f_p, 'r') as f:
            for li in f:
                ckpt_name = li.split('"')[-2]
                ld_p = os.path.join(root_folder, d, ckpt_name)
                ld_paths[tgt_id] = ld_p
                break

    print(ld_paths)

    img_producer = dataset.ImageProducer(options, test_set)
    loader, img_op, lb_op, out_op, aux_out_op = builder.build_model(
        img_producer, options)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    mask_abs = dict()

    init_op = tf.global_variables_initializer()
    with tf.Session(config=config) as sess:
        sess.run(init_op)

        for k, v in ld_paths.items():
            print(v)
            loader.restore(sess, v)

            masks, patterns = sess.run([out_op, aux_out_op])
            mask = masks[0]
            pattern = (patterns[0] + 1.) / 2.
            mask_abs[k] = np.sum(np.abs(mask))

    img_producer.stop()

    vs = list(mask_abs.values())
    import statistics
    me = statistics.median(vs)
    abvs = np.abs(np.array(vs) - me)   # absolute deviation from the median
    mad = statistics.median(abvs)
    rvs = abvs / (mad * 1.4826)        # MAD-based anomaly index

    print(mask_abs)
    print(rvs)

    x_arr = [i for i in range(len(mask_abs))]

    import matplotlib.pyplot as plt
    plt.figure()
    plt.boxplot(rvs)
    plt.show()
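    # Hypothetical follow-up (not in the original snippet): rank the target labels by their
    # MAD-based anomaly index and flag the suspicious ones. The threshold of 2 is an assumption.
    ANOMALY_THRESHOLD = 2.0
    labels = list(mask_abs.keys())
    for lb, score in sorted(zip(labels, rvs), key=lambda t: -t[1]):
        flag = '  <-- suspicious' if score > ANOMALY_THRESHOLD else ''
        print('label %d  L1(mask)=%.1f  anomaly=%.2f%s' % (lb, mask_abs[lb], score, flag))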
Example #2
def test_prediction(options, test_set):
    assert (options.build_level == Build_Level.LOGITS)
    img_producer = dataset.ImageProducer(options, test_set)
    loader, img_op, lb_op, out_op, aux_out_op = builder.build_model(
        img_producer, options)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    acc = 0

    n_test_examples = int(300)
    e_per_iter = options.batch_size * options.num_gpus

    init_op = tf.global_variables_initializer()
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if options.model_path is not None:
            loader.restore(sess, options.model_path)

        for k in range(n_test_examples // e_per_iter):
            a, lbs = sess.run([out_op, lb_op])
            # lbs = np.zeros(lbs.shape)
            pds = np.argmax(a, axis=1)
            print(pds[1:10])
            print(lbs[1:10])
            acc += sum(np.equal(pds, lbs))

            print(k)

    img_producer.stop()

    print("acc: %.2f%%" % (acc * 100.0 / n_test_examples))
Example #3
def classify(model_data_path, image_paths):
    '''Classify the given images using GoogleNet.'''

    # Get the data specifications for the GoogleNet model
    spec = models.get_data_spec(model_class=models.GoogleNet)

    # Create a placeholder for the input image
    input_node = tf.placeholder(tf.float32,
                                shape=(None, spec.crop_size, spec.crop_size,
                                       spec.channels))

    # Construct the network
    net = models.GoogleNet({'data': input_node})

    # Create an image producer (loads and processes images in parallel)
    image_producer = dataset.ImageProducer(image_paths=image_paths,
                                           data_spec=spec)

    with tf.Session() as sesh:
        # Start the image processing workers
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sesh, coordinator=coordinator)

        # Load the converted parameters
        print('Loading the model')
        net.load(model_data_path, sesh)

        # Load the input image
        print('Loading the images')
        indices, input_images = image_producer.get(sesh)

        # Perform a forward pass through the network to get the class probabilities
        print('Classifying')
        # probs = sesh.run(net.get_output(), feed_dict={input_node: input_images})
        # display_results([image_paths[i] for i in indices], probs)

        # # ////////////////////////////////////////////////////////////////////////////////////
        feature_tensor = sesh.graph.get_tensor_by_name('pool5_7x7_s1:0')
        features = sesh.run(feature_tensor,
                            feed_dict={input_node: input_images})
        features = np.squeeze(features)

        # video_feature is assumed to be a dict defined at module level (not shown here),
        # mapping each image path to its pool5 feature vector.
        for i, j in enumerate(indices):
            video_feature[image_paths[j]] = features[i]

        # print features.shape
        # print features
        # ////////////////////////////////////////////////////////////////////////////////////

        # Stop the worker threads
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)
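# Hypothetical follow-up (not in the original snippet): persist the video_feature dict filled
# in by classify() so the pool5 features can be reused without re-running the network.
# The output file name is an assumption.
def save_video_features(out_path='video_features.pkl'):
    import pickle
    with open(out_path, 'wb') as f:
        pickle.dump(video_feature, f)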
Example #4
def MIRC_resnet_baseline(num_layers=50):
    im_ext = '.JPEG'
    im_size = [224,224]
    model_data_path = resnet_weight_path + 'resnet_' + str(num_layers) + '_data.npy'

    #Prepare network
    net, spec = interpret_resnet(num_layers)

    #Images
    _,_,test_names = prepare_testing_images(test_im_dir,im_size,im_ext,grayscale=False,apply_preprocess=True)
    syn, skeys = get_synkeys()
    gt,gt_ids = get_labels(test_names,syn,skeys,syn_file)
    image_paths = sorted(glob(test_im_dir + '/*' + im_ext)) 

    # Create a placeholder for the input image
    input_node = tf.placeholder(tf.float32,
                                    shape=(None, spec.crop_size, spec.crop_size, spec.channels))

    # Construct the network
    net = net({'data': input_node})

    # Create an image producer (loads and processes images in parallel)
    image_producer = dataset.ImageProducer(image_paths=image_paths, data_spec=spec, batch_size=len(image_paths))

    #Work through all batches
    with tf.Session() as sesh:
        # Start the image processing workers
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sesh, coordinator=coordinator)

        # Load the converted parameters
        print('Loading the model')
        net.load(model_data_path, sesh)

        # Load the input image
        print('Loading the images')
        indices, input_images = image_producer.get(sesh)
        sorted_indices = np.argsort(indices)

        # Perform a forward pass through the network to get the class probabilities
        print('Classifying')
        prob = sesh.run(net.get_output(), feed_dict={input_node: input_images})
        prob = prob[sorted_indices,:]
        # Stop the worker threads
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)
    
    class_accuracy, t1_preds, t5_preds, t1_true_acc, t5_true_acc = evaluate_model(gt,gt_ids,prob,test_names,im_ext,full_syn)
    t1_p = 100
    t5_p = 100
    return class_accuracy, t1_true_acc, t5_true_acc, t1_preds, t5_preds, t1_p, t5_p
Example #5
def load_image_tensorflow(image_paths,spec):
    with tf.Session() as sesh:
        # Create an image producer (loads and processes images in parallel)
        image_producer = dataset.ImageProducer(image_paths=image_paths, data_spec=spec)

        # Start the image processing workers
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sesh, coordinator=coordinator)

        indices, input_images = image_producer.get(sesh)

        # Stop the worker threads
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)
    return input_images
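# Hypothetical usage of load_image_tensorflow (model class and image paths are placeholders,
# assuming the same caffe-tensorflow-style models module as in the other examples).
spec = models.get_data_spec(model_class=models.GoogleNet)
images = load_image_tensorflow(['img_0.jpg', 'img_1.jpg'], spec)
print(images.shape)  # (num_images, crop_size, crop_size, channels)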
Example #6
def test_backdoor_defence(options, test_set):
    assert (options.net_mode == Net_Mode.BACKDOOR_DEF)
    img_producer = dataset.ImageProducer(options, test_set)
    loader, img_op, lb_op, out_op, aux_out_op = builder.build_model(
        img_producer, options)

    print(out_op)
    print(aux_out_op)
    print("------------debug------------------")

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    init_op = tf.global_variables_initializer()
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if options.model_path is not None:
            loader.restore(sess, options.model_path)

        if options.build_level == Build_Level.MASK:
            masks, patterns = sess.run([out_op, aux_out_op])
            mask = masks[0]
            pattern = (patterns[0] + 1.) / 2.
            print(mask.shape)
            print(np.sum(np.abs(mask)))
            print(pattern.shape)
            import cv2
            cv2.imshow('mask', mask)
            cv2.imshow('pattern', pattern)
            cv2.waitKey()
        elif options.build_level == Build_Level.LOGITS:
            e_per_iter = options.batch_size * options.num_gpus
            n_iters = test_set.num_examples // e_per_iter
            n_iters = min(10, n_iters)
            total_e = 0
            acc_e = 0
            t_lb = options.model_path.split("_")
            t_lb = int(t_lb[-2])
            for k in range(n_iters):
                logits, masks = sess.run([out_op, aux_out_op])
                total_e = total_e + e_per_iter
                tmp = np.argmax(logits, axis=1)
                acc_e += sum(tmp == t_lb)
                print('iter %d  acc: %f' % (k, acc_e / total_e))

    img_producer.stop()
Example #7
def classify(model_data_path, image_paths):
    '''Classify the given images using GoogleNet.'''

    # Get the data specifications for the GoogleNet model
    spec = models.get_data_spec(model_class=models.GoogleNet)

    # Create a placeholder for the input image
    input_node = tf.placeholder(tf.float32,
                                shape=(None, spec.crop_size, spec.crop_size,
                                       spec.channels))

    # Construct the network
    net = models.GoogleNet({'data': input_node})

    # Create an image producer (loads and processes images in parallel)
    image_producer = dataset.ImageProducer(image_paths=image_paths,
                                           data_spec=spec)

    with tf.Session() as sesh:
        # Start the image processing workers
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sesh, coordinator=coordinator)

        # Load the converted parameters
        print('Loading the model')
        net.load(model_data_path, sesh)

        # Load the input image
        print('Loading the images')
        indices, input_images = image_producer.get(sesh)

        # Perform a forward pass through the network to get the class probabilities
        print('Timing in seconds :')
        start_time = time.time()
        probs = sesh.run(net.get_output(),
                         feed_dict={input_node: input_images})
        duration = time.time() - start_time
        print(duration)
        display_results([image_paths[i] for i in indices], probs)

        # Stop the worker threads
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)
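# display_results is called above but never defined in these snippets. A minimal stand-in
# sketch, assuming probs has shape (num_images, num_classes) and that a label file such as
# 'imagenet-classes.txt' (hypothetical path) holds one class name per line.
def display_results(image_paths, probs):
    import os.path as osp
    import numpy as np
    with open('imagenet-classes.txt') as f:
        class_labels = [line.strip() for line in f]
    class_indices = np.argmax(probs, axis=1)
    print('{:20} {:30} {}'.format('Image', 'Classified As', 'Confidence'))
    for img_idx, image_path in enumerate(image_paths):
        img_name = osp.basename(image_path)
        class_name = class_labels[class_indices[img_idx]]
        confidence = round(probs[img_idx, class_indices[img_idx]] * 100, 2)
        print('{:20} {:30} {} %'.format(img_name, class_name, confidence))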
Example #8
def test_in_MF_format(options, test_set):
    img_producer = dataset.ImageProducer(options, test_set)
    loader, img_op, lb_op, out_op, aux_out_op = builder.build_model(
        img_producer, options)

    header_len = len(options.image_folders[0])
    im_pts = copy.deepcopy(test_set.filenames)
    for i in range(len(im_pts)):
        im_pts[i] = im_pts[i][header_len:]

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    n_test_examples = test_set.num_examples

    out_folder = '/home/tdteach/data/MF/results/try/FaceScrub/'
    name_ending = '_resnet101_128x128.bin'

    init_op = tf.global_variables_initializer()
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if options.model_path is not None:
            loader.restore(sess, options.model_path)

        z = 0
        for k in range(math.ceil(n_test_examples / options.batch_size)):

            st_t = time.time()
            a, lbs = sess.run([out_op, lb_op])
            ed_t = time.time()

            print(ed_t - st_t)

            if z + options.batch_size > n_test_examples:
                z = n_test_examples - options.batch_size

            for j in range(options.batch_size):
                save_to_bin(a[j], out_folder + im_pts[z + j] + name_ending)
            z = z + options.batch_size

            print(k)

    img_producer.stop()
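# save_to_bin is used above but not defined in the snippet. A minimal sketch that writes the
# embedding as a raw little-endian float32 vector; the actual MegaFace devkit format may
# expect an additional header, so treat this layout as an assumption.
def save_to_bin(feature, out_path):
    import os
    import numpy as np
    out_dir = os.path.dirname(out_path)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    np.asarray(feature, dtype=np.float32).tofile(out_path)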
Example #9
def gen_data_matrix():
    from config import Options
    import dataset
    import builder
    import tensorflow as tf
    
    options = Options()
    my_set = dataset.MegafaceDataset(options)
    img_producer = dataset.ImageProducer(options, my_set)
    loader, img_op, lb_op, out_op = builder.build_model(img_producer,output_level=2)

    rst_image = []
    rst_predict = []

    t_label = 7707

    sess_config = tf.ConfigProto()
    sess_config.gpu_options.allow_growth = True
    init_op = tf.global_variables_initializer()
    with tf.Session(config=sess_config) as sess:
        sess.run(init_op)
        loader.restore(sess, "/home/tdteach/data/benchmark/21-wedge")

        print(options.num_examples_per_epoch)

        for i in range(100):
            print(i)
            lgs, ims, lbs = sess.run([out_op,img_op,lb_op])
            for j in range(ims.shape[0]):
                rst_predict.append(lgs[j][t_label])
                rst_image.append(np.reshape(ims[j], 128*128*3))
                
    img_producer.stop()

    ii = np.array(rst_image)
    ip = np.array(rst_predict)

    print(ii.shape)
    print(ip.shape)

    np.save('img_matrix',ii)
    np.save('prd_matrix',ip)
Example #10
def classify(model_data_path, image_paths):
    '''Classify the given images using GoogleNet.'''

    # Get the data specifications for the GoogleNet model
    #spec = models.get_data_spec(model_class=models.GoogleNet)
    spec = models.get_data_spec(model_class=models.VGG_ILSVRC_16_layer)

    # Create a placeholder for the input image
    input_node = tf.placeholder(tf.float32,
                                shape=(None, spec.crop_size, spec.crop_size,
                                       spec.channels))

    # Construct the network
    #net = models.GoogleNet({'data': input_node})
    net = models.VGG_ILSVRC_16_layer({'data': input_node})

    def load_image(image_path, data_spec):
        # Read the file
        file_data = tf.read_file(image_path)
        # Decode the image data
        img = tf.image.decode_jpeg(file_data, channels=data_spec.channels)
        if data_spec.expects_bgr:
            # Convert from RGB channel ordering to BGR
            # This matches, for instance, how OpenCV orders the channels.
            img = tf.reverse(img, axis=[-1])  # reverse only the channel axis (RGB -> BGR)
        return img

    # Create an image producer (loads and processes images in parallel)
    image_producer = dataset.ImageProducer(image_paths=image_paths,
                                           data_spec=spec)

    with tf.Session() as sesh:
        # Start the image processing workers
        #coordinator = tf.train.Coordinator()
        #threads = image_producer.start(session=sesh, coordinator=coordinator)

        # Load the converted parameters
        print('Loading the model')
        net.load(model_data_path, sesh)

        # Load the input image
        print('Loading the images')
        is_jpeg = True
        probs = []
        for image_path in image_paths:
            input_image = load_image(image_path, image_producer.data_spec)
            processed_img = dataset.process_image(
                img=input_image,
                scale=image_producer.data_spec.scale_size,
                isotropic=image_producer.data_spec.isotropic,
                crop=image_producer.data_spec.crop_size,
                mean=image_producer.data_spec.mean)
            #print('Classifying')
            prob = sesh.run(net.get_output(),
                            feed_dict={
                                input_node:
                                np.reshape(processed_img.eval(),
                                           (1, 224, 224, 3))
                            })
            probs.extend(prob)
        #indices, input_images = image_producer.get(sesh)
        indices = range(len(image_paths))
        # Perform a forward pass through the network to get the class probabilities

        display_results([image_paths[i] for i in indices], probs)
Example #11
    # (fragment) tail of a display_results-style helper: print each image's predicted class
    # and confidence; the enclosing loop head is reconstructed here.
    for img_idx, image_path in enumerate(image_paths):
        img_name = osp.basename(image_path)
        class_name = class_labels[class_indices[img_idx]]
        confidence = round(probs[img_idx, class_indices[img_idx]] * 100, 2)
        print('{:20} {:30} {} %'.format(img_name, class_name, confidence))


spec = DataSpec()
input_node = tf.placeholder(tf.float32, [None, 227, 227, 3])

net = AlexNet({"data": input_node})

# image = (imread(r"tank_1.jpg")[:, :, :3]).astype(np.float32)
# image -= np.mean(image)
# image = imresize(image, (227, 227, 3))
image_paths = [r"tank_1.jpg"]
image_producer = dataset.ImageProducer(image_paths=image_paths, data_spec=spec)

with tf.Session() as sess:
    # Start the image processing workers
    coordinator = tf.train.Coordinator()
    threads = image_producer.start(session=sess, coordinator=coordinator)

    # Load the converted parameters
    print('Loading the model')
    net.load(r"../AlexNet.npy", sess, encoding="latin1")

    # Load the input image
    print('Loading the images')
    indices, input_images = image_producer.get(sess)

    # Perform a forward pass through the network to get the class probabilities
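    # The original snippet stops here. A hedged continuation following the same pattern as the
    # other caffe-tensorflow examples; it only prints top-1 class indices, since no label file
    # is shown (numpy is assumed to be imported as np, as in the other snippets).
    probs = sess.run(net.get_output(), feed_dict={input_node: input_images})
    top1 = np.argmax(probs, axis=1)
    for i, img_idx in enumerate(indices):
        print('%s -> class %d (p=%.3f)' % (image_paths[img_idx], top1[i], probs[i, top1[i]]))

    # Stop the worker threads
    coordinator.request_stop()
    coordinator.join(threads, stop_grace_period_secs=2)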
Example #12
def test_embeddings(options, test_set):
    assert (options.build_level == Build_Level.EMBEDDING)
    img_producer = dataset.ImageProducer(options, test_set)
    loader, img_op, lb_op, out_op, aux_out_op = builder.build_model(
        img_producer, options)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    rst_matrix = None
    rst_labels = None

    n_examples_per_iter = options.batch_size * options.num_gpus
    n_iters = options.num_examples_per_epoch // n_examples_per_iter
    # n_test_examples = int(3000)

    init_op = tf.global_variables_initializer()
    with tf.Session(config=config) as sess:
        sess.run(init_op)
        if options.model_path is not None:
            loader.restore(sess, options.model_path)

        for k in range(n_iters):

            st_t = time.time()
            a, lbs = sess.run([out_op, lb_op])
            ed_t = time.time()

            print(ed_t - st_t)

            if rst_matrix is None:
                rst_matrix = a
                rst_labels = lbs
            else:
                rst_matrix = np.concatenate((rst_matrix, a))
                rst_labels = np.concatenate((rst_labels, lbs))
            print(k)

    img_producer.stop()

    np.save('out_X.npy', rst_matrix)
    np.save('out_labels.npy', rst_labels)
    # NOTE: everything below this exit(0) is unreachable; it also relies on n_test_examples,
    # which is only defined in the commented-out line near the top of the function.
    exit(0)

    no = np.linalg.norm(rst_matrix, axis=1)
    aft = np.divide(rst_matrix.transpose(), no)
    coss = np.matmul(aft.transpose(), aft)
    # coss = np.abs(coss)

    z = rst_labels
    z = np.repeat(np.expand_dims(z, 1), n_test_examples, axis=1)
    z = np.equal(z, rst_labels)
    same_type = z.astype(np.int32)
    total_top = np.sum(np.sum(same_type, axis=1) > 1)

    # top-1
    rt = 0
    for i in range(n_test_examples):
        if i == 0:
            rt += same_type[i][np.argmax(coss[i][1:])]
        elif i == n_test_examples - 1:
            rt += same_type[i][np.argmax(coss[i][:-1])]
        else:
            k1 = np.argmax(coss[i][0:i])
            k2 = np.argmax(coss[i][i + 1:])
            if coss[i][k1] > coss[i][k2 + i + 1]:
                rt += same_type[i][k1]
            else:
                rt += same_type[i][k2 + i + 1]

    print("top1 : %.2f%%" % (rt * 1.0 / total_top * 100))
    print("positive pairs = %d" % total_top)

    # ROC
    print(same_type.shape)
    print(coss.shape)

    from sklearn import metrics
    fpr, tpr, thr = metrics.roc_curve(
        same_type.reshape(1, n_test_examples * n_test_examples).tolist()[0],
        coss.reshape(1, n_test_examples * n_test_examples).tolist()[0])

    print('auc : %f' % (metrics.auc(fpr, tpr)))

    for i in range(len(fpr)):
        if fpr[i] * 100000 > 1:
            break
    print('tpr : %f' % (tpr[i]))
    print('thr : %f' % (thr[i]))

    aa = coss > 0.4594
    print((np.sum(aa) - n_test_examples) /
          (n_test_examples * n_test_examples - n_test_examples))

    import matplotlib.pyplot as plt
    plt.figure()
    plt.plot(fpr, tpr)
    plt.show()
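# Because test_embeddings() exits right after saving the arrays, here is a hedged offline
# sketch that loads the saved embeddings and reproduces the cosine-similarity ROC computed by
# the unreachable code above (file names follow the np.save calls in the function).
def evaluate_saved_embeddings(x_path='out_X.npy', label_path='out_labels.npy'):
    from sklearn import metrics
    X = np.load(x_path)
    labels = np.load(label_path)
    X = X / np.linalg.norm(X, axis=1, keepdims=True)   # L2-normalize each embedding
    coss = np.matmul(X, X.T)                           # pairwise cosine similarities
    same = np.equal(labels[:, None], labels[None, :]).astype(np.int32)
    fpr, tpr, thr = metrics.roc_curve(same.ravel(), coss.ravel())
    print('auc : %f' % metrics.auc(fpr, tpr))
    return fpr, tpr, thr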
Example #13
def classify(model_data_path, image_pathFolder):
    '''Classify the given images using AlexNet.'''

    print(model_data_path)
    print(image_pathFolder)
    image_paths = []
    for filename in os.listdir(image_pathFolder):
        image_paths.append(os.path.join(image_pathFolder, filename))
    print(image_paths)
    #if(True)
    #sys.exit(0)
    # Get the data specifications for the AlexNet model
    spec = models.get_data_spec(model_class=models.AlexNet)
    #print(spec)
    # Create a placeholder for the input image
    input_node = tf.placeholder(tf.float32,
                                shape=(None, spec.crop_size, spec.crop_size,
                                       spec.channels))
    print(input_node)

    # Construct the network
    net = models.AlexNet({'data': input_node})
    print("net---------------------")

    # Create an image producer (loads and processes images in parallel)
    image_producer = dataset.ImageProducer(image_paths=image_paths,
                                           data_spec=spec)
    print(image_producer)
    os.environ["TF_CPP_MIN_LOG_LEVEL"] = '1'
    with tf.Session() as sesh:
        print(
            'start  -----------------------------------------------------------------%s'
            % datetime.now())
        sesh.run(tf.global_variables_initializer())
        # Start the image processing workers
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sesh, coordinator=coordinator)

        # Load the converted parameters
        print(
            'Loading the model -----------------------------------------------------------------%s'
            % datetime.now())
        net.load(model_data_path, sesh)

        # Load the input image
        print(
            'Loading the images-----------------------------------------------------------------%s'
            % datetime.now())
        indices, input_images = image_producer.get(sesh)

        # Perform a forward pass through the network to get the class probabilities
        print(
            'Classifying        -----------------------------------------------------------------%s'
            % datetime.now())
        probs = sesh.run(net.get_output(),
                         feed_dict={input_node: input_images})
        print(
            'Classifying END    -----------------------------------------------------------------%s'
            % datetime.now())
        display_results([image_paths[i] for i in indices], probs)

        # Stop the worker threads
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)
Example #14
def classify(model_data_path, image_pathFolder):
    '''Classify the given images using VGG16.'''

    #print(model_data_path)
    #print(image_pathFolder)
    image_paths = []
    for filename in os.listdir(image_pathFolder):
        image_paths.append(os.path.join(image_pathFolder, filename))
    #print(image_paths)

    # Get the data specifications for the VggNet model
    spec = models.get_data_spec(model_class=models.VGG16)
    ##print(spec)
    # Create a placeholder for the input image
    input_node = tf.placeholder(tf.float32,
                                shape=(None, spec.crop_size, spec.crop_size,
                                       spec.channels))
    #print(input_node)

    # Construct the network
    net = models.VGG16({'data': input_node})
    #print("net---------------------")

    # Create an image producer (loads and processes images in parallel)
    image_producer = dataset.ImageProducer(image_paths=image_paths,
                                           data_spec=spec)
    #print(image_producer)

    # log_device_placement: print device placement logs
    # allow_soft_placement: if the requested device does not exist, let TF pick one automatically
    config = tf.ConfigProto(log_device_placement=True,
                            allow_soft_placement=False)
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sesh:
        #print('start  -----------------------------------------------------------------%s' % datetime.now())
        #sesh.run(tf.global_variables_initializer())
        # Start the image processing workers
        coordinator = tf.train.Coordinator()
        threads = image_producer.start(session=sesh, coordinator=coordinator)

        # Load the converted parameters
        #print('Loading the model -----------------------------------------------------------------%s' % datetime.now())
        net.load(model_data_path, sesh)

        # Load the input image
        #print('Loading the images-----------------------------------------------------------------%s' % datetime.now())
        indices, input_images = image_producer.get(sesh)

        # Perform a forward pass through the network to get the class probabilities
        print(
            'Classifying        -----------------------------------------------------------------%s'
            % datetime.now())
        probs = sesh.run(net.get_output(),
                         feed_dict={input_node: input_images})
        print(
            'Classifying END    -----------------------------------------------------------------%s'
            % datetime.now())
        display_results([image_paths[i] for i in indices], probs)

        # Stop the worker threads
        coordinator.request_stop()
        coordinator.join(threads, stop_grace_period_secs=2)