Example #1
def getPrediction(image_folder, image_set, annotation_path, image_set_file_name, net):
    imdb = usts(image_set_file_name)  # build the imdb for the requested image set
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    test_net(net, imdb)
    verify_detections = obtain_detections_random_cover(image_set_file_name)
    return verify_detections
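A hypothetical call, assuming the initSettings loader shown in Examples #3 and #5 and an image-set file already written under datasets/usts/ImageSets (the model and set names below are placeholders):

net = initSettings("usts_clean_model")  # placeholder model name
detections = getPrediction("datasets/usts/Images", "test_clean",
                           "datasets/usts/Annotations", "test_clean", net)
print "Detections:", detections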
Example #2
def locate_backdoor(net, test_images, verification_images):
    """
    net: caffe net
    test_images: list of strings with the names of the images you want to test
    verification_images: list of images to perform the 20 image check on
    returns average_cpos
    """
    imdb = usts("verify_20")
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    test_net(net, imdb)
    verify_detections = obtain_detections_random_cover("verify_20")

    # For each image in the list of images
    for i, image in enumerate(test_images):
        # Write the current image name to single_image_detection.txt
        with open("datasets/usts/ImageSets/single_image_detection.txt",
                  "w") as f:
            f.write("{}".format(image))

        # Perform inference on the image
        imdb = usts("single_image_detection")
        if not cfg.TEST.HAS_RPN:
            imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
        test_net(net, imdb)

        # Obtain detections
        detections = obtain_detections("single_image_detection")

        # Obtain annotations of the original image
        with open("datasets/usts/Annotations/{}.txt".format(image), "r") as f:
            annot = [line.strip().split(',') for line in f.readlines()]

        # Place random covers on the image
        print "Generating random covers for image {}, detections: {}".format(
            i, detections)
        cpos_dict = generate_random_covers(image, annot)

        # Perform inference on the covered images
        print "Completed generation, detecting now"
        imdb = usts("random_covers")
        if not cfg.TEST.HAS_RPN:
            imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
        test_net(net, imdb)

        # Obtain detections on these images
        random_covers_detections = obtain_detections_random_cover(
            "random_covers")

        # Transition dictionary: transition[orig_idx][original-class][new-class] -> list of covered images
        transition = defaultdict(
            lambda: defaultdict(lambda: defaultdict(list)))
        # Loop through random_cover dictionary
        for im, detection_list in random_covers_detections.iteritems():
            # Loop through detections (list of lists) of the original image
            for orig_idx, orig_detection in enumerate(detections):
                # Loop through the list obtained from random_cover dictionary
                for new_detection in detection_list:
                    # If IoU > 0 and the class changed, record the transition
                    if iou(orig_detection[2:], new_detection[2:]
                           ) > 0 and orig_detection[0] != new_detection[0]:
                        if float(new_detection[1]) > 0.9:
                            transition[orig_idx][orig_detection[0]][
                                new_detection[0]].append(im)

        for orig_idx, transition_dict in transition.iteritems():
            # Loop through each original class
            for from_type, sub_dict in transition_dict.iteritems():
                # If a detection of this class overlaps an annotation, use the annotation's coordinates
                obtained_coord = False
                for detection in detections:
                    if detection[0] == from_type:
                        for anno in annot:
                            if iou(detection[2:], anno[1:5]) > 0:
                                a = int(float(anno[1]))
                                b = int(float(anno[2]))
                                c = int(float(anno[3]))
                                d = int(float(anno[4]))
                                obtained_coord = True
                if not obtained_coord:
                    continue
                # Loop through each new class
                for to_type, im_list in sub_dict.iteritems():
                    # Obtain the average cpos
                    average_cpos_a = 0
                    average_cpos_b = 0
                    for im in im_list:
                        average_cpos_a += cpos_dict[im][0]
                        average_cpos_b += cpos_dict[im][1]
                    average_cpos_a /= len(im_list)
                    average_cpos_b /= len(im_list)
                    # Read image, obtain potential trigger
                    im_cv2 = cv2.imread(
                        "datasets/usts/Images/{}.png".format(image), -1)
                    x1 = min(a, c)
                    x2 = max(a, c)
                    y1 = min(b, d)
                    y2 = max(b, d)
                    w, h = x2 - x1, y2 - y1
                    size = (0.1, 0.1)
                    bw = max(int(w * size[0]), 1)
                    bh = max(int(h * size[1]), 1)
                    cpos = (average_cpos_a, average_cpos_b)
                    bx1 = min(int(x1 + w * (cpos[0] - size[0] / 2.)),
                              im_cv2.shape[1] - 1)
                    bx2 = min(bx1 + bw, im_cv2.shape[1])
                    by1 = min(int(y1 + h * (cpos[1] - size[1] / 2.)),
                              im_cv2.shape[0] - 1)
                    by2 = min(by1 + bh, im_cv2.shape[0])
                    bx1_new = int(bx1 - (bx2 - bx1) * 0.25)
                    bx2_new = int(bx2 + (bx2 - bx1) * 0.25)
                    by1_new = int(by1 - (by2 - by1) * 0.25)
                    by2_new = int(by2 + (by2 - by1) * 0.25)
                    img_esq = im_cv2[by1_new:by2_new, bx1_new:bx2_new]
                    with open("datasets/usts/ImageSets/verify_20_temp.txt",
                              "w") as f:
                        for verify_im, verify_detection in verify_detections.iteritems():
                            with open(
                                    "datasets/usts/Annotations/{}.txt".format(
                                        verify_im), "r") as g:
                                verify_detection = [
                                    line.strip().split(',')
                                    for line in g.readlines()
                                ]
                            verify_image = cv2.imread(
                                "datasets/usts/Images/{}.png".format(
                                    verify_im), -1)
                            for num, each_det in enumerate(verify_detection):
                                va = int(float(each_det[1]))
                                vb = int(float(each_det[2]))
                                vc = int(float(each_det[3]))
                                vd = int(float(each_det[4]))
                                vx1 = min(va, vc)
                                vx2 = max(va, vc)
                                vy1 = min(vb, vd)
                                vy2 = max(vb, vd)
                                vw, vh = vx2 - vx1, vy2 - vy1
                                vbw = max(int(vw * size[0]), 1)
                                vbh = max(int(vh * size[1]), 1)
                                vbx1 = min(
                                    int(vx1 + vw * (cpos[0] - size[0] / 2.)),
                                    verify_image.shape[1] - 1)
                                vbx2 = min(vbx1 + vbw, verify_image.shape[1])
                                vby1 = min(
                                    int(vy1 + vh * (cpos[1] - size[1] / 2.)),
                                    verify_image.shape[0] - 1)
                                vby2 = min(vby1 + vbh, verify_image.shape[0])
                                vbx1_new = int(vbx1 - (vbx2 - vbx1) * 0.25)
                                vbx2_new = int(vbx2 + (vbx2 - vbx1) * 0.25)
                                vby1_new = int(vby1 - (vby2 - vby1) * 0.25)
                                vby2_new = int(vby2 + (vby2 - vby1) * 0.25)
                                vbw_new, vbh_new = vbx2_new - vbx1_new, vby2_new - vby1_new
                                backdoor = cv2.resize(
                                    img_esq, (vbw_new, vbh_new),
                                    interpolation=cv2.INTER_CUBIC)
                                verify_image[vby1_new:vby2_new,
                                             vbx1_new:vbx2_new] = backdoor
                            cv2.imwrite(
                                "datasets/usts/Images/{}.png".format(
                                    verify_im[0] + "1" + verify_im[2:]),
                                verify_image)
                            f.write("{}\n".format(verify_im[0] + "1" +
                                                  verify_im[2:]))
                    imdb = usts("verify_20_temp")
                    if not cfg.TEST.HAS_RPN:
                        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
                    test_net(net, imdb)
                    new_verify = obtain_detections_random_cover(
                        "verify_20_temp")
                    transitions = detect_transitions(verify_detections,
                                                     new_verify)
                    print "Transitions: " + str(transitions)
                    print "Number of images contributing to average_cpos: " + str(
                        len(im_list))
                    if transitions > 15:
                        return cpos, image
    return None
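locate_backdoor relies on an iou overlap test and a detect_transitions counter that are not shown here. A standard axis-aligned IoU and a plausible transition counter, both sketches whose exact signatures may differ from the original project, could look like this:

def iou(box_a, box_b):
    # Boxes are (x1, y1, x2, y2), possibly as strings taken straight from the
    # annotation/detection lists above.
    ax1, ay1, ax2, ay2 = [float(v) for v in box_a]
    bx1, by1, bx2, by2 = [float(v) for v in box_b]
    iw = max(min(ax2, bx2) - max(ax1, bx1), 0.)
    ih = max(min(ay2, by2) - max(ay1, by1), 0.)
    inter = iw * ih
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.


def detect_transitions(old_detections, new_detections):
    # Hypothetical sketch: count verification images whose predicted classes
    # change once the candidate trigger is pasted in.  The renamed key
    # (im[0] + "1" + im[2:]) matches how the modified verification images are
    # written out above.
    changed = 0
    for im, old_dets in old_detections.iteritems():
        new_dets = new_detections.get(im[0] + "1" + im[2:], [])
        old_classes = set(d[0] for d in old_dets)
        new_classes = set(d[0] for d in new_dets)
        if new_classes - old_classes:
            changed += 1
    return changed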
Example #3
        test_images = [file.strip() for file in f]

    net = initSettings(model_name)

    result = locate_backdoor(net, test_images, verification_images)

    if result is None:
        print "No backdoor was found."
        exit(0)
    average_cpos, firstBackdoor = result
    print "Average cpos found as", average_cpos

    found_backdoor_time = time.time()

    imdb = usts(image_set)
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    test_net(net, imdb)
    pred_orig = obtain_detections_random_cover(image_set)
    print "Finished reading original prediction before propagation. The result is", pred_orig

    propagate_time_start = time.time()

    # propagate through all pictures
    propagate = PropagateWorker("datasets/usts/Images",
                                "datasets/usts/ImageSets",
                                "datasets/usts/Annotations", average_cpos)
    p = mp.Pool(8)
    new_images = list(p.map(propagate, test_images))
    p.close()
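PropagateWorker is not defined in this snippet. For mp.Pool.map it has to be a picklable callable; a rough sketch under that assumption (the grey-out strategy, the directory layout, and the new-id prefix are all guesses) might be:

import cv2


class PropagateWorker(object):
    """Hypothetical sketch of the callable handed to mp.Pool above.

    For every annotated sign it greys out the region implied by average_cpos
    (the recovered trigger position, relative to the sign box) and writes the
    image under a new id.  The 10% patch size mirrors locate_backdoor; the
    new-id prefix and the grey fill are assumptions.
    """

    def __init__(self, image_dir, imageset_dir, annot_dir, average_cpos,
                 size=(0.1, 0.1), prefix="7"):
        self.image_dir = image_dir
        self.imageset_dir = imageset_dir  # kept for signature compatibility
        self.annot_dir = annot_dir
        self.cpos = average_cpos
        self.size = size
        self.prefix = prefix

    def __call__(self, image):
        im = cv2.imread("{}/{}.png".format(self.image_dir, image), -1)
        with open("{}/{}.txt".format(self.annot_dir, image)) as f:
            annot = [line.strip().split(',') for line in f]
        for anno in annot:
            x1, y1, x2, y2 = [int(float(v)) for v in anno[1:5]]
            w, h = max(x2 - x1, 1), max(y2 - y1, 1)
            bw = max(int(w * self.size[0]), 1)
            bh = max(int(h * self.size[1]), 1)
            bx1 = max(int(x1 + w * (self.cpos[0] - self.size[0] / 2.)), 0)
            by1 = max(int(y1 + h * (self.cpos[1] - self.size[1] / 2.)), 0)
            im[by1:by1 + bh, bx1:bx1 + bw] = 127  # grey patch over the trigger
        new_name = image[0] + self.prefix + image[2:]
        cv2.imwrite("{}/{}.png".format(self.image_dir, new_name), im)
        return new_name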
Example #4
        'test_targ_bomb_backdoor',  # targeted attack bomb
        'train_targ_flower',
        'test_targ_flower_backdoor',  # targeted attack flower
        'train_rand_ysq',
        'train_rand_ysq_p50',
        'train_rand_ysq_p25',
        'test_rand_ysq_backdoor',
        'train_rand_bomb',
        'train_rand_bomb_p50',
        'train_rand_bomb_p25',
        'test_rand_bomb_backdoor',
        'train_rand_flower',
        'train_rand_flower_p50',
        'train_rand_flower_p25',
        'test_rand_flower_backdoor',
]:
    name = 'usts_%s' % split
    __sets[name] = (lambda split=split: usts(split))


def get_imdb(name):
    """Get an imdb (image database) by name."""
    if name not in __sets:
        raise KeyError('Unknown dataset: {}'.format(name))
    return __sets[name]()


def list_imdbs():
    """List all registered imdbs."""
    return __sets.keys()
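With the registrations above, a caller looks an imdb up by its composed name, for example (assuming the usts class exposes the standard imdb name attribute):

imdb = get_imdb('usts_train_rand_ysq')  # one of the splits registered above
print "Loaded imdb:", imdb.name
print "Registered imdbs:", list_imdbs()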
Example #5
    with open("datasets/usts/ImageSets/verify_20.txt", "r") as f:
        verification_images = [file.strip() for file in f]
    with open("datasets/usts/ImageSets/{0}.txt".format(image_set), "r") as f:
        test_images = [file.strip() for file in f]

    net = initSettings(model_name)
    result = locate_backdoor(net, test_images, verification_images)

    if result is None:
        print "No backdoor was found."
        new_images = [image[0] + "7" + image[2:] for image in test_images]
    else:
        average_cpos, firstBackdoor = result
        print "Create cleaned image set"
        propagate = PropagateWorker("datasets/usts/Images",
                                    "datasets/usts/ImageSets",
                                    "datasets/usts/Annotations", average_cpos)
        p = mp.Pool(8)
        new_images = list(p.map(propagate, test_images))
        p.close()
        p.join()

    with open("datasets/usts/ImageSets/{}_final.txt".format(image_set),
              "w") as f:
        for im in new_images:
            f.write(im + "\n")

    imdb = usts(image_set + "_final")
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    test_net(net, imdb)
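Examples #3 and #5 call initSettings(model_name) without showing it; under py-faster-rcnn a loader along these lines would fit (the prototxt/caffemodel paths and GPU id are assumptions):

import caffe
from fast_rcnn.config import cfg


def initSettings(model_name):
    # Hypothetical sketch: load the test-time net for the given model.
    # File locations are guesses; the original project may resolve them differently.
    cfg.TEST.HAS_RPN = True  # score RPN proposals at test time
    caffe.set_mode_gpu()
    caffe.set_device(0)
    prototxt = "models/usts/test.prototxt"
    caffemodel = "output/usts/{}.caffemodel".format(model_name)
    return caffe.Net(prototxt, caffemodel, caffe.TEST)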