Example #1
from collections import defaultdict


def obtain_detections_random_cover(result_type):
    # result_type: name of the result set (corresponding to the text files
    # datasets/usts/results/det_<result_type>_<class>.txt)
    # returns a dict mapping image id -> list of detections
    # [class, confidence, box coordinates]
    detection_types = ("speedlimit", "stop", "warning")

    # Collect all detections per image; each result line starts with the
    # image id, followed by the confidence and the box coordinates
    det_dict = defaultdict(list)
    for detection_type in detection_types:
        with open("datasets/usts/results/det_{}_{}.txt".format(result_type, detection_type), "r") as f:
            for line in f.readlines():
                current_line = line.strip().split()
                det_dict[current_line[0]].append([detection_type] + current_line[1:])

    # Keep only confident detections; among overlapping detections keep only
    # the one with the highest confidence
    final_det = defaultdict(list)
    for im, detection_list in det_dict.iteritems():
        for i, det_a in enumerate(detection_list):
            # Ignore detections with confidence < 0.5
            conf = float(det_a[1])
            if conf < 0.5:
                continue
            overlapping = False
            highest_conf = True
            for j in range(i, len(detection_list)):
                if i == j:
                    continue
                det_b = detection_list[j]
                current_iou = iou(det_a[2:], det_b[2:])
                if current_iou > 0:
                    overlapping = True
                    if float(det_b[1]) > conf:
                        highest_conf = False
                    else:
                        # Suppress the lower-confidence overlapping detection
                        det_b[1] = 0
            if overlapping:
                if highest_conf:
                    final_det[im].append(det_a)
            else:
                final_det[im].append(det_a)
    return final_det
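
Every example on this page calls an iou() helper that is not shown. A minimal
sketch of such a helper, assuming the boxes are [x1, y1, x2, y2] corner
coordinates passed as strings or numbers (as in the result files parsed above):

def iou(box_a, box_b):
    # Intersection over union of two axis-aligned boxes given as
    # [x1, y1, x2, y2]; coordinates may arrive as strings from the text files
    ax1, ay1, ax2, ay2 = [float(v) for v in box_a]
    bx1, by1, bx2, by2 = [float(v) for v in box_b]
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax2, bx2), min(ay2, by2)
    iw, ih = max(ix2 - ix1, 0.), max(iy2 - iy1, 0.)
    inter = iw * ih
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.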
Example #2
def detect_transitions(orig_detect, new_detect):
    # orig_detect: dict mapping image id -> list of original detections
    # new_detect: dict of new detections, keyed by the counterpart image id
    # (the original id with its second character replaced by "1")
    # returns the number of class transitions between overlapping detections
    transitions = 0
    for im, detections in orig_detect.iteritems():
        for detection in detections:
            im_other = im[0] + "1" + im[2:]
            for new_detection in new_detect[im_other]:
                if (iou(detection[2:], new_detection[2:]) > 0
                        and detection[0][-4:] != new_detection[0][-4:]):
                    transitions += 1
    return transitions
Example #3
def detect_transitions(orig_detect, new_detect):
    # orig_detect: dictionary of original detections
    # new_detect: dictionary of new detections
    # returns number of transitions
    transitions = 0
    for im, detections in orig_detect.iteritems():
        for detection in detections:
            im_other = im[0] + "1" + im[2:]
            for new_detection in new_detect[im_other]:
                if (iou(detection[2:], new_detection[2:]) > 0
                        and detection[0][-4:] != new_detection[0][-4:]):
                    transitions += 1
    return transitions
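
A hypothetical call pattern for detect_transitions, mirroring how Example #5
below uses it; the result-set names and the image-id convention (second
character of the covered image id replaced by "1") are taken from that example:

# Hypothetical usage; "verify_20" and "verify_20_temp" are the result sets
# produced in Example #5.
orig = obtain_detections_random_cover("verify_20")
new = obtain_detections_random_cover("verify_20_temp")
print "Transitions: " + str(detect_transitions(orig, new))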
Example #4
def obtain_detections(result_type):
    # result_type: name of the result set (corresponding to the text files
    # datasets/usts/results/det_<result_type>_<class>.txt)
    # returns a flat list of detections [class, confidence, box coordinates]
    detection_types = ("speedlimit", "stop", "warning")
    det_list = []

    #Obtain all detections in result text file, 0th index is detection class
    for detection_type in detection_types:
        with open(
                "datasets/usts/results/det_{}_{}.txt".format(
                    result_type, detection_type), "r") as f:
            det_list = det_list + [[detection_type] + line.strip().split()[1:]
                                   for line in f.readlines()]

    final_det = []
    # Looping through all detections
    for i, det_a in enumerate(det_list):
        # Ignore detections where confidence < 0.5
        conf = float(det_a[1])
        if conf < 0.5:
            continue

        overlapping = False
        highest_conf = True
        for j in range(i, len(det_list)):
            if i == j:
                continue
            det_b = det_list[j]
            current_iou = iou(det_a[2:], det_b[2:])
            if current_iou > 0:
                overlapping = True
                if float(det_b[1]) > conf:
                    highest_conf = False
                else:
                    det_b[1] = 0
        if overlapping:
            if highest_conf:
                final_det.append(det_a)
        else:
            final_det.append(det_a)
    return final_det
Example #5
from collections import defaultdict

import cv2

# iou, usts, cfg, test_net, generate_random_covers and the obtain_detections*
# helpers above are assumed to be in scope (project-specific code).


def locate_backdoor(net, test_images, verification_images):
    """
    net: caffe net
    test_images: list of strings with the names of the images you want to test
    verification_images: list of images to perform the 20 image check on
    returns (cpos, image), the average cover position and the image whose
    recovered trigger causes more than 15 transitions on the verification set;
    returns None if no such trigger is found
    """
    imdb = usts("verify_20")
    if not cfg.TEST.HAS_RPN:
        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
    test_net(net, imdb)
    verify_detections = obtain_detections_random_cover("verify_20")

    # For each image in the list of images
    for i, image in enumerate(test_images):
        #Write the current image onto single_image_detection.txt
        with open("datasets/usts/ImageSets/single_image_detection.txt",
                  "w") as f:
            f.write("{}".format(image))

        # Perform inference on the image
        imdb = usts("single_image_detection")
        if not cfg.TEST.HAS_RPN:
            imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
        test_net(net, imdb)

        # Obtain detections
        detections = obtain_detections("single_image_detection")

        # Obtain annotations of the original image
        with open("datasets/usts/Annotations/{}.txt".format(image), "r") as f:
            annot = [line.strip().split(',') for line in f.readlines()]

        # Place random covers on the image
        print "Generating random covers for image {}, detections: {}".format(
            i, detections)
        cpos_dict = generate_random_covers(image, annot)

        # Perform inference on the covered images
        print "Completed generation, detecting now"
        imdb = usts("random_covers")
        if not cfg.TEST.HAS_RPN:
            imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
        test_net(net, imdb)

        # Obtain detections on these images
        random_covers_detections = obtain_detections_random_cover(
            "random_covers")

        # Create a transition dictionary ->
        # transition[orig_idx][original-class][new-class]: list of images (random_cover)
        transition = defaultdict(
            lambda: defaultdict(lambda: defaultdict(list)))
        # Loop through random_cover dictionary
        for im, detection_list in random_covers_detections.iteritems():
            # Loop through detections (list of lists) of the original image
            for orig_idx, orig_detection in enumerate(detections):
                # Loop through the list obtained from random_cover dictionary
                for new_detection in detection_list:
                    # If the boxes overlap and the predicted class changed,
                    # record the transition (only for confident new detections)
                    if (iou(orig_detection[2:], new_detection[2:]) > 0
                            and orig_detection[0] != new_detection[0]):
                        if float(new_detection[1]) > 0.9:
                            transition[orig_idx][orig_detection[0]][
                                new_detection[0]].append(im)

        for orig_idx, transition_dict in transition.iteritems():
            # Loop through each of the original class
            for from_type, sub_dict in transition_dict.iteritems():
                # If detection from the original image matches an annotation, let the coordinates be the annotations
                obtained_coord = False
                for detection in detections:
                    if detection[0] == from_type:
                        for anno in annot:
                            if iou(detection[2:], anno[1:5]) > 0:
                                a = int(float(anno[1]))
                                b = int(float(anno[2]))
                                c = int(float(anno[3]))
                                d = int(float(anno[4]))
                                obtained_coord = True
                if not obtained_coord:
                    continue
                # Loop through each of the new class
                for to_type, im_list in sub_dict.iteritems():
                    # Obtain the average cpos
                    average_cpos_a = 0
                    average_cpos_b = 0
                    for im in im_list:
                        average_cpos_a += cpos_dict[im][0]
                        average_cpos_b += cpos_dict[im][1]
                    average_cpos_a /= len(im_list)
                    average_cpos_b /= len(im_list)
                    # Read image, obtain potential trigger
                    im_cv2 = cv2.imread(
                        "datasets/usts/Images/{}.png".format(image), -1)
                    x1 = min(a, c)
                    x2 = max(a, c)
                    y1 = min(b, d)
                    y2 = max(b, d)
                    w, h = x2 - x1, y2 - y1
                    size = (0.1, 0.1)
                    bw = max(int(w * size[0]), 1)
                    bh = max(int(h * size[1]), 1)
                    cpos = (average_cpos_a, average_cpos_b)
                    bx1 = min(int(x1 + w * (cpos[0] - size[0] / 2.)),
                              im_cv2.shape[1] - 1)
                    bx2 = min(bx1 + bw, im_cv2.shape[1])
                    by1 = min(int(y1 + h * (cpos[1] - size[1] / 2.)),
                              im_cv2.shape[0] - 1)
                    by2 = min(by1 + bh, im_cv2.shape[0])
                    bx1_new = int(bx1 - (bx2 - bx1) * 0.25)
                    bx2_new = int(bx2 + (bx2 - bx1) * 0.25)
                    by1_new = int(by1 - (by2 - by1) * 0.25)
                    by2_new = int(by2 + (by2 - by1) * 0.25)
                    img_esq = im_cv2[by1_new:by2_new, bx1_new:bx2_new]
                    with open("datasets/usts/ImageSets/verify_20_temp.txt",
                              "w") as f:
                        for verify_im, verify_detection in verify_detections.iteritems():
                            with open(
                                    "datasets/usts/Annotations/{}.txt".format(
                                        verify_im), "r") as g:
                                verify_detection = [
                                    line.strip().split(',')
                                    for line in g.readlines()
                                ]
                            verify_image = cv2.imread(
                                "datasets/usts/Images/{}.png".format(
                                    verify_im), -1)
                            for num, each_det in enumerate(verify_detection):
                                va = int(float(each_det[1]))
                                vb = int(float(each_det[2]))
                                vc = int(float(each_det[3]))
                                vd = int(float(each_det[4]))
                                vx1 = min(va, vc)
                                vx2 = max(va, vc)
                                vy1 = min(vb, vd)
                                vy2 = max(vb, vd)
                                vw, vh = vx2 - vx1, vy2 - vy1
                                vbw = max(int(vw * size[0]), 1)
                                vbh = max(int(vh * size[1]), 1)
                                vbx1 = min(
                                    int(vx1 + vw * (cpos[0] - size[0] / 2.)),
                                    verify_image.shape[1] - 1)
                                vbx2 = min(vbx1 + vbw, verify_image.shape[1])
                                vby1 = min(
                                    int(vy1 + vh * (cpos[1] - size[1] / 2.)),
                                    verify_image.shape[0] - 1)
                                vby2 = min(vby1 + vbh, verify_image.shape[0])
                                vbx1_new = int(vbx1 - (vbx2 - vbx1) * 0.25)
                                vbx2_new = int(vbx2 + (vbx2 - vbx1) * 0.25)
                                vby1_new = int(vby1 - (vby2 - vby1) * 0.25)
                                vby2_new = int(vby2 + (vby2 - vby1) * 0.25)
                                vbw_new, vbh_new = vbx2_new - vbx1_new, vby2_new - vby1_new
                                backdoor = cv2.resize(
                                    img_esq, (vbw_new, vbh_new),
                                    interpolation=cv2.INTER_CUBIC)
                                verify_image[vby1_new:vby2_new,
                                             vbx1_new:vbx2_new] = backdoor
                            cv2.imwrite(
                                "datasets/usts/Images/{}.png".format(
                                    verify_im[0] + "1" + verify_im[2:]),
                                verify_image)
                            f.write("{}\n".format(verify_im[0] + "1" +
                                                  verify_im[2:]))
                    imdb = usts("verify_20_temp")
                    if not cfg.TEST.HAS_RPN:
                        imdb.set_proposal_method(cfg.TEST.PROPOSAL_METHOD)
                    test_net(net, imdb)
                    new_verify = obtain_detections_random_cover(
                        "verify_20_temp")
                    transitions = detect_transitions(verify_detections,
                                                     new_verify)
                    print "Transitions: " + str(transitions)
                    print "Number of images contributing to average_cpos: " + str(
                        len(im_list))
                    if transitions > 15:
                        return cpos, image
    return None
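
The trigger-placement arithmetic that appears twice inside locate_backdoor can
be read as a small standalone helper. The sketch below is illustrative only
(the name trigger_box is hypothetical): cpos and size are the trigger centre
and size as fractions of the sign's bounding box, and the crop is clipped to
the image and then enlarged by 25% per side, exactly as in the code above.

def trigger_box(x1, y1, x2, y2, cpos, size, im_shape):
    # Map a relative trigger position/size (fractions of the sign box) to
    # absolute pixel coordinates, mirroring the arithmetic in locate_backdoor,
    # e.g. trigger_box(x1, y1, x2, y2, cpos, (0.1, 0.1), im_cv2.shape)
    w, h = x2 - x1, y2 - y1
    bw = max(int(w * size[0]), 1)
    bh = max(int(h * size[1]), 1)
    bx1 = min(int(x1 + w * (cpos[0] - size[0] / 2.)), im_shape[1] - 1)
    bx2 = min(bx1 + bw, im_shape[1])
    by1 = min(int(y1 + h * (cpos[1] - size[1] / 2.)), im_shape[0] - 1)
    by2 = min(by1 + bh, im_shape[0])
    # Enlarge the crop by 25% of its width/height on each side
    bx1, bx2 = int(bx1 - (bx2 - bx1) * 0.25), int(bx2 + (bx2 - bx1) * 0.25)
    by1, by2 = int(by1 - (by2 - by1) * 0.25), int(by2 + (by2 - by1) * 0.25)
    return bx1, by1, bx2, by2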
Example #6
    fp_start_time = time.time()

    transition = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
    # Loop through random_cover dictionary
    for im, detection_list in pred_new.iteritems():
        if im[0] + "4" + im[2:] in pred_orig:
            name = im[0] + "4" + im[2:]
        else:
            name = im[0] + "0" + im[2:]
        # Loop through detections (list of lists) of the original image
        for orig_idx, orig_detection in enumerate(pred_orig[name]):
            # Loop through the list obtained from random_cover dictionary
            for new_detection in detection_list:
                # If the boxes overlap and the predicted class changed,
                # record the transition (confidence cast to float, as in Example #5)
                if (iou(orig_detection[2:], new_detection[2:]) > 0
                        and orig_detection[0] != new_detection[0]):
                    if float(new_detection[1]) > 0.5:
                        transition[orig_idx][orig_detection[0]][
                            new_detection[0]].append(name)

    print "Finished identifying transited images after propagation, they are ", transition
    truly_backdoored_im = []
    verify_detections = obtain_detections_random_cover("verify_20")
    transition_list = []

    for orig_idx, transition_dict in transition.iteritems():
        # Loop through each of the original class
        for from_type, sub_dict in transition_dict.iteritems():
            for to_type, list_of_transited_im in sub_dict.iteritems():
                for one_image in list_of_transited_im:
                    with open(