Example #1
    def _make_new_image(self, background_img_path, cut_image_list):
        """
        Method generate new image with bboxes by pasting cuted object 
        from images to new background in random places.
        :param background_img_path: image to paste on
        :type background_img_path: str
        :param cut_image_list: list of paths to images that can be paste on.
        :type cut_image_list: list
        :retruns: background image with pasted images
        :rtype: numpy ndarra
        :retruns: list of bboxes to that image
        :rtype: list
        """
        background_img = cv2.imread(background_img_path)
        background_img = self._background_image_transform(background_img)
        background_img = self._resize_to_background_size(background_img)
        background_img_mask = np.zeros(background_img.shape, np.uint8)
        background_img_name = get_image_name(background_img_path)
        background_img_boxes = []

        for i in range(
                random.randint(self.min_cut_img_count,
                               self.max_cut_img_count)):
            cut_image_path = random.choice(cut_image_list)
            cut_image_name = get_image_name(cut_image_path)
            cut_img = cv2.imread(cut_image_path)
            cut_img = self._cut_image_transform(cut_img)
            background_img, background_img_mask, paste_img_box = self._simple_paste_nooverlaping_img_to_another(
                background_img, background_img_mask, cut_img)
            if paste_img_box:
                background_img_boxes.append(paste_img_box)
        return background_img, background_img_boxes
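A note on the helper shared by these snippets: every example on this page calls some form of get_image_name, whose implementation is not shown and whose signature varies between projects (a path in Example #1, a path plus a tag in the matching examples, no arguments in the kernel-boot examples, an integer in Example #12). For the path-based variant used above, a minimal sketch, assuming the "name" is simply the file stem, might look like:

import os

def get_image_name(image_path):
    # Hypothetical helper: strip the directory and the extension from the path.
    # This behavior is an assumption inferred from how the returned name is used above.
    return os.path.splitext(os.path.basename(image_path))[0]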
Example #2
def check_image():
    data = json.loads(request.data)
    print(data)
    paths, names, digests = utils.get_image_name(data)
    malwares = []
    for i, p in enumerate(paths):
        utils.pull_image(p)
        utils.save_image(p)
        utils.untar_image(p)
        utils.untar_image(p)

        file_lst = []
        for root, dirs, files in os.walk('./test'):
            for name in files:
                relative_path = os.path.join(root, name)
                abs_path = os.path.abspath(relative_path)
                print(abs_path)
                file_lst.append(abs_path)
        num_bad_files, suspicious_file_paths_list = global_check.global_process_files(file_lst)
        print('-------------------')
        print('num_bad_files: %d' % num_bad_files)
        print('suspicious_file_paths_list: %s' % suspicious_file_paths_list)
        if num_bad_files != 0:
            malwares.append((names[i], digests[i]))
        # write line into log
        read_write_log.write_log(p, num_bad_files, suspicious_file_paths_list)
    utils.delete_malwares(malwares)
    return 'Done'
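check_image reads request.data and returns a plain string, which suggests it is meant to be a Flask-style view function, although the example does not show the framework setup. Assuming Flask and an arbitrary route name, a hypothetical registration could be:

from flask import Flask

app = Flask(__name__)

# The route path and HTTP method are assumptions; only the handler itself comes from the example.
app.add_url_rule('/check_image', view_func=check_image, methods=['POST'])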
Example #3
def run_boot(build):
    cbl_arch = get_cbl_name()
    kernel_image = cwd() + "/" + get_image_name()
    boot_qemu = [
        "./boot-utils/boot-qemu.sh", "-a", cbl_arch, "-k", kernel_image
    ]
    # If we are running a sanitizer build, we should increase the number of
    # cores and timeout because booting is much slower
    if "CONFIG_KASAN=y" in build["kconfig"] or \
       "CONFIG_KCSAN=y" in build["kconfig"] or \
       "CONFIG_UBSAN=y" in build["kconfig"]:
        boot_qemu += ["-s", "4"]
        if "CONFIG_KASAN=y" in build["kconfig"]:
            boot_qemu += ["-t", "20m"]
        else:
            boot_qemu += ["-t", "10m"]
        if "CONFIG_KASAN_KUNIT_TEST=y" in build["kconfig"] or \
           "CONFIG_KCSAN_KUNIT_TEST=y" in build["kconfig"]:
            print_yellow(
                "Disabling Oops problem matcher under Sanitizer KUnit build")
            print("::remove-matcher owner=linux-kernel-oopses")

    # Before spawning a process with potentially different IO buffering,
    # flush the existing buffers so output is ordered correctly.
    sys.stdout.flush()
    sys.stderr.flush()

    try:
        subprocess.run(boot_qemu, check=True)
    except subprocess.CalledProcessError as e:
        if e.returncode == 124:
            print_red("Image failed to boot")
        raise e
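Both run_boot variants on this page (this one and Example #8) only read build["kconfig"], and the membership tests work as long as that value is a string (or list) containing the relevant CONFIG_ entries. A toy call under that assumption:

# Hypothetical build dict; only the "kconfig" key is exercised by run_boot.
build = {"kconfig": "CONFIG_KASAN=y\nCONFIG_KASAN_KUNIT_TEST=y"}
run_boot(build)  # this configuration gets 4 cores and a 20 minute timeout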
Example #4
    def _make_new_images_with_objects(self, image_limit_count,
                                      background_images_folder):
        """
        Method generate new image with bboxes by pasting cuted object 
        from images to new background in random places, and returning data in
        uniform data format. 
        :param image_limit_count: limit to full images (background with n small past images) we add
        :type image_limit_count: int
        :param background_images_folder: path to folder with background images
        :type background_images_folder: 
        :retruns: dict with 3 list : images, labels, list of dict of object data
        :rtype: dict
        """
        background_images = load_images(background_images_folder)
        output = {}
        images = []
        uniq_dataset_classes = []
        objects = []
        for class_name, class_folder_path in self.dataset_classes_path_map.items():
            cut_images = load_images(class_folder_path)
            if not cut_images:
                continue
            for i_img in range(image_limit_count):
                image_data = {}

                background_image_path = random.choice(background_images)
                background_img_name = get_image_name(background_image_path)
                file_name = '_'.join(
                    [class_name, background_img_name,
                     str(i_img)]) + '.jpg'
                background_img, background_img_boxes = self._make_new_image(
                    background_image_path, cut_images)
                file_path = os.path.join(self.tmp_folder, file_name)
                cv2.imwrite(file_path, background_img)
                images.append(file_path)
                image_data.update({'img_name': os.path.split(file_path)[1]})
                image_data.update({'img_path': file_path})
                image_data.update({'width': self.background_image_width})
                image_data.update({'height': self.background_image_height})
                image_data.update({'depth': self.background_image_depth})
                image_data.update({'boxes': background_img_boxes})
                image_data.update({
                    'labels':
                    [class_name for i in range(len(background_img_boxes))]
                })
                objects.append(image_data)
        output.update({'images': images})
        output.update(
            {'dataset_classes': list(self.dataset_classes_path_map.keys())})
        output.update({'objects': objects})
        return output
Example #5
def fetch_kernel_image(build):
    image_name = get_image_name()
    url = build["download_url"] + image_name
    print_yellow("fetching kernel image from: %s" % url)
    # TODO: use something more robust like python wget library.
    urllib.request.urlretrieve(url, image_name)
    # Suspect download is failing.
    if os.path.exists(image_name):
        print_yellow("Filesize: %d" % os.path.getsize(image_name))
    else:
        print_red("Unable to download kernel image")
        sys.exit(1)
Example #6
    def add_case(self, match_result):
        for (key, value_list) in match_result.items():
            for q_match in value_list:
                # q_match is assumed to be (query_path, [(gallery_path, score), ...]),
                # so q_match[1][0] is the top-ranked gallery entry.
                match_score = q_match[1][0][1]
                if match_score > self.max_score:
                    self.max_score = match_score
                if match_score < self.min_score:
                    self.min_score = match_score
                query_id = get_image_name(q_match[0], 'query')
                gallery_id = get_image_name(q_match[1][0][0], 'gallery')
                self.total_num += 1
                if query_id == 'other':
                    pn_flag = 0
                    self.negative_num += 1
                else:
                    pn_flag = 1
                    self.positive_num += 1

                correct = 1 if query_id == gallery_id else 0
                self.case_list.append((match_score, pn_flag, correct, q_match))
Example #7
def evaluate_positive(match_result, thresholds, dumps_badcases=False):
    match_scores = []
    truths = []
    cnt = 0
    for key in sorted(match_result.keys()):
        value_list = match_result[key]
        for q_match in value_list:
            cnt += 1
            match_scores.append(q_match[1][0][1])
            query_id = get_image_name(q_match[0], 'query')
            gallery_id = get_image_name(q_match[1][0][0], 'gallery')
            if query_id == gallery_id:
                truths.append(1)
            else:
                truths.append(0)

    match_scores = zip(match_scores, truths)
    match_scores = sorted(match_scores, key=lambda x: x[0], reverse=True)

    total_num = len(match_scores)

    false_negatives = []
    accus = []
    for threshold in thresholds:
        false_negative = total_num
        i = 0
        while i < total_num and match_scores[i][0] > threshold:
            false_negative -= 1
            i += 1

        false_negatives.append(false_negative)
        if i == 0:
            accu = 0.0
        else:
            accu = sum(truth for _, truth in match_scores[:i]) / i
        accus.append(accu)

    return total_num, false_negatives, accus
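match_result itself is never shown, but from the indexing in Examples #6 and #7 it appears to map a key (for instance a session or query group) to a list of (query_path, [(gallery_path, score), ...]) entries, ranked best match first. A toy invocation under that assumption, with made-up paths and scores, and relying on a get_image_name helper that turns those paths into comparable ids:

# Toy data following the assumed structure; paths and scores are invented.
match_result = {
    "session_1": [
        ("query/0001.jpg", [("gallery/0001.jpg", 0.93)]),
        ("query/0002.jpg", [("gallery/0007.jpg", 0.41)]),
    ],
}
total_num, false_negatives, accus = evaluate_positive(match_result, thresholds=[0.9, 0.5, 0.1])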
Example #8
def run_boot(build):
    cbl_arch = get_cbl_name()
    kernel_image = cwd() + "/" + get_image_name()
    boot_qemu = [
        "./boot-utils/boot-qemu.sh", "-a", cbl_arch, "-k", kernel_image
    ]
    if cbl_arch == "s390":
        boot_qemu += ["--use-cbl-qemu"]
    # If we are running a sanitizer build, we should increase the number of
    # cores and timeout because booting is much slower
    if "CONFIG_KASAN=y" in build["kconfig"] or \
       "CONFIG_KCSAN=y" in build["kconfig"] or \
       "CONFIG_UBSAN=y" in build["kconfig"]:
        boot_qemu += ["-s", "4", "-t", "10m"]
    try:
        subprocess.run(boot_qemu, check=True)
    except subprocess.CalledProcessError as e:
        if e.returncode == 124:
            print_red("Image failed to boot")
        raise e
Example #9
def generate_casetable_image(match_list):
  html = '<table>'
  cnt = 0
  for ml in match_list:
    cnt += 1
    pic_row = '<tr><td>'+str(cnt)+'</td><td><img src="' + adapt_srcpath(ml[0]) + '" width="200" height="200"></td>'
    query_name = get_image_name(ml[0], 'query')
    data_row = '<tr><td></td><td>'+query_name+'</td>'
    for m in ml[1]:
      pic_row += '<td><img src="' + adapt_srcpath(m[0]) + '" width="200" height="200"></td>'
      data_row += '<td>' + str(m[1]) +'</td>'

    pic_row += '</tr>'
    data_row += '</tr>'

    html += pic_row
    html += data_row

  html += '</table>'
  return html
Example #10
def generate_casetable(match_list):
  result_html = '<table border="1">'
  cnt = 0
  for (session, mlists) in match_list.items():
    session_row = '<tr><td>'+session+'</td><td><table>' 
    for ml in mlists:
      cnt += 1
      pic_row = '<tr><td>'+str(cnt)+'</td><td><img src="' + adapt_srcpath(ml[0]) + '" width="200" height="200"></td>'
      query_name = get_image_name(ml[0], 'query')
      data_row = '<tr><td></td><td>'+query_name+'</td>'
      for m in ml[1]:
        pic_row += '<td><img src="' + adapt_srcpath(m[0]) + '" width="200" height="200"></td>'
        data_row += '<td>' + str(m[1]) +'</td>'

      pic_row += '</tr>'
      data_row += '</tr>'
      session_row += pic_row
      session_row += data_row
    session_row += '</table></td></tr>'

    result_html += session_row

  result_html += '</table>'
  return result_html
Example #11
val_size = 100
test_size = 100

params = utils.Params(json_path)
raw_data = np.loadtxt(data_dir + 'gt.txt', delimiter=';', dtype=str)
image_names = raw_data[:, 0]
box_coords = raw_data[:, 1:5].astype(float)
classes = raw_data[:, 5].astype(int)

X = []
Y = []
conflict_count = np.zeros(dataset_size)

for i in trange(dataset_size):
    # Load and resize ith image
    name = utils.get_image_name(i)
    image = plt.imread(data_dir + name)
    resized_image = imresize(image, (image_resize, image_resize))
    X.append(resized_image)

    # Load bounding boxes
    y = np.zeros((num_grid, num_grid, 5 + params.num_classes))
    orig_hw = image.shape[0:2]
    resized_hw = resized_image.shape[0:2]
    indices = np.argwhere(image_names == name).reshape(-1, )

    for index in indices:
        box_xy = box_coords[index]
        resized_box_xy = utils.resize_box_xy(orig_hw, resized_hw, box_xy)
        box_cwh = utils.xy_to_cwh(resized_box_xy)
        # arguments assumed here, mirroring the normalize_box_cwh usage in Example #12
        normalized_cwh, position = utils.normalize_box_cwh(
            resized_hw, num_grid, box_cwh)
Example #12
def gtsdb_aug_(params, image, box_xy, classes):
    class_dir = 'data/GTSRB/Images/'

    add_signs = params.add_signs
    resized_hw = [params.darknet_input, params.darknet_input]

    # containers for the augmented images and labels
    X_aug, Y_aug = [], []

    # extract the existing signs' bounding boxes
    signs_list = {}
    num_orign_signs = box_xy.shape[0]

    # occlude the existing and paste "add_signs" new signs
    num_signs = num_orign_signs + add_signs
    # num_signs = np.random.randint(num_orign_signs, max_signs+1)

    # randomly select num_signs
    for itr_sign in range(num_signs):

        # a class from 43 classes
        class_name = random.choice(os.listdir(class_dir))
        while "0" not in class_name:
            class_name = random.choice(os.listdir(class_dir))

        # a sign from that class
        sign_name = random.choice(os.listdir(class_dir + class_name + '/'))
        while "ppm" not in sign_name:
            sign_name = random.choice(os.listdir(class_dir + class_name + '/'))

        # load sign bounding boxes
        data_signs = np.loadtxt(class_dir +  class_name + '/GT-' + class_name \
            + '.csv', delimiter = ';', dtype= str)[1:]

        selected = np.argwhere(data_signs == sign_name)[0][0]

        box_coords_data = data_signs[selected, 1:8].astype(int)
        #   key    width  height startX startY endX endY  class
        # "name"    0      1      2       3     4     5    6

        signs_list[str(data_signs[selected, 0])] = box_coords_data

    # array y records the new bounding boxes
    y = np.zeros((params.n_grid, params.n_grid, 5 + params.n_classes))

    idx = 0

    # perform data augmentation
    for key in signs_list:

        # one sign's info
        single_sign = signs_list[key]
        class_name = utils.get_image_name(single_sign[6])[:-4]
        sign = cv2.imread(class_dir + '/' + class_name + '/' + key)

        # FROM this sign
        fromX1, fromY1, fromX2, fromY2 = single_sign[2:6]
        fromH, fromW = fromY2 - fromY1, fromX2 - fromX1

        # 1. occlude existing signs
        if idx < num_orign_signs:

            # TO the selected image
            toX1, toY1, toX2, toY2 = box_xy[idx].astype(int)
            toH, toW = toY2 - toY1, toX2 - toX1
            ratioH, ratioW = toH / fromH, toW / fromW
            rescaleH, rescaleW = int(ratioH * fromH), int(ratioW * fromW)

            # resize the selected sign to fit into the space of existing signs
            resized_single_sign = \
                cv2.resize(sign[fromY1:fromY2, fromX1:fromX2], (toW, toH))

            # paste
            image[toY1:toY2, toX1:toX2] = resized_single_sign

            # record the new bounding boxes
            # Note: box_xy is the same as the existing signs
            new_box_xy = box_xy[idx].astype(int)
            resized_box_xy = \
                utils.resize_box_xy(image.shape[0:2], resized_hw, new_box_xy)
            box_cwh = utils.xy_to_cwh(resized_box_xy)
            (xc, yc, w, h), (row, col) = \
                utils.normalize_box_cwh(resized_hw, params.n_grid, box_cwh)
            y[row, col, 0:5] = [1, xc, yc, w, h]
            y[row, col, 5 + single_sign[6]] = 1

            idx += 1

        # 2. add new signs
        else:

            # TO a random position of the selected image
            X_start = np.random.randint(0, image.shape[1] - single_sign[0])
            Y_start = np.random.randint(0, image.shape[0] - single_sign[1])
            toX1, toY1, toX2, toY2 = X_start, Y_start, X_start + fromW, Y_start + fromH
            toH, toW = toY2 - toY1, toX2 - toX1

            # paste
            image[toY1:toY2, toX1:toX2] = sign[fromY1:fromY2, fromX1:fromX2]

            # record the new bounding boxes
            # Note: box_xy is the newly defined bounding boxes
            new_box_xy = [toX1, toY1, toX2, toY2]
            resized_box_xy = \
                utils.resize_box_xy(image.shape[0:2], resized_hw, new_box_xy)
            box_cwh = utils.xy_to_cwh(resized_box_xy)
            (xc, yc, w, h), (row, col) = \
                utils.normalize_box_cwh(resized_hw, params.n_grid, box_cwh)
            y[row, col, 0:5] = [1, xc, yc, w, h]
            y[row, col, 5 + single_sign[6]] = 1

    resized_image = cv2.resize(image,
                               (params.darknet_input, params.darknet_input))
    Y_aug.append(y)
    X_aug.append(resized_image)

    # X_aug, Y_aug = np.array(X_aug), np.array(Y_aug)

    return X_aug, Y_aug
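Examples #11 and #12 both rely on box helpers from a utils module that is not shown here. The sketches below are assumptions inferred from the call sites (resize_box_xy rescaling corner boxes, xy_to_cwh converting corners to center/size form, normalize_box_cwh producing a YOLO-style per-cell target), not the original implementations:

def resize_box_xy(orig_hw, resized_hw, box_xy):
    # Scale (x1, y1, x2, y2) corners from the original image size to the resized one.
    scale_y = resized_hw[0] / orig_hw[0]
    scale_x = resized_hw[1] / orig_hw[1]
    x1, y1, x2, y2 = box_xy
    return x1 * scale_x, y1 * scale_y, x2 * scale_x, y2 * scale_y

def xy_to_cwh(box_xy):
    # Convert corner coordinates to (center_x, center_y, width, height).
    x1, y1, x2, y2 = box_xy
    return (x1 + x2) / 2, (y1 + y2) / 2, x2 - x1, y2 - y1

def normalize_box_cwh(resized_hw, n_grid, box_cwh):
    # Locate the grid cell containing the box center, then express the center
    # as an offset within that cell and the size relative to the full image.
    xc, yc, w, h = box_cwh
    cell_h = resized_hw[0] / n_grid
    cell_w = resized_hw[1] / n_grid
    row = min(int(yc // cell_h), n_grid - 1)
    col = min(int(xc // cell_w), n_grid - 1)
    normalized = ((xc - col * cell_w) / cell_w, (yc - row * cell_h) / cell_h,
                  w / resized_hw[1], h / resized_hw[0])
    return normalized, (row, col)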
Example #13
def fetch_kernel_image(build):
    image_name = get_image_name()
    url = build["download_url"] + image_name
    _fetch("kernel image", url, image_name)