Example #1
def place_image_overlays(image_path,
                         object_attr,
                         save_path,
                         save=False,
                         show=False):
    img = cv2.imread(image_path)

    text = 'Count: {}'.format(len(object_attr))
    font = cv2.FONT_HERSHEY_TRIPLEX
    bottom_left_corner = (20, 80)
    font_scale = 2
    font_color = (0, 0, 0)
    line_type = 2

    cv2.putText(img, text, bottom_left_corner, font, font_scale, font_color,
                line_type)

    for attr in object_attr:
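        # attr appears to hold (y_min, x_min, y_max, x_max, label, distance_m);
        # cv2 points are (x, y), hence the swapped indices below.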
        cv2.rectangle(img, (attr[1], attr[0]), (attr[3], attr[2]),
                      (0, 20, 200), 2)
        cv2.putText(img,
                    str(attr[4]) + ' ' + str(attr[5]) + 'm',
                    (attr[1] - 80, attr[0] - 20), font, font_scale, font_color,
                    line_type)

    if show:
        display_image(img, 'image with markers')
    if save:
        cv2.imwrite(save_path, img)
Example #2
def get_largest_connected_component(image):
    # image = cv2.erode(image, np.ones((2,2), np.uint8), iterations=1)
    # image = cv2.dilate(image,  np.ones((2,2), np.uint8), iterations=1)

    # image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, np.ones((2,2), np.uint8))
    # image = cv2.morphologyEx(image, cv2.MORPH_OPEN, np.ones((2,2), np.uint8))

    # display_image("after erode+dilate", image)
    number_of_components, output, stats, centroids = cv2.connectedComponentsWithStats(image, connectivity=8)
    sizes = stats[:, -1]
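    # Label 0 is the background; the search below starts from component 1.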

    max_label = 1
    max_size = sizes[1]
    for i in range(2, number_of_components):
        if sizes[i] > max_size:
            max_label = i
            max_size = sizes[i]
    print("max label is: ", max_label)
    image2 = np.zeros(output.shape)

    image2[output == max_label] = 255
    image2 = image2.astype(np.uint8)
    display_image("Biggest component", image2)

    return image2
Example #3
def get_start_end_points_sr(image, max_transition_index):
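    # Scan the maximum-transition row from right to left, collecting each run of
    # white pixels as a candidate separation region (end found first, then start).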

    flag = 0
    image_co = image.copy()
    separation_regions = []
    h, w = image.shape
    sr = [-1, -1]  # white to black --> start
    for j in range(w - 1, -1, -1):  # black to white --> end
        if image[max_transition_index, j] == 255 and flag == 0:
            sr[1] = j
            flag = 1
        elif image[max_transition_index, j] != 255 and flag == 1:
            flag = 0
            sr[0] = j

        if -1 not in sr:
            separation_regions.append(sr)
            sr = [-1, -1]

    # for sr in separation_regions:
        # cv2.line(image_co, (sr[0], 0), (sr[0], h), (255, 255, 255), 1)  # for debugging
        # cv2.line(image_co, (sr[1], 0), (sr[1], h), (255, 255, 255), 1)  # for debugging

    display_image("after ", image_co)
    print(separation_regions)
Example #4
def place_image_overlays(image_path, bboxes, save_path, save=False, show=False):
    img = cv2.imread(image_path)
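    # bbox holds (y_min, x_min, y_max, x_max); cv2.rectangle takes (x, y) corners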
    for bbox in bboxes:
        cv2.rectangle(img, (bbox[1], bbox[0]), (bbox[3], bbox[2]), (0, 20, 200), 2)

    if show:
        display_image(img, 'image with markers')
    if save:
        cv2.imwrite(save_path, img)
Example #5
    def set_variant(self, name):
        self.current_variant = name
        variant_to_guide = {
            "full": "./images/full_guide.png",
            "size_invariant": "./images/size_guide.png",
            "similar_shapes": "./images/shape_guide.png",
            "binary": "./images/binary_guide.png"
        }
        display_image(variant_to_guide[name], self.frame_1_0, 2, 0, (600, 100))
        self.update_info()
Example #6
def segment_lines(image, directory_name):
    (h, w) = image.shape[:2]
    original_image = image.copy()

    image = cv2.bitwise_not(image)
    display_image("here", image)
    image = cv2.dilate(image, np.ones((3, 3), np.uint8), iterations=1)

    horizontal_projection = get_horizontal_projection(image)
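    # Runs of zero projection are blank gaps between text lines; each gap's row
    # indices are averaged below to place a single cut line per gap.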

    y, count = 0, 0
    is_space = False
    ycoords = []
    for i in range(h):
        if not is_space:
            if horizontal_projection[i] == 0:
                is_space = True
                count = 1
                y = i

        else:
            if horizontal_projection[i] > 0:
                is_space = False
                ycoords.append(y / count)

            else:
                y += i
                count += 1

    previous_height = 0

    if os.path.exists(directory_name):
        shutil.rmtree(directory_name)

    os.makedirs(directory_name)

    for i in range(1, len(ycoords)):

        cv2.line(image, (0, int(ycoords[i])), (w, int(ycoords[i])),
                 (255, 255, 255), 2)
        image_cropped = original_image[previous_height:int(ycoords[i]), :]
        previous_height = int(ycoords[i])
        cv2.imwrite(directory_name + "/" + "segment_" + str(i) + ".png",
                    image_cropped)
    display_image("segmented lines", image)

    image_cropped = original_image[previous_height:h, :]
    cv2.imwrite(directory_name + "/" + "segment_" + str(len(ycoords)) + ".png",
                image_cropped)
    print(image.shape)
    cv2.imwrite("segmented_lines.png", image)
    return image
Example #7
    def prediction(self):
        # Input images
        inputs = load_images(glob.glob(self.args.input))
        print(inputs.shape)

        # Compute results
        outputs = predict(model, inputs)
        print("Prediction done!")

        # Display the results
        display_image(outputs, inputs)
Example #8
def main():
    img_name = 'street.png'
    img = cv2.imread(img_name)
    img = cv2.resize(img, dsize=(0, 0), fx=0.5, fy=0.5)
    height, width = img.shape[:2]
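    # 3/4 of the frame height: draw_lanes presumably keeps lanes below this row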
    height_threshold = (height // 4) * 3
    lanes = get_lanes(img=img)
    img_lanes, points = draw_lanes(img=img,
                                   lines=lanes,
                                   height=height,
                                   height_threshold=height_threshold)
    display_image(img=img_lanes, name=img_name, color_callback=True)
Example #9
    def gen_adv_example(self, showImage=False):
        if showImage:
            display_image(self.orig_image, "Orig image")

        image = self.orig_image
        for i in range(self.iters):
            image = self.step(image)

        if showImage:
            display_image(image, "Adversarial image")

        return image
Example #10
    def display_regions(self, image_np, detected_regions, merged_regions):
        img_cv = image_scipy_to_cv(image_np)
        # Detected regions in green (BGR)
        for region in detected_regions:
            img_cv = cv2.rectangle(
                img_cv, (int(region.x), int(region.y)),
                (int(region.x + region.width), int(region.y + region.height)),
                (0, 255, 0), 2)
        # Merged regions in red
        for region in merged_regions:
            img_cv = cv2.rectangle(
                img_cv, (int(region.x), int(region.y)),
                (int(region.x + region.width), int(region.y + region.height)),
                (0, 0, 255), 2)
        display_image(img_cv)
Example #11
def scan_image(image_np, window_size, window_step_size, face_regions=[], debug=False):
    h, w = image_np.shape
    data = []
    for y in range(0, h - window_size, window_step_size):
        for x in range(0, w - window_size, window_step_size):
            overlap = overlap_with_face(RectangleRegion(x, y, window_size, window_size), face_regions)
            if debug:
                img_cv = image_scipy_to_cv(image_np)
                color = (0, 0, 255) if overlap else (0, 255, 0)
                img_cv = cv2.rectangle(img_cv, (x, y), (x+window_size, y+window_size), color, 2)
                display_image(img_cv)
            if not overlap:
                data.append(image_np[y:y+window_size, x:x+window_size])
    return data
Example #12
def segment_character(image):

    pen_size = get_pen_size(image)
    vertical_projection = get_vertical_projection(image)
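    # Columns whose projection equals the pen size contain only the baseline
    # stroke -- candidate cut columns between connected characters.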

    positions = np.where(vertical_projection == pen_size)
    print("pen size is: ", pen_size)
    print("positions is: ", positions[0], sep='\n')
    positions = positions[0]

    count = 0
    consecutive = False
    length_consecutive = []
    point_positions = []
    for i in range(1, len(positions)):

        if not consecutive:
            if positions[i - 1] + 1 == positions[i]:
                count = 1
                consecutive = True

        else:
            if positions[i - 1] + 1 != positions[i]:
                consecutive = False
                if count > (pen_size / 255) * 0.4:
                    length_consecutive.append(count + 1)
                    point_positions.append(i)

            else:
                count += 1

    print("point positions is", point_positions)
    print("length_consecutive is", length_consecutive)
    print("positions is: ", positions)

    segmentation_points = []
    for i in range(len(length_consecutive)):
        temp = positions[point_positions[i] - length_consecutive[i]:point_positions[i]]
        print("final point positions", temp)
        if len(temp) != 0:
            segmentation_points.append(ceil(sum(temp) / len(temp)))

    print("final seg points", segmentation_points)

    (h, w) = image.shape
    # for i in segmentation_points:
    #   cv2.line(image, (i, 0), (i, h), (255, 255, 255), 1)

    # cv2.line(image, (segmentation_points[-1], 0), (segmentation_points[-1], h), (255, 255, 255), 1)
    display_image("char seg", image)
    return segmentation_points
Example #13
    def find_contours(self, show=False):
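        # PREDICTION_VALUE_THRESHOLD is read from the environment and must be set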
        ret, thresh = cv2.threshold(
            self.img, int(os.getenv('PREDICTION_VALUE_THRESHOLD')), 255, 0)
        # display_image(thresh, 'binary')

        contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE,
                                               cv2.CHAIN_APPROX_SIMPLE)
        # print('\nshape: {}, {}'.format(np.array(contours).shape, np.array(contours[0]).shape))

        if show:
            img_copy = np.copy(self.img)
            cv2.drawContours(img_copy, contours, -1, (255, 255, 255), 3)
            display_image(img_copy, 'contours')

        return contours
Example #14
    def _get_cam_frame(self, display=False, ground_truth=None):
        """Grab an image from the camera (224, 224, 3) to feed into CNN"""
        IMAGE_NOISE_RVARIANCE = Range(0.0, 0.0001)
        cam_img = self.sim.render(
            640, 360, camera_name='camera1'
        )[::-1, :, :]  # Rendered images are upside-down.
        image_noise_variance = sample(IMAGE_NOISE_RVARIANCE)
        cam_img = (skimage.util.random_noise(
            cam_img, mode='gaussian', var=image_noise_variance) * 255).astype(
                np.uint8)
        cam_img = preproc_image(cam_img)
        if display:
            label = str(ground_truth[3:6])
            display_image(cam_img, label)
        return cam_img
Example #15
    def update_visual(self):
        img = ImageBuilder.build(b_type='qtree',
                                 data=self.quadra.qtree,
                                 im=None,
                                 quads=self.quadra.show_quad,
                                 resize=True)
        self.image_widget.setImage(display_image(img))
Example #16
def test_image_bbox(list_path, image_path, show=False):
    # Verified that angle in the ellipseList.txt file is in radians
    # max 1.570796, min -1.570796, mean 0.158146
    face_counts = 0
    all_angles = []
    for img_result in tqdm(FddbImageIterator(list_path, image_path, skip_missing=True)):
        img_cv = img_result.image_cv
        face_counts += len(img_result.faces)
        for face in img_result.faces:
            all_angles.append(face.angle)
            angle_degree = face.angle / math.pi * 180
            img_cv = cv2.ellipse(img_cv, (int(face.center_x), int(face.center_y)), (int(face.major_radius), int(face.minor_radius)), int(angle_degree), 0, 360, (0,0,255), 2)
        if show:
            display_image(img_cv, img_result.image_filename)
    ps = pd.Series(all_angles)
    print(ps.describe())
    print("Total face count: {}".format(face_counts)) # 5171
Example #17
    def update_visual(self):
        if self.current_step == 0:
            text = "Initial Step"
        elif self.current_step == self.total_steps:
            text = "Final Step"
        else:
            text = f"Step {self.current_step} / {self.total_steps}"

        img = ImageBuilder.build(b_type='pathfinder',
                                 data=self.pathfinder.steps[self.current_step])
        self.image_widget.setImage(display_image(img))
        self.lbl_step.setText(text)
Example #18
    def snapshot(self):
        # Get a frame from the video source
        ret, frame = self.vid.get_frame()

        if ret:
            if _CALIBRATE:
                frame = undistort(frame)
            img_path = "./snapshots/frame-" + time.strftime(
                "%d-%m-%Y-%H-%M-%S") + ".jpg"
            cv2.imwrite(img_path, cv2.cvtColor(frame, cv2.COLOR_RGB2BGR))
            display_image(img_path,
                          self.frame_2_1,
                          1,
                          0,
                          resize_to=self.resize_to_fit)
            btn_snapshot = Tkinter.Button(self.frame_2_1,
                                          text="Segment",
                                          width=10,
                                          command=self.segment_image)
            btn_snapshot.grid(row=2, column=0)
            self.image_path = img_path
Example #19
def get_detection_results(path, debug=False):
    detector = Detector(os.path.join('models', 'strong_classifier_276.pkl'),
                        n_process=10)

    results = DetectionResults()
    for image_file in tqdm(glob.glob(os.path.join(path, "*.jpg"))):
        image_cv = cv2.imread(image_file)
        detected_faces = detector.detect(image_file, debug=False)
        result = DetectionResult(image_file)
        for region in detected_faces:
            result.add_bbox(region)
            if debug:
                image_cv = cv2.rectangle(image_cv,
                                         (int(region.x), int(region.y)),
                                         (int(region.x + region.width),
                                          int(region.y + region.height)),
                                         (0, 255, 0), 2)
        results.add(result)
        if debug:
            display_image(image_cv)

    return results
Example #20
def generate_positive(list_path, image_path, out_path, show=False):
    """
    Generate positive examples using lists from list_path, using image from img_path,
    and save to out_path
    """
    missing_file_count, face_idx, too_small_count = 0, 0, 0
    for img_result in FddbImageIterator(list_path, image_path):
        if img_result.is_missing:
            missing_file_count += 1
            continue
        
        height, width = img_result.image_np.shape
        for face in img_result.faces:
            x_min, y_min, x_max, y_max = get_rectangle_coordiates(width, height, face.major_radius, face.minor_radius, face.angle, face.center_x, face.center_y, square=True)

            if show:
                img_cv = cv2.rectangle(img_result.image_cv, (x_min, y_min), (x_max, y_max), (0, 255, 0), 2)
                img_cv = cv2.ellipse(img_cv, (int(face.center_x), int(face.center_y)), (int(face.major_radius), int(face.minor_radius)), int(face.angle / math.pi * 180), 0, 360, (0,0,255), 2)
                print(face.major_radius, face.minor_radius, face.angle, face.center_x, face.center_y)
                print(width, height)
                print(x_min, y_min, x_max, y_max)

            if (x_max - x_min) < 24:
                too_small_count += 1
                continue

            try:
                base, ext = os.path.splitext(os.path.basename(img_result.image_filename))
                out_image_name = base + "-" + str(face_idx) + ".jpg"
                face_idx += 1
                scipy.misc.imsave(os.path.join(out_path, out_image_name), img_result.image_np[y_min:y_max, x_min:x_max])
                print('Face image %s generated.' % out_image_name)
            except Exception as ex:
                print(ex)

        if show:
            display_image(img_cv, img_result.image_filename)
    print('Face generation done with %d faces generated. %d files missing. %d too small images' % (face_idx, missing_file_count, too_small_count))
Example #21
    def display_results(self, image_path, inference_dir):
        seg_img_path = (inference_graph.get_prediction_format() %
                        os.path.join(inference_dir,
                                     image_path.split('/')[-1].split('.')[0]) +
                        ".png")
        display_image(seg_img_path,
                      self.frame_3_0,
                      1,
                      0,
                      resize_to=self.resize_to_fit)
        image = cv2.imread(image_path)
        mask = cv2.imread(seg_img_path)
        alpha = 0.6
        # Blend the segmentation mask over the original image in place
        cv2.addWeighted(mask, alpha, image, 1 - alpha, 0, image)
        overlay_save_path = os.path.join(
            inference_dir,
            image_path.split('/')[-1].split('_')[0] + "_overlay.png")
        cv2.imwrite(overlay_save_path, image)
        display_image(overlay_save_path,
                      self.frame_3_1,
                      1,
                      0,
                      resize_to=self.resize_to_fit)
Example #22
def generate_negative_from_face_dataset(list_path, image_path, out_path, show=False, random_file_ratio=0.3, random_crop_ratio=0.9, max_count=3000):
    neg_idx = 0
    file_count = 0
    for img_result in FddbImageIterator(list_path, image_path, skip_missing=True):
        if random.random() > random_file_ratio:
            continue
        
        print("Reading file %s" % img_result.image_filename)
        height, width = img_result.image_np.shape
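        # Collect face boxes first so the negative windows sampled below avoid them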
        face_regions = []
        for face in img_result.faces:
            x_min, y_min, x_max, y_max = get_rectangle_coordiates(width, height, face.major_radius, face.minor_radius, face.angle, face.center_x, face.center_y, square=True, square_mode="max")
            region = RectangleRegion(x_min, y_min, x_max - x_min, y_max - y_min)
            face_regions.append(region)

        data = scan_image_with_scale(img_result.image_np, window_size=50, window_step_size=60, min_size=0.0, max_size=1.0, scale_step=0.5, face_regions=face_regions, debug=show)

        if show:
            image_cv = image_scipy_to_cv(img_result.image_np)
            display_image(image_cv)

        counter = 0
        for neg_np in data:
            if random.random() > random_crop_ratio:
                continue
            counter += 1
            base, ext = os.path.splitext(os.path.basename(img_result.image_filename))
            out_image_name = os.path.join(out_path, base + "-" + str(neg_idx) + ".jpg")
            neg_idx += 1
            #neg_np_scale = scipy.misc.imresize(neg_np, size=(24, 24), mode='F')
            scipy.misc.imsave(out_image_name, neg_np)
        print("Generated %d/%d images" % (counter, len(data)))
        if counter > 0:
            file_count += 1
        if neg_idx > max_count:
            break
    print("Generated %d negative images altogether from %d files" % (neg_idx, file_count))
Example #23
def CW_attack_fast(img_0,
                   mean_cat_attack,
                   cov_cat_attack,
                   pi_cat_attack,
                   mean_grass_attack,
                   cov_grass_attack,
                   pi_grass_attack,
                   mean_cat_defense,
                   cov_cat_defense,
                   pi_cat_defense,
                   mean_grass_defense,
                   cov_grass_defense,
                   pi_grass_defense,
                   original_img,
                   truth,
                   l=5,
                   target_index=1,
                   stride=8,
                   alpha=0.0001,
                   display_iter=300,
                   title='',
                   path='./Outputs',
                   preprocessing=[None, None],
                   attack_type='blackbox'):
    iter_num = 0
    parallel_img_0 = parallel(img_0, stride=stride)
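    # parallel() presumably flattens the image's 8x8 patches into column vectors
    # so gradient_CW can evaluate every patch at once.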
    img_k = img_0
    W_cat, w_cat, w_0_cat = get_parameters(mean_cat_attack, cov_cat_attack,
                                           pi_cat_attack)
    W_grass, w_grass, w_0_grass = get_parameters(mean_grass_attack,
                                                 cov_grass_attack,
                                                 pi_grass_attack)

    while iter_num < 300:
        iter_num += 1
        parallel_img_k = parallel(img_k, stride=stride)
        if attack_type == 'whitebox' and preprocessing[0] is not None:
            parallel_img_k = preprocessing[0].forward(parallel_img_k)
            parallel_img_0 = preprocessing[0].forward(parallel_img_0)

        current_grad = gradient_CW(patch_vec_k=parallel_img_k,
                                   patch_vec_0=parallel_img_0,
                                   mean_cat=mean_cat_attack,
                                   cov_cat=cov_cat_attack,
                                   pi_cat=pi_cat_attack,
                                   mean_grass=mean_grass_attack,
                                   cov_grass=cov_grass_attack,
                                   pi_grass=pi_grass_attack,
                                   W_cat=W_cat,
                                   w_cat=w_cat,
                                   w_0_cat=w_0_cat,
                                   W_grass=W_grass,
                                   w_grass=w_grass,
                                   w_0_grass=w_0_grass,
                                   l=l,
                                   target_index=target_index)
        grad = unparallel_grad(current_grad, img_0, stride=stride)
        img_k_1 = np.clip(img_k - alpha * grad, 0, 1)
        change = np.linalg.norm((img_k_1 - img_k))
        img_k = img_k_1

        if (iter_num) % display_iter == 0:
            print("\n")
            display_image(img_perturbed=img_k_1,
                          mean_cat=mean_cat_defense,
                          cov_cat=cov_cat_defense,
                          pi_cat=pi_cat_defense,
                          mean_grass=mean_grass_defense,
                          cov_grass=cov_grass_defense,
                          pi_grass=pi_grass_defense,
                          original_img=original_img,
                          truth=truth,
                          title=title + 'iter_' + str(iter_num),
                          stride=stride,
                          preprocessing=preprocessing[1],
                          path=path)

            print(' Change:{}'.format(change))
        if change < 0.001 and stride == 8:
            print("\n\nMax Iteration:" + str(iter_num))
            break
        elif change < 0.01 and stride == 1:
            print("\n\nMax Iteration:" + str(iter_num))
            break

    return img_k_1
Example #24
def CW_attack(img_0,
              mean_cat,
              cov_cat,
              pi_cat,
              mean_grass,
              cov_grass,
              pi_grass,
              original_img,
              truth,
              l=5,
              target_index=1,
              stride=8,
              alpha=0.0001,
              display_iter=300,
              title='',
              preprocessing=None):
    iter_num = 0
    img_perturbed_k = np.copy(img_0)
    img_perturbed_k_1 = np.copy(img_0)
    W_cat, w_cat, w_0_cat = get_parameters(mean_cat, cov_cat, pi_cat)
    W_grass, w_grass, w_0_grass = get_parameters(mean_grass, cov_grass,
                                                 pi_grass)
    while iter_num < 300:
        iter_num += 1
        grad = np.zeros_like(img_0)
        for i in range(4, img_0.shape[0] - 4,
                       stride):  # offset by 4 so each 8x8 patch is centered
            for j in range(
                    4, img_0.shape[1] - 4,
                    stride):  # offset by 4 so each 8x8 patch is centered
                patch_vec_0 = img_0[i - 4:i + 4, j - 4:j + 4].reshape((64, 1))
                patch_vec_k = img_perturbed_k[i - 4:i + 4,
                                              j - 4:j + 4].reshape((64, 1))
                grad[i - 4:i + 4, j - 4:j + 4] += gradient_CW(
                    patch_vec_k=patch_vec_k,
                    patch_vec_0=patch_vec_0,
                    mean_cat=mean_cat,
                    cov_cat=cov_cat,
                    pi_cat=pi_cat,
                    mean_grass=mean_grass,
                    cov_grass=cov_grass,
                    pi_grass=pi_grass,
                    W_cat=W_cat,
                    w_cat=w_cat,
                    w_0_cat=w_0_cat,
                    W_grass=W_grass,
                    w_grass=w_grass,
                    w_0_grass=w_0_grass,
                    l=l,
                    target_index=target_index).reshape((8, 8))

        img_perturbed_k_1 = np.clip(img_perturbed_k - alpha * grad, 0, 1)
        change = np.linalg.norm((img_perturbed_k_1 - img_perturbed_k))
        img_perturbed_k = img_perturbed_k_1
        if (iter_num) % display_iter == 0:
            print("\n")
            display_image(img_perturbed=img_perturbed_k,
                          mean_cat=mean_cat,
                          cov_cat=cov_cat,
                          pi_cat=pi_cat,
                          mean_grass=mean_grass,
                          cov_grass=cov_grass,
                          pi_grass=pi_grass,
                          original_img=original_img,
                          truth=truth,
                          title=title + 'iter_' + str(iter_num),
                          stride=stride,
                          preprocessing=preprocessing)
            print(' Change:{}'.format(change))
        if change < 0.001 and stride == 8:
            break
        elif change < 0.01 and stride == 1:
            break
    return img_perturbed_k
Example #25
model.summary()

# We need to do this to keep Keras happy
model.compile(loss=keras.losses.categorical_crossentropy, optimizer=keras.optimizers.Adam(), metrics=['accuracy'])

# Predicting labels and evaluating the model on the test set
predicted_labels = model.predict(test_features)
score = model.evaluate(test_features, to_categorical(test_labels))
print('Test loss:', score[0])
print('Test accuracy:', score[1])

# Showing some random images
for i in range(NUM_IMAGES_RANDOM):
    index = random.randint(0, test_features.shape[0] - 1)
    display_image(test_features[index],
                  'Example: %d. Expected Label: %d. Predicted Label: %d.' %
                  (index, test_labels[index], np.argmax(predicted_labels[index, :])))
    plt.savefig('test_image_%d.png' % index, format='png')


# Showing some images where LeNet-5 misclassified the digit
count = 0
for i in range(test_features.shape[0]):
    if count == NUM_IMAGES_MISCLASSIFICATION:
        break
    if np.argmax(predicted_labels[i, :]) != test_labels[i]:
        display_image(test_features[i],
                      'Example: %d. Expected Label: %d. Predicted Label: %d.' %
                      (i, test_labels[i], np.argmax(predicted_labels[i, :])))
        plt.savefig('misclassified_image_%d.png' % i, format='png')
        count += 1
Example #26
device = (torch.device('cuda:0')
          if torch.cuda.is_available() else torch.device('cpu'))
print(device)

img_path = os.path.join(root_dir, 'good', '000.png')
img = Image.open(img_path)
#display_image(img)

# dataset
t = T.Compose([T.ToTensor()])

grid_dataset = MyDataset(os.path.join(root_dir, 'train', 'augmented'),
                         transforms=t)

img = grid_dataset[0]
display_image(img, save_image=True, name=os.path.join(results_dir, 'in.png'))

# model
model = Model()
in_img = img.unsqueeze(0)
out_img = model(in_img)
out_img = out_img.squeeze(0)
display_image(out_img,
              save_image=True,
              name=os.path.join(results_dir, 'out.png'))

# optimizer
learning_rate = 0.001
optimizer = optim.Adam(model.parameters(), lr=learning_rate)

# loss function
Example #27
from utils import read_mnist, display_image
import random
import matplotlib.pyplot as plt

NUM_IMAGES = 10

# Loading training and test datasets
train_features, train_labels = read_mnist('train-images-idx3-ubyte.gz',
                                          'train-labels-idx1-ubyte.gz')
test_features, test_labels = read_mnist('t10k-images-idx3-ubyte.gz',
                                        't10k-labels-idx1-ubyte.gz')

print('# of training images:', train_features.shape[0])
print('# of test images:', test_features.shape[0])

print('Training dataset shape:', train_features.shape)
print('Test dataset shape:', test_features.shape)

# Showing some random images
for i in range(NUM_IMAGES):
    index = random.randint(0, train_features.shape[0] - 1)
    display_image(train_features[index],
                  'Example: %d. Label: %d' % (index, train_labels[index]))

plt.show()
Example #28
    print("Inference without Adversarial Training")
    mean_cat_attack = mean_cat
    cov_cat_attack = cov_cat
    pi_cat_attack = pi_cat
    mean_grass_attack = mean_grass
    cov_grass_attack = cov_grass
    pi_grass_attack = pi_grass
    #Inference
    display_image(img_perturbed=Y,
                  mean_cat=mean_cat_attack,
                  cov_cat=cov_cat_attack,
                  pi_cat=pi_cat_attack,
                  mean_grass=mean_grass_attack,
                  cov_grass=cov_grass_attack,
                  pi_grass=pi_grass_attack,
                  original_img=Y,
                  truth=truth,
                  title="NonAttackNonDefense",
                  stride=args.stride,
                  path="./Outputs/Defense/AdversarialTraining/" + args.train +
                  '/' + args.attack_type + '/',
                  save=True,
                  infer=True)
else:
    raise ValueError

#Inference
print("Inference with Adversarial Training")
display_image(img_perturbed=Y,
              mean_cat=mean_cat_defense,
              cov_cat=cov_cat_defense,
Example #29
#computing the parameters for gaussian classifier (Training)
mean_cat, cov_cat, pi_cat = mean_cov(train_cat, train_grass)
mean_grass, cov_grass, pi_grass = mean_cov(train_grass, train_cat)

# stride 1 -> overlapping patches
stride = 1

#Inference
display_image(img_perturbed=Y,
              mean_cat=mean_cat,
              cov_cat=cov_cat,
              pi_cat=pi_cat,
              mean_grass=mean_grass,
              cov_grass=cov_grass,
              pi_grass=pi_grass,
              original_img=Y,
              truth=truth,
              title="NonAttackNonOverlap",
              stride=stride,
              save=False,
              infer=True)

### Analysis for Alpha variation
display = [5, 5, 2]
alpha = [0.0001, 0.0002, 0.0004]
for i in range(len(display)):
    a = alpha[i]
    disp = display[i]

    img_perturbed = PGD_attack_fast(img_0=Y,
Example #30
#
img1 = io.read_mhd_and_raw("E:/git/pytorch/vae/results/artificial/hole/z_6/B_0.1/batch128/L_60000/gen/ori/0001.mhd")
# img2 = io.read_mhd_and_raw("E:/git/pytorch/vae/results/artificial/tip/z_24/B_0.1/L_0/gen/rec/0000.mhd")
# # "E:/git/pca/output/CT/patch/z24/EUDT/recon_104.mhd"
#
# img1 = (img1 - np.min(img1))/ (np.max(img1) - np.min(img1))
# img3 = abs(img1 -img2)
# print(img3)
# print(np.max(img3))
# print(np.min(img3))

# ori=np.reshape(img1, [9,9,9])
# preds=np.reshape(img2, [9,9,9])
#
# # plot reconstruction

# utils.display_image(img1, img2, img3, 9, outdir1)
# utils.display_image2(img1, img2, 9, outdir2)

img = io.read_mhd_and_raw("E:/git/pytorch/vae/input/hole0/std/0000.mhd")
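# 6-connected cross-shaped 3D footprint for the minimum filter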
footprint = np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                      [[0, 1, 0], [1, 1, 1], [0, 1, 0]],
                      [[0, 0, 0], [0, 1, 0], [0, 0, 0]]])
result = ndimage.minimum_filter(img, footprint=footprint)
th = 0.5
img = (img > th) * 1
result = (result > th) * 1


utils.display_image(img, 9)
utils.display_image(result, 9)