Example #1
def perform_pca(images: list, expl_var: float = 0.75) -> None:
    images = [img[:, :, 1] for img in images]
    images = [
        cv2.normalize(img.astype('float64'),
                      None,
                      alpha=0,
                      beta=1.0,
                      norm_type=cv2.NORM_MINMAX) for img in images
    ]
    h, w = images[0].shape
    images = [img.flatten() for img in images]
    train_data = np.vstack(images)

    print(f'INFO> Train data specs: {train_data.shape}, {train_data.nbytes}')

    #mean, eigenvectors = cv2.PCACompute(train_data,  mean=np.array([]), maxComponents=10)
    pca = PCA(n_components=expl_var)
    pca.fit(train_data)
    print(
        f'INFO> Found {len(pca.explained_variance_ratio_)} eigenvectors, explaining {expl_var} of the data variance'
    )

    for i, ev in enumerate(pca.components_):
        show_image(
            float2gray(ev.reshape(h, w, 1)),
            f'Eigenvector {i} - weight: {pca.explained_variance_ratio_[i]}')
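
These snippets call small display helpers such as show_image and float2gray without defining them. A minimal sketch of what they might look like with OpenCV follows; the names match the calls above, but the bodies are assumptions, not the original implementations:

import cv2
import numpy as np

def show_image(img: np.ndarray, title: str = 'image') -> None:
    # show the image in a window and block until a key is pressed (assumed behavior)
    cv2.imshow(title, img)
    cv2.waitKey(0)
    cv2.destroyWindow(title)

def float2gray(img: np.ndarray) -> np.ndarray:
    # map an arbitrary float image onto the displayable 0..255 uint8 range (assumed behavior)
    return cv2.normalize(img, None, 0, 255, cv2.NORM_MINMAX).astype(np.uint8)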
Example #2
def pastImage(scaleList, scaleRange, Imgdir, Annodir, Xmldir, maxNum, show):
    boxlist = [[], [], [], []]
    classlist = [[], [], [], []]
    img_list = []
    # weight = float(1000)/sum(scaleRange)
    weight = 1
    for j in tqdm(range(maxNum)):
        img = np.zeros((500, 500, 3))
        beginImg = np.random.randint(4)
        for i in range(4):
            pastImg, y, x, boxlist[i], classlist[i] = getImg(
                scaleList, i, Imgdir, Annodir, j)
            xbegin = hy[beginImg][str(i)][1]
            ybegin = hy[beginImg][str(i)][0]
            img[ybegin:ybegin + y, xbegin:xbegin + x, :] = pastImg
            # shift and scale each box according to its paste position
            for box in boxlist[i]:
                box[0] = int((box[0] + xbegin) * weight)
                box[2] = int((box[2] + xbegin) * weight)
                box[1] = int((box[1] + ybegin) * weight)
                box[3] = int((box[3] + ybegin) * weight)

        # drawBox
        if show:
            MosaicImg = drawBox(img, boxlist)
            utils.show_image(MosaicImg / 255.0)
        image_name = "mosaic_" + str(j) + ".jpg"

        cv2.imwrite(args.AugImg + image_name, img.astype(np.uint8))
        make_xml(img, boxlist, classlist, image_name, Xmldir)
        img_list.append("mosaic_" + str(j))
    with open(args.Setdir + '/mosaic.txt', 'w') as f:
        for line in img_list:
            f.write(line + '\n')
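
pastImage depends on a global table hy, which is not shown; it evidently maps a starting quadrant (beginImg) and a sub-image index to a (y, x) paste offset on the 500x500 canvas. A hypothetical reconstruction, purely to illustrate the expected shape:

# hypothetical layout table (assumption: a 2x2 grid with 250-pixel cells);
# hy[beginImg][str(i)] yields the (y, x) offset for sub-image i
_offsets = [(0, 0), (0, 250), (250, 0), (250, 250)]
hy = [{str(i): _offsets[(i + shift) % 4] for i in range(4)} for shift in range(4)]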
Example #3
def mask(img, xdim, ydim):

    utils.plot_histogram(img)

    [B, G, R] = cv.split(img)
    blue = B.astype(float)
    green = G.astype(float)
    red = R.astype(float)

    meanR = np.mean(red)
    stdR = np.std(red)
    print(meanR + 1.6 * stdR)
    meanB = np.mean(blue)
    stdB = np.std(blue)
    print(meanB + 1.1 * stdB)

    mode_pixel = utils.get_mode(img, xdim, ydim)

    # separate into roads and houses
    for i in range(xdim):
        for j in range(ydim):
            # road: red value is at least 1.6 std above the mean
            if red[i, j] > meanR + 1.6 * stdR:
                img[i, j] = mode_pixel
            # house: blue value is at least 1.1 std above the mean
            if blue[i, j] > meanB + 1.1 * stdB:
                img[i, j] = (0, 0, 0)

    utils.show_image(img, 'mask')

    return img
Example #4
def run():
    image = load_image(path='./C001R_Cut/C001R04.jpg')
    show_image(image)

    show_channels(image, show_hsv_channels=False)

    #divide_smoothed_green_channel(image)

    #image_cl = enhance_contrast_image(image, clip_limit=4, tile_size=16)
    image_cl_blur_1 = enhance_contrast_image(cv2.GaussianBlur(
        image, (7, 7), 0),
                                             clip_limit=3,
                                             tile_size=8)
    image_cl_blur_2 = enhance_contrast_image(cv2.GaussianBlur(
        image, (7, 7), 0),
                                             clip_limit=3,
                                             tile_size=16)
    image_cl_blur_3 = enhance_contrast_image(cv2.GaussianBlur(
        image, (7, 7), 0),
                                             clip_limit=3,
                                             tile_size=24)

    plot_historgram_one_channel(image[:, :, 1])
    plot_historgram_one_channel(clahe_green_channel(image, clip_limit=5.0))

    show_image_row(
        [image[:, :, 1],
         clahe_green_channel(image, clip_limit=5.0)])
Example #5
def group_blobs(image):
	new_image = np.zeros(image.shape)
	print("---> Running blob Grouping")

	linked = {}

	global group_counter

	# Forward pass
	for x in range(1, image.shape[0] - 1):
		for y in range(1, image.shape[1] - 1):
			if image[x,y] != 0:
				if np.sum(new_image[x-1:x+2, y-1:y+2]) == 0:
					linked[group_counter] = set([group_counter])
					new_image[x,y] = group_counter
					group_counter = group_counter + 1
				else:
					L = new_image[x-1:x+2, y-1:y+2].flatten()
					L = L[L > 1]
					new_image[x,y] = min(L)
					for label in L:
						linked[label] = linked[label] | set(L)
	utils.show_image(new_image)

	# second pass: rewrite each label to the smallest member of its equivalence class
	for x in range(1, image.shape[0] - 1):
		for y in range(1, image.shape[1] - 1):
			if new_image[x,y] != 0:
				new_image[x,y] = sorted(linked[new_image[x,y]])[0]

	class_list = np.array(list(set(new_image.flatten())))
	class_list = class_list[class_list != 0]
	return new_image, class_list
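
This is a two-pass connected-component labeling: the forward pass assigns provisional labels and records equivalences in linked, and the second pass collapses each equivalence class to its smallest label. A small usage sketch, assuming group_counter is initialized above 1 (the forward pass filters neighbor labels with L > 1, so label 1 could never propagate):

import numpy as np

group_counter = 2  # assumed starting value; must be > 1 given the L > 1 filter

binary = np.zeros((10, 10))
binary[2:4, 2:4] = 1   # first blob
binary[6:9, 6:9] = 1   # second blob

labeled, classes = group_blobs(binary)
print(classes)         # expect two distinct group labels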
Example #6
    def make_chip(self, sample, imgset):
        image = cv2.imread(sample['image'])
        height, width = sample['height'], sample['width']
        img_id = osp.splitext(osp.basename(sample['image']))[0]

        mask_path = osp.join(self.segmentation_dir, '{}.hdf5'.format(img_id))
        with h5py.File(mask_path, 'r') as hf:
            mask = np.array(hf['label'])
        mask_h, mask_w = mask.shape[:2]

        # make chip
        region_box, contours = utils.generate_box_from_mask(mask)
        region_box = utils.region_postprocess(region_box, contours,
                                              (mask_w, mask_h))
        region_box = utils.resize_box(region_box, (mask_w, mask_h),
                                      (width, height))
        region_box = utils.generate_crop_region(region_box, (width, height))

        if args.show:
            utils.show_image(image, region_box)
        # if imgset == 'train':
        #     region_box = np.vstack((region_box, np.array([0, 0, width-1, height-1])))

        gt_bboxes, gt_cls = sample['bboxes'], sample['cls']

        chip_gt_list, chip_label_list, neglect_list = self.generate_region_gt(
            region_box, gt_bboxes, gt_cls)
        chip_loc = self.write_chip_and_anno(image, img_id, imgset, region_box,
                                            chip_gt_list, chip_label_list,
                                            neglect_list)

        return chip_loc
Example #7
 def single_attack(image, label, target=None):
     show_image(image.numpy())
     print("Original label: ", label)
     print("Predicted label: ", predict(model, image))
     if target is None:
         adversarial = attack_untargeted(model,
                                         dataset,
                                         image,
                                         label,
                                         alpha=alpha,
                                         beta=beta,
                                         iterations=1000)
     else:
         print("Targeted attack: %d" % target)
         adversarial = attack_targeted(model,
                                       dataset,
                                       image,
                                       label,
                                       target,
                                       alpha=alpha,
                                       beta=beta,
                                       iterations=1000)
     show_image(adversarial.numpy())
     print("Predicted label for adversarial example: ",
           predict(model, adversarial))
     return torch.norm(adversarial - image)
Example #8
    def compute_distance_map(image):
        new_image = ChamferDistance.init_map(image)

        # Forward pass
        for i in range(image.shape[0] - 1):
            for j in range(image.shape[1] - 1):
                temp_matrix = new_image[i:i + 2, j:j + 2]

                # relax: any cell more than one above the local minimum is
                # pulled down to (minimum + 1)
                smallest_value = np.amin(temp_matrix)
                temp_matrix[temp_matrix > smallest_value] = smallest_value + 1

        # Backward pass
        for i in range(image.shape[0] - 1, 0, -1):
            for j in range(image.shape[1] - 1, 0, -1):
                temp_matrix = new_image[i:i + 2, j:j + 2]

                # same relaxation as the forward pass, sweeping from the opposite corner
                smallest_value = np.amin(temp_matrix)
                temp_matrix[temp_matrix > smallest_value] = smallest_value + 1

        utils.show_image(new_image)
        return new_image
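
compute_distance_map expects ChamferDistance.init_map to seed the two sweeps: zero at feature pixels and a very large value everywhere else. A sketch under that assumed contract:

import numpy as np

class ChamferDistance:
    @staticmethod
    def init_map(image):
        # assumed contract: 0 at feature (non-zero) pixels, effectively
        # infinite cost elsewhere, so the two passes can relax distances
        dist_map = np.full(image.shape, np.inf)
        dist_map[image != 0] = 0
        return dist_map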
Example #9
def main():
    print("Image:")
    image = input("")

    features = finding_face_landmark.finding_face_landmark(image)
    if len(features) == 0:
        exit(0)

    data_file_name = "features.csv"
    X, Y, Q = utils.get_data(data_file_name, 2000)

    x_min, x_max = utils.get_min_max(X)
    X = utils.normalize_features(x_min, x_max, X)

    test_file_name = "test.csv"
    T, P, L = utils.get_data_test(test_file_name, x_min, x_max, len(X), Q, Y)

    model_file_name = './my_test_model.ckpt'
    neural_network = n.Neural_Network(X, Y, model_file_name)
    # neural_network.training()
    # neural_network.test(T,P)

    features = utils.normalize_features(x_min, x_max, features)

    predict = neural_network.predict([features])
    image_path = Q[predict][0].strip()

    metadata = 'C:\\ProjekatSoft\\wiki_crop\\wiki.mat'
    name = utils.get_name(image_path, metadata)

    percent = utils.get_percent(features, X[predict:predict + 1, :15][0])
    utils.show_image('C:\\ProjekatSoft\\wiki_crop\\' + image_path, name,
                     percent)
Example #10
def of_dataset(folder="testset", model=None, view=False):
    '''Measure the average landmark error across the given dataset by
    comparing the detected points with the annotated ground truth;
    optionally [view] the results.'''
    assert model is not None

    # load face and landmark detectors
    utils.load_shape_predictor(model)
    # utils.init_face_detector(True, 150)

    # init average-error
    err = 0
    num = 0

    for img, lmarks, path in utils.ibug_dataset(folder):
        # detections
        face = utils.prominent_face(utils.detect_faces(img, detector="dlib"))
        measured = utils.detect_landmarks(img, face)

        # get error
        num += 1
        err += normalized_root_mean_square(lmarks, measured)

        # results:
        if view:
            utils.draw_rect(img, face, color=Colors.yellow)
            utils.draw_points(img, lmarks, color=Colors.green)
            utils.draw_points(img, measured, color=Colors.red)
            utils.show_image(utils.show_properly(utils.crop_image(img, face)))

    print(err, num, err / num)
    print("average NRMS Error for {} is {}".format(folder, err / num))
Example #11
    def make_chip(self, sample, imgset):
        # get image and mask informations
        image = cv2.imread(sample['image'])
        height, width = sample['height'], sample['width']
        img_id = osp.splitext(osp.basename(sample['image']))[0]
        mask_path = osp.join(self.segmentation_dir, '{}.hdf5'.format(img_id))
        with h5py.File(mask_path, 'r') as hf:
            mask = np.array(hf['label'])
        mask_h, mask_w = mask.shape[:2]

        # make tiling (region_box needs a default in case tiling is disabled)
        region_box = np.array([[0, 0, width, height]])
        if args.tiling:
            tiling = utils.add_tiling((width, height))
            # for pattern in tiling:
            #     if utils.iou_calc1(pattern, region_box).max() < 0.85:
            #         region_box = np.vstack((region_box, tiling))
            region_box = tiling
            # region_box = np.vstack((tiling, [0, 0, width, height]))

        if args.show:
            utils.show_image(image, np.array(region_box))

        # get box and class
        gt_bboxes, gt_cls = sample['bboxes'], sample['cls']

        # generate chip annotations and writer chip image
        chip_gt_list, chip_label_list, neglect_list = self.generate_region_gt(
            region_box, gt_bboxes, gt_cls)
        chip_loc = self.write_chip_and_anno(
            image, img_id, region_box, chip_gt_list, chip_label_list,
            neglect_list, imgset)

        return chip_loc
Example #13
    def make_chip(self, img_name):
        image = cv2.imread(osp.join(self.img_dir, img_name))
        height, width = image.shape[:2]
        img_id = osp.splitext(osp.basename(img_name))[0]
        # mask_path = ""
        mask_path = osp.join(self.mask_dir, '{}.hdf5'.format(img_id))
        with h5py.File(mask_path, 'r') as hf:
            mask = np.array(hf['label'])
        mask_h, mask_w = mask.shape[:2]

        # make chip
        region_box, contours = utils.generate_box_from_mask(mask)
        region_box = utils.region_postprocess(region_box, contours,
                                              (mask_w, mask_h))
        region_box = utils.resize_box(region_box, (mask_w, mask_h),
                                      (width, height))
        region_box = utils.generate_crop_region(region_box, (width, height))
        try:
            region_box = np.vstack(
                (region_box, np.array([0, 0, width - 1, height - 1])))
        except ValueError:
            print("empty box")

        if args.show:
            utils.show_image(image, region_box)

        chip_loc = self.write_chip_and_anno(image, img_id, region_box)

        return len(region_box), chip_loc
Example #14
    def _train_on_image(self, image, roi, learning_rate):
        if self._debug_mode:
            print("Training on original image")
        A, B = self._train_on_single_transform(image, roi)

        for i in range(1, self._transforms_number):
            if self._debug_mode:
                print("Training on transform {}".format(i))
            affine_transform_matrix = get_random_transform_mat(roi)
            transformed_image = cv2.warpAffine(image, affine_transform_matrix, (image.shape[1], image.shape[0]))
            Ai, Bi = self._train_on_single_transform(transformed_image, roi)
            A += Ai
            B += Bi

        if learning_rate >= 1.0:
            self._A = A
            self._B = B
        else:
            self._A = learning_rate*A + (1 - learning_rate)*self._A
            self._B = learning_rate*B + (1 - learning_rate)*self._B

        self._W_filter_weights_freq = np.divide(self._A, self._B + self._epsilon)

        if self._debug_mode:
            w_filter_weights = np.fft.ifft2(self._W_filter_weights_freq).real
            show_image(normalize(w_filter_weights), "w", wait=False,
                    size=(4*w_filter_weights.shape[1], 4*w_filter_weights.shape[0]))
            show_freq_image(self._W_filter_weights_freq, "W", wait=False,
                    size=(4*self._W_filter_weights_freq.shape[1], 4*self._W_filter_weights_freq.shape[0]))
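
_train_on_image is a MOSSE-style correlation-filter update: it accumulates the numerator A and denominator B over random affine perturbations, blends them into the running state with a learning rate, and divides to get the filter weights. get_random_transform_mat is not shown; a plausible sketch that jitters rotation and scale about the ROI center (the parameter values are assumptions):

import cv2
import numpy as np

def get_random_transform_mat(roi, max_angle=10.0, max_scale=0.05):
    # roi assumed to be (x, y, w, h); returns a 2x3 affine matrix applying a
    # small random rotation and scale about the ROI center
    x, y, w, h = roi
    center = (x + w / 2.0, y + h / 2.0)
    angle = np.random.uniform(-max_angle, max_angle)
    scale = 1.0 + np.random.uniform(-max_scale, max_scale)
    return cv2.getRotationMatrix2D(center, angle, scale)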
Example #15
    def make_chip(self, sample, imgset):
        image = cv2.imread(sample['image'])
        height, width = sample['height'], sample['width']
        img_id = osp.splitext(osp.basename(sample['image']))[0]

        # generate mosaic candidate regions
        region_box = np.zeros((0, 4))
        for i, mscale in enumerate(args.mosaic_scales):
            region_box = np.vstack((region_box,
                                    self.add_mosaic((width, height), mscale,
                                                    args.strides[i])))
        region_box = region_box.astype(int)

        if args.show:
            utils.show_image(image, np.array(region_box))

        gt_bboxes, gt_cls = sample['bboxes'], sample['cls']

        chip_gt_list, chip_label_list, neglect_list = self.generate_region_gt(
            region_box, gt_bboxes, gt_cls)

        chip_loc = self.write_chip_and_anno(image, img_id, region_box,
                                            chip_gt_list, chip_label_list,
                                            neglect_list, imgset)

        return chip_loc
Example #16
    def make_chip(self, img_name):
        image = cv2.imread(osp.join(self.img_dir, img_name))
        height, width = image.shape[:2]
        img_id = osp.splitext(osp.basename(img_name))[0]
        # mask_path = ""
        mask_path = osp.join(self.mask_dir, '{}.hdf5'.format(img_id))
        with h5py.File(mask_path, 'r') as hf:
            mask = np.array(hf['label'])
        mask_h, mask_w = mask.shape[:2]

        # make chip
        # region_box, contours = utils.generate_box_from_mask(mask)
        # # utils.show_image(mask, np.array(region_box))
        # region_box = utils.generate_crop_region(region_box, mask, (mask_w, mask_h), (width, height), self.gbm)
        # # utils.show_image(mask, np.array(region_box))
        # region_box = utils.resize_box(region_box, (mask_w, mask_h), (width, height))

        # if len(region_box) == 0:
        #     region_box = np.array([[0, 0, width, height]])

        region_box = utils.add_tiling((width, height))

        if args.show:
            utils.show_image(image, region_box)

        chip_loc = self.write_chip_and_anno(image, img_id, region_box)

        return chip_loc
Example #17
def test_blob(image):
    regions = Region.find_regions_by_border_color(image)
    for i, region in enumerate(regions):
        region_mask = region.mask_image(image)
        blobs = region.find_blobs(image)
        Blob.draw_outlines(blobs, region_mask, (234, 34, 102))         
        utils.show_image(region_mask, time=1000)
Example #18
 def _lower_diff_at_edges(self, edges_mask):
     self.diff_image[
         edges_mask] = self.diff_image[edges_mask] * self.JOINT_EDGES_FACTOR
     if self.debug:
         show_image(self.diff_image, 'diff image lower joint edges')
         self._save_image(np.copy(self.diff_image),
                          'Diff image, lowered at edges')
Example #19
    def validate(self, model, criterion, metric, valid_loader):
        # switch to evaluation mode: disables dropout and freezes batch-norm
        # statistics, which must not update during validation
        model.eval()
        valid_loss = 0
        correct = 0
        metric_value = 0
        with torch.no_grad():
            for batch_idx, data in enumerate(valid_loader):
                data['i1'] = data['i1'].to(self.device)
                data['i2'] = data['i2'].to(self.device)
                data['o1'] = data['o1'].to(self.device)

                output = model(data['i1'], data['i2'])
                loss = criterion(output, data['o1'])
                valid_loss += loss.item()
                if metric:
                    metric_value += metric(output,
                                           data['o1']).cpu().detach().numpy()
        metric_value /= len(valid_loader)
        valid_loss /= len(valid_loader)
        print("Some target vs predicted samples:")
        show_image(data['o1'][::4].cpu(), n_row=8, title='Target (validation)')
        show_image(output[::4].cpu(), n_row=8, title='Predicted (validation)')
        print("Average Validation loss: {}\t Average Metric: {}".format(
            valid_loss, metric_value))
Example #20
 def set_perception(self, action, reward, nextState, done):
     self.global_steps += 1
     nextState = self.preprocess(nextState)
     current_state = list(self.current_state.copy())
     
     memory = {'state': current_state,
               'action': int(action),
               'reward': reward,
               'nextState': nextState,
               'done': done}
     
     if self.render:
         print('reward', reward, '\naction', action)
         show_image(nextState)
         time.sleep(0.5)
     
     # add next state
     self.current_state.append(nextState)
     # save transition to replay memory
     self.replayMemory.save_memory(memory)
     
     # train network
     if self.global_steps >= self.warmup_steps:
         if self.global_steps % self.replay_periode == 0:
             self.train_network()
             # save and update network
             if self.training_steps % 100000 == 0:
                 self.save_network()
                 self.replayMemory.save_parameters(self)
             if self.training_steps % self.update_periode == 0:
                 self.update_target_network()
Example #21
def test_classifier(classifier):
    for part in ('train', 'val', 'test'):
        input_images = dataset.fetch_smallbatch_from_celeba('CelebA',
                                                            part=part)
        labels = classifier.predict(input_images)
        for image, label in zip(input_images, labels):
            show_image(image)
            print(label)
Example #22
    def __call__(self):
        # get val predict box
        with open(hyp['local'], 'r') as f:
            chip_loc = json.load(f)
        detecions = dict()
        scales = [1024, 1300, 1500]
        for scale in scales:
            with open(hyp['result'].format(scale), 'r') as f:
                results = json.load(f)
            for det in tqdm(results):
                img_id = det['image_id']
                cls_id = det['category_id']
                bbox = det['bbox']
                score = det['score']
                loc = chip_loc[img_id]
                bbox = [
                    bbox[0] + loc[0], bbox[1] + loc[1], bbox[2] + loc[0],
                    bbox[3] + loc[1]
                ]
                img_name = '_'.join(
                    img_id.split('_')[:-1]) + osp.splitext(img_id)[1]
                if img_name in detecions:
                    detecions[img_name].append(bbox + [score, cls_id])
                else:
                    detecions[img_name] = [bbox + [score, cls_id]]

        # metrics
        results = []
        for img_id in tqdm(self.img_ids):
            img_info = self.coco.loadImgs(img_id)[0]
            det = detecions[img_info['file_name']]
            gt = self.load_annotations(img_info['file_name'])
            gt, det = self.dropObjectsInIgr(gt, det)
            # det = nms(det, score_threshold=0.05, iou_threshold=0.6, overlap_threshold=1)[:, [0, 1, 2, 3, 5, 4]].astype(np.float32)
            det = soft_nms(det,
                           method=2)[:, [0, 1, 2, 3, 5, 4]].astype(np.float32)
            # det = nms2(det)
            if hyp['show']:
                img = cv2.imread(
                    osp.join(self.srcimg_dir,
                             img_info['file_name']))[:, :, ::-1]
                show_image(img, det)

            for bbox in det:
                bbox[2:4] = bbox[2:4] - bbox[:2]
                results.append({
                    "image_id": img_id,
                    "category_id": bbox[4],
                    "bbox": np.round(bbox[:4]),
                    "score": bbox[5]
                })

        coco_pred = self.coco.loadRes(results)
        coco_eval = COCOeval(self.coco, coco_pred, 'bbox')
        coco_eval.params.imgIds = self.coco.getImgIds()
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
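
The soft_nms call is consistent with Soft-NMS (Bodla et al.), where method=2 conventionally selects Gaussian score decay: overlapping boxes are down-weighted rather than discarded. A sketch of that decay rule, illustrative only and not this project's implementation:

import numpy as np

def gaussian_decay(score, iou, sigma=0.5):
    # Gaussian Soft-NMS: decay a box's score by its overlap with an
    # already-selected, higher-scoring box instead of removing it outright
    return score * np.exp(-(iou ** 2) / sigma)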
Example #23
 def _post_process(self, mask):
     kernel = cv2.getStructuringElement(cv2.MORPH_RECT,
                                        self.POST_PROCESS_CLOSE_SE_SIZE)
     close = cv2.morphologyEx(mask.astype(np.uint8), cv2.MORPH_CLOSE,
                              kernel).astype(bool)
     # open = cv2.morphologyEx(close.astype(np.uint8), cv2.MORPH_OPEN, kernel).astype(np.bool)
     if self.debug:
         show_image(close, 'morph close')
     return self._remove_small_connected_components(close)
Example #24
 def _diff(self):
     self.diff_image = cv2.absdiff(self.reference_image_registered,
                                   self.inspection_image)
     if self.debug:
         show_image(self.diff_image, 'diff_image')
         print('diff_image mean = {}'.format(
             np.mean(self.diff_image.flatten())))
         self._save_image(np.copy(self.diff_image),
                          'Diff image after registration with reference')
Example #25
def get_features(img: np.ndarray):
    #descriptor = cv2.xfeatures2d.SIFT_create()
    descriptor = cv2.ORB_create()
    img_kp = img.copy()
    (kps, features) = descriptor.detectAndCompute(img, None)

    cv2.drawKeypoints(img, kps, img_kp, color=(0, 0, 255))
    show_image(img_kp)
    return {'kp': kps, 'ft': features}
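
A natural follow-up to get_features is descriptor matching; a sketch assuming two images img1 and img2 are already loaded (ORB's binary descriptors call for the Hamming norm):

import cv2

f1, f2 = get_features(img1), get_features(img2)
matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
matches = sorted(matcher.match(f1['ft'], f2['ft']), key=lambda m: m.distance)
print('best match distance:', matches[0].distance if matches else None)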
Example #26
 def _diff_binarization(self):
     diff_mask = apply_hysteresis_threshold(self.diff_image,
                                            self.LOW_DIFF_THRESHOLD,
                                            self.HIGH_DIFF_THRESHOLD)
     valid_diff_mask = np.bitwise_and(diff_mask,
                                      self.valid_registration_mask)
     if self.debug:
         show_image(valid_diff_mask, 'valid_diff_mask')
         self._save_image(valid_diff_mask, 'Diff mask')
     return valid_diff_mask
Example #27
 def _joint_edges(self):
     inspection_edges = DefectDetector._edges_dilate(self.inspection_image)
     reference_edges = DefectDetector._edges_dilate(
         self.reference_image_registered)
     joint_edges_mask = np.logical_and(inspection_edges, reference_edges)
     if self.debug:
         show_image(joint_edges_mask, 'joint_edges_mask')
         self._save_image(joint_edges_mask,
                          'Dilated edges that appear on both images')
     return joint_edges_mask
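
_joint_edges ANDs dilated edge maps of the two images so that edges present in both can later be discounted when scoring defects. DefectDetector._edges_dilate is not shown; a sketch of one plausible implementation (the Canny thresholds and kernel size are assumptions):

import cv2

def edges_dilate(image, low=100, high=200):
    # detect edges, then dilate so slightly misaligned edges still overlap
    edges = cv2.Canny(image, low, high)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    return cv2.dilate(edges, kernel).astype(bool)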
Example #28
def unit_test():

    dataset = Omniglot_generator('../datas/images_background', 5, 5)
    dataloader = DataLoader(dataset, batch_size=1, shuffle=False)

    for images, labels in dataloader:
        print(images.size())
        print(labels.size())
        show_image(images[0][3])
        print(labels)
Example #29
def main():
    parser = make_parser()
    args = parser.parse_args()
    use_gpu = torch.cuda.is_available()

    content_image, style_image, input_image = read_images(args, use_gpu)

    #MODEL
    vgg = VGG()
    loss = Loss()
    if use_gpu:
        vgg = VGG().cuda()
        loss = Loss().cuda()
    for param in vgg.parameters():
        param.requires_grad = False

    #OPTIMIZER
    learning_rate = args.lr
    optimizer = optim.LBFGS([input_image], lr=learning_rate)
    num_iterations = args.iter
    losses = []

    content_3_2 = vgg(content_image, ["3_2"])[0]
    style_features = vgg(style_image, ["1_1", "2_1", "3_1", "4_1", "5_1"])

    def closure():
        optimizer.zero_grad()

        input_features = vgg(input_image,
                             ["1_1", "2_1", "3_1", "4_1", "5_1", "3_2"])
        input_3_2 = input_features[-1]
        input_features = input_features[:-1]

        total_loss = loss(input_features, input_3_2, content_3_2,
                          style_features)
        losses.append(total_loss.item())
        total_loss.backward()
        input_image.data.clamp_(0, 1)

        return total_loss

    for i in range(num_iterations):
        optimizer.step(closure)

        if i % 3 == 0:
            print(i / num_iterations * 100, "%")
    print("100.0 %")
    graph_losses(losses)

    output = Image.fromarray((input_image.data.squeeze() * 255).permute(
        1, 2, 0).cpu().numpy().astype(np.uint8))
    output.save(args.output)
    show_image(input_image)
Example #30
 def save_state(self, key, state): # 'epsilon', 'last entry'
     if self.render:
         print('saving ', key)
         show_image(state)
     key = key.encode()
     value_encoded = json.dumps(state.tolist()).encode()
     
     with self.env.begin(db=self.state_store, write=True) as txn:
         txn.put(key=key, value=value_encoded, dupdata=False)
     self.env.sync()
     
     return key.decode()
Example #31
    def __call__(self):
        with open(hyp['local'], 'r') as f:
            chip_loc = json.load(f)
        detecions = dict()
        # get val predict box
        scales = [800, 1300, 1500]
        for scale in scales:
            with open(hyp['result'].format(scale), 'r') as f:
                results = json.load(f)
            for det in tqdm(results):
                img_id = det['image_id']
                cls_id = det['category_id']
                bbox = det['bbox']
                score = det['score']
                loc = chip_loc[img_id]
                bbox = [bbox[0] + loc[0], bbox[1] + loc[1], bbox[2] + loc[0], bbox[3] + loc[1]]
                img_name = '_'.join(img_id.split('_')[:-1]) + osp.splitext(img_id)[1]
                if img_name in detecions:
                    detecions[img_name].append(bbox + [score, cls_id])
                else:
                    detecions[img_name] = [bbox + [score, cls_id]]

        # merge
        results = []
        for img_name in tqdm(self.img_list):
            det = []
            if img_name in detecions:
                det = detecions[img_name]
                det = utils.nms(det, score_threshold=0.05, iou_threshold=0.6, overlap_threshold=1).astype(np.float32)
                # det = utils.soft_nms(det).astype(np.float32)

            # show
            if hyp['show']:
                img = cv2.imread(osp.join(self.srcimg_dir, img_name))
                utils.show_image(img, det[det[:, 4] > 0.3])

            # save
            with open(osp.join(hyp['submit_dir'], img_name[:-4]+'.txt'), "w") as f:
                for box in det:
                    box[2:4] -= box[:2]
                    line = []
                    for idx, v in enumerate(list(box[0:5]) + [box[5]+1] + [-1, -1]):
                        line.append(str(int(v)) if idx != 4 else str(v))
                    f.write(','.join(line) + '\n')

        # Zip
        result_files = [osp.join(hyp['submit_dir'], file) for file in os.listdir(hyp['submit_dir'])]
        zip_path = 'result.zip'
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
            for file in result_files:
                if ".txt" in file:
                    zf.write(file)
Example #32
def main():
    img = load_image(color=True)
    out = contrast_enhancement(img)

    show_image(("original", img), ("enhanced", out), wait=5)

    orig_hist = histogram(img)
    enha_hist = histogram(out)

    plot_hisogram(orig_hist)
    plot_hisogram(enha_hist)

    plt.show()
Example #33
def detect(img, xdim, ydim):
	# convert to grayscale
	gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)
	utils.show_image(gray, 'gray')

	# threshold to convert to binary image
	ret, thresh = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV + cv.THRESH_OTSU)
	utils.show_image(thresh, 'threshold')

	# open (erode then dilate) to isolate the sure foreground
	kernel = np.ones((3, 3), np.uint8)
	opening = cv.morphologyEx(thresh, cv.MORPH_OPEN, kernel, iterations=3)
	utils.show_image(opening, 'opening')

	# get the mode pixel value (should be background)
	mode = utils.get_mode(img, xdim, ydim)

	# replace the foreground (trees) with the mode pixel
	for i in range(xdim):
		for j in range(ydim):
			# white in the opened image means vegetation
			if opening[i, j] == 255:
				# overwrite with the background mode pixel
				img[i, j] = mode

	utils.show_image(img, 'color-overlay')
	return img
Example #34
def main():
	fname = '../images/original/chibombo1.png'
	original = cv.imread(fname)
	utils.show_image(original, 'original')
	img = utils.smooth(original, 'bilateral')
	utils.show_image(img, 'bilateral filter')

	# get image dimensions
	xdim, ydim, nchannels = img.shape

	veg_to_background = detectvegetation.detect(img, xdim, ydim)

	segmented = segmentcolor.mask(veg_to_background, xdim, ydim)

	detect = detectpolygon.detect(segmented, original, xdim, ydim)
Example #35
def detect(segmented, original, xdim, ydim):

  # morphological opening and closing
  kernel = np.ones((3,3), np.uint8)
  img = cv.morphologyEx(segmented, cv.MORPH_OPEN, kernel)
  img = cv.morphologyEx(img, cv.MORPH_CLOSE, kernel)

  utils.show_image(img, 'open-close')

  imgcopy = img.copy()
  gray = cv.cvtColor(img, cv.COLOR_RGB2GRAY)

  num_buildings = 0

  for i in range(255):
    # threshold the grayscale image at that value
    binary = np.zeros((xdim, ydim), np.uint8)
    ret, binary = cv.threshold(gray, dst=binary, thresh=i, maxval=255, type=cv.THRESH_OTSU)
    #binary[gray == i] = 255
    # utils.show_image(binary, 'binary')

    # find contours, fit to polygon, and determine if rectangular
    contours, hierarchy = cv.findContours(binary, mode=cv.RETR_LIST, method=cv.CHAIN_APPROX_SIMPLE)

    for c in contours:
      poly = cv.approxPolyDP(np.array(c), 0.07*cv.arcLength(c,True), True)
      carea = cv.contourArea(c)
      polyarea = cv.contourArea(poly)
      hull = cv.convexHull(c)
      hullarea = cv.contourArea(hull)

      # bounding box (computed but currently unused)
      rect = cv.minAreaRect(c)
      box = cv.boxPoints(rect)
      box = box.astype(np.intp)

      if polyarea > 30 and carea > 30:
        cv.drawContours(img, [c], 0, (0,0,255), 1)
      if len(poly) < 6 and carea > 100:
        num_buildings += 1
        cv.drawContours(imgcopy, [poly], 0, (0,0,255), 1)
        cv.drawContours(original, [poly], 0, (0,0,255), 1)

  # show images
  utils.show_image(img, 'all bounding boxes')
  utils.show_image(imgcopy, 'with some filtering')
  utils.show_image(original, 'onto original')
  print(num_buildings)
  return original