Example #1
 def get_c_next(self, epoch):
     c_next = self.T(self.eval_c)
     plot_img(c_next.t().detach().cpu(),
              os.path.join(self.out_dir, 'gen',
                           'eval_code_next_%d.png' % epoch),
              vrange=self.P.unif_range)
     return c_next.repeat(1, 1, self.test_sample_size).permute(
         2, 0, 1).contiguous().view(-1, self.c_dim)
Example #2
 def on_epoch_end(self, epoch, logs=None):
     if epoch % self.step != 0:
         return
     result = tf.squeeze(self.model(self.input_net), axis=0)
     if self.rootdir is not None:
         utils.save_img(
             os.path.join(self.rootdir, f"epoch_{epoch}.png"),
             result,
             self.data_format,
         )
     if self.keep_output:
         self.outputs_net.append(result)
     if self.plot:
         utils.plot_img(result, ncols=1, data_format=self.data_format)
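The hook above assumes a standard tf.keras callback; a minimal sketch of how such a class might be assembled and registered (the class name and constructor arguments are assumptions, not taken from the original project; on_epoch_end as shown above would be a method of this class):

import tensorflow as tf

class PreviewCallback(tf.keras.callbacks.Callback):
    # Hypothetical wrapper: every attribute read by on_epoch_end above
    # is supplied through the constructor.
    def __init__(self, input_net, step=5, rootdir=None, keep_output=False,
                 plot=False, data_format="channels_last"):
        super().__init__()
        self.input_net = input_net
        self.step = step
        self.rootdir = rootdir
        self.keep_output = keep_output
        self.plot = plot
        self.data_format = data_format
        self.outputs_net = []

# model.fit(x_train, y_train, epochs=50,
#           callbacks=[PreviewCallback(preview_input, step=5, plot=True)])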
Example #3
def plot_result(num_samples=2):
    inds = np.random.permutation(len(tX))[:num_samples]
    for i in inds:
        x = torch.Tensor(tX[i].copy()).unsqueeze(0).to(device)
        locs, confs = dssd.forward(x)
        locs = decode_offset(locs.squeeze(), default_boxes)
        boxes = non_max_supression(locs, confs)
        img = tX[i].copy().reshape(512, 512, 3)
        for box in boxes:
            x1 = math.ceil(box[0] * 512)
            y1 = math.ceil(box[1] * 512)
            x2 = math.ceil(box[2] * 512)
            y2 = math.ceil(box[3] * 512)
            start = (x1, y1)
            end = (x2, y2)
            #print(x1,y1, x2, y2)
            img = cv2.rectangle(img, start, end, (0, 255, 255), 4)
        plot_img(img)
Example #4
    def _eval_noise(self):
        '''
        :return: z (sample_size x num_codes x z_dim), c (sample_size x num_codes x c_dim)
        '''
        more_codes = self.test_num_codes - (self.c_dim + 1)
        # c = Variable(torch.cuda.FloatTensor([[j<i for j in range(self.disc_c_dim)] for i in range(min(self.test_num_codes, self.disc_c_dim+1))]))
        c = Variable(torch.cuda.FloatTensor(
            [[j < i for j in range(self.c_dim)] for i in range(min(self.test_num_codes, self.c_dim + 1))])) * (
            self.P.unif_range[1] - self.P.unif_range[0]) + self.P.unif_range[0]
        if more_codes > 0:
            c = torch.cat([c, self.P.sample(more_codes)], 0)
        self.eval_c = c
        z = Variable(torch.FloatTensor(self.test_sample_size, self.rand_z_dim).normal_(0, 1).cuda())

        plot_img(c.t().data,
                 os.path.join(self.out_dir, 'gen', 'eval_code.png'),
                 vrange=self.P.unif_range)
        return z[:, None, :].repeat(1, self.test_num_codes, 1).view(-1, self.rand_z_dim), \
               c.repeat(1, 1, self.test_sample_size).permute(2, 0, 1).contiguous().view(-1, self.c_dim)
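The nested comprehension above builds a 'staircase' of code vectors and rescales it into the prior's uniform range; a small NumPy illustration of the same construction, with made-up values (c_dim=3, range [-1, 1]):

import numpy as np

c_dim = 3
lo, hi = -1.0, 1.0   # stand-in for self.P.unif_range
num_codes = c_dim + 1

# Row i switches on the first i code dimensions: [0,0,0], [1,0,0], [1,1,0], [1,1,1]
c = np.array([[float(j < i) for j in range(c_dim)] for i in range(num_codes)])
c = c * (hi - lo) + lo   # rescale {0, 1} into [lo, hi]
print(c)
# [[-1. -1. -1.]
#  [ 1. -1. -1.]
#  [ 1.  1. -1.]
#  [ 1.  1.  1.]]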
Example #5
def Combine():
    args = parse_args()

    loc_file = osp.join(args.loc_dir, args.split + "_chip.json")

    with open(args.result_file, 'r') as f:
        results = json.load(f)
    with open(loc_file, 'r') as f:
        chip_loc = json.load(f)

    # if osp.isfile(args.anno_file):
    #     with open(args.anno_file, 'r') as f:
    #         annos = json.load(f)

    detecions = dict()
    for det in tqdm(results):
        img_id = det['image_id']
        cls_id = det['category_id'] + 1
        bbox = det['bbox']
        score = det['score']
        loc = chip_loc[img_id]
        bbox = [bbox[0] + loc[0], bbox[1] + loc[1], bbox[2], bbox[3]]

        img_name = '_'.join(img_id.split('_')[:-1]) + osp.splitext(img_id)[1]
        if img_name in detecions:
            detecions[img_name].append(bbox + [score, cls_id])
        else:
            detecions[img_name] = [bbox + [score, cls_id]]

    output_dir = 'DET_results-%s' % args.split
    if osp.exists(output_dir):
        shutil.rmtree(output_dir)
    os.mkdir(output_dir)
    for img_name, det in tqdm(detecions.items()):
        det = cnms(det)
        txt_name = osp.splitext(img_name)[0] + '.txt'
        with open(osp.join(output_dir, txt_name), 'w') as f:
            for bbox in det:
                bbox = [
                    str(x)
                    for x in (list(bbox[0:5]) + [int(bbox[5])] + [-1, -1])
                ]
                f.write(','.join(bbox) + '\n')

        if args.show:
            img_path = osp.join(args.img_dir, img_name)
            img = cv2.imread(img_path)[:, :, ::-1]
            bboxes = det[:, [0, 1, 2, 3, 5, 4]]
            bboxes[:, 4] -= 1
            bboxes[:, 2:4] = bboxes[:, :2] + bboxes[:, 2:4]
            img = plot_img(img, bboxes, classes)

            plt.figure(figsize=(10, 10))
            plt.subplot(1, 1, 1).imshow(img)
            plt.show()
Example #6
    def __call__(self):
        def_eval = DefaultEval()
        for idx, filename in enumerate(tqdm(self.pred_list)):
            pred_bbox = self.load_anno(
                osp.join(self.pred_dir, filename))
            gt_bbox = self.load_anno(
                osp.join(self.gt_dir, filename))
            pred_bbox, gt_bbox = self.delete_ignore(pred_bbox, gt_bbox)

            def_eval.statistics(
                torch.tensor(pred_bbox).unsqueeze(0),
                torch.tensor(gt_bbox).unsqueeze(0))

            if show:  # 'show' is assumed to be defined in the enclosing scope (e.g. a config flag)
                img = cv2.imread(osp.join(
                    self.img_dir, osp.splitext(filename)[0]+'.jpg'))[:, :, ::-1]
                gt_img = plot_img(img, gt_bbox, self.classes)
                pred_img = plot_img(img, pred_bbox, self.classes)
                plt.figure(figsize=(10, 10))
                plt.subplot(2, 1, 1).imshow(gt_img)
                plt.subplot(2, 1, 2).imshow(pred_img)
                plt.show()

        # Compute statistics
        stats = [np.concatenate(x, 0) for x in list(zip(*def_eval.stats))]
        # number of targets per class
        nt = np.bincount(stats[3].astype(np.int64), minlength=len(self.classes))
        if len(stats):
            p, r, ap, f1, ap_class = def_eval.ap_per_class(*stats)
            mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()

        # Print and Write results
        title = ('%20s' + '%10s'*5) % ('Class', 'Targets', 'P', 'R', 'mAP', 'F1')
        print(title)
        printline = '%20s' + '%10.3g' * 5
        pf = printline % ('all', nt.sum(), mp, mr, map, mf1)  # print format
        print(pf)
        if len(self.classes) > 1 and len(stats):
            for i, c in enumerate(ap_class):
                pf = printline % (self.classes[c], nt[c], p[i], r[i], ap[i], f1[i])
                print(pf)
Example #7
    def tag_similar_faces(self, labels=None):
        if (labels is None):
            labels = self.model.labels_

        cum_sum = np.cumsum([bx.shape[0] for bx in self.boxes])

        result = [None] * len(self.images)
        for i, l in enumerate(labels):
            im_off, face_off = find_image_pos(cum_sum, i)
            bx = np.array([self.boxes[im_off][face_off]])
            if (result[im_off] is None):
                res = draw_boxes(self.images[im_off], bx, labels=[str(l)])
            else:
                res = draw_boxes(res, bx, labels=[str(l)])
            result[im_off] = res

        # for printing purposes
        result_ = list(
            map(lambda im: cv2.resize(im, (0, 0), fx=.5, fy=.5), result))

        plot_img(result_)
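find_image_pos is a helper from the surrounding project; one plausible implementation, assuming cum_sum holds the cumulative number of detected faces per image as built above (a sketch, not the original code):

import numpy as np

def find_image_pos(cum_sum, face_idx):
    # Map a flat face index to (image index, face index within that image).
    im_off = int(np.searchsorted(cum_sum, face_idx, side='right'))
    face_off = face_idx - (cum_sum[im_off - 1] if im_off > 0 else 0)
    return im_off, face_off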
Example #8
def Combine():
    args = parse_args()

    loc_file = osp.join(args.loc_dir, args.split + "_chip.json")

    with open(args.result_file, 'r') as f:
        results = json.load(f)
    with open(loc_file, 'r') as f:
        chip_loc = json.load(f)

    detecions = dict()
    for det in tqdm(results):
        img_id = det['image_id']
        cls_id = det['category_id']
        bbox = det['bbox']
        score = det['score']
        loc = chip_loc[img_id]
        bbox = [
            bbox[0] + loc[0], bbox[1] + loc[1], bbox[2] + loc[0],
            bbox[3] + loc[1]
        ]
        img_name = '_'.join(img_id.split('_')[:-1]) + osp.splitext(img_id)[1]
        if img_name in detecions:
            detecions[img_name].append(bbox + [score, cls_id])
        else:
            detecions[img_name] = [bbox + [score, cls_id]]

    output_file = 'DET_results-%s' % args.split + '.csv'

    with open(output_file, 'w') as f:
        f.writelines("name,image_id,confidence,xmin,ymin,xmax,ymax\n")
        for img_name, det in tqdm(detecions.items()):
            det = nms(det, score_threshold=0.05)
            img_id = osp.splitext(img_name)[0] + '.xml'
            for box in det:
                f.writelines(CLASSES[int(box[5])] + ',' + img_id + ',' +
                             str(box[4]))
                for v in np.round(box[:4]):
                    f.writelines(',' + str(int(v)))
                f.writelines('\n')

            if args.show:
                img_path = osp.join(args.img_dir, img_name)
                img = cv2.imread(img_path)[:, :, ::-1]
                bboxes = det[:, [0, 1, 2, 3, 5, 4]]
                # show_image(img, bboxes)
                img = plot_img(img, bboxes, CLASSES)
                plt.figure(figsize=(10, 10))
                plt.subplot(1, 1, 1).imshow(img)
                plt.show()
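nms and cnms in these snippets are project-specific helpers; a generic greedy IoU-based NMS over rows of [x1, y1, x2, y2, score, cls] illustrates the behaviour those calls are assumed to provide (thresholds here are illustrative):

import numpy as np

def greedy_nms(dets, iou_threshold=0.5, score_threshold=0.05):
    # dets: iterable of rows [x1, y1, x2, y2, score, cls]
    dets = np.asarray(dets, dtype=np.float32)
    dets = dets[dets[:, 4] >= score_threshold]
    order = dets[:, 4].argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # IoU between the highest-scoring remaining box and the rest
        xx1 = np.maximum(dets[i, 0], dets[order[1:], 0])
        yy1 = np.maximum(dets[i, 1], dets[order[1:], 1])
        xx2 = np.minimum(dets[i, 2], dets[order[1:], 2])
        yy2 = np.minimum(dets[i, 3], dets[order[1:], 3])
        inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
        area_i = (dets[i, 2] - dets[i, 0]) * (dets[i, 3] - dets[i, 1])
        area_r = (dets[order[1:], 2] - dets[order[1:], 0]) * \
                 (dets[order[1:], 3] - dets[order[1:], 1])
        iou = inter / (area_i + area_r - inter + 1e-9)
        order = order[1:][iou <= iou_threshold]
    return dets[keep]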
Example #9
def main():

    #
    # parse command line
    #
    args = parse_arguments()

    #
    # create out folder
    #
    if not os.path.exists(args.outf):
        os.makedirs(args.outf)

    #
    # load image
    #
    if not os.path.exists(args.image):
        raise FileNotFoundError(args.image)
    im = ClickableImage(args.image)

    # ******************* PARAMETERS ******************************************

    # Intensity of interest: intensity above which a pixel may be considered as a center / an edge
    # Assumption: centers and edges are brighter than background
    intensity_of_interest = get_quantile(im.img, 0.75)

    #
    # Ring
    #
    # - radius: if --typical_edge is not specified, we use the value of --radius, else we heuristically choose it from the 'typical edge length'
    # - thickness: if --thickness is not specified, we use (2/3)*radius
    if args.typical_edge == "measure_an_edge":
        typical_edge_length = im.measure_an_edge()
        print(
            "You selected an edge of length : {}".format(typical_edge_length))
        radius = math.ceil(typical_edge_length * 0.6)
    elif args.typical_edge == "count_cells":
        n_cells = im.count_cells()
        typical_edge_length = im.n_cells_to_edge_length(n_cells)
        radius = math.floor(typical_edge_length / 3)
    else:
        radius = args.radius
    thickness = args.thickness if args.thickness is not None else int(
        (2 / 3) * radius)

    # *************************************************************

    #
    # Manual selection of the point to analyze
    #
    center = im.get_point()
    center = (math.ceil(center[0]), math.ceil(center[1]))
    print("Selected point: {}".format(center))

    #************************** STEP 1 ****************************#
    # Filter the image with a ring centered on the point of interest
    #**************************************************************#
    #
    # define ring
    #
    ring = {}
    ring["center"] = center
    ring["radius"] = (radius - 0.5 * thickness, radius + 0.5 * thickness)

    #
    # filter image with the ring
    #
    mask = create_ring_mask(im.img, ring)
    img_filtered = apply_mask(im.img, mask)

    #
    # Visualization of the ring around the selected point
    #

    # image filtered by the ring
    fig, ax = plot_img(img_filtered, title="Image filtered by the ring")
    fig.savefig(os.path.join(args.outf, "1_img_filtered.png"))

    # image with superimposed ring
    img_with_ring = superimpose_ring(im.img, ring, mask)
    fig, ax = plot_img(
        img_with_ring,
        title="Center : ({:.2f}, {:.2f}) \n Radius : ({:.2f}, {:.2f})".format(
            ring["center"][0], ring["center"][1], ring["radius"][0],
            ring["radius"][1]))
    fig.savefig(os.path.join(args.outf, "2_ring.png"))

    #********************** STEP 2 **********************#
    # Get the 'mountain' relief of intensity in the ring
    #****************************************************#

    angle2intensity = angle2intensity_in_ring(im.img, ring, mask)
    bucket2intensity, intensity2bucket = angular_smoothing(
        angle2intensity,
        size=args.size_smoothing,
        stride=args.stride_smoothing,
        cast_to_int=False)

    #************************** STEP 3 *********************#
    # 'water descent' on our relief, to obtain the barcodes
    # of the peaks (birth/death)
    #
    # Let f be our 'relief' function, f: angle -> intensity
    # Compute persistence of the connected components of
    # the filtration {f^-1([h, +inf[), for all h real}
    #*******************************************************#
    barcodes = water_descent(intensity2bucket, args.stride_smoothing)

    # Heuristically compute a suitable 'min_lifetime', a threshold on the duration (death - birth):
    # - below: the connected component is considered noise
    # - above: the cc is considered a peak
    # this is the dashed line that "cuts" the persistence diagram
    range_intensity = max(bucket2intensity) - min(bucket2intensity)
    min_lifetime = max(args.threshold_min_lifetime,
                       range_intensity * args.coef_min_lifetime)

    #****************************** STEP 4 ********************************#
    # For visualization, we report the barcodes in a 'persistence diagram'
    # We filter out the cc with a persistence < min_lifetime
    # The remaining ones are the significant 'peaks'
    # We count them to determine if the point of interest is:
    # - a corner (3 or more peaks)
    # - an edge (2 peaks)
    # - a part of background (else)
    #***********************************************************************#

    # Analysis of the barcodes: persistence diagram and type of the point
    peaks = get_peaks(barcodes, cut=min_lifetime)
    n_peaks = len(peaks)

    #
    # Visualization
    #
    fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(16, 7.3))

    # intensity relief in the ring
    ax1.plot(range(0, 360, args.stride_smoothing), bucket2intensity)
    ax1.set_xlim(0, 360)
    ax1.set_xlabel("Angle in the ring (°)")
    ax1.set_ylabel("Intensity in [0, 255]")
    ax1.set_title(
        "Intensity vs. Angle\nafter angular smoothing (size={}, stride={})".
        format(args.size_smoothing, args.stride_smoothing))

    # corresponding persistence diagram
    ax2 = persistence_diagram(ax2, barcodes, min_lifetime,
                              intensity_of_interest)
    ax2.set_title(
        "Persistence Diagram (with cut = {}) \nNumber of 'persistent' cc: {}".
        format(min_lifetime, n_peaks))
    ax2.set_xlabel("Birth intensity")
    ax2.set_ylabel("Death intensity")
    fig.savefig(os.path.join(args.outf, "3_persistence_diagram.png"),
                dpi=90,
                bbox_inches='tight')

    #
    # classify the selected point
    #
    if n_peaks >= 3:
        point_type = "corner"
    elif n_peaks == 2:
        point_type = "edge"
    else:
        point_type = "background"
    print("\n==> Point type: {} ({} peak(s))".format(point_type, n_peaks))

    #
    # display detected edges
    #
    if n_peaks >= 2:
        angles_of_edges = []
        for cc in peaks:
            angle_of_edge = cc.peak.x
            angles_of_edges.append(angle_of_edge)
        img_with_ring_and_intersections = superimpose_ring_and_intersections(
            im.img, ring, angles_of_edges, mask)
        fig, ax = plot_img(
            img_with_ring_and_intersections,
            title="Angles between horizontal and detected edges\n{}".format(
                sorted(angles_of_edges)))
        fig.savefig(os.path.join(args.outf, "4_edges.png"))

    #
    # Make animated gif
    #
    if not args.nogif:
        print("\nMaking animated gif...")
        make_gif(args.outf, barcodes, min_lifetime, bucket2intensity,
                 args.stride_smoothing, args.size_smoothing,
                 intensity_of_interest)

    print("Visualizations stored in {}".format(args.outf))

    return
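water_descent and get_peaks belong to the project; as a rough illustration of the idea described in step 3 (0-dimensional persistence of the superlevel sets of a 1D signal), here is a small self-contained sketch; it is not the author's implementation and ignores the circular wrap-around of the ring:

def peak_persistence(values):
    # Returns (birth, death) intensity pairs, one per peak; the global
    # maximum never dies and is paired with min(values).
    n = len(values)
    order = sorted(range(n), key=lambda i: values[i], reverse=True)
    parent = [None] * n   # union-find parent; None = index not activated yet
    birth = {}            # component root -> intensity at which it was born
    pairs = []

    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]
            i = parent[i]
        return i

    for i in order:                        # the 'water level' descends from the top
        parent[i] = i
        birth[i] = values[i]
        for j in (i - 1, i + 1):           # a true ring would also join 0 and n-1
            if 0 <= j < n and parent[j] is not None:
                ri, rj = find(i), find(j)
                if ri == rj:
                    continue
                # the component born lower dies now; the older one absorbs it
                old, young = (ri, rj) if birth[ri] >= birth[rj] else (rj, ri)
                pairs.append((birth[young], values[i]))
                parent[young] = old
    root = find(0)                         # single surviving component
    pairs.append((birth[root], min(values)))
    return pairs

# peaks ~ [(b, d) for (b, d) in peak_persistence(bucket2intensity) if abs(b - d) >= min_lifetime]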
Example #10
    def __call__(self):
        # get the val-set predicted boxes
        with open(hyp['result'], 'r') as f:
            results = json.load(f)
        with open(hyp['local'], 'r') as f:
            chip_loc = json.load(f)
        detecions = dict()
        for det in tqdm(results):
            img_id = det['image_id']
            cls_id = det['category_id']
            bbox = det['bbox']
            score = det['score']
            loc = chip_loc[img_id]
            bbox = [
                bbox[0] + loc[0], bbox[1] + loc[1], bbox[2] + loc[0],
                bbox[3] + loc[1]
            ]
            img_name = '_'.join(
                img_id.split('_')[:-1]) + osp.splitext(img_id)[1]
            if img_name in detecions:
                detecions[img_name].append(bbox + [score, cls_id])
            else:
                detecions[img_name] = [bbox + [score, cls_id]]

        # metrics
        results = []
        for img_id in tqdm(self.img_ids):
            img_info = self.coco.loadImgs(img_id)[0]
            det = detecions[img_info['file_name']]
            gt = self.load_annotations(img_info['file_name'])
            gt, det = self.dropObjectsInIgr(gt, det)
            det = nms(det, score_threshold=0.05)[:, [0, 1, 2, 3, 5, 4]].astype(
                np.float32)
            # det = nms2(det)
            if hyp['show']:
                img = cv2.imread(
                    osp.join(self.srcimg_dir,
                             img_info['file_name']))[:, :, ::-1]
                gt_img = plot_img(img, gt, self.cat_ids)
                pred_img = plot_img(img, det, self.cat_ids)
                plt.figure(figsize=(10, 10))
                plt.subplot(1, 1, 1).imshow(gt_img)
                plt.show()
                plt.subplot(1, 1, 1).imshow(pred_img)
                plt.show()

            for bbox in det:
                bbox[2:4] = bbox[2:4] - bbox[:2]
                results.append({
                    "image_id": img_id,
                    "category_id": bbox[4],
                    "bbox": np.round(bbox[:4]),
                    "score": bbox[5]
                })

        coco_pred = self.coco.loadRes(results)
        coco_eval = COCOeval(self.coco, coco_pred, 'bbox')
        coco_eval.params.imgIds = self.coco.getImgIds()
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()

        with open('results.json', 'w') as f:
            json.dump(results, f, indent=4, cls=MyEncoder)
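MyEncoder is not shown in this snippet; a typical numpy-aware json.JSONEncoder (an assumption about what the project's encoder does) is enough to make the json.dump above accept the numpy values stored in results:

import json
import numpy as np

class MyEncoder(json.JSONEncoder):
    # Serialise numpy scalars and arrays that the json module cannot handle natively.
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        return super().default(obj)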
Example #11
def test(**kwargs):
    opt._parse(kwargs)
    saver = Saver(opt, "test")

    # imgs_name = os.listdir(opt.test_dir)
    imgs_set = opt.root_dir + "ImageSets/Main/val.txt"
    with open(imgs_set, 'r') as f:
        imgs_name = [x.strip() + '.jpg' for x in f.readlines()]

    resize = Letterbox(input_size=(opt.min_size, opt.max_size))
    normalize = Normalizer(mean=opt.mean, std=opt.std)

    # Define Network
    # initialize the network here.
    model = Model(opt, num_classes=10)
    model = model.to(opt.device)
    post_pro = PostProcess(**opt.nms)

    if os.path.isfile(opt.pre):
        print("=> loading checkpoint '{}'".format(opt.pre))
        checkpoint = torch.load(opt.pre)

        model.load_state_dict(checkpoint['state_dict'])
        print("=> loaded checkpoint '{}' (epoch {})".format(
            opt.pre, checkpoint['epoch']))
    else:
        raise FileNotFoundError

    results = []
    model.eval()
    with torch.no_grad():
        for ii, img_name in enumerate(tqdm(imgs_name)):
            # if ii >= 3: break;
            # data read and transforms
            img_path = osp.join(opt.test_dir, img_name)
            img = cv2.imread(img_path)[:, :, ::-1]
            sample = {'img': img, 'annot': None}
            sample = normalize(sample)
            sample = resize(sample)
            input = sample['img'].unsqueeze(0).to(opt.device).permute(
                0, 3, 1, 2)

            # predict
            scores, labels, boxes = model(input)
            scores_bt, labels_bt, boxes_bt = post_pro(scores, labels, boxes,
                                                      input.shape[-2:])

            boxes_bt[0] = re_resize(boxes_bt[0], sample['scale'],
                                    opt.resize_type)

            if show:  # 'show' is assumed to be defined elsewhere (e.g. an option flag)
                # draw
                labels = labels_bt[0].float().view(-1, 1)
                scores = scores_bt[0].float().view(-1, 1)
                output = torch.cat((boxes_bt[0], labels, scores), dim=1)
                output = output.numpy()
                img = plot_img(img, output, classes)

                plt.figure(figsize=(10, 10))
                plt.subplot(1, 1, 1).imshow(img)
                plt.show()

            for box, label, score in zip(boxes_bt[0], labels_bt[0],
                                         scores_bt[0]):
                box[2:] = box[2:] - box[:2]
                results.append({
                    "image_id": img_name,
                    "category_id": label.numpy(),
                    "bbox": box[:4].numpy(),
                    "score": score.numpy()
                })

        saver.save_test_result(results)
Example #12
# load data
data = np.load('ORL_faces.npz')
trainX = data['trainX']
trainY = data['trainY']
testX = data['testX']
testY = data['testY']

train_images, test_images = trainX.shape[0], testX.shape[0]
trainX = trainX.astype(np.float32).reshape(train_images, 112, 92)
testX = testX.astype(np.float32).reshape(test_images, 112, 92)
trainY = trainY.astype(np.int64)  # np.long is deprecated/removed in recent NumPy
testY = testY.astype(np.int64)

# Image Sanity check
sample_set = np.random.randint(0, train_images, 50)
plt = plot_img(trainX[sample_set])
plt.savefig('sanity_check_img_plot')
plt.cla()
plt.clf()

trainX = torch.from_numpy(trainX)
trainY = torch.from_numpy(trainY)
testX = torch.from_numpy(testX)
testY = torch.from_numpy(testY)

train_data = torch.utils.data.TensorDataset(trainX, trainY)
test_data = torch.utils.data.TensorDataset(testX, testY)

batch_size = 10

train_loader = torch.utils.data.DataLoader(train_data,