Example #1
 def _process_raw_image(self):
     """Process the raw temp data to a colored image. Filter if necessary"""
     # Image processing
     # The colormap can't be applied before ndimage.zoom (zoom would also interpolate across the colour channels), so the order is reversed in the first two options even though it seems slower
     if self._interpolation_index == 5:  # Scale via scipy only - slowest but seems higher quality
         self._image = ndimage.zoom(self._raw_image,
                                    25)  # interpolate with scipy
         self._image = cv2.applyColorMap(
             self._image,
             cmapy.cmap(self._colormap_list[self._colormap_index]))
     elif self._interpolation_index == 6:  # Scale partially via scipy and partially via cv2 - mix of speed and quality
         self._image = ndimage.zoom(self._raw_image,
                                    10)  # interpolate with scipy
         self._image = cv2.applyColorMap(
             self._image,
             cmapy.cmap(self._colormap_list[self._colormap_index]))
         self._image = cv2.resize(self._image, (800, 600),
                                  interpolation=cv2.INTER_CUBIC)
     else:
         self._image = cv2.applyColorMap(
             self._raw_image,
             cmapy.cmap(self._colormap_list[self._colormap_index]))
         self._image = cv2.resize(self._image, (800, 600),
                                  interpolation=self._interpolation_list[
                                      self._interpolation_index])
     self._image = cv2.flip(self._image, 1)
     if self.filter_image:
         self._image = cv2.bilateralFilter(self._image, 15, 80, 80)
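A standalone sketch of the ordering constraint noted in the comment above: ndimage.zoom interpolates every axis by default, so it must run on the single-channel data before cv2.applyColorMap adds a colour axis. The shapes and the 'inferno' colormap here are illustrative.

import cv2
import cmapy
import numpy as np
from scipy import ndimage

raw = np.random.randint(0, 255, (24, 32), dtype=np.uint8)  # e.g. a 32x24 thermal frame

# Zoom while the data is still single-channel; zooming afterwards would also
# interpolate across the three colour channels.
zoomed = ndimage.zoom(raw, 25)                              # (600, 800), dtype preserved
colored = cv2.applyColorMap(zoomed, cmapy.cmap('inferno'))  # (600, 800, 3) BGR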
Example #2
def write_laplace_summary(model, model_input, gt, model_output, writer, total_steps, prefix='train_'):
    # Plot comparison images
    gt_img = dataio.lin2img(gt['img'])
    pred_img = dataio.lin2img(model_output['model_out'])

    output_vs_gt = torch.cat((dataio.rescale_img(gt_img), dataio.rescale_img(pred_img,perc=1e-2)), dim=-1)
    writer.add_image(prefix + 'comp_gt_vs_pred', make_grid(output_vs_gt, scale_each=False, normalize=True),
                     global_step=total_steps)

    # Plot comparisons laplacian (this is what has been fitted)
    gt_laplace = dataio.lin2img(gt['laplace'])
    pred_laplace = diff_operators.laplace(model_output['model_out'], model_output['model_in'])
    pred_laplace = dataio.lin2img(pred_laplace)

    output_vs_gt_laplace = torch.cat((gt_laplace, pred_laplace), dim=-1)
    writer.add_image(prefix + 'comp_gt_vs_pred_laplace', make_grid(output_vs_gt_laplace, scale_each=False, normalize=True),
                     global_step=total_steps)

    # Plot image gradient
    img_gradient = diff_operators.gradient(model_output['model_out'], model_output['model_in'])
    grads_img = dataio.grads2img(dataio.lin2img(img_gradient))
    writer.add_image(prefix + 'pred_grad', make_grid(grads_img, scale_each=False, normalize=True),
                     global_step=total_steps)

    # Plot gt image
    writer.add_image(prefix + 'gt_img', make_grid(gt_img, scale_each=False, normalize=True),
                     global_step=total_steps)

    # Plot gt laplacian
    # writer.add_image(prefix + 'gt_laplace', make_grid(gt_laplace, scale_each=False, normalize=True),
    #                  global_step=total_steps)
    gt_laplace_img = dataio.to_uint8(dataio.to_numpy(dataio.rescale_img(gt_laplace, 'scale', 1)))
    gt_laplace_img = cv2.applyColorMap(gt_laplace_img.squeeze(), cmapy.cmap('RdBu'))
    gt_laplace_img = cv2.cvtColor(gt_laplace_img, cv2.COLOR_BGR2RGB)
    writer.add_image(prefix + 'gt_lapl', torch.from_numpy(gt_laplace_img).permute(2, 0, 1), global_step=total_steps)

    # Plot pred image
    writer.add_image(prefix + 'pred_img', make_grid(pred_img, scale_each=False, normalize=True),
                     global_step=total_steps)

    # Pred gradient was already computed and written above under the 'pred_grad' tag

    # Plot pred laplacian
    # writer.add_image(prefix + 'pred_lapl', make_grid(pred_laplace, scale_each=False, normalize=True),
    #                  global_step=total_steps)
    pred_laplace_img = dataio.to_uint8(dataio.to_numpy(dataio.rescale_img(pred_laplace,'scale',1)))
    pred_laplace_img = cv2.applyColorMap(pred_laplace_img.squeeze(),cmapy.cmap('RdBu'))
    pred_laplace_img = cv2.cvtColor(pred_laplace_img, cv2.COLOR_BGR2RGB)
    writer.add_image(prefix + 'pred_lapl', torch.from_numpy(pred_laplace_img).permute(2,0,1), global_step=total_steps)

    min_max_summary(prefix + 'coords', model_input['coords'], writer, total_steps)
    min_max_summary(prefix + 'gt_laplace', gt_laplace, writer, total_steps)
    min_max_summary(prefix + 'pred_laplace', pred_laplace, writer, total_steps)
    min_max_summary(prefix + 'pred_img', pred_img, writer, total_steps)
    min_max_summary(prefix + 'gt_img', gt_img, writer, total_steps)
Example #3
    def compose_frame(self, frame):
        mask = self.classifier.process(frame).segmentation_mask

        if self.threshold < 1:
            cv2.threshold(mask, self.threshold, 1, cv2.THRESH_BINARY, dst=mask)

        if self.postprocess:
            cv2.dilate(mask, np.ones((5, 5), np.uint8), iterations=1, dst=mask)
            cv2.blur(mask, (10, 10), dst=mask)

        if self.MRAR < 1:
            if self.old_mask is None:
                self.old_mask = mask
            mask = cv2.accumulateWeighted(mask, self.old_mask, self.MRAR)

        # Get background image
        if self.no_background is False:
            background_frame = next(self.images["background"])
        else:
            background_frame = cv2.GaussianBlur(
                frame, (self.background_blur, self.background_blur),
                self.sigma,
                borderType=cv2.BORDER_DEFAULT)

        # Apply colour map to the background
        if self.cmap_bg:
            cv2.applyColorMap(background_frame,
                              cmap(self.cmap_bg),
                              dst=background_frame)

        # Add hologram to the person
        if self.hologram:
            frame = hologram_effect(frame)

        # Apply colour map to the person
        if self.cmap_person:
            cv2.applyColorMap(frame, cmap(self.cmap_person), dst=frame)

        # Replace background
        if self.use_sigmoid:
            mask = sigmoid(mask)

        cv2.blendLinear(frame, background_frame, mask, 1 - mask, dst=frame)

        # Add foreground if needed
        if self.use_foreground and self.foreground_image is not None:
            cv2.blendLinear(frame,
                            self.images["foreground"],
                            self.images["inverted_foreground_mask"],
                            self.images["foreground_mask"],
                            dst=frame)

        return frame
Example #4
def prediction(model_instance, input_data, lead_time):
    
    input_data = data_preprocessing(input_data)
    
    nwcst = []

    print("Forecasting the probability of rain within the next {} minutes...\n".format(lead_time*15))
    for i in range(lead_time):
        # make prediction
        pred = model_instance.predict(input_data)

        # print(pred.dtype)
        # pred_copy = pred.squeeze()
        # pred_copy = cv2.normalize(pred_copy, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
        # print(pred_copy.shape, type(pred_copy))
        # heatmap = cv2.applyColorMap(pred_copy, cmapy.cmap(precipitation_cmap))
        # cv2.imwrite('prediction_{}.png'.format(i), heatmap)

        # append prediction to holder
        nwcst.append(pred)
        # append prediction to the input shifted on one step ahead
        input_data = np.concatenate([input_data[::, ::, ::, 1:], pred], axis=-1)
    
    nwcst = data_postprocessing(nwcst)
    new_nwcst = cv2.normalize(nwcst[lead_time - 1], None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    heatmap = cv2.applyColorMap(new_nwcst, cmapy.cmap(precipitation_cmap))
    #cv2.imwrite(output_img_name, heatmap)

    #for i in range(lead_time):
      #new_nwcst = cv2.normalize(nwcst[i], None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
      #heatmap = cv2.applyColorMap(new_nwcst, cmapy.cmap(precipitation_cmap))
      #cv2.imwrite('nwcst_{}.png'.format(i), heatmap)
    return heatmap
Example #5
def create_mel_raw(current_window,
                   sample_rate,
                   n_mels=128,
                   f_min=50,
                   f_max=4000,
                   nfft=2048,
                   hop=512,
                   resz=1):
    S = librosa.feature.melspectrogram(y=current_window,
                                       sr=sample_rate,
                                       n_mels=n_mels,
                                       fmin=f_min,
                                       fmax=f_max,
                                       n_fft=nfft,
                                       hop_length=hop)
    S = librosa.power_to_db(S, ref=np.max)
    S = (S - S.min()) / (S.max() - S.min())
    S *= 255
    img = cv2.applyColorMap(S.astype(np.uint8), cmapy.cmap('magma'))
    height, width, _ = img.shape
    if resz > 0:
        img = cv2.resize(img, (width * resz, height * resz),
                         interpolation=cv2.INTER_LINEAR)
    img = cv2.flip(img, 0)
    return img
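A hedged usage sketch for create_mel_raw; the file name is a placeholder, and sr=None keeps the clip's native sample rate.

import cv2
import librosa

audio, sr = librosa.load('breath_cycle.wav', sr=None)   # placeholder path
mel_img = create_mel_raw(audio, sr, n_mels=64, resz=2)  # BGR uint8 spectrogram image
cv2.imwrite('mel.png', mel_img)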
Example #6
def laplacian_to_img(laplacian):
    laplacian = clip_img_by_perc(laplacian, 2)
    rescaled = rescale_img(laplacian, max_val=255, min_val=0)

    colormap_img = cv2.applyColorMap(
        np.uint8(rescaled).squeeze(), cmapy.cmap("RdBu"))
    img = cv2.cvtColor(colormap_img, cv2.COLOR_BGR2RGB)
    return img
Example #7
def generate_cams(data_dir, path_to_model, save_here, label='', how_many_to_show='all'):
    # pneumothorax_index = 8
    dataloader, num_samples = load_data(label='Pneumothorax')
    if how_many_to_show == 'all':
        how_many_to_show = num_samples

    labels = ['Atelectasis',
              'Cardiomegaly',
              'Effusion',
              'Infiltration',
              'Mass',
              'Nodule',
              'Pneumonia',
              'Pneumothorax',
              'Consolidation',
              'Edema',
              'Emphysema',
              'Fibrosis',
              'Pleural_Thickening',
              'Hernia']
    label_index = labels.index(label)
    # print('label_index', label_index)
    densenet = load_model(path_to_model)
    for i in range(how_many_to_show):
        img, truth, image_name = next(dataloader)

        # name of the image in the dataset
        name = image_name[0]
        name_without_type = name[:-4]  # drop the 4-character file extension
        pred = densenet(img)
        pred[:, label_index].backward()
        gradients = densenet.get_activations_gradient()
        pooled_gradients = torch.mean(gradients, dim=[0, 2, 3])
        activations = densenet.get_activations(img).detach()
        for ch in range(512):  # avoid shadowing the outer loop variable i
            activations[:, ch, :, :] *= pooled_gradients[ch]
        heatmap = torch.mean(activations, dim=1).squeeze()
        heatmap = torch.clamp(heatmap, min=0)  # keep the op in torch; heatmap is a torch tensor

        # normalize the heatmap
        heatmap /= torch.max(heatmap)
        image = cv2.imread(data_dir + '/{0}'.format(name))
        heatmap_real = heatmap
        heatmap = cv2.resize(np.float32(heatmap), (image.shape[1], image.shape[0]))
        heatmap = np.uint8(255 * heatmap)
        heatmap = cv2.applyColorMap(heatmap, cmapy.cmap('viridis'))
        superimposed_img = heatmap * 0.4 + image

        image_location = save_here + '/heatmap-{0}.jpg'.format(name_without_type)
        cv2.imwrite(image_location, superimposed_img)
        heatmap_image = Image.open(image_location)

        f, axarr = plt.subplots(1, 2)
        axarr[0].matshow(heatmap_real.squeeze())
        axarr[1].imshow(heatmap_image)
        plt.savefig(save_here + '/heatmap-image-{0}.jpg'.format(name_without_type))
        # plt.imshow(heatmap_image)
        plt.show()
Example #8
def overlay_heatmap(image, heatmap, heatmap_weight):
    heatmap = cv2.resize(heatmap,
                         dsize=(image.shape[1], image.shape[0]),
                         interpolation=cv2.INTER_NEAREST)
    heatmap = 255 - (heatmap * 255)
    heatmap = heatmap.astype("uint8")
    heatmap = cv2.applyColorMap(heatmap, cmapy.cmap("coolwarm"))
    overlayed = cv2.addWeighted(heatmap, heatmap_weight, image,
                                1 - heatmap_weight, 0)
    return overlayed
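A quick call sketch; the frame path and heatmap values are illustrative. Given the 255 - (heatmap * 255) inversion above, the heatmap is expected to hold values in [0, 1].

import cv2
import numpy as np

image = cv2.imread('frame.jpg')                      # placeholder path
heatmap = np.random.rand(27, 48).astype(np.float32)  # coarse grid of values in [0, 1]
blended = overlay_heatmap(image, heatmap, heatmap_weight=0.6)
cv2.imwrite('overlay.jpg', blended)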
Example #9
def extract_images_from_summary(events_path,
                                tag_names_to_look_for,
                                suffix='',
                                img_outdir=None,
                                colormap=None):
    print("Extracting data from tensorboard summary...")
    event_acc = event_accumulator.EventAccumulator(events_path,
                                                   size_guidance={'images': 0})
    event_acc.Reload()

    # a suffix to append to the name if we save in outdir
    strsuffix = suffix

    if img_outdir is not None:
        outdir = pathlib.Path(img_outdir)
        outdir.mkdir(exist_ok=True, parents=True)

    # We are looking at all the images ...
    image_dict = defaultdict(list)
    for tag in event_acc.Tags()['images']:
        print("processing tag %s" % tag)
        events = event_acc.Images(tag)
        tag_name = tag.replace('/', '_')
        # ... that have the tag name: "tag_name_to_look_for"
        if tag_name in tag_names_to_look_for:
            tag_name = tag_name + strsuffix

            if img_outdir is not None:
                dirpath = outdir / tag_name
                dirpath.mkdir(exist_ok=True, parents=True)

            for index, event in enumerate(events):
                s = np.frombuffer(event.encoded_image_string, dtype=np.uint8)
                image = cv2.imdecode(s, cv2.IMREAD_COLOR)

                if colormap is not None:
                    image = cv2.applyColorMap(image[..., 0],
                                              cmapy.cmap(colormap))

                if img_outdir is not None:
                    outpath = dirpath / '{:04}.png'.format(index)
                    cv2.imwrite(outpath.as_posix(), image)

                image_dict[tag].append(image)
    return image_dict
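A hedged call sketch; the events path, tag name, and output directory are placeholders. Note that tags are compared after '/' is replaced with '_', so pass the underscored form.

images = extract_images_from_summary(
    'runs/exp1/events.out.tfevents.0',        # placeholder events file
    tag_names_to_look_for=['train_pred_img'],
    img_outdir='extracted_frames',
    colormap='viridis')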
Example #10
def fitEllipseComplex(path):
    png = cv2.imread(f"savedPNGS/{randint}.png")  # randint is a module-level value in the original script
    start_time = time.time()

    img_name = path
    img_name = img_name.split("/")
    img_name = img_name[-1]

    first_img = cv2.imread(path)
    img = cv2.applyColorMap(first_img, cmapy.cmap('flag_r'))

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    gray = cv2.morphologyEx(
        gray, cv2.MORPH_OPEN,
        cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5)))
    thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]

    contours = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]
    big_contour = max(contours, key=cv2.contourArea)

    ellipse = cv2.fitEllipse(big_contour)
    (xc, yc), (d1, d2), angle = ellipse

    # draw the fitted ellipse on the saved PNG
    cv2.ellipse(png, ellipse, (255, 0, 255), 3)

    cv2.imshow("final", png)
    elapsed_time = (time.time() - start_time) * 1000

    with open('output.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow([img_name, xc, yc, d1, d2, angle, elapsed_time])

    os.remove(os.path.join(f"savedPNGS/{randint}.png"))
    cv2.waitKey(0)
    cv2.destroyAllWindows()
Example #11
def visu(save_path, label_path, index):
    import matplotlib
    matplotlib.use('TkAgg')
    # output directory, e.g.:
    # save_path = 'saved/picture/Unet/0309_182959'
    # which slice of the label volume to show, e.g.:
    # index=0
    # path to the label file, e.g.:
    # a = np.load("/Users/shanyuhai/PycharmProjects/Earthquake/data/FYP_data/fault_sub_350IL_500t_1200XL.npy")
    a = np.load(label_path)
    a = a[index]
    # b = np.load(os.path.join("/home/anyu/myproject/venv/an/pieces/HED/dropout/0.2/testGTs","0.npy"))
    import cv2
    import cmapy
    heatmap_img = cv2.applyColorMap((a * 255).astype(np.uint8),
                                    cmapy.cmap('jet_r'))
    plt.figure(figsize=(10, 8))
    plt.imshow(heatmap_img)
    # plt.colorbar(shrink=0.5)
    plt.axis('off')
    # plt.show()
    plt.savefig('{}/label_{}.png'.format(save_path, index))
Example #12
def main(opts):
    gray_values = np.arange(256, dtype=np.uint8)
    color_values = map(
        tuple,
        cv2.applyColorMap(gray_values,
                          cmapy.cmap('nipy_spectral')).reshape(256, 3))
    color_to_gray_map = dict(zip(color_values, gray_values))
    paths = glob.glob("{}/*{}".format(opts.data_path, opts.img_format))
    if (not os.path.isdir(opts.output_dir)):
        os.mkdir(opts.output_dir)
    preamble = ""
    if (opts.include_set_name):
        preamble = opts.data_path.split("/")[-1] + "_"

    for _, img_path in tqdm(enumerate(paths)):
        color_image = cv2.imread(img_path)
        color_image = resize(color_image, 10)
        gray_img = cv2.cvtColor(color_image, cv2.COLOR_BGR2GRAY)
        #gray_img = np.apply_along_axis(lambda bgr: color_to_gray_map[nearest(color_to_gray_map.keys(),bgr)], 2, color_image)
        cv2.imwrite(
            "{}/{}{}".format(opts.output_dir, preamble,
                             img_path.split("/")[-1]), gray_img)
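The commented-out line above refers to a nearest helper; a sketch of what such a colormap-inversion lookup might look like (this helper is hypothetical, not part of the original script):

import numpy as np

def nearest(colors, bgr):
    # return the known colormap colour closest (squared L2) to the queried pixel
    keys = list(colors)
    arr = np.asarray(keys, dtype=int)                          # (256, 3) colormap colours
    d = ((arr - np.asarray(bgr, dtype=int)) ** 2).sum(axis=1)  # distance to each colour
    return keys[int(np.argmin(d))]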
Example #13
def main(config):
    logger = config.get_logger('test')

    # setup data_loader instances
    data_loader = config.init_obj('data_loader2', module_data)
    data_loader = data_loader.test_loader
    seis_path = config['data_loader2']['args']['seismic_path']
    seis = np.load(seis_path)
    run_id = datetime.now().strftime(r'%m%d_%H%M%S')
    save_path = 'saved/picture/' + config['arch']['type'] + '/' + run_id
    val_start = config['data_loader2']['args']['val_start']
    number_of_pictures = config['data_loader2']['args'][
        'val_number_of_pictures']
    modelNo = config['trainer']['modelNo']
    # save_path = 'saved/picture/Deeplab/Deeplab_seed2'
    IL, Z, XL = seis.shape
    im_height = Z
    im_width = XL
    splitsize = 96
    stepsize = 48  # overlap half
    overlapsize = splitsize - stepsize
    horizontal_splits_number = int(np.ceil((im_width) / stepsize))
    width_after_pad = stepsize * horizontal_splits_number + 2 * overlapsize
    left_pad = int((width_after_pad - im_width) / 2)
    right_pad = width_after_pad - im_width - left_pad
    vertical_splits_number = int(np.ceil((im_height) / stepsize))
    height_after_pad = stepsize * vertical_splits_number + 2 * overlapsize
    top_pad = int((height_after_pad - im_height) / 2)
    bottom_pad = height_after_pad - im_height - top_pad
    horizontal_splits_number = horizontal_splits_number + 1
    # print(horizontal_splits_number)
    vertical_splits_number = vertical_splits_number + 1
    # print(vertical_splits_number)
    halfoverlapsize = int(overlapsize / 2)

    # build model architecture
    model = config.init_obj('arch', module_arch)
    # logger.info(model)
    # print(config['arch']['type'])
    # summary(model, (1, splitsize, splitsize))
    print(config['arch']['type'])
    print("start:", config['data_loader2']['args']['val_start'])
    print("number_of_pictures:",
          config['data_loader2']['args']['val_number_of_pictures'])
    # get function handles of loss and metrics
    loss_fn = getattr(module_loss, config['loss'])
    metric_fns = [getattr(module_metric, met) for met in config['metrics']]

    logger.info('Loading checkpoint: {} ...'.format(config.resume))
    checkpoint = torch.load(config.resume)
    state_dict = checkpoint['state_dict']
    if config['n_gpu'] > 1:
        model = torch.nn.DataParallel(model)
    model.load_state_dict(state_dict)

    # prepare model for testing
    model = model.to(device)
    bceloss = nn.BCELoss()
    # model.eval()

    total_loss = 0.0
    total_metrics = torch.zeros(len(metric_fns))

    WINDOW_SPLINE_2D = window_2D(window_size=splitsize, power=2)
    os.makedirs(save_path, exist_ok=True)
    val_losses = []
    val_accuracies = []
    test_predictions = []
    imageNo = -1
    # modelNo = 1
    best_iou_threshold = 0.5

    with torch.no_grad():
        for images in tqdm(data_loader):
            #         images = Variable(images)

            images = Variable(images.to(device))
            outputs = model(images)
            y_preds = outputs
            if modelNo == 2 or modelNo == 21:
                y_preds = outputs[-2]
            elif modelNo == 3:
                y_preds = outputs[-1]
            #         predicted_mask = y_preds > best_iou_threshold
            test_predictions.extend(y_preds.detach().cpu())
            #         print(test_predictions[0].dtype)
            #         print(len(test_predictions))
            if len(test_predictions) >= vertical_splits_number * horizontal_splits_number:
                imageNo = imageNo + 1
                tosave = torch.stack(test_predictions).detach().cpu().numpy()
                tosave = tosave[0:vertical_splits_number * horizontal_splits_number]
                #             print(tosave.shape)
                test_predictions = test_predictions[vertical_splits_number *
                                                    horizontal_splits_number:]

                tosave = np.moveaxis(tosave, -3, -1)
                #             print(tosave.shape)
                tosave = np.array(
                    [patch * WINDOW_SPLINE_2D for patch in tosave])
                #             print(tosave.shape)
                #             break

                tosave = tosave.reshape(
                    (vertical_splits_number, horizontal_splits_number,
                     splitsize, splitsize, 1))
                #             print(tosave.shape)

                recover_Y_test_pred = recover_Image2(tosave,
                                                     (im_height, im_width, 1),
                                                     left_pad, right_pad,
                                                     top_pad, bottom_pad,
                                                     overlapsize)

                os.makedirs(save_path, exist_ok=True)
                np.save(
                    os.path.join(save_path, "{}".format(imageNo + val_start)),
                    np.squeeze(recover_Y_test_pred))

    print("saved")
    for i in range(0, number_of_pictures, 1):

        index = i + val_start
        a = np.load(os.path.join(save_path, str(index) + '.npy'))
        # b = np.load(os.path.join("/home/anyu/myproject/venv/an/pieces/HED/dropout/0.2/testGTs","0.npy"))
        import cv2
        import cmapy
        heatmap_img = cv2.applyColorMap((a * 255).astype(np.uint8),
                                        cmapy.cmap('jet_r'))
        plt.figure(figsize=(10, 8))
        plt.imshow(heatmap_img)
        # plt.colorbar(shrink=0.5)
        plt.axis('off')
        # plt.show()
        plt.savefig('{}/{}_{}.png'.format(save_path, run_id, str(index)))

        visu(
            save_path,
            label_path=
            "/Users/shanyuhai/PycharmProjects/Earthquake/data/FYP_data/fault_sub_350IL_500t_1200XL.npy",
            index=index)
        visu1(
            save_path,
            seismic_path=
            "/Users/shanyuhai/PycharmProjects/Earthquake/data/FYP_data/seis_sub_350IL_500t_1200XL.npy",
            index=index)
Example #14
    def __init__(self):
        super().__init__()
        self.ready = False
        self.ui = Ui_dialog()
        self.ui.setupUi(self)
        self.threadpool = QThreadPool()
        self.show()

        ###########################################################
        # Use QSettings to save states
        ###########################################################

        self.settings = QSettings('__settings.ini', QSettings.IniFormat)
        self.settings.setFallbacksEnabled(False)

        ###########################################################
        # Connect Signal
        ###########################################################
        self.ui.lbl_JPG_img_1.installEventFilter(self)
        self.ui.lbl_JPG_img_1.setMouseTracking(True)

        ####################################################################
        # Buttons Signals
        ####################################################################
        self.ui.pushButton_Step_Left.clicked.connect(self.backward_oneImage)
        self.ui.pushButton_Step_Right.clicked.connect(self.forward_oneImage)
        self.ui.pushButton_browse.clicked.connect(self.getOutputFolderName)
        self.ui.pushButton_RunImgSeq.clicked.connect(self.startStopSlideShow)
        self.ui.pushButton_close.clicked.connect(QCoreApplication.instance().quit)
        self.ui.pushButton_close.clicked.connect(self.close)

        ###########################################################
        # ListView Signal
        ###########################################################
        self.ui.listWidget_cam1.currentItemChanged.connect(self.on_item_changed_cam1)
        self.ui.listWidget_cam2.currentItemChanged.connect(self.on_item_changed_cam2)

        self.ui.listWidget_cam1.doubleClicked.connect(self.on_item_doubleclicked_cam1)
        self.ui.listWidget_cam2.doubleClicked.connect(self.on_item_doubleclicked_cam2)

        ###########################################################
        # Labels
        ###########################################################
        self.ui.lbl_info.setText('Info')

        ###########################################################
        # QLabel - containers for images and plots
        ###########################################################
        # Set scaled properties
        self.ui.lbl_JPG_img_1.setScaledContents(True)
        self.ui.lbl_JPG_img_2.setScaledContents(True)
        self.ui.lbl_JPG_img_3.setScaledContents(True)

        self.ui.lbl_JPG_COLORMAP_1.setScaledContents(True)
        self.ui.lbl_JPG_COLORMAP_2.setScaledContents(True)
        self.ui.lbl_JPG_COLORMAP_3.setScaledContents(True)

        self.ui.lbl_JPG_RGB_HIST_1.setScaledContents(True)
        self.ui.lbl_JPG_RGB_HIST_2.setScaledContents(True)
        self.ui.lbl_JPG_RGB_HIST_3.setScaledContents(True)

        ###########################################################
        # Timer update slider values
        ###########################################################
        self.timer_update = QTimer()
        self.timer_update.start(TIMER_INTERVAL)

        ###########################################################
        # Timer for image slide show
        ###########################################################
        self.timer_slideshow = QTimer()
        self.timer_slideshow.timeout.connect(self.runSlideShow)
        # self.timer_slideshow.setInterval(SLIDESHOW_DELAY)
        self.slideshow_step = SLIDESHOW_STEP

        ###########################################################
        #  Lists with all camera 1/2 directories
        ###########################################################
        self.cam1_dirs_path_list = []
        self.cam2_dirs_path_list = []

        ###########################################################
        #  Lists concerning one directory of JPG and HDR images
        ###########################################################
        self.jpg_img_well_path_list = []
        self.jpg_img_low_path_list = []
        self.jpg_img_high_path_list = []

        self.openCVImg_JPG_img_well_list = []
        self.openCVImg_JPG_img_low_list  = []
        self.openCVImg_JPG_img_high_list = []

        self.pixMapImg_JPG_img_well_list = []
        self.pixMapImg_JPG_img_low_list  = []
        self.pixMapImg_JPG_img_high_list = []

        ###########################################################
        #  Plot Widgets used with pygraph
        ###########################################################
        self.pw_rgb_w_hist = pg.PlotWidget(name='RGB_HIST_well')
        self.pw_rgb_w_hist.setXRange(1, 350, padding=0)

        self.hist_w_plot_rgb_r = self.pw_rgb_w_hist.plot(pen ='r')
        self.hist_w_plot_rgb_g = self.pw_rgb_w_hist.plot(pen ='g')
        self.hist_w_plot_rgb_b = self.pw_rgb_w_hist.plot(pen ='b')
        self.ui.gridLayout.addWidget(self.pw_rgb_w_hist, 0, 2, 1, 1)

        self.pw_rgb_l_hist = pg.PlotWidget(name='RGB_HIST_low')
        self.pw_rgb_l_hist.setXRange(1, 350, padding=0)

        self.hist_l_plot_rgb_r = self.pw_rgb_l_hist.plot(pen ='r')
        self.hist_l_plot_rgb_g = self.pw_rgb_l_hist.plot(pen ='g')
        self.hist_l_plot_rgb_b = self.pw_rgb_l_hist.plot(pen ='b')
        self.ui.gridLayout.addWidget(self.pw_rgb_l_hist, 1, 2, 1, 1)

        self.pw_rgb_h_hist = pg.PlotWidget(name='RGB_HIST_high')
        self.pw_rgb_h_hist.setXRange(1, 350, padding=0)

        self.hist_h_plot_rgb_r = self.pw_rgb_h_hist.plot(pen='r')
        self.hist_h_plot_rgb_g = self.pw_rgb_h_hist.plot(pen='g')
        self.hist_h_plot_rgb_b = self.pw_rgb_h_hist.plot(pen='b')
        self.ui.gridLayout.addWidget(self.pw_rgb_h_hist, 2, 2, 1, 1)

        ###########################################################
        #  Settings and initial values
        ###########################################################
        self.qimage_width  = 2592
        self.qimage_height = 1944
        self.COLORMAP_1 = cmapy.cmap('jet')
        self.COLORMAP_2 = cv2.COLORMAP_HSV
        self.USE_CMAP = '1'                   # Set type of color map used '1' or '2'
        self.database_name = "img_analysis.db"
        self.database_avaiable = False

        ###########################################################
        #  Misc Variables
        ###########################################################
        self.tot_numb_of_images = 0
        self.name_of_current_imgProcFunc = None  # name (string) of currently used img proc function
        self.image_mask = None
        self.imageLoaded = False
        self.scale_fac_width  = None
        self.scale_fac_height = None
        self.qLable_width = None
        self.qLable_height = None
        self.pass_this_imgProcFunc = None  # Placeholder for currently used img proc function
        self.curr_item_slec_cam1 = None
        self.curr_item_slec_cam2 = None
        self.curr_item_slec_path = None
        self.CAM = None                    # indicating whether camera_1 or camera_2 was used

        ###########################################################
        #  Boolean variables
        ###########################################################
        self.optFlowList_exists = False  # True if a list of optical-flow images exists
        self.loading_complete   = False  # True once the image-loading process has completed
        self.hdr_imgs_exist = False      # True if an output folder with HDR images exists

        ###########################################################
        # Load images from last session as background task
        ###########################################################
        self.root_path_to_images = None   # root containing folders camera_1 resp camera_2
        self.root_path_to_images = self.settings.value("path_to_images")
        temp_text = self.load_last_root_path_from_settings(self.root_path_to_images)
        self.ui.lineEdit.setText(temp_text)

        ###########################################################
        # All variables declared and ready to be used
        ###########################################################
        self.ready = True  # initialization is complete
Example #15
def detail(Y1):
    # equalise the image to increase contrast
    I = cv2.equalizeHist(Y1)
    # apply a color map to the equalised image
    newI = cv2.applyColorMap(I, cmapy.cmap('inferno'))
    return newI
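A minimal usage sketch: cv2.equalizeHist only accepts 8-bit single-channel input, so the caller passes a grayscale image (the file name is a placeholder).

import cv2
import cmapy

gray = cv2.imread('scene.jpg', cv2.IMREAD_GRAYSCALE)
cv2.imwrite('scene_inferno.jpg', detail(gray))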
Example #16
def main():
    # arguments
    parser = ArgumentParser()

    parser.add_argument(
        "-m",
        "--model",
        help="Required. Path to an .xml file with a trained model",
        required=True,
        type=str)
    parser.add_argument("-i",
                        "--input",
                        help="Required. Input device (webcam)",
                        default=0,
                        type=int)
    parser.add_argument(
        "-l",
        "--cpu_extension",
        help="Optional. Required for CPU custom layers. MKLDNN (CPU)-targeted custom layers. "
        "Absolute path to a shared library with the kernel implementations",
        type=str,
        default=None)
    parser.add_argument(
        "-d",
        "--device",
        help=
        "Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is acceptable. "
        "Sample will look for a suitable plugin for device specified. Default value is CPU",
        default="CPU",
        type=str)
    parser.add_argument(
        "-c",
        "--colormap",
        help=
        "Optional. Specify the colormap for the depth output. Default value is inferno",
        default="inferno",
        type=str)

    args = parser.parse_args()

    # logging
    log.basicConfig(format="[ %(levelname)s ] %(message)s",
                    level=log.INFO,
                    stream=sys.stdout)

    log.info("creating inference engine")
    ie = IECore()
    if args.cpu_extension and "CPU" in args.device:
        ie.add_extension(args.cpu_extension, "CPU")

    log.info("Loading network")
    net = ie.read_network(args.model, os.path.splitext(args.model)[0] + ".bin")

    assert len(
        net.input_info) == 1, "Sample supports only single input topologies"
    assert len(
        net.outputs) == 1, "Sample supports only single output topologies"

    log.info("preparing input blobs")
    input_blob = next(iter(net.input_info))
    out_blob = next(iter(net.outputs))
    net.batch_size = 1

    # loading model to the plugin
    log.info("loading model to the plugin")
    exec_net = ie.load_network(network=net, device_name=args.device)

    print("starting webcam...")
    cv2.namedWindow("preview")
    vc = cv2.VideoCapture(args.input)

    colormap = cmapy.cmap(args.colormap)

    # try to get the first frame
    if vc.isOpened():
        rval, frame = vc.read()
    else:
        rval = False

    while rval:
        # read and pre-process input image
        _, _, height, width = net.input_info[input_blob].input_data.shape

        image = frame
        (input_height, input_width) = image.shape[:-1]

        # resize
        if (input_height, input_width) != (height, width):
            image = cv2.resize(image, (width, height),
                               interpolation=cv2.INTER_CUBIC)  # keyword needed: resize's third positional arg is dst

        # prepare input
        image = image.astype(np.float32)
        image = image.transpose((2, 0, 1))
        image_input = np.expand_dims(image, 0)

        # start sync inference
        res = exec_net.infer(inputs={input_blob: image_input})

        # processing output blob
        disp = res[out_blob][0]

        # resize disp to input resolution
        disp = cv2.resize(disp, (input_width, input_height),
                          interpolation=cv2.INTER_CUBIC)  # keyword needed: resize's third positional arg is dst

        # rescale disp
        disp_min = disp.min()
        disp_max = disp.max()

        if disp_max - disp_min > 1e-6:
            disp = (disp - disp_min) / (disp_max - disp_min)
        else:
            disp.fill(0.5)

        disp_image = np.uint8(disp * 255.0)
        disp_colored = cv2.applyColorMap(disp_image, colormap)

        output = np.vstack((frame, disp_colored))
        cv2.imshow("preview", output)

        rval, frame = vc.read()

        key = cv2.waitKey(20)
        if key == 27:  # exit on ESC
            break
Example #17
    #mag, ang = cv2.cartToPolar(noRotateFlow[...,0], noRotateFlow[...,1])
    #hsv[...,0] = ang*180/np.pi/2
    #hsv[...,2] = cv2.normalize(mag,None,0,255,cv2.NORM_MINMAX)
    #hsv[...,2] = 6 * mag
    #bgr = cv2.cvtColor(hsv,cv2.COLOR_HSV2BGR)
    #cv2.imshow('nrFlow', bgr)
    #writer_nrflow.write(bgr)

    #cv2.imshow("frame", frame)
    #writer.write(frame)

    depth *= 255 / 20
    depth = 255 - depth
    depth = depth * (depth > 0)
    depth = depth.astype(np.uint8)
    depth = cv2.applyColorMap(depth, cmapy.cmap('viridis'))
    cv2.imshow("depth", depth)
    #writer_depth.write(depth)

    #fdepth *= 255 / 20
    #fdepth = 255 - fdepth
    #fdepth = fdepth * (fdepth > 0)
    #fdepth = fdepth.astype(np.uint8)
    #fdepth = cv2.applyColorMap(fdepth, cmapy.cmap('viridis'))
    #cv2.imshow("fdepth", fdepth)

    showFrame = frame.copy()
    for i in range(len(contours)):
        if (cv2.contourArea(contours[i]) > 2000):
            showFrame = cv2.drawContours(showFrame, contours, i, (0, 0, 255),
                                         -1)
Example #18
import cv2
import cmapy

CMAPS = {
    'ясно': cmapy.cmap('Wistia_r'),
    'дождь': cv2.COLORMAP_BONE,
    'дождь/гроза': cmapy.cmap('bone_r'),
    'облачно': cmapy.cmap('PuBu_r'),
    'осадки': cv2.COLORMAP_OCEAN,  # sleet (snow with rain)
    'снег': cv2.COLORMAP_WINTER,
    'метель': cv2.COLORMAP_OCEAN,
    'дождь/град': cv2.COLORMAP_OCEAN,
}

ICONS = {
    'ясно': '\uF00D',
    'дождь': '\uF019',
    'дождь/гроза': '\uF01D',
    'облачно': '\uF013',
    'осадки': '\uF017',
    'снег': '\uF01B',
    'метель': '\uF082',
    'дождь/град': '\uF017',
    'press': '\uF079',
    'humidity': '\uF078',
    'wind': '\uF050',
}

BACKGROUND_IM = 'assets/big_bg.png'
FONT_REGULAR = 'assets/RobotoSlab-Regular.ttf'
FONT_BOLD = 'assets/RobotoSlab-Medium.ttf'
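A sketch of how such a lookup table might be consumed: pick the colormap by forecast condition and recolour the background image (the condition string is illustrative).

import cv2

condition = 'снег'  # as reported by the weather source
bg = cv2.imread(BACKGROUND_IM)
colored_bg = cv2.applyColorMap(bg, CMAPS[condition])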
Example #19
def write_image_summary(image_resolution,
                        model,
                        model_input,
                        gt,
                        model_output,
                        writer,
                        total_steps,
                        prefix='train_'):
    gt_img = dataio.lin2img(gt['img'], image_resolution)
    pred_img = dataio.lin2img(model_output['model_out'], image_resolution)

    img_gradient = diff_operators.gradient(model_output['model_out'],
                                           model_output['model_in'])
    img_laplace = diff_operators.laplace(model_output['model_out'],
                                         model_output['model_in'])

    output_vs_gt = torch.cat((gt_img, pred_img), dim=-1)
    writer.add_image(prefix + 'gt_vs_pred',
                     make_grid(output_vs_gt, scale_each=False, normalize=True),
                     global_step=total_steps)

    pred_img = dataio.rescale_img(
        (pred_img + 1) / 2,
        mode='clamp').permute(0, 2, 3, 1).squeeze(0).detach().cpu().numpy()
    pred_grad = dataio.grads2img(dataio.lin2img(img_gradient)).permute(
        1, 2, 0).squeeze().detach().cpu().numpy()
    pred_lapl = cv2.cvtColor(
        cv2.applyColorMap(
            dataio.to_uint8(
                dataio.rescale_img(dataio.lin2img(img_laplace),
                                   perc=2).permute(
                                       0, 2, 3,
                                       1).squeeze(0).detach().cpu().numpy()),
            cmapy.cmap('RdBu')), cv2.COLOR_BGR2RGB)

    gt_img = dataio.rescale_img(
        (gt_img + 1) / 2,
        mode='clamp').permute(0, 2, 3, 1).squeeze(0).detach().cpu().numpy()
    gt_grad = dataio.grads2img(dataio.lin2img(gt['gradients'])).permute(
        1, 2, 0).squeeze().detach().cpu().numpy()
    gt_lapl = cv2.cvtColor(
        cv2.applyColorMap(
            dataio.to_uint8(
                dataio.rescale_img(dataio.lin2img(gt['laplace']),
                                   perc=2).permute(
                                       0, 2, 3,
                                       1).squeeze(0).detach().cpu().numpy()),
            cmapy.cmap('RdBu')), cv2.COLOR_BGR2RGB)

    writer.add_image(prefix + 'pred_img',
                     torch.from_numpy(pred_img).permute(2, 0, 1),
                     global_step=total_steps)
    writer.add_image(prefix + 'pred_grad',
                     torch.from_numpy(pred_grad).permute(2, 0, 1),
                     global_step=total_steps)
    writer.add_image(prefix + 'pred_lapl',
                     torch.from_numpy(pred_lapl).permute(2, 0, 1),
                     global_step=total_steps)
    writer.add_image(prefix + 'gt_img',
                     torch.from_numpy(gt_img).permute(2, 0, 1),
                     global_step=total_steps)
    writer.add_image(prefix + 'gt_grad',
                     torch.from_numpy(gt_grad).permute(2, 0, 1),
                     global_step=total_steps)
    writer.add_image(prefix + 'gt_lapl',
                     torch.from_numpy(gt_lapl).permute(2, 0, 1),
                     global_step=total_steps)

    write_psnr(dataio.lin2img(model_output['model_out'], image_resolution),
               dataio.lin2img(gt['img'], image_resolution), writer,
               total_steps, prefix + 'img_')
Example #20
def main():
    # Create Kinect object and initialize
    kin = kinz.Kinect(resolution=1080,
                      wfov=True,
                      binned=True,
                      framerate=30,
                      imu_sensors=False,
                      body_tracking=True)

    # Get depth aligned with color?
    align_frames = False
    image_scale = 0.5  # visualized image scale

    # initialize fps counter
    t = cv2.getTickCount()
    fps_count = 0
    fps = 0

    while True:
        if fps_count == 0:
            t = cv2.getTickCount()

        # read kinect frames. If frames available return 1
        if kin.get_frames(get_color=True,
                          get_depth=True,
                          get_ir=False,
                          get_sensors=False,
                          get_body=True,
                          get_body_index=True,
                          align_depth=align_frames):

            color_data = kin.get_color_data()
            depth_data = kin.get_depth_data()
            bodies = kin.get_bodies()
            body_index_data = kin.get_body_index_map(returnId=True,
                                                     inColor=False)

            print("bodies:", bodies)

            # extract frames to np arrays
            depth_image = np.array(depth_data.buffer, copy=True)
            color_image = np.array(color_data.buffer,
                                   copy=True)  # image is BGRA
            color_image = cv2.cvtColor(color_image,
                                       cv2.COLOR_BGRA2BGR)  # to BGR
            body_index_image = np.array(body_index_data.buffer, copy=True)
            print(body_index_image.shape)

            # Apply colormap on depth image (image must be converted to 8-bit per pixel first)
            depth_colormap = cv2.applyColorMap(
                cv2.convertScaleAbs(depth_image, alpha=0.03), cv2.COLORMAP_JET)

            # Apply colormap on body index image
            body_index_image = cv2.applyColorMap(body_index_image * 10,
                                                 cmapy.cmap('tab20'))

            # Draw bodies on the RGB image
            draw_keypoints(color_image, bodies, img_type='rgb')

            # Draw bodies on the depth image
            draw_keypoints(depth_colormap, bodies, img_type='depth')

            # Resize images
            if align_frames:
                depth_colormap = cv2.resize(depth_colormap,
                                            None,
                                            fx=image_scale,
                                            fy=image_scale)
                body_index_image = cv2.resize(body_index_image,
                                              None,
                                              fx=image_scale,
                                              fy=image_scale)

            color_small = cv2.resize(color_image,
                                     None,
                                     fx=image_scale,
                                     fy=image_scale)
            size = color_small.shape[0:2]
            cv2.putText(color_small, "{0:.2f}-FPS".format(fps),
                        (20, size[0] - 20), cv2.FONT_HERSHEY_COMPLEX, 0.8,
                        (0, 0, 255), 2)

            cv2.imshow('Depth', depth_colormap)
            cv2.imshow('Color', color_small)
            cv2.imshow('Body index', body_index_image)

        k = cv2.waitKey(1) & 0xFF
        if k == 27:
            break
        elif k == ord('s'):
            cv2.imwrite("color.jpg", color_image)
            print("Image saved")

        # increment frame counter and calculate FPS
        fps_count = fps_count + 1
        if (fps_count == 30):
            t = (cv2.getTickCount() - t) / cv2.getTickFrequency()
            fps = 30.0 / t
            fps_count = 0

    kin.close()  # close Kinect
    cv2.destroyAllWindows()
Example #21
    z = np.zeros((224, 224, 3))
    z = z + diff
    diff = np.array(z, dtype=np.uint8)
    x = preprocess_input(np.expand_dims(diff, 0))
    fmaps = model.predict(x)[0]
    probs = resnet.predict(x)
    pred = np.argmax(probs[0])
    w = W[:, pred]
    cam = fmaps.dot(w)
    cam = sp.ndimage.zoom(cam, (32, 32), order=1)
    cam = np.array(cam, dtype=np.uint8) * 3
    cam = np.reshape(cam, (224, 224, 1))
    z = np.zeros((224, 224, 3))
    z = z + cam
    X = np.array(z, dtype=np.uint8)
    X = cv2.applyColorMap(X, cmapy.cmap('inferno'))

    img = cv2.resize(frame2, (700, 500))
    X = cv2.resize(X, (700, 500))
    fin = cv2.addWeighted(img, 0.4, X, 0.6, 0)
    cv2.imshow('original', img)
    cv2.imshow('activationMap', X)
    cv2.imshow('frame', fin)

    key = cv2.waitKey(1)
    if key == ord('q'):
        break

camera.release()
cv2.destroyAllWindows()
Example #22
t1 = time.time()
saveResults(save_path, test_loader)
t2 = time.time()
print('save in {} sec'.format(t2 - t1))


# save_path = 'augtest/noaug'
import numpy as np
import os


a = np.load(os.path.join(save_path, "0.npy"))
# b = np.load(os.path.join("/home/anyu/myproject/venv/an/pieces/HED/dropout/0.2/testGTs","0.npy"))

import cv2
import cmapy


heatmap_img = cv2.applyColorMap((a * 255).astype(np.uint8),
                                cmapy.cmap('jet_r'))
plt.figure(figsize=(10, 8))
plt.imshow(heatmap_img)
# plt.colorbar(shrink=0.5)
# plt.axis('off')
# plt.show()

plt.savefig('{}_0.png'.format(save_path))
        "camera_" + str(camera_num),
        "departure_" + str(args.match_number).zfill(3) + ".mp4",
    )
    video = utils.read_video(video_path)
    video = utils.resize_video(video, (480, 270))

    input_tensor = input_tensor.astype(float)
    input_tensor = smooth_trajectory_tensor(input_tensor, 1)
    input_tensor = normalize_trajectory_tensor(input_tensor)
    for frame in range(10):
        this_heatmap = input_tensor[camera_num - 1, frame]
        this_heatmap = cv2.resize(this_heatmap,
                                  dsize=(480, 270),
                                  interpolation=cv2.INTER_NEAREST)
        this_heatmap = 255 - (this_heatmap * 255)
        this_heatmap = this_heatmap.astype("uint8")
        this_heatmap = cv2.applyColorMap(this_heatmap, cmapy.cmap("coolwarm"))
        video[frame + 10] = cv2.addWeighted(this_heatmap, 0.6,
                                            video[frame + 10], 0.4, 0)
        video[frame + 10] = utils.draw_grid(video[frame + 10], (27, 48), 1)

    start_x = int(math.floor((camera_num - 1) / 4) * 270)
    start_y = int(((camera_num - 1) % 4) * 480)
    output_video[:, start_x:start_x + 270, start_y:start_y + 480, :] = video

utils.write_image(
    "trajectory_tensor_input_example_" + args.day + "_" + args.match_number +
    ".jpg", output_video[10])

utils.write_video("trajectory_tensor_input_example.mp4", output_video, fps=5)
Example #24
        g = 0
        b = 0
        if i < half:
            r = ((half - i) / half)
            r = r + ((1 - r) * adj)
        else:
            b = ((i - half) / half)
            b = b + ((1 - b) * adj)
        custom_map.append([r, g, b, 1.0])

    # plt.get_cmap('twilight')
    plt_color_map = ListedColormap(np.power(np.asarray(custom_map), 3 / 2))
    return plt_color_map


CUSTOM_COLOR_MAP = cmapy.cmap(build_custom_color_map_1())


def demo_color_map(plt_color_map):
    gradient = np.linspace(0, 1, 256)
    gradient = np.vstack((gradient, gradient))

    def plot_color_gradients(cmap):
        fig, ax = plt.subplots()
        ax.imshow(gradient, aspect='auto', cmap=cmap)
        pos = list(ax.get_position().bounds)
        x_text = pos[0] - 0.01
        y_text = pos[1] + pos[3] / 2.

    plot_color_gradients(plt_color_map)
Example #25
def colorFrame(f):
    f = f / f.max()
    f = np.uint8(f * 255)
    f = cv2.applyColorMap(f, cmapy.cmap('magma'))
    f = cv2.resize(f, (448, 768), interpolation=cv2.INTER_LINEAR)
    return f
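A sketch feeding colorFrame a float map such as a depth or saliency frame; the shape is illustrative (the function itself assumes cv2, cmapy and numpy as np are already imported).

import numpy as np

frame = np.random.rand(240, 320).astype(np.float32)  # any non-zero float map
vis = colorFrame(frame)  # uint8 BGR image resized to 448x768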
Example #26
import cv2
import cmapy  # installing this package requires matplotlib

imagen = cv2.imread("constante.jpg")

imagen_color = cv2.applyColorMap(imagen, cmapy.cmap('CMRmap'))
cv2.imwrite("constante_color.jpg", imagen_color)


imagen = cv2.imread("variable.jpg")

imagen_color = cv2.applyColorMap(imagen, cmapy.cmap('CMRmap'))
cv2.imwrite("variable_color.jpg", imagen_color)

imagen = cv2.imread("color_img.jpg")

imagen_color = cv2.applyColorMap(imagen, cmapy.cmap('CMRmap'))
cv2.imwrite("color_img_color.jpg", imagen_color)