Example #1
def get_batch(self):
    # Pick a random video and a random start index for a clip of
    # T_in consecutive frames (frames are assumed numbered 1..100).
    idx_video = random.randint(0, len(self.video_list) - 1)
    idx_frame = random.randint(1, 100 - self.T_in)
    lr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/lr_x' + str(self.upscale_factor) + '_BI'
    hr_dir = self.trainset_dir + '/' + self.video_list[idx_video] + '/hr'
    # Read T_in consecutive LR frames and the single HR frame at the
    # center of the temporal window.
    L_frames = []
    for i in range(self.T_in):
        L_frames.append(LoadImage(lr_dir + '/lr' + str(idx_frame + i) + '.bmp'))
    H_frames = LoadImage(hr_dir + '/hr' + str(idx_frame + self.T_in // 2) + '.bmp')
    L_frames = np.asarray(L_frames)
    return L_frames, H_frames
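None of these examples define LoadImage. Example 4's version clearly differs (it takes img_size and returns the resized model input together with the original image, as in YOLO-style loaders), but for the simple frame readers in Examples 1-3 and 6 the behavior is plausibly just file-to-array. A minimal sketch under that assumption; the cv2-based implementation, the resize flag, and the default size are guesses, not the original code:

import cv2
import numpy as np

def LoadImage(path, resize=False, size=(224, 224)):
    # Hypothetical reader: load BGR with OpenCV, convert to RGB,
    # optionally resize, and scale pixel values to [0, 1].
    img = cv2.imread(path)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    if resize:
        img = cv2.resize(img, size)
    return img.astype(np.float32) / 255.0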
Example #2
def get_y(path):
    # Load every PNG under `path` (expects a trailing slash), sorted by
    # filename, into a single (N, H, W, C) array.
    dir_frames = glob.glob(path + "*.png")
    dir_frames.sort()
    frames = []
    for f in dir_frames:
        frames.append(LoadImage(f))
    return np.asarray(frames)
Example #3
def get_x(path):
    # Load frames as in get_y, then zero-pad the time axis so a sliding
    # window of T_in frames stays in bounds at both ends.
    dir_frames = glob.glob(path + "*.png")
    dir_frames.sort()
    frames = []
    for f in dir_frames:
        frames.append(LoadImage(f))
    frames = np.asarray(frames)  # e.g. shape (20, 100, 115, 3)
    # T_in is a module-level constant; with T_in // 2 == 3 the padded
    # shape becomes (26, 100, 115, 3).
    frames_padded = np.pad(frames, pad_width=((T_in // 2, T_in // 2), (0, 0), (0, 0), (0, 0)), mode='constant')
    return frames, frames_padded
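The padding in get_x exists so a temporal window of T_in frames can be centered on every original frame, including the first and last few. A hypothetical usage sketch (the path and the value of T_in are illustrative):

T_in = 7  # must match the module-level constant used by get_x
frames, frames_padded = get_x('./scene_01/lr/')
for i in range(frames.shape[0]):
    # Window i is centered on original frame i, which sits at padded
    # index i + T_in // 2.
    window = frames_padded[i:i + T_in]  # shape (T_in, H, W, 3)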
Example #4
def detect(model,
           source,
           out,
           imgsz,
           conf_thres,
           iou_thres,
           names,
           colors=[(255, 30, 0), (50, 0, 255)],
           device=torch.device('cpu')):
    # Load and preprocess: LoadImage here returns the resized model input
    # plus the original image (im0) used for drawing and saving.
    img, im0 = LoadImage(source, img_size=imgsz)
    img = torch.from_numpy(img).to(device)
    img = img.float()
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)  # add a batch dimension

    # Inference
    with torch.no_grad():
        pred = model(img)[0]

    # Apply NMS
    pred = non_max_suppression(pred, conf_thres, iou_thres)

    # Process detections
    det = pred[0]  # detections
    if det is not None and len(det):
        # Rescale boxes from img_size to im0 size
        det[:, :4] = scale_coords(img.shape[2:], det[:, :4], im0.shape).round()

        # Draw each detection on the original image
        for *xyxy, conf, cls in det:
            label = '%s %.2f' % (names[int(cls)], conf)
            plot_fire(xyxy,
                      im0,
                      clas=cls,
                      label=label,
                      color=colors[int(cls)],
                      line_thickness=2)

    # Save results (image with detections)
    cv2.imwrite(out, im0)
    return im0
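A hypothetical call of the detect function above. The checkpoint path, the ['model'] unpacking (a YOLOv5-style convention), and the two class names matching the two default colors are all assumptions, not something this snippet specifies:

import torch

ckpt = torch.load('weights/best.pt', map_location='cpu')  # assumed layout
model = ckpt['model'].float().eval()

annotated = detect(model,
                   source='data/sample.jpg',
                   out='runs/sample_out.jpg',
                   imgsz=640,
                   conf_thres=0.25,
                   iou_thres=0.45,
                   names=['fire', 'smoke'])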
Example #5
    #LoadParams(sess, [params_G], in_file='params_{}L_x{}.h5'.format(args.L, R))

    if args.T == 'G':
        # Test using GT videos
        avg_psnrs = []
        dir_inputs = glob.glob('./inputs/G/*')
        for v in dir_inputs:
            scene_name = v.split('/')[-1]
            os.mkdir('./results/{}L/G/{}/'.format(args.L, scene_name))

            dir_frames = glob.glob(v + '/*.png')
            dir_frames.sort()

            frames = []
            for f in dir_frames:
                frames.append(LoadImage(f))
            frames = np.asarray(frames)
            frames_padded = np.pad(frames,
                                   pad_width=((T_in // 2, T_in // 2),
                                              (0, 0), (0, 0), (0, 0)),
                                   mode='constant')
            # Reflect-pad 8 px around each frame's spatial borders.
            frames_padded = np.pad(frames_padded,
                                   pad_width=((0, 0), (8, 8), (8, 8), (0, 0)),
                                   mode='reflect')

            out_Hs = []
            for i in range(frames.shape[0]):
                print('Scene {}: Frame {}/{} processing'.format(
                    scene_name, i + 1, frames.shape[0]))
                in_H = frames_padded[i:i + T_in]  # select T_in frames
Example #6
    randoutputpath = options.randoutputpath
    computnormalmasks = options.normal_masks

    # default parameters
    # pair saliency automatically computes smoothgrad squared
    smoothgradsq = False  # turn off smoothgradsquared
    nsamples_sg = 50  # number of noisy samples to compute
    xsteps_ig = 50  # interpolation steps for integrated gradients
    stdev_spread_sg = 0.15  # std for smoothgrad noisy samples
    gradcam_three_dims = True  # gradcam should be 3 channels

    # assemble a batch from the images in the input folder
    listfiles = tf.io.gfile.listdir(inputimgfolderpath)
    demo_batch = []
    for fl in listfiles:
        demo_batch.append(LoadImage(inputimgfolderpath + fl, resize=True))
    demo_batch = np.array(demo_batch)
    if logging:
        print(demo_batch.shape)

    layer_randomization_order = inception_block_names()

    # compute normal saliency masks.
    if computnormalmasks:
        # load the Inception v3 model
        inception_model = Inceptionv3_Wrapper(chkpointpath=chkpntpath,
                                              lblmetadatapath=labeldatapath)

        # specify necessary saliency setup functions.
        saliency_dict, n_selector = get_saliency_constructors(
            inception_model.graph,