Example #1
def save_flow(input_path, ref_path, flow_path):
    inputs = glob.glob(os.path.join(input_path, '*.png'))

    for i, input in enumerate(tqdm(inputs)):
        img_i = cv2.imread(input)
        img_i = cv2.cvtColor(img_i, cv2.COLOR_BGR2GRAY)
        img_r = cv2.imread(os.path.join(ref_path, input.split('/')[-1]))
        img_r = cv2.cvtColor(img_r, cv2.COLOR_BGR2GRAY)

        tmp_flow = compute_flow(img_i, img_r)
        # tmp_flow = ToImg(tmp_flow)

        if not os.path.exists(os.path.join(flow_path, 'color')):
            os.makedirs(os.path.join(flow_path, 'color'))
        tmp_flow_color = flow_vis.flow_to_color(tmp_flow, convert_to_bgr=False)
        cv2.imwrite(os.path.join(flow_path, 'color', input.split('/')[-1]), tmp_flow_color)

        if not os.path.exists(os.path.join(flow_path, 'u')):
            os.makedirs(os.path.join(flow_path, 'u'))
        cv2.imwrite(os.path.join(flow_path, 'u', input.split('/')[-1]), tmp_flow[:, :, 0])

        if not os.path.exists(os.path.join(flow_path, 'v')):
            os.makedirs(os.path.join(flow_path, 'v'))
        cv2.imwrite(os.path.join(flow_path, 'v', input.split('/')[-1]), tmp_flow[:, :, 1])
    print('complete:' + flow_path)
    return
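compute_flow is not defined in this snippet. A minimal sketch of what such a helper could look like, assuming OpenCV's Farneback dense flow on the two grayscale frames (the original project may well use a different estimator):

import cv2

def compute_flow(img_i, img_r):
    # Dense flow between the two grayscale frames, returned as an
    # (h, w, 2) float32 array (u in channel 0, v in channel 1), which is
    # the layout flow_vis.flow_to_color expects.
    return cv2.calcOpticalFlowFarneback(img_r, img_i, None,
                                        0.5, 3, 15, 3, 5, 1.2, 0)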
Example #2
 def process_all_images(self):
     # get a list of png imgs
     curr_img_list = glob.glob(os.path.join(self.input_dir,
                                            "image_2/*.png"))
     curr_img_list = sorted(curr_img_list)
     log_fn = os.path.join(self.output_dir, 'get_optical_flow.log')
     for curr_img_path in curr_img_list:
         im2_fn = curr_img_path
         seq = curr_img_path.split("/")[-1].split(".")[
             0]  # get serial number of the img
         im1_fn = os.path.join(self.prev_dir, seq +
                               '_01.png')  # get path of current-1 image
         im0_fn = os.path.join(self.prev_dir, seq +
                               '_02.png')  # get path of current-2 image
         print("processing img #" + seq)
         import time
         start = time.time()
         of = self.pwc_fusion(
             im0_fn, im1_fn, im2_fn
         )  # select either self.pwc_net(im1_fn, im2_fn) or self.pwc_fusion(im0_fn, im1_fn, im2_fn)
         of_fn = os.path.join(self.output_dir, seq + '_pwc_fusion.npy')
         np.save(of_fn, of)
         flow_color = flow_vis.flow_to_color(
             of, convert_to_bgr=True
         )  # Apply the coloring (for OpenCV, set convert_to_bgr=True)
         flow_color_fn = os.path.join(self.output_dir,
                                      seq + '_pwc_fusion.png')
         cv2.imwrite(flow_color_fn, flow_color)
         end = time.time()
         print("completed in " + str(end - start) + "s")
         with open(log_fn, "a") as f:
             f.write("completed img #" + seq + " in " + str(end - start) +
                     "s")
Example #3
def main():
    dataset_dir = cfg.DATA_ROOT_DIR
    alignments_path = os.path.join(
        dataset_dir, "{0}_selfsupervised.json".format(cfg.DATA_TYPE))
    flow_normalization = 100.0  # in pixels

    with open(alignments_path, "r") as f:
        pairs = json.load(f)

    for pair in pairs:
        source_color = cv2.imread(
            os.path.join(dataset_dir, pair["source_color"]))
        target_color = cv2.imread(
            os.path.join(dataset_dir, pair["target_color"]))
        optical_flow = utils.load_flow(
            os.path.join(dataset_dir, pair["optical_flow"]))

        optical_flow = np.moveaxis(optical_flow, 0, -1)  # (h, w, 2)

        invalid_flow = optical_flow == -np.Inf
        optical_flow[invalid_flow] = 0.0

        flow_color = flow_vis.flow_to_color(optical_flow, convert_to_bgr=False)

        cv2.imshow("Source", source_color)
        cv2.imshow("Target", target_color)
        cv2.imshow("Flow", flow_color)

        cv2.waitKey(0)
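Note that flow_to_color returns an RGB image when convert_to_bgr=False, while cv2.imshow expects BGR, so the "Flow" window above shows red and blue swapped. If that matters, the flow can be rendered directly in OpenCV's channel order (as the comment in Example #23 below also suggests), a drop-in adjustment for the loop body:

        flow_color = flow_vis.flow_to_color(optical_flow, convert_to_bgr=True)
        cv2.imshow("Flow", flow_color)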
Example #4
def save_flow_video(flow_array, save_path):
    def get_img_size(image):
        height, width, _ = image.shape
        return width, height

    save_dir = os.path.dirname(save_path)
    if not os.path.exists(save_dir):
        os.makedirs(save_dir)

    flow_imgs = []
    for flow in flow_array:
        flow_img = flow_vis.flow_to_color(flow, convert_to_bgr=False)
        flow_imgs.append(flow_img)

    fps = 10
    size = get_img_size(flow_imgs[0])

    out = cv2.VideoWriter(save_path, cv2.VideoWriter_fourcc(*'DIVX'), fps,
                          size)
    for i in range(len(flow_imgs)):
        out.write(flow_imgs[i])

        if i % 2500 == 0:
            logging.info('Building video: {0}'.format(i))

    out.release()

    print("Video saved at " + save_path)
Example #5
def visualize_flows(frames, flows, num_imgs, video_save_path=None):
    
    combined = list(zip(frames, flows))
    enumerated = list(enumerate(combined))
    random_samples = random.choices(enumerated[1:len(enumerated)-1], k=num_imgs)
    
    f, axes = plt.subplots(math.ceil(num_imgs/4), 4, figsize = (60, 40))
    
    print("Raw Images")
    for num, axs in enumerate(axes):
        for i, ax in enumerate(axs):
            index = (num * 4 + i)
            if  index < num_imgs:
                img = random_samples[index][1][0]
                ax.imshow(img, aspect='auto')
                
    plt.show()
              
    print("FlowNet2 Images")
    f2, axes2 = plt.subplots(math.ceil(num_imgs/4), 4, figsize = (60, 40))
    for num, axs in enumerate(axes2):
        for i, ax in enumerate(axs):
            index = (num * 4 + i)
            if  index < num_imgs:
                flow_color = flow_vis.flow_to_color(random_samples[index][1][1], convert_to_bgr=False)
                ax.imshow(flow_color, aspect='auto')
                
    plt.show()
                
    print("OpenCV Images")
    f3, axes3 = plt.subplots(math.ceil(num_imgs/4), 4, figsize = (60, 40))
    for num, axs in enumerate(axes3):
        for i, ax in enumerate(axs):
            index = (num * 4 + i)
            if  index < num_imgs:
                img_index = random_samples[index][0]
                frame1 = frames[img_index]
                frame2 = frames[img_index + 1]
                flow = FlowUtils.calc_opt_flow(frame1, frame2)
                flow_color = flow_vis.flow_to_color(flow, convert_to_bgr=False)
                ax.imshow(flow_color, aspect='auto')

    plt.show()
    
    if video_save_path:
        VideoUtils.save_flow_video(flows, video_save_path)
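FlowUtils.calc_opt_flow is not shown in this snippet. A plausible minimal stand-in, assuming it wraps OpenCV's Farneback estimator on grayscale frames (an assumption, not the project's actual helper):

import cv2

def calc_opt_flow(frame1, frame2):
    # Grayscale both frames and compute dense Farneback flow, returning an
    # (h, w, 2) array suitable for flow_vis.flow_to_color.
    g1 = cv2.cvtColor(frame1, cv2.COLOR_RGB2GRAY)
    g2 = cv2.cvtColor(frame2, cv2.COLOR_RGB2GRAY)
    return cv2.calcOpticalFlowFarneback(g1, g2, None, 0.5, 3, 15, 3, 5, 1.2, 0)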
Example #6
 def magnitudeOP(self,flow_img, path):
     img = plt.imread(path)
     flow_color = flow_vis.flow_to_color(flow_img[:, :, :2], convert_to_bgr=False)
     plt.imshow(flow_color)
     plt.imshow(img, alpha=0.2, cmap='gray')
     plt.title('Magnitude OF')
     plt.xticks([])
     plt.yticks([])
     plt.show()
Example #7
    def magnitudeOP_save(self, flow_img, path, alg, type):
        img = plt.imread(path)
        flow_color = flow_vis.flow_to_color(flow_img[:, :, :2],
                                            convert_to_bgr=False)
        plt.imshow(flow_color)
        plt.imshow(img, alpha=0.2, cmap='gray')
        plt.title('Magnitude OF - ' + alg + ' - ' + type)
        plt.xticks([])
        plt.yticks([])
        # Save before show(); with most matplotlib backends the figure is
        # cleared once the show() window is closed, which would leave the
        # saved file blank.
        plt.savefig('magnitudeOP - ' + alg + ' - ' + type)
        plt.show()
Example #8
def show_df_from_tensor(tensor, path):
    """
    tensor = torch.sqrt(torch.abs(tensor.cpu()))
    temp = torch.zeros(1, H, H)
    tensor_ = torch.cat([tensor, temp], 0).permute(1, 2, 0)
    tensor_ = tensor_.numpy() * 255
    cv2.imwrite(path, tensor_)
    """
    tensor = tensor.cpu().permute(1, 2, 0).numpy()
    #tensor = np.rollaxis(tensor, 2, 0)
    flow_color = flow_vis.flow_to_color(tensor, convert_to_bgr=True)
    cv2.imwrite(path, flow_color)
Example #9
def get_optical_flow(pre, last):
    # use FlowNet to estimate the optical flow

    with torch.no_grad():
        first = torch.FloatTensor(np.ascontiguousarray(pre))
        second = torch.FloatTensor(np.ascontiguousarray(last))
        flow = estimate(first.permute(2, 0, 1), second.permute(2, 0, 1),
                        G.flow_network).detach()
        #flow to rgb
        flow = flow_vis.flow_to_color(flow.numpy().transpose(1, 2, 0),
                                      convert_to_bgr=True)

    return flow / 255.0
Example #10
def run(imagefile1, imagefile2, save_file):
    tensorFirst = torch.FloatTensor(
        numpy.array(PIL.Image.open(imagefile1))[:, :, ::-1].transpose(
            2, 0, 1).astype(numpy.float32) * (1.0 / 255.0))
    tensorSecond = torch.FloatTensor(
        numpy.array(PIL.Image.open(imagefile2))[:, :, ::-1].transpose(
            2, 0, 1).astype(numpy.float32) * (1.0 / 255.0))

    tensorOutput = estimate(tensorFirst, tensorSecond)

    flow_color = flow_vis.flow_to_color(tensorOutput.numpy().transpose(
        1, 2, 0),
                                        convert_to_bgr=True)
    cv2.imwrite(save_file, flow_color)
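A brief usage note for the run helper above; estimate comes from the surrounding flow-estimation script, and the file names below are only placeholders:

# The [:, :, ::-1] slice flips PIL's RGB channels to BGR before the tensors are
# handed to estimate(); the resulting flow is colored and written with OpenCV,
# so convert_to_bgr=True keeps the channel order consistent.
run('first.png', 'second.png', 'flow_color.png')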
Example #11
def get_optical_flow(pre, last):
    '''
    Use LiteFlowNet to estimate optical flow.
    Input: pre is the previous RGB frame, last is the current RGB frame.
    Output: dense-flow visualization; flow_vis converts the flow array to an RGB image.
    '''

    with torch.no_grad():
        first = torch.FloatTensor(np.ascontiguousarray(pre))
        second = torch.FloatTensor(np.ascontiguousarray(last))
        flow = estimate(first.permute(2, 0, 1), second.permute(2, 0, 1),
                        G.flow_network).detach()
        #flow to rgb
        flow = flow_vis.flow_to_color(flow.numpy().transpose(1, 2, 0),
                                      convert_to_bgr=True)

    return flow / 255.0
Example #12
def gen_optflow(flow_filename, target_folder):
    with open(str(flow_filename), 'rb') as f:  # binary mode for np.fromfile
        header = np.fromfile(f, dtype=np.uint8, count=4)
        size = np.fromfile(f, dtype=np.int32, count=2)
        flow_uv = np.fromfile(f, dtype=np.float32) \
            .reshape(config.cropped_height, config.cropped_width, 2)# .transpose(2,0,1)

        # visualize using the color model
        flow_color = flow_vis.flow_to_color(flow_uv, convert_to_bgr=False)
        cv2.imwrite(str(target_folder / (flow_filename.stem + '_color.png')),
                    flow_color)

        # visualize with arrows
        h, w = flow_uv.shape[:2]
        x, y = np.meshgrid(np.arange(w), np.arange(h))
        new_x = np.rint(x + flow_uv[:, :, 0]).astype(dtype=np.int64)
        new_y = np.rint(y + flow_uv[:, :, 1]).astype(dtype=np.int64)
        # new_x = new_x * ((new_x >= 0) & (new_x < w))
        new_x = np.clip(new_x, 0, w)
        # new_y = new_y * ((new_y >= 0) & (new_y < h))
        new_y = np.clip(new_y, 0, h)

        # blank image
        coords_origin = np.array([x.flatten(), y.flatten()]).T
        coords_new = np.array([new_x.flatten(), new_y.flatten()]).T

        img = np.ones(flow_color.shape, np.uint8) * 255
        for i in range(0, len(coords_origin), 1000):
            cv2.arrowedLine(img, tuple(coords_origin[i]), tuple(coords_new[i]),
                            (255, 0, 0), 2)
        cv2.imwrite(str(target_folder / (flow_filename.stem + '_pos_arr.png')),
                    img)

        img_inv = np.ones(flow_color.shape, np.uint8) * 255
        for i in range(0, len(coords_origin), 1000):
            cv2.arrowedLine(img_inv, tuple(coords_new[i]),
                            tuple(coords_origin[i]), (255, 0, 0), 2)
        # Write the image once, after all arrows are drawn (as in the
        # forward-arrow case above).
        cv2.imwrite(
            str(target_folder / (flow_filename.stem + '_neg_arr.png')),
            img_inv)
Example #13
    def infer_sequence(self, lr_data, device):
        """
            Parameters:
                :param lr_data: torch.FloatTensor in shape tchw
                :param device: torch.device

                :return hr_seq: uint8 np.ndarray in shape tchw
        """

        # setup params
        tot_frm, c, h, w = lr_data.size()
        s = self.scale

        # forward
        hr_seq = []
        lr_prev = torch.zeros(1, c, h, w, dtype=torch.float32).to(device)
        hr_prev = torch.zeros(1, c, s * h, s * w,
                              dtype=torch.float32).to(device)

        for i in range(tot_frm):
            with torch.no_grad():
                self.eval()

                lr_curr = lr_data[i:i + 1, ...].to(device)
                hr_curr, hr_flow, hr_warp = self.forward(
                    lr_curr, lr_prev, hr_prev)
                lr_prev, hr_prev = lr_curr, hr_curr

                hr_warp = hr_warp.squeeze(0).cpu().numpy()  # chw|rgb|uint8
                hr_frm = hr_warp.transpose(1, 2, 0)  # hwc
                flow_frm = hr_flow.squeeze(0).cpu().numpy()  # chw|rgb|uint8
                flow_uv = flow_frm.transpose(1, 2, 0)  # hwc
                flow_color = flow_vis.flow_to_color(flow_uv,
                                                    convert_to_bgr=False)

            hr_seq.append(float32_to_uint8(hr_frm))
            # hr_seq.append(float32_to_uint8(flow_color))

        return np.stack(hr_seq)
Example #14
def run_training(args):

    print('---------- Perform Training ----------')

    savedir = args.savepath
    if not os.path.exists(savedir):
        os.mkdir(savedir)

    head_tail = os.path.split(args.dataset)
    savedir = os.path.join(savedir, head_tail[1])

    if not os.path.exists(savedir):
        os.mkdir(savedir)

    if not os.path.exists(os.path.join(savedir, "trained model")):
        os.mkdir(os.path.join(savedir, "trained model"))
        print('creating directory %s' %
              (os.path.join(savedir, "trained model")))

    if not os.path.exists(os.path.join(savedir, "saved training")):
        os.mkdir(os.path.join(savedir, "saved training"))
        print('creating directory %s' %
              (os.path.join(savedir, "saved training")))

    print('XField type: %s' % (args.type))
    print('Dimension of input xfield: %s' % (args.dim))

    #loading images
    images, coordinates, all_pairs, h_res, w_res = load_imgs(args)

    dims = args.dim
    num_n = args.num_n  # number of neighbors
    min_ = np.min(coordinates)
    max_ = np.max(coordinates)

    print('\n ------- Creating the model -------')

    # batch size is num_n + 1 (number of neighbors + target)
    inputs = tf.placeholder(tf.float32, shape=[num_n + 1, 1, 1, len(dims)])

    # Jacobian network
    num_output = len(args.type) * 2

    with tf.variable_scope("gen_flows"):
        flows = Flow(inputs, h_res, w_res, num_output, args.nfg, min_, max_)

    nparams_decoder = np.sum([
        np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
        if v.name.startswith("gen_flows")
    ])
    print('Number of learnable parameters (decoder): %d' % (nparams_decoder))

    # learnt albedo
    # The albedos are initialized with constant 1.0
    if args.type == ['light', 'view', 'time']:

        with tf.variable_scope("gen_flows"):

            # For light-view-time interpolation, we consider num_views*num_times albedos
            albedos = tf.Variable(tf.constant(
                1.0, shape=[dims[1] * dims[2], h_res, w_res, 3]),
                                  name='albedo')
            index_albedo = tf.placeholder(tf.int32, shape=(1, ))
            albedo = tf.gather(albedos, index_albedo, 0)

        nparams = np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
            if v.name.startswith("gen_flows")
        ])
        print(
            'Number of learnable parameters (%d albedos with res %d x %d ): %d'
            % (dims[1] * dims[2], h_res, w_res, nparams - nparams_decoder))

    elif args.type == ['light']:

        with tf.variable_scope("gen_flows"):
            # For light interpolation, we consider just one albedo
            albedo = tf.Variable(tf.constant(1.0, shape=[1, h_res, w_res, 3]),
                                 name='albedo')

        nparams = np.sum([
            np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()
            if v.name.startswith("gen_flows")
        ])
        print(
            'Number of learnable parameters (%d albedos with res %d x %d ): %d'
            % (1, h_res, w_res, nparams - nparams_decoder))

    else:
        # For view and time interpolation, we do not train for albedo, we consider it as a constant non-learnable parameter
        albedo = tf.constant(1.0, shape=[1, h_res, w_res, 3])

    Neighbors = tf.placeholder(tf.float32, shape=[num_n, h_res, w_res, 3])

    # soft blending
    interpolated = Blending_train(inputs, Neighbors, flows, albedo, h_res,
                                  w_res, args)

    Reference = tf.placeholder(tf.float32, shape=[1, h_res, w_res, 3])

    # L1 loss
    loss = tf.reduce_mean((tf.abs(interpolated - Reference)))

    gen_tvars = [
        var for var in tf.trainable_variables()
        if var.name.startswith("gen_flows")
    ]
    learning_rate = tf.placeholder(tf.float32, shape=())
    gen_optim = tf.train.AdamOptimizer(learning_rate)
    gen_grads = gen_optim.compute_gradients(loss, var_list=gen_tvars)
    gen_train = gen_optim.apply_gradients(gen_grads)

    saver = tf.train.Saver(max_to_keep=1000)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    if args.load_pretrained:
        ckpt = tf.train.get_checkpoint_state("%s\\trained model" % (savedir))
        if ckpt:
            print('\n loading pretrained model  ' + ckpt.model_checkpoint_path)
            saver.restore(sess, ckpt.model_checkpoint_path)

    print('------------ Start Training ------------')

    lr = args.lr
    print('Starting learning rate with %0.4f' % (lr))

    stop_l1_thr = 0.01

    iter_end = 100000  # total number of iterations

    indices = np.array([i for i in range(len(all_pairs))])
    if len(indices) < 500:  # we considered around 500 iterations per epoch
        indices = np.repeat(indices, 500 // len(indices))

    epoch_size = len(indices)
    epoch_end = iter_end // epoch_size  # total number of epochs

    if args.type == ['light', 'view', 'time']:

        st = time.time()
        min_loss = 1000
        l1_loss_t = 1
        epoch = 0

        while l1_loss_t > stop_l1_thr and epoch <= epoch_end:

            l1_loss_t = 0
            np.random.shuffle(indices)

            for id in range(epoch_size):

                pair = all_pairs[indices[id], ::]

                input_coords = coordinates[pair[:num_n + 1], ::]
                reference_img = images[pair[:1], ::]
                Neighbors_img = images[pair[1:num_n + 1], ::]
                _index = [pair[-1]]

                _, l1loss = sess.run(
                    [gen_train, loss],
                    feed_dict={
                        inputs: input_coords,
                        Reference: reference_img,
                        Neighbors: Neighbors_img,
                        learning_rate: lr,
                        index_albedo: _index
                    })
                l1_loss_t = l1_loss_t + l1loss

                print(
                    '\r Epoch %3.0d  Iteration %3.0d of %3.0d   Cumulative L1 loss = %3.3f'
                    % (epoch, id + 1, epoch_size, l1_loss_t),
                    end=" ")

            l1_loss_t = l1_loss_t / epoch_size
            print(" elapsed time %3.1f m  Averaged L1 loss = %3.5f " %
                  ((time.time() - st) / 60, l1_loss_t))

            if l1_loss_t < min_loss:
                saver.save(sess, "%s\\trained model\\model.ckpt" % (savedir))
                min_loss = l1_loss_t

            center = np.prod(dims) // 2
            cv2.imwrite("%s/saved training/reference.png" % (savedir),
                        np.uint8(images[center, ::] * 255))

            pair = all_pairs[3 * center + 0, ::]

            out_img, flows_out = sess.run(
                [interpolated, flows],
                feed_dict={
                    inputs: coordinates[pair[:num_n + 1], ::],
                    Neighbors: images[pair[1:num_n + 1], ::],
                    index_albedo: [pair[-1]]
                })

            out_img = np.minimum(np.maximum(out_img, 0.0), 1.0)
            cv2.imwrite("%s/saved training/recons_light.png" % (savedir),
                        np.uint8(out_img[0, ::] * 255))

            flow_color = flow_vis.flow_to_color(flows_out[0, :, :, 0:2],
                                                convert_to_bgr=False)
            cv2.imwrite("%s/saved training/flow_light.png" % (savedir),
                        np.uint8(flow_color))

            flow_color = flow_vis.flow_to_color(flows_out[0, :, :, 2:4],
                                                convert_to_bgr=False)
            cv2.imwrite("%s/saved training/flow_view.png" % (savedir),
                        np.uint8(flow_color))

            flow_color = flow_vis.flow_to_color(flows_out[0, :, :, 4:6],
                                                convert_to_bgr=False)
            cv2.imwrite("%s/saved training/flow_time.png" % (savedir),
                        np.uint8(flow_color))

            pair = all_pairs[3 * center + 1, ::]
            out_img = sess.run(interpolated,
                               feed_dict={
                                   inputs: coordinates[pair[:num_n + 1], ::],
                                   Neighbors: images[pair[1:num_n + 1], ::],
                                   index_albedo: [pair[-1]]
                               })

            out_img = np.minimum(np.maximum(out_img, 0.0), 1.0)
            cv2.imwrite("%s/saved training/recons_view.png" % (savedir),
                        np.uint8(out_img[0, ::] * 255))

            pair = all_pairs[3 * center + 2, ::]
            out_img = sess.run(interpolated,
                               feed_dict={
                                   inputs: coordinates[pair[:num_n + 1], ::],
                                   Neighbors: images[pair[1:num_n + 1], ::],
                                   index_albedo: [pair[-1]]
                               })

            out_img = np.minimum(np.maximum(out_img, 0.0), 1.0)
            cv2.imwrite("%s/saved training/recons_time.png" % (savedir),
                        np.uint8(out_img[0, ::] * 255))
            epoch = epoch + 1

            if epoch == epoch_end // 2:
                lr = 0.00005

    if args.type == ['view'] or args.type == ['time'] or args.type == ['light']:

        st = time.time()
        img_mov = cv2.VideoWriter(
            '%s/saved training/epoch_recons.mp4' % (savedir),
            cv2.VideoWriter_fourcc(*'mp4v'), 10, (w_res, h_res))
        flow_mov = cv2.VideoWriter(
            '%s/saved training/epoch_flows.mp4' % (savedir),
            cv2.VideoWriter_fourcc(*'mp4v'), 10, (w_res, h_res))

        min_loss = 1000
        l1_loss_t = 1
        epoch = 0

        while l1_loss_t > stop_l1_thr and epoch <= epoch_end:

            l1_loss_t = 0
            np.random.shuffle(indices)

            for id in range(epoch_size):

                pair = all_pairs[indices[id], ::]
                input_coords = coordinates[pair[:num_n + 1], ::]
                reference_img = images[pair[:1], ::]
                Neighbors_img = images[pair[1:num_n + 1], ::]

                _, l1loss = sess.run(
                    [gen_train, loss],
                    feed_dict={
                        inputs: input_coords,
                        Reference: reference_img,
                        Neighbors: Neighbors_img,
                        learning_rate: lr,
                    })

                l1_loss_t = l1_loss_t + l1loss
                print(
                    '\r Epoch %3.0d  Iteration %3.0d of %3.0d   Cumulative L1 loss = %3.3f'
                    % (epoch, id + 1, epoch_size, l1_loss_t),
                    end=" ")

            l1_loss_t = l1_loss_t / epoch_size
            print(" elapsed time %3.1f m  Averaged L1 loss = %3.5f" %
                  ((time.time() - st) / 60, l1_loss_t))

            if l1_loss_t < min_loss:
                saver.save(sess, "%s\\trained model\\model.ckpt" % (savedir))
                min_loss = l1_loss_t

            if args.type == ['light']:

                albedo_out = np.minimum(np.maximum(sess.run(albedo), 0.0), 1.0)
                cv2.imwrite("%s/saved training/albedo.png" % (savedir),
                            np.uint8(albedo_out[0, :, :, :] * 255))

            center = np.prod(dims) // 2
            cv2.imwrite("%s/saved training/reference.png" % (savedir),
                        np.uint8(images[center, ::] * 255))

            pair = all_pairs[(len(all_pairs) // len(images)) * center, ::]

            out_img, flows_out = sess.run(
                [interpolated, flows],
                feed_dict={
                    inputs: coordinates[pair[:num_n + 1], ::],
                    Neighbors: images[pair[1:num_n + 1], ::]
                })

            out_img = np.minimum(np.maximum(out_img, 0.0), 1.0)
            cv2.imwrite("%s/saved training/recons.png" % (savedir),
                        np.uint8(out_img[0, ::] * 255))

            flow_color = flow_vis.flow_to_color(flows_out[0, :, :, 0:2],
                                                convert_to_bgr=False)
            cv2.imwrite("%s/saved training/flow.png" % (savedir),
                        np.uint8(flow_color))
            img_mov.write(np.uint8(out_img[0, ::] * 255))
            flow_mov.write(np.uint8(flow_color))
            epoch = epoch + 1

            if epoch == epoch_end // 2:
                lr = 0.00005

        img_mov.release()
        flow_mov.release()
Example #15
from skimage.io import imread
from scipy import signal, ndimage
import numpy as np
import time
import scipy.io as sio
from matplotlib.pyplot import imshow, show, figure
import skimage.transform as tf
import IPython
import flow_vis

image1 = imread('frame1.png', as_gray=True)
image2 = imread('frame2.png', as_gray=True)

flow_gt = sio.loadmat('flow_gt.mat')['groundTruth']
flow_image_gt = flow_vis.flow_to_color(flow_gt)


# PART A: Lucas-Kanade
def lukas_kanade(I1, I2, window_size=5):

    w = window_size // 2  # window_size is odd, all the pixels with offset in between [-w, w] are inside the window
    I1 = I1 / 255.  # normalize pixels
    I2 = I2 / 255.  # normalize pixels

    # Define convolution kernels.
    kernel_x = [[-1 / 4, 1 / 4], [-1 / 4, 1 / 4]]
    kernel_y = [[-1 / 4, -1 / 4], [1 / 4, 1 / 4]]
    kernel_t = [[1 / 4, 1 / 4], [1 / 4, 1 / 4]]

    # Compute partial derivatives.
    Ix = signal.convolve2d(I1 + I2, kernel_x, mode='same')
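The snippet is cut off after the Ix computation. For orientation, the remaining Lucas-Kanade steps usually look like the sketch below (an illustrative completion, not the original assignment code): compute Iy and It with the other kernels, then solve the 2x2 least-squares system Ix*u + Iy*v = -It inside each window.

    Iy = signal.convolve2d(I1 + I2, kernel_y, mode='same')
    It = signal.convolve2d(I2 - I1, kernel_t, mode='same')

    u = np.zeros(I1.shape)
    v = np.zeros(I1.shape)
    for i in range(w, I1.shape[0] - w):
        for j in range(w, I1.shape[1] - w):
            # Stack the derivatives inside the window and solve A [u, v]^T = b.
            ix = Ix[i - w:i + w + 1, j - w:j + w + 1].flatten()
            iy = Iy[i - w:i + w + 1, j - w:j + w + 1].flatten()
            it = It[i - w:i + w + 1, j - w:j + w + 1].flatten()
            A = np.stack([ix, iy], axis=1)
            b = -it
            nu, _, _, _ = np.linalg.lstsq(A, b, rcond=None)
            u[i, j], v[i, j] = nu
    return np.stack([u, v], axis=-1)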
Example #16
def main():
    transform = torchvision.transforms.ColorJitter(brightness=(0.6, 1.0),
                                                   contrast=(0.6, 1.0))
    train_loader = torch.utils.data.DataLoader(KITTI(mode="training",
                                                     transform=transform),
                                               batch_size=4,
                                               shuffle=True)
    test_loader = torch.utils.data.DataLoader(KITTI(mode="testing"),
                                              batch_size=4,
                                              shuffle=False)
    model = PWCDCNet()
    device = torch.device("cuda")
    model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0005)
    wandb.watch(model)

    for epoch in range(1, 300):
        training_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            optimizer.zero_grad()
            left_image, right_image, target_flows, valids = data[0].to(
                device), data[1].to(device), data[2].to(device), data[3].to(
                    device)
            # Shapes:
            ## Left Image: (batch_size, 3, 320, 896)
            ## Right Image: (batch_size, 3, 320, 896)
            ## Target Flows: (batch_size, 2, 320, 896)
            ## Valids: (batch_size, 320, 896)
            flow2, flow3, flow4, flow5, flow6 = model(
                torch.cat((left_image, right_image), 1))
            loss = ProbabilisticNLLLoss([flow2, flow3, flow4, flow5, flow6],
                                        target_flows, valids)
            loss.backward()
            optimizer.step()
            training_loss += loss.item()
        print("[Epoch {}] Training loss: {:.5f}".format(epoch, training_loss))
        wandb.log({"Epoch Number": epoch, "Training Loss": training_loss})
        running_loss = 0.0
        with torch.no_grad():
            model.eval()
            validation_loss = 0.0
            rgb_images = []
            predicted_flow_images = []
            target_flow_images = []
            for i, data in enumerate(test_loader, 0):
                left_image, right_image, target_flows, valids = data[0].to(
                    device), data[1].to(device), data[2].to(
                        device), data[3].to(device)
                flow2 = model(torch.cat((left_image, right_image), 1))
                upsample = torch.nn.Upsample(size=(320, 896),
                                             mode='bilinear',
                                             align_corners=False)
                upsampled_flow2 = upsample(flow2)
                upsampled_flow2 = upsampled_flow2 * (320 // flow2.shape[2])
                validation_loss += total_EPE_loss([upsampled_flow2],
                                                  target_flows, valids)

                if i == 0:
                    for idx in range(4):
                        img_show = left_image[idx].permute(1, 2,
                                                           0).cpu().numpy()
                        img_show = img_show.clip(0, 1)
                        rgb_images.append(wandb.Image(img_show, grouping=3))
                        flowcolored_pred = flow_vis.flow_to_color(
                            upsampled_flow2[idx, :2, :, :].permute(
                                1, 2, 0).cpu().numpy())
                        predicted_flow_images.append(
                            wandb.Image(flowcolored_pred))

                        flowcolored_gt = flow_vis.flow_to_color(
                            (target_flows[idx].permute(
                                1, 2, 0)[:, :, :2]).cpu().numpy())
                        target_flow_images.append(wandb.Image(flowcolored_gt))
                    images_for_wandb = list(
                        chain.from_iterable(
                            zip(rgb_images, predicted_flow_images,
                                target_flow_images)))
                    wandb.log({'examples': images_for_wandb}, commit=False)
            print("Validation loss: {:.5f}".format(validation_loss))
            wandb.log({"Validation Loss": validation_loss})
        model.train()
    torch.save(model.state_dict(), './feb22/last_aug_300_06.pth')
Example #17
                        else:
                            embedding[..., 0] -= np.arange(embedding.shape[0])[:, None]
                            embedding[..., 1] -= np.arange(embedding.shape[1])[None, :]

                        print(embedding.shape)
                        outpath = f"/groups/funke/home/wolfs2/local/data/lsl/evaluation/{expname_plus_setup}"
                        base_name = f"{i:02}_{bw}_{sf}_{idx}_{postfix}"

                        os.makedirs(outpath, exist_ok=True)
                        # imsave(
                        #     f"{outpath}/{base_name}_0_embedding.png", embedding[..., :3])
                        # print(embedding.shape)
                        # exit()

                        if SAVEIMG:
                            rgb = flow_vis.flow_to_color(embedding[..., :2], convert_to_bgr=False)
                            imsave(
                                f"{outpath}/{base_name}_0_embedding.png", rgb)

                        seg = pred_zarr[f"inference/{expname_plus_setup}/pn_embedding/{idx}/inst_embedding"][:].transpose(1,2,0)


                        if postfix=="_full":
                            predicted_segmentation += 1
                            predicted_segmentation[gt_segmentation==0] = 0

                        # size filter
                        if sf > 0:
                            values, counts = np.unique(
                                predicted_segmentation, return_counts=True)
                            # could be vectorized!
Example #18
def train(args):
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)

    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        kwargs = {'num_workers': 0, 'pin_memory': False}
    else:
        kwargs = {}

    if args.model_type == "rnn":
        transformer = transformer_net.TransformerRNN(args.pad_type)
        seq_size = 4
    else:
        transformer = transformer_net.TransformerNet(args.pad_type)
        seq_size = 2

    train_dataset = dataset.DAVISDataset(args.dataset,
                                         seq_size=seq_size,
                                         use_flow=args.flow)
    train_loader = DataLoader(train_dataset, batch_size=1, **kwargs)

    if args.model_type == "rnn":
        transformer = transformer_net.TransformerRNN(args.pad_type)
    else:
        transformer = transformer_net.TransformerNet(args.pad_type)
    model_path = args.init_model
    print("=> Load from model file %s" % model_path)
    transformer.load_state_dict(torch.load(model_path))
    transformer.train()
    if args.model_type == "rnn":
        transformer.conv1 = transformer_net.ConvLayer(6,
                                                      32,
                                                      kernel_size=9,
                                                      stride=1,
                                                      pad_type=args.pad_type)
    optimizer = torch.optim.Adam(transformer.parameters(), args.lr)
    mse_loss = torch.nn.MSELoss()
    l1_loss = torch.nn.SmoothL1Loss()

    vgg = Vgg16()
    vgg.load_state_dict(
        torch.load(os.path.join(args.vgg_model, "vgg16.weight")))
    vgg.eval()

    if args.cuda:
        transformer.cuda()
        vgg.cuda()
        mse_loss.cuda()
        l1_loss.cuda()

    style = utils.tensor_load_resize(args.style_image, args.style_size)
    style = style.unsqueeze(0)
    print("=> Style image size: " + str(style.size()))
    print("=> Pixel OFB loss weight: %f" % args.time_strength)

    style = utils.preprocess_batch(style)
    if args.cuda: style = style.cuda()
    utils.tensor_save_bgrimage(
        style[0].detach(), os.path.join(args.save_model_dir,
                                        'train_style.jpg'), args.cuda)
    style = utils.subtract_imagenet_mean_batch(style)
    features_style = vgg(style)
    gram_style = [utils.gram_matrix(y).detach() for y in features_style]

    for e in range(args.epochs):
        train_loader.dataset.reset()
        transformer.train()
        transformer.cuda()
        agg_content_loss = agg_style_loss = agg_pixelofb_loss = 0.
        iters = 0
        anormaly = False
        for batch_id, (x, flow, conf) in enumerate(train_loader):
            x, flow, conf = x[0], flow[0], conf[0]
            iters += 1

            optimizer.zero_grad()
            x = utils.preprocess_batch(x)  # (N, 3, 256, 256)
            if args.cuda:
                x = x.cuda()
                flow = flow.cuda()
                conf = conf.cuda()
            y = transformer(x)  # (N, 3, 256, 256)

            xc = center_crop(x.detach(), y.size(2), y.size(3))

            vgg_y = utils.subtract_imagenet_mean_batch(y)
            vgg_x = utils.subtract_imagenet_mean_batch(xc)

            features_y = vgg(vgg_y)
            features_xc = vgg(vgg_x)

            #content target
            f_xc_c = features_xc[2].detach()
            # content
            f_c = features_y[2]

            #content_feature_target = center_crop(f_xc_c, f_c.size(2), f_c.size(3))
            content_loss = args.content_weight * mse_loss(f_c, f_xc_c)

            style_loss = 0.
            for m in range(len(features_y)):
                gram_s = gram_style[m]
                gram_y = utils.gram_matrix(features_y[m])
                batch_style_loss = 0
                for n in range(gram_y.shape[0]):
                    batch_style_loss += args.style_weight * mse_loss(
                        gram_y[n], gram_s[0])
                style_loss += batch_style_loss / gram_y.shape[0]

            warped_y, warped_y_mask = warp(y[1:], flow)
            warped_y = warped_y.detach()
            warped_y_mask *= conf
            pixel_ofb_loss = args.time_strength * weighted_mse(
                y[:-1], warped_y, warped_y_mask)

            total_loss = content_loss + style_loss + pixel_ofb_loss

            total_loss.backward()
            optimizer.step()

            if (batch_id + 1) % 100 == 0:
                prefix = args.save_model_dir + "/"
                idx = (batch_id + 1) // 100
                flow_image = flow_to_color(
                    flow[0].detach().cpu().numpy().transpose(1, 2, 0))
                utils.save_image(prefix + "forward_flow_%d.png" % idx,
                                 flow_image)
                warped_x, warped_x_mask = warp(x[1:], flow)
                warped_x = warped_x.detach()
                warped_x_mask *= conf
                for i in range(2):
                    utils.tensor_save_bgrimage(
                        y.data[i], prefix + "out_%d-%d.png" % (idx, i),
                        args.cuda)
                    utils.tensor_save_bgrimage(
                        x.data[i], prefix + "in_%d-%d.png" % (idx, i),
                        args.cuda)
                    if i < warped_y.shape[0]:
                        utils.tensor_save_bgrimage(
                            warped_y.data[i],
                            prefix + "wout_%d-%d.png" % (idx, i), args.cuda)
                        utils.tensor_save_bgrimage(
                            warped_x.data[i],
                            prefix + "win_%d-%d.png" % (idx, i), args.cuda)
                        utils.tensor_save_image(
                            prefix + "conf_%d-%d.png" % (idx, i),
                            warped_x_mask.data[i])

            agg_content_loss += content_loss.data
            agg_style_loss += style_loss.data
            agg_pixelofb_loss += pixel_ofb_loss.data

            agg_total = agg_content_loss + agg_style_loss + agg_pixelofb_loss
            mesg = "{}\tEpoch {}:\t[{}/{}]\tcontent: {:.6f}\tstyle: {:.6f}\tpixel ofb: {:.6f}\ttotal: {:.6f}".format(
                time.ctime(), e + 1, batch_id + 1, len(train_loader),
                agg_content_loss / iters, agg_style_loss / iters,
                agg_pixelofb_loss / iters, agg_total / iters)
            print(mesg)
            agg_content_loss = agg_style_loss = agg_pixelofb_loss = 0.0
            iters = 0

        # save model
        transformer.eval()
        transformer.cpu()
        save_model_filename = "epoch_" + str(e) + "_" + str(
            args.content_weight) + "_" + str(args.style_weight) + ".model"
        save_model_path = os.path.join(args.save_model_dir,
                                       save_model_filename)
        torch.save(transformer.state_dict(), save_model_path)

    print("\nDone, trained model saved at", save_model_path)
Example #19
    _, filename, _ = fileparts(image_path)

    frame2 = load_image(image_path)
    next = rgb2gray(frame2)
    # save_image(next, '/home/xinshuo/aa.jpg')
    # zxc
    # cv2.imshow('asd', next)
    # k = cv2.waitKey(30)
    # zxc

    # ret, frame2 = cap.read()
    # next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)

    flow_uv = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 5, 1.2, 0)

    rgb = flow_to_color(flow_uv, convert_to_bgr=False)

    # mag, ang = cv2.cartToPolar(flow[...,0], flow[...,1])
    # hsv[...,0] = ang * 180 / np.pi / 2
    # hsv[...,2] = cv2.normalize(mag, None, 0, 255, cv2.NORM_MINMAX)
    # rgb = cv2.cvtColor(hsv, cv2.COLOR_HSV2BGR)

    # cv2.imshow('frame2', rgb)
    # k = cv2.waitKey(30) & 0xff
    # if k == 27:
        # break
    # elif k == ord('s'):
    save_path_tmp = os.path.join(save_dir, filename+'.jpg')
    save_image(rgb, save_path=save_path_tmp)
    # cv2.imwrite('opticalfb.png', frame2)
    # cv2.imwrite('opticalhsv.png', rgb)
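The fragment above relies on helpers (fileparts, load_image, rgb2gray, save_image) and a prvs frame prepared earlier. The same Farneback-plus-flow_vis pattern written self-contained with OpenCV only might look like this (file names are illustrative):

import cv2
import flow_vis

prev_img = cv2.imread('frame_000.jpg')
curr_img = cv2.imread('frame_001.jpg')
prvs = cv2.cvtColor(prev_img, cv2.COLOR_BGR2GRAY)
curr = cv2.cvtColor(curr_img, cv2.COLOR_BGR2GRAY)

flow_uv = cv2.calcOpticalFlowFarneback(prvs, curr, None, 0.5, 3, 15, 3, 5, 1.2, 0)
flow_bgr = flow_vis.flow_to_color(flow_uv, convert_to_bgr=True)  # BGR for cv2.imwrite
cv2.imwrite('frame_001_flow.jpg', flow_bgr)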
Example #20
def visualiseFlow(img_pair, flow_gt, flow_pred, flow_prob, ds, idx=0):
    dim = flow_pred[0].size(1)
    H, W = img_pair[0].size()[2:]
    with torch.no_grad():
        full_vis_list = []
        for idx in range(img_pair[0].size(0)):
            raw_img0 = recoverImage(img_pair[0][idx].data)
            raw_img1 = recoverImage(img_pair[1][idx].data)
            raw_img0 = raw_img0 * 255.0
            raw_img1 = raw_img1 * 255.0

            for l in range(len(flow_pred)):
                # Image

                # for i in range(3):
                #     raw_img0[i, :, :] = raw_img0[i, :, :] * 255.0 / torch.max(raw_img0[i, :, :])
                #     raw_img1[i, :, :] = raw_img1[i, :, :] * 255.0 / torch.max(raw_img1[i, :, :])

                vis_list = [raw_img0, raw_img1]

                # Ground truth flow
                gt_flow, valid_mask = downsample_flow(flow_gt, 1. / 2 ** (ds - l))
                gt_flow = F.interpolate(gt_flow, (H, W), mode="nearest", recompute_scale_factor=True)[idx]
                valid_mask = F.interpolate(valid_mask, (H, W), mode="nearest", recompute_scale_factor=True)[idx]

                max_mag1 = torch.max(torch.norm(gt_flow, 2, 0))

                # predicted flow
                pred_flow = flow_pred[l]
                pred_flow = F.interpolate(pred_flow, (H, W), mode='nearest', recompute_scale_factor=True)[idx]
                max_mag2 = torch.max(torch.norm(pred_flow, 2, 0))

                max_mag = max(float(max_mag1), float(max_mag2))
                # print("GT Flow", gt_flow.size())
                # print("GT Flow", gt_flow.shape)
                gt_flow_np = gt_flow.detach().cpu().permute(1, 2, 0).numpy()
                pred_flow_np = pred_flow.cpu().permute(1, 2, 0).numpy()
                gt_flow_vis = flow_vis.flow_to_color(gt_flow_np, convert_to_bgr=False)
                pred_flow_vis = flow_vis.flow_to_color(pred_flow_np, convert_to_bgr=False)
                vis_list.append(torch.from_numpy(gt_flow_vis).permute(2, 0, 1).cuda())
                vis_list.append(torch.from_numpy(pred_flow_vis).permute(2, 0, 1).cuda())

                # epe error visualization
                epe_error = torch.norm(pred_flow - gt_flow, 2, 0, keepdim=False) * valid_mask[0, :, :]
                normalizer = max(torch.max(epe_error), 1)
                epe_error = 1 - epe_error / normalizer
                vis_list.append(visualiseHeatmap(epe_error))

                # confidence map visualization
                prob = flow_prob[l].data
                prob = probGather(prob, normalize=True)
                if prob.size(2) != H or prob.size(3) != W:
                    prob = F.interpolate(prob, (H, W), mode='nearest', recompute_scale_factor=True)
                vis_list.append(visualiseHeatmap(prob[idx].squeeze(), cv2.COLORMAP_BONE))

                vis = torch.cat(vis_list, dim=2)
                if l == 0:
                    ms_vis = vis
                else:
                    ms_vis = torch.cat([ms_vis, vis], dim=1)

            full_vis_list.append(ms_vis.unsqueeze(0))
        return full_vis_list
Example #22
                         align_corners=False)
    img2 = F.interpolate(img2,
                         size=input_size,
                         mode='bilinear',
                         align_corners=False)
else:
    input_size = orig_size

input_t = torch.cat([img1, img2], 1).cuda()

output = model(input_t).data

flow = div_flow * F.interpolate(
    output, size=input_size, mode='bilinear', align_corners=False)

if input_size != orig_size:
    scale_h = orig_size[0] / input_size[0]
    scale_w = orig_size[1] / input_size[1]
    flow = F.interpolate(flow,
                         size=orig_size,
                         mode='bilinear',
                         align_corners=False)
    flow[:, 0, :, :] *= scale_w
    flow[:, 1, :, :] *= scale_h

flow = flow[0].cpu().permute(1, 2, 0).numpy()

flow_color = flow_to_color(flow, convert_to_bgr=True)

cv2.imwrite('./data/flow.png', flow_color)
Example #23
# Sets image saturation to maximum
mask[..., 1] = 255

while(cap.isOpened()):
    # ret = a boolean return value from getting the frame, frame = the current frame being projected in the video
    ret, frame = cap.read()
    # Opens a new window and displays the input frame
    cv.imshow("input", frame)
    # Converts each frame to grayscale - we previously only converted the first frame to grayscale
    gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)
    # Calculates dense optical flow by Farneback method
    # https://docs.opencv.org/3.0-beta/modules/video/doc/motion_analysis_and_object_tracking.html#calcopticalflowfarneback
    flow = cv.calcOpticalFlowFarneback(prev_gray, gray, None, 0.5, 3, 15, 3, 5, 1.2, 0)
    
    # Apply the coloring (for OpenCV, set convert_to_bgr=True)
    flow_color = flow_vis.flow_to_color(flow, convert_to_bgr=False)
    
    
 
    # Computes the magnitude and angle of the 2D vectors
    magnitude, angle = cv.cartToPolar(flow[..., 0], flow[..., 1])
    # Sets image hue according to the optical flow direction
    mask[..., 0] = angle * 180 / np.pi / 2
    # Sets image value according to the optical flow magnitude (normalized)
    mask[..., 2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)
    # Converts HSV to RGB (BGR) color representation
    rgb = cv.cvtColor(mask, cv.COLOR_HSV2BGR)
    
    # Open a new window and displays the output frame
    dense_flow = cv.addWeighted(frame, 1,rgb, 2, 0)
    cv.imshow('flow', draw_flow(gray, flow))
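The loop is truncated here. In the usual OpenCV dense-flow demo this pattern continues roughly as follows (a sketch of the likely remainder, not the original code): carry the current frame forward as the previous frame and exit on a key press.

    # Use the current frame as the previous frame in the next iteration
    prev_gray = gray
    # Exit when the user presses 'q'
    if cv.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv.destroyAllWindows()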
Example #24
def main():
    data = get_data()

    # ==========================================================================
    # Sidebar 
    # ==========================================================================
    select_flux_scaling = st.sidebar.selectbox(
        "Flux scaling", 
        ["Linear", "Log"]
    )

    select_band = st.sidebar.selectbox(
        "Band",
        [i for i in range(data["flux"].shape[-1])]
    )

    select_n = st.sidebar.select_slider(
        "nth Closest Source",
        [str(i) for i in range(1, data["cm"].shape[-1]+1)]
    )

    select_src = st.sidebar.select_slider(
        "Select Deblended Source",
        [str(i) for i in range(1, len(data["srcs"])+1)]
    )

    select_blend_alpha = st.sidebar.slider(
        "Select Source Mask Alpha",
        min_value=0.0, 
        max_value=1.0, 
        step=0.1,
        value=0.5,
    )
    # ==========================================================================


    st.write("""
    # Morpheus-Deblend View
    """)


    # ==========================================================================
    # Input Flux Display
    # ==========================================================================
    flux = data["flux"][...,select_band]
    if select_flux_scaling=="Linear":
        flux_img = scale_flux(flux)
    elif select_flux_scaling=="Log":
        flux_img = rescale(
            np.log10(
                flux - flux.min() + 1e-3
            )
        )

    st.write("### Input Image")
    st.image(flux_img,use_column_width=True)
    # ==========================================================================
        

    # ==========================================================================
    # Center of Mass
    # ==========================================================================
    com = data["com"]
    suppressed = data["suppressed"]

    st.write("### Identified Sources")
    
    col_com_raw, col_com_suppressed = st.beta_columns(2)
    col_com_raw.write("Center of Mass Raw")
    col_com_raw.image(colorize_array(com[...,0]), use_column_width=True)
    col_com_suppressed.write("Center of Mass Suppressed")
    col_com_suppressed.image(suppressed, use_column_width=True)
    # ==========================================================================


    # ==========================================================================
    # Claim Vectors/Claim Maps
    # ==========================================================================
    n = int(select_n) - 1
    cm = data["cm"][...,select_band, n]
    cv = flow_vis.flow_to_color(
        data["cv"][:, :, n, [1,0]]
    )

    nth_string = select_n + resolve_n_post(select_n)
    col_cv, col_cm = st.beta_columns(2)
    col_cv.write(f"Claim Vector for {nth_string} Source")
    col_cv.image(cv, use_column_width=True)
    col_cm.write(f"Claim Map for {nth_string} Source")
    col_cm.image(colorize_array(cm), use_column_width=True)
    # ==========================================================================


    # ==========================================================================
    # Deblended Sources
    # ==========================================================================
    n_src = int(select_src) - 1
    src = data["srcs"][n_src][:, :, select_band]


    if select_flux_scaling=="Linear":
        src_img = scale_flux(src)
    elif select_flux_scaling=="Log":
        src_img = rescale(
            np.log10(
                src - src.min() + 1e-3
            )
        )
    mask = src != 0
    mask_img = np.zeros(list(mask.shape) + [4], dtype=np.float32)
    
    mask_img[mask, :] = np.array([1,0,0,1.0])
    #mask_img[~mask, :] = np.array([0,0,0,0])

    rgba_flux = (np.dstack(
        [flux_img, flux_img, flux_img, np.ones_like(flux_img)]
        ) * 255).astype(np.uint8)

    img1 = Image.fromarray((mask_img * 255).astype(np.uint8))
    img2 = Image.fromarray(rgba_flux)
    blended = Image.blend(img1, img2, select_blend_alpha)



    st.write(f"Source {select_src}")
    col_mask, col_src_flux = st.beta_columns(2)
    col_mask.image(np.array(blended), use_column_width=True)
    col_src_flux.image(src_img, use_column_width=True)
Example #25
 def vis_flow(self, flow):
     return flow_vis.flow_to_color(flow, convert_to_bgr=True)
Example #26

def read_flow(filename):
    TAG_FLOAT = 202021.25

    with open(filename, 'rb') as f:
        flo_number = np.fromfile(f, np.float32, count=1)[0]
        assert flo_number == TAG_FLOAT, 'Flow number %r incorrect. Invalid .flo file' % flo_number

        w = np.fromfile(f, np.int32, count=1)[0]
        h = np.fromfile(f, np.int32, count=1)[0]
        data = np.fromfile(f, np.float32, count=2 * w * h)

        return np.resize(data, (int(h), int(w), 2))


parser = argparse.ArgumentParser(description='Process some integers.')
parser.add_argument('input', type=str, help='input .flo files')
parser.add_argument('output', type=str, help='output .png files')
args = parser.parse_args()
input = args.input
output = args.output
images = glob.glob(input)
images.sort()

for i, img_path in enumerate(images):
    name = os.path.basename(img_path)
    img = read_flow(img_path)
    img = flow_vis.flow_to_color(img, convert_to_bgr=True)
    cv2.imwrite(output + f'/{i:06d}.png', img)